Verbosity flags for optimisation and compilation now used (GPflow#202)
* Verbosity flags for optimisation and compilation now used.

* Jitter level now read from config.

* Exclude print statements from coverage testing.

* Added missing jitter levels.

* Bugfix in gpmc.py.
Mark van der Wilk authored and jameshensman committed Sep 9, 2016
1 parent 9fbdec8 commit 61b0659
Showing 9 changed files with 25 additions and 15 deletions.
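
The pattern applied throughout the commit: formerly hard-coded prints and constants now consult the settings object built from .gpflowrc. A minimal sketch of the resulting usage (the top-level import path is an assumption for illustration; the attribute names all appear in the diffs below):

    # Sketch only: the 'GPflow._settings' import path is assumed.
    from GPflow._settings import settings

    # Verbosity flags gate the print statements in model.py:
    if settings.verbosity.tf_compile_verb:
        print("compiling tensorflow function...")

    # The jitter added to kernel matrices is read from config
    # instead of being hard-coded to 1e-6:
    jitter_level = settings.numerics.jitter_level
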
.coveragerc: 1 addition, 0 deletions
@@ -10,3 +10,4 @@ exclude_lines =
     raise AssertionError
     raise NotImplementedError
     if __name__ == .__main__.:
+    print
GPflow/_settings.py: 2 additions, 2 deletions
@@ -41,7 +41,6 @@ def namedtuplify(mapping):  # thank you https://gist.github.com/hangtwenty/59604
 
 
 def read_config_file(path=None):
-
     c = configparser.ConfigParser()
 
     if path is None:  # pragma: no cover
@@ -58,8 +57,9 @@ def read_config_file(path=None):
         if c.read(os.path.join(loc, '.gpflowrc')):
             break
     else:
-        assert(c.read(path))
+        assert (c.read(path))
     return c
 
+
 c = read_config_file()
 settings = namedtuplify(c._sections)
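
For reference, the configparser-to-attribute chain that feeds namedtuplify works roughly like this simplified stand-in (GPflow's real helper also parses the raw strings into Python values, which is why flags like tf_compile_verb can be used directly as booleans):

    # Simplified stand-in for namedtuplify(c._sections); not the exact helper.
    import configparser
    from collections import namedtuple

    def namedtuplify(mapping):
        # Recursively turn a dict of dicts into nested namedtuples.
        if isinstance(mapping, dict):
            mapping = {k: namedtuplify(v) for k, v in mapping.items()}
            return namedtuple('settings', mapping.keys())(**mapping)
        return mapping

    c = configparser.ConfigParser()
    c.read_string("[verbosity]\ntf_compile_verb = False\n")
    settings = namedtuplify({s: dict(c[s]) for s in c.sections()})
    print(settings.verbosity.tf_compile_verb)  # 'False' (raw string here)
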
GPflow/conditionals.py: 2 additions, 1 deletion
@@ -16,6 +16,7 @@
 from .tf_hacks import eye
 import tensorflow as tf
 from .scoping import NameScoped
+from ._settings import settings
 
 
 @NameScoped("conditional")
@@ -61,7 +62,7 @@ def conditional(Xnew, X, kern, f, full_cov=False, q_sqrt=None, whiten=False):
     # compute kernel stuff
     num_data = tf.shape(X)[0]
     Kmn = kern.K(X, Xnew)
-    Kmm = kern.K(X) + eye(num_data) * 1e-6
+    Kmm = kern.K(X) + eye(num_data) * settings.numerics.jitter_level
     Lm = tf.cholesky(Kmm)
 
     # Compute the projection matrix A
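
The jitter_level threaded through here exists because kernel matrices built from nearby inputs are numerically singular and tf.cholesky fails on them; a small diagonal offset restores positive definiteness. A standalone numpy illustration (the 1e-6 mirrors the old hard-coded default):

    import numpy as np

    X = np.array([[0.0], [1e-9]])            # two near-identical inputs
    K = np.exp(-0.5 * (X - X.T) ** 2)        # squared-exponential kernel
    try:
        np.linalg.cholesky(K)                # fails: K is singular in float64
    except np.linalg.LinAlgError:
        jitter_level = 1e-6                  # configurable after this commit
        L = np.linalg.cholesky(K + np.eye(2) * jitter_level)  # succeeds
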
GPflow/gpflowrc: 1 addition, 0 deletions
@@ -1,6 +1,7 @@
 [verbosity]
 tf_compile_verb = False
 hmc_verb = True
+optimisation_verb = False
 
 [dtypes]
 float_type = float64
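
With the new flag, optimisation chatter is controlled from the user's .gpflowrc rather than the source. A sketch of checking the resulting value (import path assumed, as above):

    # After setting 'optimisation_verb = True' under [verbosity] in .gpflowrc,
    # the flag surfaces on the settings object (assumed import path):
    from GPflow._settings import settings
    print(settings.verbosity.optimisation_verb)
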
GPflow/gpmc.py: 2 additions, 1 deletion
@@ -21,6 +21,7 @@
 from .priors import Gaussian
 from .mean_functions import Zero
 from .tf_hacks import eye
+from ._settings import settings
 
 
 class GPMC(GPModel):
@@ -74,7 +75,7 @@ def build_likelihood(self):
         """
         K = self.kern.K(self.X)
-        L = tf.cholesky(K) + eye(tf.shape(self.X)[0])*1e-6
+        L = tf.cholesky(K + eye(tf.shape(self.X)[0])*settings.numerics.jitter_level)
         F = tf.matmul(L, self.V) + self.mean_function(self.X)
 
         return tf.reduce_sum(self.likelihood.logp(F, self.Y))
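
This is the "Bugfix in gpmc.py" from the commit message: the old line added jitter to the Cholesky factor after factorising, so the decomposition could fail outright on a singular K, and the factor no longer corresponded to a jittered K. A standalone numpy sketch of the difference:

    # Old (buggy): L = cholesky(K) + jitter*I -- cholesky(K) can fail outright,
    # and (L + jitter*I) is not the factor of K + jitter*I in any case.
    # New (fixed): L = cholesky(K + jitter*I)
    import numpy as np

    K = np.array([[1.0, 1.0], [1.0, 1.0]])   # singular kernel matrix
    jitter = 1e-6 * np.eye(2)                # settings.numerics.jitter_level
    L = np.linalg.cholesky(K + jitter)       # fixed form: succeeds
    assert np.allclose(np.dot(L, L.T), K + jitter)
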
GPflow/model.py: 10 additions, 6 deletions
@@ -19,6 +19,7 @@
 import numpy as np
 import tensorflow as tf
 from . import hmc, tf_hacks
+from ._settings import settings
 import sys
 
 
@@ -143,7 +144,8 @@ def _compile(self, optimizer=None):
         self._session.run(init)
 
         # build tensorflow functions for computing the likelihood
-        print("compiling tensorflow function...")
+        if settings.verbosity.tf_compile_verb:
+            print("compiling tensorflow function...")
         sys.stdout.flush()
 
         def obj(x):
@@ -153,7 +155,8 @@ def obj(x):
                                  feed_dict=feed_dict)
 
         self._objective = obj
-        print("done")
+        if settings.verbosity.tf_compile_verb:
+            print("done")
         sys.stdout.flush()
         self._needs_recompile = False

@@ -270,7 +273,7 @@ def _optimize_np(self, method='L-BFGS-B', tol=None, callback=None,
         if self._needs_recompile:
             self._compile()
 
-        options = dict(disp=True, maxiter=maxiter)
+        options = dict(disp=settings.verbosity.optimisation_verb, maxiter=maxiter)
         if 'max_iters' in kw:  # pragma: no cover
             options['maxiter'] = kw.pop('max_iters')
             import warnings
@@ -293,13 +296,14 @@ def _optimize_np(self, method='L-BFGS-B', tol=None, callback=None,
                               tol=tol,
                               callback=callback,
                               options=options)
-        except (KeyboardInterrupt):
+        except KeyboardInterrupt:
             print("Caught KeyboardInterrupt, setting \
 model with most recent state.")
             self.set_state(obj._previous_x)
             return None
 
-        print("optimization terminated, setting model state")
+        if settings.verbosity.optimisation_verb:
+            print("optimization terminated, setting model state")
         self.set_state(result.x)
         return result

@@ -374,7 +378,7 @@ def predict_f_samples(self, Xnew, num_samples):
         Xnew.
         """
         mu, var = self.build_predict(Xnew, full_cov=True)
-        jitter = tf_hacks.eye(tf.shape(mu)[0]) * 1e-6
+        jitter = tf_hacks.eye(tf.shape(mu)[0]) * settings.numerics.jitter_level
         samples = []
         for i in range(self.num_latent):
             L = tf.cholesky(var[:, :, i] + jitter)
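
The disp entry built into options above is scipy's own verbosity switch for scipy.optimize.minimize, so wiring it to optimisation_verb also silences scipy's convergence report. A standalone sketch of the same call shape:

    # 'optimisation_verb' stands in for settings.verbosity.optimisation_verb.
    import numpy as np
    from scipy.optimize import minimize

    optimisation_verb = False
    options = dict(disp=optimisation_verb, maxiter=100)
    result = minimize(lambda x: np.sum(x ** 2), x0=np.ones(3),
                      method='L-BFGS-B', options=options)
    print(result.x)  # ~[0, 0, 0], printed with no optimizer chatter
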
GPflow/sgpr.py: 4 additions, 3 deletions
@@ -21,6 +21,7 @@
 from .mean_functions import Zero
 from . import likelihoods
 from .tf_hacks import eye
+from ._settings import settings
 
 
 class SGPR(GPModel):
@@ -73,7 +74,7 @@ def build_likelihood(self):
         err = self.Y - self.mean_function(self.X)
         Kdiag = self.kern.Kdiag(self.X)
         Kuf = self.kern.K(self.Z, self.X)
-        Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
+        Kuu = self.kern.K(self.Z) + eye(num_inducing) * settings.numerics.jitter_level
         L = tf.cholesky(Kuu)
         sigma = tf.sqrt(self.likelihood.variance)
 
@@ -105,7 +106,7 @@ def build_predict(self, Xnew, full_cov=False):
         num_inducing = tf.shape(self.Z)[0]
         err = self.Y - self.mean_function(self.X)
         Kuf = self.kern.K(self.Z, self.X)
-        Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
+        Kuu = self.kern.K(self.Z) + eye(num_inducing) * settings.numerics.jitter_level
         Kus = self.kern.K(self.Z, Xnew)
         sigma = tf.sqrt(self.likelihood.variance)
         L = tf.cholesky(Kuu)
@@ -170,7 +171,7 @@ def build_common_terms(self):
         err = self.Y - self.mean_function(self.X)  # size N x R
         Kdiag = self.kern.Kdiag(self.X)
         Kuf = self.kern.K(self.Z, self.X)
-        Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
+        Kuu = self.kern.K(self.Z) + eye(num_inducing) * settings.numerics.jitter_level
 
         Luu = tf.cholesky(Kuu)  # => Luu^T Luu = Kuu
         V = tf.matrix_triangular_solve(Luu, Kuf)  # => V^T V = Qff
GPflow/svgp.py: 2 additions, 1 deletion
@@ -21,6 +21,7 @@
 from . import transforms, conditionals, kullback_leiblers
 from .mean_functions import Zero
 from .tf_hacks import eye
+from ._settings import settings
 
 
 class MinibatchData(DataHolder):
@@ -111,7 +112,7 @@ def build_prior_KL(self):
             else:
                 KL = kullback_leiblers.gauss_kl_white(self.q_mu, self.q_sqrt)
         else:
-            K = self.kern.K(self.Z) + eye(self.num_inducing) * 1e-6
+            K = self.kern.K(self.Z) + eye(self.num_inducing) * settings.numerics.jitter_level
             if self.q_diag:
                 KL = kullback_leiblers.gauss_kl_diag(self.q_mu, self.q_sqrt, K)
             else:
testing/test_transforms.py: 1 addition, 1 deletion
@@ -53,11 +53,11 @@ class TestLowerTriTransform(unittest.TestCase):
     """
     Some extra tests for the LowerTriangle transformation.
     """
+
     def setUp(self):
         self.t = GPflow.transforms.LowerTriangular(3)
 
     def testErrors(self):
-
         self.t.free_state_size((6, 6, 3))
         with self.assertRaises(ValueError):
             self.t.free_state_size((6, 6, 2))
