Changes from all commits (22 commits)
bd90b03
renamed the previous hmc routine, changed storage to column-major order
Red-Portal Dec 21, 2019
891eb85
Added effective sample size diagnostic, StatsBase dependency
Red-Portal Dec 21, 2019
7df6981
added the No-U-Turn sampler, nuts, based on AdvancedHMC.jl
Red-Portal Dec 21, 2019
f968d5c
CompatHelper: bump compat for "Distributions" to "0.22"
github-actions[bot] Jan 6, 2020
26bf6a5
fix Documenter versions conflict
maximerischard Jan 12, 2020
cb72748
CompatHelper: bump compat for "SpecialFunctions" to "0.10"
github-actions[bot] Jan 20, 2020
3d1d1e0
Update Project.toml
fairbrot Jan 22, 2020
9561252
add a sinusoid to underlying function in optim tests
maximerischard Jan 25, 2020
6090cb7
CompatHelper: bump compat for "Optim" to "0.20"
github-actions[bot] Jan 23, 2020
1bf5f47
CompatHelper: bump compat for "RecipesBase" to "0.8"
github-actions[bot] Feb 6, 2020
db4b978
Merge branch 'master' of https://github.com/STOR-i/GaussianProcesses.…
Red-Portal Feb 10, 2020
0c56059
removed effective sample size calculation, StatsBase dependency
Red-Portal Feb 10, 2020
aee587e
removed StatsBase dependency
Red-Portal Feb 10, 2020
5c7b351
removed effective sample size calculation, StatsBase dependency
Red-Portal Feb 10, 2020
9a4ebc7
Merge branch 'advancedHMC' of github.com:Red-Portal/GaussianProcesses…
Red-Portal Feb 10, 2020
4f5d333
fixed bug in the elliptical slice implementation
Red-Portal Feb 10, 2020
a2ccbfb
Merge pull request #138 from STOR-i/compathelper/new_version/2020-02-…
chris-nemeth Feb 10, 2020
f8b65e3
Merge pull request #139 from Red-Portal/master
thomaspinder Feb 11, 2020
139a91b
Update notebook with ESS
thomaspinder Feb 11, 2020
40f3017
Version update
fairbrot Feb 12, 2020
a80e44c
fixed StanHMCAdaptor deprecation warning
Red-Portal Mar 11, 2020
f3795e7
Merge branch 'master' of https://github.com/STOR-i/GaussianProcesses.…
Red-Portal Mar 11, 2020
5 changes: 4 additions & 1 deletion .travis.yml
@@ -5,6 +5,9 @@ os:
   - osx
 julia:
   - 1.0
+  - 1.1
+  - 1.2
+  - 1.3
   - nightly
 notifications:
   email: false
@@ -21,7 +24,7 @@ matrix:
 jobs:
   include:
     - stage: "Documentation"
-      julia: 1.0
+      julia: 1.3
       os: linux
       script:
         - julia --project=docs/ -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()'
10 changes: 5 additions & 5 deletions Project.toml
@@ -1,7 +1,7 @@
name = "GaussianProcesses"
uuid = "891a1506-143c-57d2-908e-e1f8e92e6de9"
license = "MIT"
version = "0.11.0"
version = "0.11.2"

[deps]
AdvancedHMC = "0bf59076-c3b1-5ca4-86bd-e02cd72cde3d"
@@ -28,18 +28,18 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

[compat]
Distances = "0.7, 0.8"
Distributions = "0.16, 0.17, 0.18, 0.19, 0.20, 0.21"
Distributions = "0.16, 0.17, 0.18, 0.19, 0.20, 0.21, 0.22"
Documenter = "0.24"
ElasticArrays = "0.2, 0.3, 0.4, 1.0"
ElasticPDMats = "0.2"
FastGaussQuadrature = "0.3, 0.4"
ForwardDiff = "0.10"
Optim = "0.19"
Optim = "0.19, 0.20"
PDMats = "0.9"
ProgressMeter = "1.2"
RecipesBase = "0.6, 0.7"
RecipesBase = "0.6, 0.7, 0.8"
ScikitLearnBase = "0.4, 0.5"
SpecialFunctions = "0.7, 0.8, 0.9"
SpecialFunctions = "0.7, 0.8, 0.9, 0.10"
StaticArrays = "0.6, 0.9, 0.12"
StatsFuns = "0.7, 0.8, 0.9"
Zygote = "0.3, 0.4"
10 changes: 5 additions & 5 deletions docs/Manifest.toml
@@ -18,10 +18,10 @@ uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.1"

[[Documenter]]
deps = ["Base64", "DocStringExtensions", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"]
git-tree-sha1 = "d45c163c7a3ae293c15361acc52882c0f853f97c"
deps = ["Base64", "Dates", "DocStringExtensions", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"]
git-tree-sha1 = "d497bcc45bb98a1fbe19445a774cfafeabc6c6df"
uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
version = "0.23.4"
version = "0.24.5"

[[InteractiveUtils]]
deps = ["Markdown"]
@@ -48,9 +48,9 @@ uuid = "a63ad114-7e13-5084-954f-fe012c677804"

[[Parsers]]
deps = ["Dates", "Test"]
git-tree-sha1 = "0139ba59ce9bc680e2925aec5b7db79065d60556"
git-tree-sha1 = "d112c19ccca00924d5d3a38b11ae2b4b268dda39"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "0.3.10"
version = "0.3.11"

[[Pkg]]
deps = ["Dates", "LibGit2", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"]
2 changes: 1 addition & 1 deletion docs/Project.toml
@@ -2,4 +2,4 @@
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"

[compat]
Documenter = "~0.23"
Documenter = "0.24"
211 changes: 83 additions & 128 deletions notebooks/Regression.ipynb

Large diffs are not rendered by default.
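The notebook diff is collapsed here, but per the "Update notebook with ESS" commit it now demonstrates elliptical slice sampling. A hypothetical minimal cell in that spirit (data, priors, and settings are illustrative, not the notebook's actual content):

```julia
using GaussianProcesses, Distributions, Random
Random.seed!(1)
x = 2π .* rand(20)
y = sin.(x) .+ 0.05 .* randn(20)
gp = GP(x, y, MeanZero(), SE(0.0, 0.0), -2.0)
set_priors!(gp.kernel, [Normal(0, 1), Normal(0, 1)])  # ess requires Normal priors
chain = ess(gp; nIter=1000, burn=100, noise=false)    # noise=false since no prior is set on logNoise here
```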

2 changes: 1 addition & 1 deletion src/GaussianProcesses.jl
@@ -14,7 +14,7 @@ import PDMats: dim, Matrix, diag, pdadd!, *, \, inv, logdet, eigmax, eigmin, whi…
export GPBase, GP, GPE, GPA, ElasticGPE, Approx, predict_f, predict_y, Kernel, Likelihood, CompositeKernel, SumKernel, ProdKernel, Masked, FixedKernel, fix, Noise, Const, SE, SEIso, SEArd, Periodic, Poly, RQ, RQIso, RQArd, Lin, LinIso, LinArd, Matern, Mat12Iso, Mat12Ard, Mat32Iso, Mat32Ard, Mat52Iso, Mat52Ard, #kernel functions
MeanZero, MeanConst, MeanLin, MeanPoly, SumMean, ProdMean, MeanPeriodic, #mean functions
GaussLik, BernLik, ExpLik, StuTLik, PoisLik, BinLik, #likelihood functions
-       mcmc, ess, lss, optimize!, vi, var_exp, dv_var_exp, elbo, initialise_Q, #inference functions
+       mcmc, ess, hmc, nuts, nuts_hamiltonian, lss, optimize!, vi, var_exp, dv_var_exp, elbo, initialise_Q, #inference functions
set_priors!,set_params!, update_target!, autodiff, update_Q!
using ForwardDiff: GradientConfig, Dual, partials, copyto!, Chunk
import ForwardDiff: seed!
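With `hmc`, `nuts`, and `nuts_hamiltonian` now exported alongside `mcmc` and `ess`, the samplers can be called directly. A minimal sketch against this branch (data and priors are made up for illustration):

```julia
using GaussianProcesses, Distributions

x = collect(range(0, stop=2π, length=25))
y = sin.(x) .+ 0.1 .* randn(25)
gp = GP(x, y, MeanZero(), SE(0.0, 0.0), -1.0)
set_priors!(gp.kernel, [Normal(0, 2), Normal(0, 2)])

chain_hmc  = hmc(gp; nIter=1000, burn=100, thin=2)  # plain HMC with a fixed step size ε
chain_nuts = nuts(gp; nIter=1000, burn=100)         # NUTS via AdvancedHMC, step size auto-tuned
```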
139 changes: 115 additions & 24 deletions src/mcmc.jl
@@ -1,13 +1,78 @@
using ProgressMeter

+function mcmc(gp::GPBase; nIter::Int=1000, burn::Int=100, thin::Int=1, ε::Float64=0.1,
+              Lmin::Int=5, Lmax::Int=15, lik::Bool=true, noise::Bool=true,
+              domean::Bool=true, kern::Bool=true)
+    return hmc(gp, nIter=nIter, burn=burn, thin=thin, ε=ε, Lmin=Lmin, Lmax=Lmax,
+               lik=lik, noise=noise, domean=domean, kern=kern)
+end
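`mcmc` is kept as a thin wrapper around the renamed `hmc`, so existing user code continues to work; note that the wrapper's default `burn` rises from 1 to 100. E.g. (hypothetical `gp`):

```julia
chain = mcmc(gp; nIter=1000, ε=0.05)  # identical to hmc(gp; nIter=1000, ε=0.05)
```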

"""
mcmc(gp::GPBase; kwargs...)
nuts_hamiltonian(gp::GPBase, metric::AdvancedHMC.AbstractMetric)

Runs Hamiltonian Monte Carlo algorithm for estimating the hyperparameters of Gaussian process `GPE` and the latent function in the case of `GPA`.
Generate Hamiltonian for the GP target. A stupid API hack but it works?
"""
function mcmc(gp::GPBase; nIter::Int=1000, burn::Int=1, thin::Int=1, ε::Float64=0.1,
Lmin::Int=5, Lmax::Int=15, lik::Bool=true, noise::Bool=true,
domean::Bool=true, kern::Bool=true)
function nuts_hamiltonian(gp::GPBase; lik::Bool=true, noise::Bool=true, domean::Bool=true, kern::Bool=true,
metric=DiagEuclideanMetric(num_params(gp; get_params_kwargs(gp, domean=domean, kern=kern, noise=noise, lik=lik)...)))
precomp = init_precompute(gp)
params_kwargs = get_params_kwargs(gp; domean=domean, kern=kern, noise=noise, lik=lik)
function calc_target_and_dtarget!(θ::AbstractVector)
set_params!(gp, θ; params_kwargs...)
# Cholesky exceptions are handled by DynamicHMC
update_target_and_dtarget!(gp, precomp; params_kwargs...)
return (gp.target, gp.dtarget)
end

function calc_target!(θ::AbstractVector)
set_params!(gp, θ; params_kwargs...)
update_target!(gp; params_kwargs...)
return gp.target
end
return Hamiltonian(metric, calc_target!, calc_target_and_dtarget!)
end
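Because the metric and the Hamiltonian are ordinary keyword arguments, they can be constructed once and reused; a sketch with a dense mass matrix (assuming a fitted `gp`, and that the unexported `get_params` is reachable by module path as shown):

```julia
using AdvancedHMC

θ = GaussianProcesses.get_params(gp)      # assumption: unexported helper accessed via module path
metric = DenseEuclideanMetric(length(θ))  # dense mass matrix instead of the diagonal default
h = nuts_hamiltonian(gp; metric=metric)
chain = nuts(gp; metric=metric, hamiltonian=h)
```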

"""
nuts(gp::GPBase; kwargs...)

Runs Hamiltonian Monte Carlo algorithm for estimating the hyperparameters of
Gaussian process `GPE` and the latent function in the case of `GPA`.
Refer to AdvancedHMC.jl for more info about the keyword options.
"""
function nuts(gp::GPBase; nIter::Int=1000, burn::Int=100, thin::Int=1,
lik::Bool=true, noise::Bool=true, domean::Bool=true, kern::Bool=true,
metric=DiagEuclideanMetric(num_params(gp; get_params_kwargs(gp, domean=domean, kern=kern, noise=noise, lik=lik)...)),
hamiltonian=nuts_hamiltonian(gp; metric=metric),
ε::Float64=find_good_eps(hamiltonian, get_params(gp; get_params_kwargs(gp, domean=domean, kern=kern, noise=noise, lik=lik)...)),
maxDepth::Int64=10, δ::Float64=0.8, integrator=Leapfrog(ε),
proposals=NUTS{MultinomialTS, GeneralisedNoUTurn}(integrator, maxDepth),
adaptor=StanHMCAdaptor(Preconditioner(metric), NesterovDualAveraging(δ, integrator)),
progress=true)
params_kwargs = get_params_kwargs(gp; domean=domean, kern=kern, noise=noise, lik=lik)
θ_init = get_params(gp; params_kwargs...)
dim = length(θ_init)
post, stats = sample(hamiltonian, proposals, θ_init, nIter - burn, adaptor,
burn; drop_warmup=true, progress=progress, verbose=false)
post = hcat(post...)
post = post[:,1:thin:end]
set_params!(gp, θ_init; params_kwargs...)

step_stats = [[step_stat.acceptance_rate, step_stat.tree_depth] for step_stat in stats]
avg_accept, avg_depth = mean(step_stats)
ε = stats[end-1].step_size
@printf("Number of iterations = %d, Thinning = %d, Burn-in = %d \n", nIter,thin,burn)
@printf("Step size = %f, Average tree depth = %f \n", ε,avg_depth)
@printf("Acceptance rate: %f \n", avg_accept)
return post
end
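The keyword defaults above expose AdvancedHMC's tuning knobs directly; for instance, raising the dual-averaging target `δ` forces smaller, safer steps, and `maxDepth` bounds the trajectory doubling. A sketch (hypothetical `gp`):

```julia
chain = nuts(gp; nIter=2000, burn=1000, δ=0.9, maxDepth=12)  # tighter acceptance target, deeper trees allowed
```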

"""
hmc(gp::GPBase; kwargs...)

Runs Hamiltonian Monte Carlo algorithm for estimating the hyperparameters of
Gaussian process `GPE` and the latent function in the case of `GPA`.
"""
function hmc(gp::GPBase; nIter::Int=1000, burn::Int=100, thin::Int=1, ε::Float64=0.1,
Lmin::Int=5, Lmax::Int=15, lik::Bool=true, noise::Bool=true,
domean::Bool=true, kern::Bool=true)
precomp = init_precompute(gp)
params_kwargs = get_params_kwargs(gp; domean=domean, kern=kern, noise=noise, lik=lik)
count = 0
@@ -33,8 +98,8 @@ function mcmc(gp::GPBase; nIter::Int=1000, burn::Int=1, thin::Int=1, ε::Float64
     θ_cur = get_params(gp; params_kwargs...)
     D = length(θ_cur)
     leapSteps = 0 #accumulator to track number of leap-frog steps
-    post = Array{Float64}(undef, nIter, D) #posterior samples
-    post[1,:] = θ_cur
+    post = Array{Float64}(undef, D, nIter) #posterior samples
+    post[:,1] = θ_cur

     @assert calc_target!(gp, θ_cur)
     target_cur, grad_cur = gp.target, gp.dtarget
@@ -61,7 +126,7 @@ function mcmc(gp::GPBase; nIter::Int=1000, burn::Int=1, thin::Int=1, ε::Float64
         ν -= 0.5*ε * grad

         if reject
-            post[t,:] = θ_cur
+            post[:,t] = θ_cur
         else
             α = target - 0.5 * ν'ν - target_cur + 0.5 * ν_cur'ν_cur
             u = log(rand())
@@ -72,18 +137,41 @@ function mcmc(gp::GPBase; nIter::Int=1000, burn::Int=1, thin::Int=1, ε::Float64
                 target_cur = target
                 grad_cur = grad
             end
-            post[t,:] = θ_cur
+            post[:,t] = θ_cur
         end
     end
-    post = post[burn:thin:end,:]
+    post = post[:,burn:thin:end]
     set_params!(gp, θ_cur; params_kwargs...)
     @printf("Number of iterations = %d, Thinning = %d, Burn-in = %d \n", nIter, thin, burn)
     @printf("Step size = %f, Average number of leapfrog steps = %f \n", ε, leapSteps/nIter)
     println("Number of function calls: ", count)
     @printf("Acceptance rate: %f \n", num_acceptances/nIter)
-    return post'
+    return post
 end
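With column-major storage each draw is now a contiguous column and the final transpose is gone; as before, the returned matrix holds one sample per column. A quick indexing sketch (hypothetical `gp`):

```julia
chain = hmc(gp)            # size(chain) == (n_params, n_kept_samples)
first_draw = chain[:, 1]   # one full hyperparameter vector
trace_3    = chain[3, :]   # trace of the third hyperparameter
```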

+function get_joint_priors(gp::GPE; noise::Bool=true, domean::Bool=true, kern::Bool=true)
+    priors = UnivariateDistribution[]
+    if noise && num_params(gp.logNoise) != 0
+        noise_priors = get_priors(gp.logNoise)
+        @assert !isempty(noise_priors) "prior distributions of logNoise should be set"
+        append!(priors, noise_priors)
+    end
+    if domean && num_params(gp.mean) != 0
+        mean_priors = get_priors(gp.mean)
+        @assert !isempty(mean_priors) "prior distributions of mean should be set"
+        append!(priors, mean_priors)
+    end
+    if kern && num_params(gp.kernel) != 0
+        kernel_priors = get_priors(gp.kernel)
+        @assert !isempty(kernel_priors) "prior distributions of kernel should be set"
+        append!(priors, kernel_priors)
+    end
+    @assert all([typeof(prior) <: Normal for prior in priors]) "ess requires prior distributions to be Normal"
+    mu = mean.(priors)
+    sigma = std.(priors)
+    joint_prior = MvNormal(mu, sigma)
+    return joint_prior
+end
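The joint prior assembled above is just the product of the per-parameter Normals; note that `MvNormal(mu, sigma)` with a vector `sigma` treats it as per-dimension standard deviations (diagonal covariance). A hedged sketch with hypothetical priors:

```julia
using Distributions
priors = [Normal(0.0, 1.0), Normal(-2.0, 0.5)]  # e.g. a kernel and a noise prior
joint  = MvNormal(mean.(priors), std.(priors))  # diagonal MvNormal built from std devs
v      = rand(joint)                            # one joint draw of all hyperparameters
```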

"""
ess(gp::GPBase; kwargs...)
@@ -95,10 +183,13 @@ Journal of Machine Learning Research 9 (2010): 541-548.

Requires hyperparameter priors to be Gaussian.
"""
-function ess(gp::GPE; nIter::Int=1000, burn::Int=1, thin::Int=1, lik::Bool=true,
-             noise::Bool=true, domean::Bool=true, kern::Bool=true)
-    params_kwargs = get_params_kwargs(gp; domean=domean, kern=kern, noise=noise, lik=lik)
+function ess(gp::GPE; nIter::Int=1000, burn::Int=1, thin::Int=1, noise::Bool=true,
+             domean::Bool=true, kern::Bool=true, lik::Bool=false)
+    params_kwargs = get_params_kwargs(gp; domean=domean, kern=kern, noise=noise, lik=false)
     count = 0
+    prior = get_joint_priors(gp; params_kwargs...)
+    means = mean(prior)

     function calc_target!(θ::AbstractVector)
         count += 1
         try
@@ -117,44 +208,44 @@ function ess(gp::GPE; nIter::Int=1000, burn::Int=1, thin::Int=1, lik::Bool=true,
     end

     function sample!(f::AbstractVector)
-        v = sample_params(gp; params_kwargs...)
+        v = rand(prior) - means
         u = rand()
         logy = calc_target!(f) + log(u);
         θ = rand()*2*π;
         θ_min = θ - 2*π;
         θ_max = θ;
-        f_prime = f * cos(θ) + v * sin(θ);
+        f_prime = (f - means) * cos(θ) + v * sin(θ);
         props = 1
-        while calc_target!(f_prime) <= logy
+        while calc_target!(f_prime + means) <= logy
             props += 1
             if θ < 0
                 θ_min = θ;
             else
                 θ_max = θ;
             end
             θ = rand() * (θ_max - θ_min) + θ_min;
-            f_prime = f * cos(θ) + v * sin(θ);
+            f_prime = (f - means) * cos(θ) + v * sin(θ);
         end
-        return f_prime, props
+        return f_prime + means, props
     end

     total_proposals = 0
     θ_cur = get_params(gp; params_kwargs...)
     D = length(θ_cur)
-    post = Array{Float64}(undef, nIter, D)
+    post = Array{Float64}(undef, D, nIter)

     for i = 1:nIter
         θ_cur, num_proposals = sample!(θ_cur)
-        post[i,:] = θ_cur
+        post[:,i] = θ_cur
         total_proposals += num_proposals
     end

-    post = post[burn:thin:end,:]
+    post = post[:,burn:thin:end]
     set_params!(gp, θ_cur; params_kwargs...)
     @printf("Number of iterations = %d, Thinning = %d, Burn-in = %d \n", nIter, thin, burn)
     println("Number of function calls: ", count)
     @printf("Acceptance rate: %f \n", nIter / total_proposals)
-    return post'
+    return post
 end
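End to end, the recentred sampler only needs Normal priors on every block it samples; a hedged sketch (the `set_priors!` call on `logNoise` is an assumption about the parameter API, mirroring the `get_priors(gp.logNoise)` call above):

```julia
set_priors!(gp.kernel, [Normal(0.0, 2.0), Normal(0.0, 2.0)])
set_priors!(gp.logNoise, [Normal(-2.0, 1.0)])  # assumed to work like priors on kernel parameters
chain = ess(gp; nIter=2000, burn=200, thin=2)  # D × N, one sample per column
```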

