Dataset schema (column, type, observed range):

| column | type | values |
|---|---|---|
| context_start_lineno | int64 | 1 to 913 |
| line_no | int64 | 16 to 984 |
| repo | string | 5 classes |
| id | int64 | 0 to 416 |
| target_function_prompt | string | lengths 201 to 13.6k |
| function_signature | string | lengths 201 to 13.6k |
| solution_position | list | length 2 |
| raw_solution | string | lengths 201 to 13.6k |
| focal_code | string | lengths 201 to 13.6k |
| function_name | string | lengths 2 to 38 |
| start_line | int64 | 1 to 913 |
| end_line | int64 | 16 to 984 |
| file_path | string | lengths 10 to 52 |
| context | string | lengths 4.52k to 9.85k |
context_start_lineno: 250 | line_no: 319 | repo: Turing.jl | id: 400
target_function_prompt (function_signature, raw_solution, and focal_code are identical):
function StatsBase.coeftable(m::ModeResult; level::Real=0.95, numerrors_warnonly::Bool=true)
# Get columns for coeftable.
terms = string.(StatsBase.coefnames(m))
estimates = m.values.array[:, 1]
# If numerrors_warnonly is true, and if either the information matrix is singular or has
# negative entries on its diagonal, then `notes` will be a list of strings for each
# value in `m.values`, explaining why the standard error is NaN.
notes = nothing
local stderrors
if numerrors_warnonly
infmat = StatsBase.informationmatrix(m)
local vcov
try
vcov = inv(infmat)
catch e
if isa(e, LinearAlgebra.SingularException)
stderrors = fill(NaN, length(m.values))
notes = fill("Information matrix is singular", length(m.values))
else
rethrow(e)
end
else
vars = LinearAlgebra.diag(vcov)
stderrors = eltype(vars)[]
if any(x -> x < 0, vars)
notes = []
end
for var in vars
if var >= 0
push!(stderrors, sqrt(var))
if notes !== nothing
push!(notes, "")
end
else
push!(stderrors, NaN)
if notes !== nothing
push!(notes, "Negative variance")
end
end
end
end
else
stderrors = StatsBase.stderror(m)
end
zscore = estimates ./ stderrors
p = map(z -> StatsAPI.pvalue(Distributions.Normal(), z; tail=:both), zscore)
# Confidence interval (CI)
q = Statistics.quantile(Distributions.Normal(), (1 + level) / 2)
ci_low = estimates .- q .* stderrors
ci_high = estimates .+ q .* stderrors
level_ = 100 * level
level_percentage = isinteger(level_) ? Int(level_) : level_
cols = Vector[estimates, stderrors, zscore, p, ci_low, ci_high]
colnms = [
"Coef.",
"Std. Error",
"z",
"Pr(>|z|)",
"Lower $(level_percentage)%",
"Upper $(level_percentage)%",
]
if notes !== nothing
push!(cols, notes)
push!(colnms, "Error notes")
end
return StatsBase.CoefTable(cols, colnms, terms)
end
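A minimal usage sketch for `coeftable` on a mode-estimation result. The model, data, and the 0.9 level are illustrative and not part of this dataset row; `maximum_likelihood` and `coeftable` are used as in the test context below.

```julia
using Turing, StatsBase

# Illustrative toy model: two observations of a Normal with unknown mean.
@model function demo(x)
    m ~ Normal(0, 1)
    for i in eachindex(x)
        x[i] ~ Normal(m, 1)
    end
end

mle = maximum_likelihood(demo([1.5, 2.0]))
tab = coeftable(mle; level=0.9)  # 90% confidence intervals instead of the default 95%
tab isa StatsBase.CoefTable      # true; columns: Coef., Std. Error, z, Pr(>|z|), Lower 90%, Upper 90%
```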
solution_position: [250, 319]
function_name: StatsBase.coeftable
start_line: 250 | end_line: 319
file_path: src/optimisation/Optimisation.jl
context:
#FILE: Turing.jl/test/optimisation/Optimisation.jl
##CHUNK 1
return y ~ MvNormal(a .* x .+ b .* x, 1)
end
model = collinear(xs, ys)
mle_estimate = Turing.Optimisation.estimate_mode(model, MLE())
tab = coeftable(mle_estimate)
@assert isnan(tab.cols[2][1])
@assert tab.colnms[end] == "Error notes"
@assert occursin("singular", tab.cols[end][1])
end
@testset "Negative variance" begin
# A model for which the likelihood has a saddle point at x=0, y=0.
# Creating an optimisation result for this model at the x=0, y=0 results in negative
# variance for one of the variables, because the variance is calculated as the
# diagonal of the inverse of the Hessian.
@model function saddle_model()
x ~ Normal(0, 1)
y ~ Normal(x, 1)
@addlogprob! x^2 - y^2
##CHUNK 2
end
end
@testset "StatsBase integration" begin
Random.seed!(54321)
mle_est = maximum_likelihood(gdemo_default)
# Calculated based on the two data points in gdemo_default, [1.5, 2.0]
true_values = [0.0625, 1.75]
@test coefnames(mle_est) == [:s, :m]
diffs = coef(mle_est).array - [0.0625031; 1.75001]
@test all(isapprox.(diffs, 0.0, atol=0.1))
infomat = [2/(2 * true_values[1]^2) 0.0; 0.0 2/true_values[1]]
@test all(isapprox.(infomat - informationmatrix(mle_est), 0.0, atol=0.01))
vcovmat = [2 * true_values[1]^2/2 0.0; 0.0 true_values[1]/2]
@test all(isapprox.(vcovmat - vcov(mle_est), 0.0, atol=0.01))
##CHUNK 3
@testset "Negative variance" begin
# A model for which the likelihood has a saddle point at x=0, y=0.
# Creating an optimisation result for this model at the x=0, y=0 results in negative
# variance for one of the variables, because the variance is calculated as the
# diagonal of the inverse of the Hessian.
@model function saddle_model()
x ~ Normal(0, 1)
y ~ Normal(x, 1)
@addlogprob! x^2 - y^2
return nothing
end
m = saddle_model()
ctx = Turing.Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
optim_ld = Turing.Optimisation.OptimLogDensity(m, ctx)
vals = Turing.Optimisation.NamedArrays.NamedArray([0.0, 0.0])
m = Turing.Optimisation.ModeResult(vals, nothing, 0.0, optim_ld)
ct = coeftable(m)
@assert isnan(ct.cols[2][1])
@assert ct.colnms[end] == "Error notes"
##CHUNK 4
diffs = coef(mle_est).array - [0.0625031; 1.75001]
@test all(isapprox.(diffs, 0.0, atol=0.1))
infomat = [2/(2 * true_values[1]^2) 0.0; 0.0 2/true_values[1]]
@test all(isapprox.(infomat - informationmatrix(mle_est), 0.0, atol=0.01))
vcovmat = [2 * true_values[1]^2/2 0.0; 0.0 true_values[1]/2]
@test all(isapprox.(vcovmat - vcov(mle_est), 0.0, atol=0.01))
ctable = coeftable(mle_est)
@test ctable isa StatsBase.CoefTable
s = stderror(mle_est).array
@test all(isapprox.(s - [0.06250415643292194, 0.17677963626053916], 0.0, atol=0.01))
@test coefnames(mle_est) == Distributions.params(mle_est)
@test vcov(mle_est) == inv(informationmatrix(mle_est))
@test isapprox(loglikelihood(mle_est), -0.0652883561466624, atol=0.01)
##CHUNK 5
ctable = coeftable(mle_est)
@test ctable isa StatsBase.CoefTable
s = stderror(mle_est).array
@test all(isapprox.(s - [0.06250415643292194, 0.17677963626053916], 0.0, atol=0.01))
@test coefnames(mle_est) == Distributions.params(mle_est)
@test vcov(mle_est) == inv(informationmatrix(mle_est))
@test isapprox(loglikelihood(mle_est), -0.0652883561466624, atol=0.01)
end
@testset "Linear regression test" begin
@model function regtest(x, y)
beta ~ MvNormal(Zeros(2), I)
mu = x * beta
return y ~ MvNormal(mu, I)
end
Random.seed!(987)
#FILE: Turing.jl/test/ext/OptimInterface.jl
##CHUNK 1
@test coefnames(mle_est) == [:s, :m]
diffs = coef(mle_est).array - [0.0625031; 1.75001]
@test all(isapprox.(diffs, 0.0, atol=0.1))
infomat = [2/(2 * true_values[1]^2) 0.0; 0.0 2/true_values[1]]
@test all(isapprox.(infomat - informationmatrix(mle_est), 0.0, atol=0.01))
vcovmat = [2 * true_values[1]^2/2 0.0; 0.0 true_values[1]/2]
@test all(isapprox.(vcovmat - vcov(mle_est), 0.0, atol=0.01))
ctable = coeftable(mle_est)
@test ctable isa StatsBase.CoefTable
s = stderror(mle_est).array
@test all(isapprox.(s - [0.06250415643292194, 0.17677963626053916], 0.0, atol=0.01))
@test coefnames(mle_est) == Distributions.params(mle_est)
@test vcov(mle_est) == inv(informationmatrix(mle_est))
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
@model function dynamic_model_for_ess()
b ~ Bernoulli()
x_length = b ? 1 : 2
x = Vector{Float64}(undef, x_length)
for i in 1:x_length
x[i] ~ Normal(i, 1.0)
end
end
m = dynamic_model_for_ess()
chain = sample(m, Gibbs(:b => PG(10), :x => ESS()), 2000; discard_initial=100)
means = Dict(:b => 0.5, "x[1]" => 1.0, "x[2]" => 2.0)
stds = Dict(:b => 0.5, "x[1]" => 1.0, "x[2]" => 1.0)
for vn in keys(means)
@test isapprox(mean(skipmissing(chain[:, vn, 1])), means[vn]; atol=0.1)
@test isapprox(std(skipmissing(chain[:, vn, 1])), stds[vn]; atol=0.1)
end
end
@testset "dynamic model with dot tilde" begin
#FILE: Turing.jl/test/test_utils/numerical_tests.jl
##CHUNK 1
function check_dist_numerical(dist, chn; mean_tol=0.1, var_atol=1.0, var_tol=0.5)
@testset "numerical" begin
# Extract values.
chn_xs = Array(chn[1:2:end, namesingroup(chn, :x), :])
# Check means.
dist_mean = mean(dist)
mean_shape = size(dist_mean)
if !all(isnan, dist_mean) && !all(isinf, dist_mean)
chn_mean = vec(mean(chn_xs; dims=1))
chn_mean = length(chn_mean) == 1 ? chn_mean[1] : reshape(chn_mean, mean_shape)
atol_m = if length(chn_mean) > 1
mean_tol * length(chn_mean)
else
max(mean_tol, mean_tol * chn_mean)
end
@test chn_mean ≈ dist_mean atol = atol_m
end
# Check variances.
#CURRENT FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
Return a table with coefficients and related statistics of the model. level determines the
level for confidence intervals (by default, 95%).
In case the `numerrors_warnonly` argument is true (the default) numerical errors encountered
during the computation of the standard errors will be caught and reported in an extra
"Error notes" column.
"""
function StatsBase.informationmatrix(
m::ModeResult; hessian_function=ForwardDiff.hessian, kwargs...
)
# Calculate Hessian and information matrix.
# Convert the values to their unconstrained states to make sure the
# Hessian is computed with respect to the untransformed parameters.
linked = DynamicPPL.istrans(m.f.ldf.varinfo)
if linked
new_vi = DynamicPPL.invlink!!(m.f.ldf.varinfo, m.f.ldf.model)
##CHUNK 2
end
function Base.show(io::IO, m::ModeResult)
return show(io, m.values.array)
end
# Various StatsBase methods for ModeResult
"""
StatsBase.coeftable(m::ModeResult; level::Real=0.95, numerrors_warnonly::Bool=true)
Return a table with coefficients and related statistics of the model. level determines the
level for confidence intervals (by default, 95%).
In case the `numerrors_warnonly` argument is true (the default) numerical errors encountered
during the computation of the standard errors will be caught and reported in an extra
"Error notes" column.
"""

context_start_lineno: 321 | line_no: 348 | repo: Turing.jl | id: 401
target_function_prompt (function_signature, raw_solution, and focal_code are identical):
function StatsBase.informationmatrix(
m::ModeResult; hessian_function=ForwardDiff.hessian, kwargs...
)
# Calculate Hessian and information matrix.
# Convert the values to their unconstrained states to make sure the
# Hessian is computed with respect to the untransformed parameters.
linked = DynamicPPL.istrans(m.f.ldf.varinfo)
if linked
new_vi = DynamicPPL.invlink!!(m.f.ldf.varinfo, m.f.ldf.model)
new_f = OptimLogDensity(m.f.ldf.model, new_vi, m.f.ldf.context)
m = Accessors.@set m.f = new_f
end
# Calculate the Hessian, which is the information matrix because the negative of the log
# likelihood was optimized
varnames = StatsBase.coefnames(m)
info = hessian_function(m.f, m.values.array[:, 1])
# Link it back if we invlinked it.
if linked
new_vi = DynamicPPL.link!!(m.f.ldf.varinfo, m.f.ldf.model)
new_f = OptimLogDensity(m.f.ldf.model, new_vi, m.f.ldf.context)
m = Accessors.@set m.f = new_f
end
return NamedArrays.NamedArray(info, (varnames, varnames))
end
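A short sketch of how this information matrix feeds the other StatsBase accessors. It assumes `mle` is a fitted `ModeResult` (e.g. from `maximum_likelihood`); the relationships shown are the ones exercised in the test context above.

```julia
using Turing, StatsBase

info = informationmatrix(mle)  # NamedArray: Hessian of the optimised negative log density at the mode
V    = vcov(mle)               # defined as inv(informationmatrix(mle))
se   = stderror(mle)           # square roots of the diagonal of vcov(mle)
```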
solution_position: [321, 348]
function_name: StatsBase.informationmatrix
start_line: 321 | end_line: 348
file_path: src/optimisation/Optimisation.jl
context:
#FILE: Turing.jl/ext/TuringOptimExt.jl
##CHUNK 1
kwargs...,
)
# Convert the initial values, since it is assumed that users provide them
# in the constrained space.
# TODO(penelopeysm): As with in src/optimisation/Optimisation.jl, unclear
# whether initialisation is really necessary at all
vi = DynamicPPL.unflatten(f.ldf.varinfo, init_vals)
vi = DynamicPPL.link(vi, f.ldf.model)
f = Optimisation.OptimLogDensity(f.ldf.model, vi, f.ldf.context; adtype=f.ldf.adtype)
init_vals = DynamicPPL.getparams(f.ldf)
# Optimize!
M = Optim.optimize(Optim.only_fg!(f), init_vals, optimizer, options, args...; kwargs...)
# Warn the user if the optimization did not converge.
if !Optim.converged(M)
@warn """
Optimization did not converge! You may need to correct your model or adjust the
Optim parameters.
"""
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
function make_updated_varinfo(
f::DynamicPPL.LogDensityFunction, external_transition, external_state
)
# Set the parameters.
# NOTE: This is Turing.Inference.getparams, not AbstractMCMC.getparams (!!!!!)
# The latter uses the state rather than the transition.
# TODO(penelopeysm): Make this use AbstractMCMC.getparams instead
new_parameters = getparams(f.model, external_transition)
new_varinfo = DynamicPPL.unflatten(f.varinfo, new_parameters)
# Set (or recalculate, if needed) the log density.
new_logp = getlogp_external(external_transition, external_state)
return if ismissing(new_logp)
last(DynamicPPL.evaluate!!(f.model, new_varinfo, f.context))
else
DynamicPPL.setlogp!!(new_varinfo, new_logp)
end
end
# TODO: Do we also support `resume`, etc?
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
# expected_logpdf = logpdf(Beta(2, 2), a) + logpdf(Normal(a), b)
# @test all(chn[:lp] .== expected_logpdf)
# @test all(chn[:logprior] .== expected_logpdf)
# @test all(chn[:loglikelihood] .== 0.0)
end
function initialize_nuts(model::DynamicPPL.Model)
# Create a linked varinfo
vi = DynamicPPL.VarInfo(model)
linked_vi = DynamicPPL.link!!(vi, model)
# Create a LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, linked_vi; adtype=Turing.DEFAULT_ADTYPE)
# Choose parameter dimensionality and initial parameter value
D = LogDensityProblems.dimension(f)
initial_θ = rand(D) .- 0.5
# Define a Hamiltonian system
metric = AdvancedHMC.DiagEuclideanMetric(D)
#FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
# Update `vi` based on acceptance
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
else
vi = DynamicPPL.unflatten(vi, theta)
vi = setlogp!!(vi, log_density_old)
end
##CHUNK 2
end
function get_hamiltonian(model, spl, vi, state, n)
metric = gen_metric(n, spl, state)
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
return AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
end
"""
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
#CURRENT FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
value_vectors,
[m.values[Symbol(vn)] for vn in varnames if DynamicPPL.getsym(vn) == s],
)
end
return (; zip(var_symbols, value_vectors)...)
end
Base.get(m::ModeResult, var_symbol::Symbol) = get(m, [var_symbol])
"""
ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)
Create a `ModeResult` for a given `log_density` objective and a `solution` given by `solve`.
`Optimization.solve` returns its own result type. This function converts that into the
richer format of `ModeResult`. It also takes care of transforming them back to the original
parameter space in case the optimization was done in a transformed space.
"""
function ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)
varinfo_new = DynamicPPL.unflatten(log_density.ldf.varinfo, solution.u)
##CHUNK 2
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
##CHUNK 3
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
log_density = OptimLogDensity(model, vi, ctx)
prob = Optimization.OptimizationProblem(log_density, adtype, constraints)
solution = Optimization.solve(prob, solver; kwargs...)
# TODO(mhauru) We return a ModeResult for compatibility with the older Optim.jl
# interface. Might we want to break that and develop a better return type?
return ModeResult(log_density, solution)
end
##CHUNK 4
initial_params = generate_initial_params(model, initial_params, constraints)
if solver === nothing
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)

context_start_lineno: 364 | line_no: 383 | repo: Turing.jl | id: 402
target_function_prompt (function_signature, raw_solution, and focal_code are identical):
function Base.get(m::ModeResult, var_symbols::AbstractVector{Symbol})
log_density = m.f.ldf
# Get all the variable names in the model. This is the same as the list of keys in
# m.values, but they are more convenient to filter when they are VarNames rather than
# Symbols.
varnames = collect(
map(first, Turing.Inference.getparams(log_density.model, log_density.varinfo))
)
# For each symbol s in var_symbols, pick all the values from m.values for which the
# variable name has that symbol.
et = eltype(m.values)
value_vectors = Vector{et}[]
for s in var_symbols
push!(
value_vectors,
[m.values[Symbol(vn)] for vn in varnames if DynamicPPL.getsym(vn) == s],
)
end
return (; zip(var_symbols, value_vectors)...)
end
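A usage sketch for this accessor. The symbols `:s` and `:m` are illustrative, and `mle` is assumed to be a `ModeResult`; per the docstring in the context below, the result is a `NamedTuple` keyed by the requested symbols, with a vector of values per symbol.

```julia
get(mle, :m)        # (; m = [...]): values of every model variable whose symbol is :m
get(mle, [:s, :m])  # (; s = [...], m = [...])
```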
solution_position: [364, 383]
function_name: getsym
start_line: 364 | end_line: 383
file_path: src/optimisation/Optimisation.jl
context:
#FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
end
function names_values(xs::AbstractVector{<:NamedTuple})
# Obtain all parameter names.
names_set = Set{Symbol}()
for x in xs
for k in keys(x)
push!(names_set, k)
end
end
names_unique = collect(names_set)
# Extract all values as matrix.
values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
return names_unique, values
end
getlogevidence(transitions, sampler, state) = missing
##CHUNK 2
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
end
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
##CHUNK 3
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
# Get the values of the extra parameters in each transition.
extra_params, extra_values = get_transition_extras(ts)
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
parray = hcat(vals, extra_values)
# Get the average or final log evidence, if it exists.
le = getlogevidence(ts, spl, state)
##CHUNK 4
return getparams(model, DynamicPPL.typed_varinfo(untyped_vi))
end
function getparams(::DynamicPPL.Model, ::DynamicPPL.VarInfo{NamedTuple{(),Tuple{}}})
return Dict{VarName,Any}()
end
function _params_to_array(model::DynamicPPL.Model, ts::Vector)
names_set = OrderedSet{VarName}()
# Extract the parameter names and values from each transition.
dicts = map(ts) do t
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
s = sum(y) - sum(z)
q = Wrapper(0.0)
q.a ~ Normal(s, 1)
r = Vector{Float64}(undef, 1)
r[1] ~ Normal(q.a, 1)
obs1 ~ Normal(r[1], 1)
obs2 ~ Poisson(y[3])
return obs1, obs2, variance, z, y, s
end
model = test_model(1.2, 2, 10, 2.5)
all_varnames = DynamicPPL.VarName[
@varname(variance), @varname(z), @varname(y), @varname(q.a), @varname(r[1])
]
# All combinations of elements in all_varnames.
target_vn_combinations = Iterators.flatten(
Iterators.map(
n -> Combinatorics.combinations(all_varnames, n), 1:length(all_varnames)
),
)
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
if length(vns) == 1
# Only one variable, assign the values to it
DynamicPPL.setindex!(vi, vals, vns[1])
else
# Spread the values across the variables
length(vns) == length(vals) || error("Unequal number of variables and values")
for (vn, val) in zip(vns, vals)
DynamicPPL.setindex!(vi, val, vn)
end
end
end
end
"""
MHLogDensityFunction
A log density function for the MH sampler.
This variant uses the `set_namedtuple!` function to update the `VarInfo`.
"""
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
initial_params=nothing,
kwargs...,
)
# End recursion
if isempty(varname_vecs) && isempty(samplers)
return vi, states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
# Get the initial values for this component sampler.
initial_params_local = if initial_params === nothing
nothing
else
DynamicPPL.subset(vi, varnames)[:]
end
# Construct the conditioned model.
conditioned_model, context = make_conditional(model, varnames, vi)
##CHUNK 2
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
"""
make_conditional(model, target_variables, varinfo)
Return a new, conditioned model for a component of a Gibbs sampler.
# Arguments
- `model::DynamicPPL.Model`: The model to condition.
- `target_variables::AbstractVector{<:VarName}`: The target variables of the component
sampler. These will _not_ be conditioned.
- `varinfo::DynamicPPL.AbstractVarInfo`: Values for all variables in the model. All the
#CURRENT FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
Create a `ModeResult` for a given `log_density` objective and a `solution` given by `solve`.
`Optimization.solve` returns its own result type. This function converts that into the
richer format of `ModeResult`. It also takes care of transforming them back to the original
parameter space in case the optimization was done in a transformed space.
"""
function ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)
varinfo_new = DynamicPPL.unflatten(log_density.ldf.varinfo, solution.u)
# `getparams` performs invlinking if needed
vals = Turing.Inference.getparams(log_density.ldf.model, varinfo_new)
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
vns_vals_iter = mapreduce(collect, vcat, iters)
syms = map(Symbol ∘ first, vns_vals_iter)
vals = map(last, vns_vals_iter)
return ModeResult(
NamedArrays.NamedArray(vals, syms), solution, -solution.objective, log_density
)
end
"""
##CHUNK 2
StatsBase.coefnames(m::ModeResult) = names(m.values)[1]
StatsBase.params(m::ModeResult) = StatsBase.coefnames(m)
StatsBase.vcov(m::ModeResult) = inv(StatsBase.informationmatrix(m))
StatsBase.loglikelihood(m::ModeResult) = m.lp
"""
Base.get(m::ModeResult, var_symbol::Symbol)
Base.get(m::ModeResult, var_symbols::AbstractVector{Symbol})
Return the values of all the variables with the symbol(s) `var_symbol` in the mode result
`m`. The return value is a `NamedTuple` with `var_symbols` as the key(s). The second
argument should be either a `Symbol` or a vector of `Symbol`s.
"""
end
Base.get(m::ModeResult, var_symbol::Symbol) = get(m, [var_symbol])
"""
ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)

context_start_lineno: 450 | line_no: 469 | repo: Turing.jl | id: 403
target_function_prompt (function_signature, raw_solution, and focal_code are identical):
function generate_initial_params(model::DynamicPPL.Model, initial_params, constraints)
if initial_params === nothing && has_generic_constraints(constraints)
throw(
ArgumentError(
"You must provide an initial value when using generic constraints."
),
)
end
return if initial_params !== nothing
copy(initial_params)
elseif has_box_constraints(constraints)
[
rand(Distributions.Uniform(lower, upper)) for
(lower, upper) in zip(constraints.lb, constraints.ub)
]
else
rand(Vector, model)
end
end
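An illustrative sketch of the three branches, calling the internal helpers directly. Here `model` is assumed to be a DynamicPPL model with a single scalar parameter, and the `ModeEstimationConstraints(lb, ub, cons, lcons, ucons)` field order is taken from its use in `estimate_mode`.

```julia
using Turing
const TO = Turing.Optimisation

no_cons  = TO.ModeEstimationConstraints(nothing, nothing, nothing, nothing, nothing)
box_cons = TO.ModeEstimationConstraints([-1.0], [1.0], nothing, nothing, nothing)

TO.generate_initial_params(model, [0.3], no_cons)     # returns copy([0.3])
TO.generate_initial_params(model, nothing, box_cons)  # uniform draw from each [lb, ub] interval
TO.generate_initial_params(model, nothing, no_cons)   # rand(Vector, model), i.e. a draw from the prior
```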
solution_position: [450, 469]
function_name: generate_initial_params
start_line: 450 | end_line: 469
file_path: src/optimisation/Optimisation.jl
context:
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
# Get the initial values for this component sampler.
initial_params_local = if initial_params === nothing
nothing
else
DynamicPPL.subset(vi, varnames)[:]
end
# Construct the conditioned model.
conditioned_model, context = make_conditional(model, varnames, vi)
# Take initial step with the current sampler.
_, new_state = step_function(
rng,
conditioned_model,
sampler;
# FIXME: This will cause issues if the sampler expects initial params in unconstrained space.
# This is not the case for any samplers in Turing.jl, but will be for external samplers, etc.
initial_params=initial_params_local,
kwargs...,
##CHUNK 2
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
end
return vi
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
#FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
# Resample and try again.
# NOTE: varinfo has to be linked to make sure this samples in unconstrained space
varinfo = last(
DynamicPPL.evaluate!!(model, rng, varinfo, DynamicPPL.SampleFromUniform())
)
end
# if we failed to find valid initial parameters, error
return error(
"failed to find valid initial parameters in $(max_attempts) tries. This may indicate an error with the model or AD backend; please open an issue at https://github.com/TuringLang/Turing.jl/issues",
)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:Hamiltonian},
vi_original::AbstractVarInfo;
initial_params=nothing,
#CURRENT FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
end
has_constraints(c) = has_box_constraints(c) || has_generic_constraints(c)
"""
generate_initial_params(model::DynamicPPL.Model, initial_params, constraints)
Generate an initial value for the optimization problem.
If `initial_params` is not `nothing`, a copy of it is returned. Otherwise initial parameter
rand(Vector, model)
end
end
function default_solver(constraints::ModeEstimationConstraints)
return if has_generic_constraints(constraints)
OptimizationOptimJL.IPNewton()
else
OptimizationOptimJL.LBFGS()
end
end
##CHUNK 2
lb::Lb
ub::Ub
cons::Cons
lcons::LCons
ucons::UCons
end
has_box_constraints(c::ModeEstimationConstraints) = c.ub !== nothing || c.lb !== nothing
function has_generic_constraints(c::ModeEstimationConstraints)
return (c.cons !== nothing || c.lcons !== nothing || c.ucons !== nothing)
end
has_constraints(c) = has_box_constraints(c) || has_generic_constraints(c)
"""
generate_initial_params(model::DynamicPPL.Model, initial_params, constraints)
Generate an initial value for the optimization problem.
If `initial_params` is not `nothing`, a copy of it is returned. Otherwise initial parameter
rand(Vector, model)
##CHUNK 3
- `check_model::Bool=true`: If true, the model is checked for errors before
optimisation begins.
- `initial_params::Union{AbstractVector,Nothing}=nothing`: Initial value for the
optimization. Optional, unless non-box constraints are specified. If omitted it is
generated by either sampling from the prior distribution or uniformly from the box
constraints, if any.
- `adtype::AbstractADType=AutoForwardDiff()`: The automatic differentiation type to use.
- Keyword arguments `lb`, `ub`, `cons`, `lcons`, and `ucons` define constraints for the
optimization problem. Please see [`ModeEstimationConstraints`](@ref) for more details.
- Any extra keyword arguments are passed to `Optimization.solve`.
"""
function estimate_mode(
model::DynamicPPL.Model,
estimator::ModeEstimator,
solver=nothing;
check_model::Bool=true,
initial_params=nothing,
adtype=ADTypes.AutoForwardDiff(),
cons=nothing,
lcons=nothing,
##CHUNK 4
# Arguments
- `model::DynamicPPL.Model`: The model for which to estimate the mode.
- `estimator::ModeEstimator`: Can be either `MLE()` for maximum likelihood estimation or
`MAP()` for maximum a posteriori estimation.
- `solver=nothing`. The optimization algorithm to use. Optional. Can be any solver
recognised by Optimization.jl. If omitted a default solver is used: LBFGS, or IPNewton
if non-box constraints are present.
# Keyword arguments
- `check_model::Bool=true`: If true, the model is checked for errors before
optimisation begins.
- `initial_params::Union{AbstractVector,Nothing}=nothing`: Initial value for the
optimization. Optional, unless non-box constraints are specified. If omitted it is
generated by either sampling from the prior distribution or uniformly from the box
constraints, if any.
- `adtype::AbstractADType=AutoForwardDiff()`: The automatic differentiation type to use.
- Keyword arguments `lb`, `ub`, `cons`, `lcons`, and `ucons` define constraints for the
optimization problem. Please see [`ModeEstimationConstraints`](@ref) for more details.
- Any extra keyword arguments are passed to `Optimization.solve`.
##CHUNK 5
# Note that OptimLogDensity is a callable that evaluates the model with given
# parameters. Hence we can use it in the objective function as below.
f = Optimization.OptimizationFunction(log_density, adtype; cons=constraints.cons)
initial_params = log_density.ldf.varinfo[:]
prob = if !has_constraints(constraints)
Optimization.OptimizationProblem(f, initial_params)
else
Optimization.OptimizationProblem(
f,
initial_params;
lcons=constraints.lcons,
ucons=constraints.ucons,
lb=constraints.lb,
ub=constraints.ub,
)
end
return prob
end
"""
##CHUNK 6
ucons=nothing,
lb=nothing,
ub=nothing,
kwargs...,
)
check_model && DynamicPPL.check_model(model; error_on_failure=true)
constraints = ModeEstimationConstraints(lb, ub, cons, lcons, ucons)
initial_params = generate_initial_params(model, initial_params, constraints)
if solver === nothing
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
##CHUNK 7
"""
function estimate_mode(
model::DynamicPPL.Model,
estimator::ModeEstimator,
solver=nothing;
check_model::Bool=true,
initial_params=nothing,
adtype=ADTypes.AutoForwardDiff(),
cons=nothing,
lcons=nothing,
ucons=nothing,
lb=nothing,
ub=nothing,
kwargs...,
)
check_model && DynamicPPL.check_model(model; error_on_failure=true)
constraints = ModeEstimationConstraints(lb, ub, cons, lcons, ucons)
initial_params = generate_initial_params(model, initial_params, constraints)
if solver === nothing

context_start_lineno: 487 | line_no: 505 | repo: Turing.jl | id: 404
target_function_prompt (function_signature, raw_solution, and focal_code are identical):
function Optimization.OptimizationProblem(log_density::OptimLogDensity, adtype, constraints)
# Note that OptimLogDensity is a callable that evaluates the model with given
# parameters. Hence we can use it in the objective function as below.
f = Optimization.OptimizationFunction(log_density, adtype; cons=constraints.cons)
initial_params = log_density.ldf.varinfo[:]
prob = if !has_constraints(constraints)
Optimization.OptimizationProblem(f, initial_params)
else
Optimization.OptimizationProblem(
f,
initial_params;
lcons=constraints.lcons,
ucons=constraints.ucons,
lb=constraints.lb,
ub=constraints.ub,
)
end
return prob
end
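A sketch of driving this constructor by hand, mirroring what `estimate_mode` does internally. The module paths and the unconstrained setup are assumptions based on the surrounding context, and `model` is an arbitrary DynamicPPL model.

```julia
using Turing, DynamicPPL, Optimization, OptimizationOptimJL, ADTypes

ctx  = Turing.Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
ld   = Turing.Optimisation.OptimLogDensity(model, ctx)  # objective: negative log likelihood
cons = Turing.Optimisation.ModeEstimationConstraints(nothing, nothing, nothing, nothing, nothing)

prob = Optimization.OptimizationProblem(ld, ADTypes.AutoForwardDiff(), cons)
sol  = Optimization.solve(prob, OptimizationOptimJL.LBFGS())
```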
solution_position: [487, 505]
function_name: Optimization.OptimizationProblem
start_line: 487 | end_line: 505
file_path: src/optimisation/Optimisation.jl
context:
#FILE: Turing.jl/ext/TuringOptimExt.jl
##CHUNK 1
_optimize(f::OptimLogDensity, optimizer=Optim.LBFGS(), args...; kwargs...)
Estimate a mode, i.e., compute a MLE or MAP estimate.
"""
function _optimize(
f::Optimisation.OptimLogDensity,
init_vals::AbstractArray=DynamicPPL.getparams(f.ldf),
optimizer::Optim.AbstractOptimizer=Optim.LBFGS(),
options::Optim.Options=Optim.Options(),
args...;
kwargs...,
)
# Convert the initial values, since it is assumed that users provide them
# in the constrained space.
# TODO(penelopeysm): As with in src/optimisation/Optimisation.jl, unclear
# whether initialisation is really necessary at all
vi = DynamicPPL.unflatten(f.ldf.varinfo, init_vals)
vi = DynamicPPL.link(vi, f.ldf.model)
f = Optimisation.OptimLogDensity(f.ldf.model, vi, f.ldf.context; adtype=f.ldf.adtype)
init_vals = DynamicPPL.getparams(f.ldf)
##CHUNK 2
kwargs...,
)
# Convert the initial values, since it is assumed that users provide them
# in the constrained space.
# TODO(penelopeysm): As with in src/optimisation/Optimisation.jl, unclear
# whether initialisation is really necessary at all
vi = DynamicPPL.unflatten(f.ldf.varinfo, init_vals)
vi = DynamicPPL.link(vi, f.ldf.model)
f = Optimisation.OptimLogDensity(f.ldf.model, vi, f.ldf.context; adtype=f.ldf.adtype)
init_vals = DynamicPPL.getparams(f.ldf)
# Optimize!
M = Optim.optimize(Optim.only_fg!(f), init_vals, optimizer, options, args...; kwargs...)
# Warn the user if the optimization did not converge.
if !Optim.converged(M)
@warn """
Optimization did not converge! You may need to correct your model or adjust the
Optim parameters.
"""
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
# expected_logpdf = logpdf(Beta(2, 2), a) + logpdf(Normal(a), b)
# @test all(chn[:lp] .== expected_logpdf)
# @test all(chn[:logprior] .== expected_logpdf)
# @test all(chn[:loglikelihood] .== 0.0)
end
function initialize_nuts(model::DynamicPPL.Model)
# Create a linked varinfo
vi = DynamicPPL.VarInfo(model)
linked_vi = DynamicPPL.link!!(vi, model)
# Create a LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, linked_vi; adtype=Turing.DEFAULT_ADTYPE)
# Choose parameter dimensionality and initial parameter value
D = LogDensityProblems.dimension(f)
initial_θ = rand(D) .- 0.5
# Define a Hamiltonian system
metric = AdvancedHMC.DiagEuclideanMetric(D)
#CURRENT FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
end
end
function default_solver(constraints::ModeEstimationConstraints)
return if has_generic_constraints(constraints)
OptimizationOptimJL.IPNewton()
else
OptimizationOptimJL.LBFGS()
end
end
"""
OptimizationProblem(log_density::OptimLogDensity, adtype, constraints)
Create an `OptimizationProblem` for the objective function defined by `log_density`.
end
return prob
end
##CHUNK 2
"""
OptimizationProblem(log_density::OptimLogDensity, adtype, constraints)
Create an `OptimizationProblem` for the objective function defined by `log_density`.
end
return prob
end
"""
estimate_mode(
model::DynamicPPL.Model,
estimator::ModeEstimator,
[solver];
kwargs...
)
Find the mode of the probability distribution of a model.
##CHUNK 3
lcons=nothing,
ucons=nothing,
lb=nothing,
ub=nothing,
kwargs...,
)
check_model && DynamicPPL.check_model(model; error_on_failure=true)
constraints = ModeEstimationConstraints(lb, ub, cons, lcons, ucons)
initial_params = generate_initial_params(model, initial_params, constraints)
if solver === nothing
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
##CHUNK 4
if solver === nothing
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
##CHUNK 5
return OptimLogDensity(DynamicPPL.LogDensityFunction(model, vi, ctx; adtype=adtype))
end
# No varinfo
function OptimLogDensity(
model::DynamicPPL.Model,
ctx::OptimizationContext;
adtype::ADTypes.AbstractADType=AutoForwardDiff(),
)
return OptimLogDensity(
DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model), ctx; adtype=adtype)
)
end
"""
(f::OptimLogDensity)(z)
(f::OptimLogDensity)(z, _)
Evaluate the negative log joint or log likelihood at the array `z`. Which one is evaluated
depends on the context of `f`.
##CHUNK 6
DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model), ctx; adtype=adtype)
)
end
"""
(f::OptimLogDensity)(z)
(f::OptimLogDensity)(z, _)
Evaluate the negative log joint or log likelihood at the array `z`. Which one is evaluated
depends on the context of `f`.
Any second argument is ignored. The two-argument method only exists to match the interface
required by Optimization.jl.
"""
(f::OptimLogDensity)(z::AbstractVector) = -LogDensityProblems.logdensity(f.ldf, z)
(f::OptimLogDensity)(z, _) = f(z)
# NOTE: The format of this function is dictated by Optim. The first argument sets whether to
# compute the function value, the second whether to compute the gradient (and stores the
# gradient). The last one is the actual argument of the objective function.
##CHUNK 7
`z`, you should manually call
```julia
LogDensityProblems.logdensity(f.ldf, z)
```
However, it is a callable object which returns the *negative* log density of
the underlying LogDensityFunction at the point `z`. This is done to satisfy
the Optim.jl interface.
```julia
optim_ld = OptimLogDensity(model, varinfo, ctx)
optim_ld(z) # returns -logp
```
"""
struct OptimLogDensity{
M<:DynamicPPL.Model,
V<:DynamicPPL.VarInfo,
C<:OptimizationContext,
AD<:ADTypes.AbstractADType,

context_start_lineno: 539 | line_no: 595 | repo: Turing.jl | id: 405
target_function_prompt (function_signature, raw_solution, and focal_code are identical):
function estimate_mode(
model::DynamicPPL.Model,
estimator::ModeEstimator,
solver=nothing;
check_model::Bool=true,
initial_params=nothing,
adtype=ADTypes.AutoForwardDiff(),
cons=nothing,
lcons=nothing,
ucons=nothing,
lb=nothing,
ub=nothing,
kwargs...,
)
check_model && DynamicPPL.check_model(model; error_on_failure=true)
constraints = ModeEstimationConstraints(lb, ub, cons, lcons, ucons)
initial_params = generate_initial_params(model, initial_params, constraints)
if solver === nothing
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
log_density = OptimLogDensity(model, vi, ctx)
prob = Optimization.OptimizationProblem(log_density, adtype, constraints)
solution = Optimization.solve(prob, solver; kwargs...)
# TODO(mhauru) We return a ModeResult for compatibility with the older Optim.jl
# interface. Might we want to break that and develop a better return type?
return ModeResult(log_density, solution)
end
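A usage sketch for this public entry point. The coin-flip model is illustrative; `MLE()`, `MAP()`, and the optional solver argument follow the docstring quoted in the context above.

```julia
using Turing, OptimizationOptimJL, StatsBase

@model function coinflip(y)
    p ~ Beta(2, 2)
    for i in eachindex(y)
        y[i] ~ Bernoulli(p)
    end
end
m = coinflip([1, 0, 1, 1])

mle_fit = Turing.Optimisation.estimate_mode(m, MLE())  # default solver: LBFGS
map_fit = Turing.Optimisation.estimate_mode(m, MAP(), OptimizationOptimJL.NelderMead())
coef(map_fit)  # NamedArray of the estimated parameter values
```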
|
[539, 595] |
function estimate_mode(
model::DynamicPPL.Model,
estimator::ModeEstimator,
solver=nothing;
check_model::Bool=true,
initial_params=nothing,
adtype=ADTypes.AutoForwardDiff(),
cons=nothing,
lcons=nothing,
ucons=nothing,
lb=nothing,
ub=nothing,
kwargs...,
)
check_model && DynamicPPL.check_model(model; error_on_failure=true)
constraints = ModeEstimationConstraints(lb, ub, cons, lcons, ucons)
initial_params = generate_initial_params(model, initial_params, constraints)
if solver === nothing
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
log_density = OptimLogDensity(model, vi, ctx)
prob = Optimization.OptimizationProblem(log_density, adtype, constraints)
solution = Optimization.solve(prob, solver; kwargs...)
# TODO(mhauru) We return a ModeResult for compatibility with the older Optim.jl
# interface. Might we want to break that and develop a better return type?
return ModeResult(log_density, solution)
end
|
function estimate_mode(
model::DynamicPPL.Model,
estimator::ModeEstimator,
solver=nothing;
check_model::Bool=true,
initial_params=nothing,
adtype=ADTypes.AutoForwardDiff(),
cons=nothing,
lcons=nothing,
ucons=nothing,
lb=nothing,
ub=nothing,
kwargs...,
)
check_model && DynamicPPL.check_model(model; error_on_failure=true)
constraints = ModeEstimationConstraints(lb, ub, cons, lcons, ucons)
initial_params = generate_initial_params(model, initial_params, constraints)
if solver === nothing
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
log_density = OptimLogDensity(model, vi, ctx)
prob = Optimization.OptimizationProblem(log_density, adtype, constraints)
solution = Optimization.solve(prob, solver; kwargs...)
# TODO(mhauru) We return a ModeResult for compatibility with the older Optim.jl
# interface. Might we want to break that and develop a better return type?
return ModeResult(log_density, solution)
end
|
estimate_mode | 539 | 595 | src/optimisation/Optimisation.jl |
#FILE: Turing.jl/ext/TuringOptimExt.jl
##CHUNK 1
kwargs...,
)
# Convert the initial values, since it is assumed that users provide them
# in the constrained space.
# TODO(penelopeysm): As with in src/optimisation/Optimisation.jl, unclear
# whether initialisation is really necessary at all
vi = DynamicPPL.unflatten(f.ldf.varinfo, init_vals)
vi = DynamicPPL.link(vi, f.ldf.model)
f = Optimisation.OptimLogDensity(f.ldf.model, vi, f.ldf.context; adtype=f.ldf.adtype)
init_vals = DynamicPPL.getparams(f.ldf)
# Optimize!
M = Optim.optimize(Optim.only_fg!(f), init_vals, optimizer, options, args...; kwargs...)
# Warn the user if the optimization did not converge.
if !Optim.converged(M)
@warn """
Optimization did not converge! You may need to correct your model or adjust the
Optim parameters.
"""
##CHUNK 2
f = Optimisation.OptimLogDensity(model, ctx)
init_vals = DynamicPPL.getparams(f.ldf)
return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end
function Optim.optimize(
model::DynamicPPL.Model,
::Optimisation.MLE,
init_vals::AbstractArray,
optimizer::Optim.AbstractOptimizer,
options::Optim.Options=Optim.Options();
kwargs...,
)
return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end
function _mle_optimize(model::DynamicPPL.Model, args...; kwargs...)
ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
return _optimize(Optimisation.OptimLogDensity(model, ctx), args...; kwargs...)
end
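The extension chunk above also keeps the older Optim.jl entry point alive. A hedged sketch of that route follows; exact method availability depends on the Turing and Optim versions loaded, and the model, optimizer, and options are illustrative:

```julia
using Turing, Optim  # loading Optim activates the TuringOptimExt extension shown above

@model function toy(x)
    m ~ Normal(0, 1)
    x ~ Normal(m, 1)
end

# MLE via the Optim.jl-flavoured entry point; initial values are generated internally.
mle = optimize(toy(2.0), MLE(), LBFGS(), Optim.Options(; iterations=100))
```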
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
##CHUNK 2
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
##CHUNK 3
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = DynamicPPL.subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
#FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
Approximating the target `model` via variational inference by optimizing `objective` with the initialization `q`.
This is a thin wrapper around `AdvancedVI.optimize`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
- `q`: The initial variational approximation.
- `n_iterations`: Number of optimization steps.
# Keyword Arguments
- `objective`: Variational objective to be optimized.
- `show_progress`: Whether to show the progress bar.
- `optimizer`: Optimization algorithm.
- `averager`: Parameter averaging strategy.
- `operator`: Operator applied after each optimization step.
- `adtype`: Automatic differentiation backend.
See the docs of `AdvancedVI.optimize` for additional keyword arguments.
# Returns
- `q`: Variational distribution formed by the last iterate of the optimization run.
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
#CURRENT FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
# Arguments
- `model::DynamicPPL.Model`: The model for which to estimate the mode.
- `estimator::ModeEstimator`: Can be either `MLE()` for maximum likelihood estimation or
`MAP()` for maximum a posteriori estimation.
- `solver=nothing`. The optimization algorithm to use. Optional. Can be any solver
recognised by Optimization.jl. If omitted a default solver is used: LBFGS, or IPNewton
if non-box constraints are present.
# Keyword arguments
- `check_model::Bool=true`: If true, the model is checked for errors before
optimisation begins.
- `initial_params::Union{AbstractVector,Nothing}=nothing`: Initial value for the
optimization. Optional, unless non-box constraints are specified. If omitted it is
generated by either sampling from the prior distribution or uniformly from the box
constraints, if any.
- `adtype::AbstractADType=AutoForwardDiff()`: The automatic differentiation type to use.
- Keyword arguments `lb`, `ub`, `cons`, `lcons`, and `ucons` define constraints for the
# interface. Might we want to break that and develop a better return type?
return ModeResult(log_density, solution)
##CHUNK 2
- `check_model::Bool=true`: If true, the model is checked for errors before
optimisation begins.
- `initial_params::Union{AbstractVector,Nothing}=nothing`: Initial value for the
optimization. Optional, unless non-box constraints are specified. If omitted it is
generated by either sampling from the prior distribution or uniformly from the box
constraints, if any.
- `adtype::AbstractADType=AutoForwardDiff()`: The automatic differentiation type to use.
- Keyword arguments `lb`, `ub`, `cons`, `lcons`, and `ucons` define constraints for the
# interface. Might we want to break that and develop a better return type?
return ModeResult(log_density, solution)
end
"""
maximum_a_posteriori(
model::DynamicPPL.Model,
[solver];
kwargs...
)
Find the maximum a posteriori estimate of a model.
##CHUNK 3
)
end
return (; zip(var_symbols, value_vectors)...)
end
Base.get(m::ModeResult, var_symbol::Symbol) = get(m, [var_symbol])
"""
ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)
Create a `ModeResult` for a given `log_density` objective and a `solution` given by `solve`.
`Optimization.solve` returns its own result type. This function converts that into the
richer format of `ModeResult`. It also takes care of transforming them back to the original
parameter space in case the optimization was done in a transformed space.
"""
function ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)
varinfo_new = DynamicPPL.unflatten(log_density.ldf.varinfo, solution.u)
# `getparams` performs invlinking if needed
vals = Turing.Inference.getparams(log_density.ldf.model, varinfo_new)
| 164 | 176 | Turing.jl | 406 |
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
else
-StatsFuns.log1pexp(cutpoints[k - 1] - η)
end
end
return logp
end
|
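As a quick sanity check of the branchy log-pmf above, the probabilities over k = 1..K should sum to one. A self-contained sketch that restates the same arithmetic outside Turing, with illustrative parameter values:

```julia
using StatsFuns: log1pexp, log1mexp

# Standalone restatement of the ordered-logistic log-pmf above, for checking only.
function ordered_logistic_logpdf(η, cutpoints, k)
    K = length(cutpoints) + 1
    if k == 1
        -log1pexp(η - cutpoints[1])
    elseif k < K
        tmp = log1pexp(cutpoints[k - 1] - η)
        -tmp + log1mexp(tmp - log1pexp(cutpoints[k] - η))
    else
        -log1pexp(cutpoints[K - 1] - η)
    end
end

η, cutpoints = -2.0, [-1.0, 1.0]
ps = [exp(ordered_logistic_logpdf(η, cutpoints, k)) for k in 1:(length(cutpoints) + 1)]
@assert isapprox(sum(ps), 1.0; atol=1e-8)   # the pmf is a proper simplex
```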
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
else
-StatsFuns.log1pexp(cutpoints[k - 1] - η)
end
end
return logp
end
|
[164, 176] |
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
else
-StatsFuns.log1pexp(cutpoints[k - 1] - η)
end
end
return logp
end
|
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
else
-StatsFuns.log1pexp(cutpoints[k - 1] - η)
end
end
return logp
end
|
unsafe_logpdf_ordered_logistic | 164 | 176 | src/stdlib/distributions.jl |
#FILE: Turing.jl/test/stdlib/distributions.jl
##CHUNK 1
@testset "distributions.jl" begin
rng = StableRNG(12345)
@testset "distributions functions" begin
ns = 10
logitp = randn(rng)
d1 = BinomialLogit(ns, logitp)
d2 = Binomial(ns, logistic(logitp))
k = 3
@test logpdf(d1, k) ≈ logpdf(d2, k)
end
@testset "distributions functions" begin
d = OrderedLogistic(-2, [-1, 1])
n = 1_000_000
y = rand(rng, d, n)
K = length(d.cutpoints) + 1
p = [mean(==(k), y) for k in 1:K] # empirical probs
pmf = [exp(logpdf(d, k)) for k in 1:K]
#CURRENT FILE: Turing.jl/src/stdlib/distributions.jl
##CHUNK 1
function Distributions.logpdf(d::OrderedLogistic, k::Real)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
_insupport = insupport(d, k)
_k = _insupport ? round(Int, k) : 1
logp = unsafe_logpdf_ordered_logistic(η, cutpoints, K, _k)
return _insupport ? logp : oftype(logp, -Inf)
end
function Base.rand(rng::Random.AbstractRNG, d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
k = rand(rng, Categorical(ps))
##CHUNK 2
result = logconstant + _k * logitp - SpecialFunctions.logbeta(n - _k + 1, _k + 1)
return _insupport ? result : oftype(result, -Inf)
end
function Base.rand(rng::Random.AbstractRNG, d::BinomialLogit)
return rand(rng, Binomial(d.n, logistic(d.logitp)))
end
Distributions.sampler(d::BinomialLogit) = sampler(Binomial(d.n, logistic(d.logitp)))
"""
OrderedLogistic(η, c::AbstractVector)
The *ordered logistic distribution* with real-valued parameter `η` and cutpoints `c` has the
probability mass function
```math
P(X = k) = \\begin{cases}
1 - \\text{logistic}(\\eta - c_1) & \\text{if } k = 1, \\\\
\\text{logistic}(\\eta - c_{k-1}) - \\text{logistic}(\\eta - c_k) & \\text{if } 1 < k < K, \\\\
\\text{logistic}(\\eta - c_{K-1}) & \\text{if } k = K,
##CHUNK 3
return new{typeof(η),typeof(cutpoints)}(η, cutpoints)
end
end
function OrderedLogistic(η, cutpoints::AbstractVector)
return OrderedLogistic{typeof(η),typeof(cutpoints)}(η, cutpoints)
end
Base.minimum(d::OrderedLogistic) = 1
Base.maximum(d::OrderedLogistic) = length(d.cutpoints) + 1
function Distributions.logpdf(d::OrderedLogistic, k::Real)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
_insupport = insupport(d, k)
_k = _insupport ? round(Int, k) : 1
logp = unsafe_logpdf_ordered_logistic(η, cutpoints, K, _k)
return _insupport ? logp : oftype(logp, -Inf)
##CHUNK 4
end
function Base.rand(rng::Random.AbstractRNG, d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
k = rand(rng, Categorical(ps))
return k
end
function Distributions.sampler(d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
return sampler(Categorical(ps))
##CHUNK 5
OrderedLogistic(η, c::AbstractVector)
The *ordered logistic distribution* with real-valued parameter `η` and cutpoints `c` has the
probability mass function
```math
P(X = k) = \\begin{cases}
1 - \\text{logistic}(\\eta - c_1) & \\text{if } k = 1, \\\\
\\text{logistic}(\\eta - c_{k-1}) - \\text{logistic}(\\eta - c_k) & \\text{if } 1 < k < K, \\\\
\\text{logistic}(\\eta - c_{K-1}) & \\text{if } k = K,
\\end{cases}
```
where `K = length(c) + 1`.
"""
struct OrderedLogistic{T1,T2<:AbstractVector} <: DiscreteUnivariateDistribution
η::T1
cutpoints::T2
function OrderedLogistic{T1,T2}(η::T1, cutpoints::T2) where {T1,T2}
issorted(cutpoints) || error("cutpoints are not sorted")
##CHUNK 6
\\end{cases}
```
where `K = length(c) + 1`.
"""
struct OrderedLogistic{T1,T2<:AbstractVector} <: DiscreteUnivariateDistribution
η::T1
cutpoints::T2
function OrderedLogistic{T1,T2}(η::T1, cutpoints::T2) where {T1,T2}
issorted(cutpoints) || error("cutpoints are not sorted")
return new{typeof(η),typeof(cutpoints)}(η, cutpoints)
end
end
function OrderedLogistic(η, cutpoints::AbstractVector)
return OrderedLogistic{typeof(η),typeof(cutpoints)}(η, cutpoints)
end
Base.minimum(d::OrderedLogistic) = 1
Base.maximum(d::OrderedLogistic) = length(d.cutpoints) + 1
##CHUNK 7
return k
end
function Distributions.sampler(d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
return sampler(Categorical(ps))
end
# unsafe version without bounds checking
"""
LogPoisson(logλ)
The *Poisson distribution* with logarithmic parameterization of the rate parameter
describes the number of independent events occurring within a unit time interval, given the
average rate of occurrence ``\\exp(\\log\\lambda)``.
##CHUNK 8
logλ::T
λ::S
function LogPoisson{T}(logλ::T) where {T}
λ = exp(logλ)
return new{T,typeof(λ)}(logλ, λ)
end
end
LogPoisson(logλ::Real) = LogPoisson{typeof(logλ)}(logλ)
Base.minimum(d::LogPoisson) = 0
Base.maximum(d::LogPoisson) = Inf
function Distributions.logpdf(d::LogPoisson, k::Real)
_insupport = insupport(d, k)
_k = _insupport ? round(Int, k) : 0
logp = _k * d.logλ - d.λ - SpecialFunctions.loggamma(_k + 1)
return _insupport ? logp : oftype(logp, -Inf)
##CHUNK 9
n::Int
logitp::T
logconstant::S
function BinomialLogit{T}(n::Int, logitp::T) where {T}
n >= 0 || error("parameter `n` has to be non-negative")
logconstant = -(log1p(n) + n * StatsFuns.log1pexp(logitp))
return new{T,typeof(logconstant)}(n, logitp, logconstant)
end
end
BinomialLogit(n::Int, logitp::Real) = BinomialLogit{typeof(logitp)}(n, logitp)
Base.minimum(::BinomialLogit) = 0
Base.maximum(d::BinomialLogit) = d.n
function Distributions.logpdf(d::BinomialLogit, k::Real)
n, logitp, logconstant = d.n, d.logitp, d.logconstant
_insupport = insupport(d, k)
_k = _insupport ? round(Int, k) : 0
| 133 | 156 | Turing.jl | 407 |
function _logpdf_table(d::DirichletProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# construct the table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
table[i] = T(log(m[i]))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.α)
return table
end
|
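The Dirichlet-process table above is the unnormalised Chinese-restaurant seating rule: weight `m[i]` for each occupied table and `α` for a fresh one. A standalone sketch of the normalised probabilities, mirroring the same loop structure (function name and values are illustrative):

```julia
# Seating probabilities implied by the table above: counts for occupied tables, α for a new one.
function crp_table_probs(m::AbstractVector{Int}, α::Real)
    first_zero = findfirst(iszero, m)
    K = first_zero === nothing ? length(m) + 1 : length(m)
    w = zeros(K)
    for i in 1:(K - 1)
        w[i] = m[i]                                     # occupied tables ∝ counts
    end
    w[first_zero === nothing ? K : first_zero] = α      # new table ∝ α
    return w ./ sum(w)
end

crp_table_probs([3, 1, 0], 0.5)   # ≈ [0.667, 0.222, 0.111]
```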
function _logpdf_table(d::DirichletProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# construct the table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
table[i] = T(log(m[i]))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.α)
return table
end
|
[133, 156] |
function _logpdf_table(d::DirichletProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# construct the table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
table[i] = T(log(m[i]))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.α)
return table
end
|
function _logpdf_table(d::DirichletProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# construct the table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
table[i] = T(log(m[i]))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.α)
return table
end
|
_logpdf_table | 133 | 156 | src/stdlib/RandomMeasures.jl |
#FILE: Turing.jl/src/stdlib/distributions.jl
##CHUNK 1
Base.maximum(::FlatPos) = Inf
Base.rand(rng::Random.AbstractRNG, d::FlatPos) = rand(rng) + d.l
function Distributions.logpdf(d::FlatPos, x::Real)
z = float(zero(x))
return x <= d.l ? oftype(z, -Inf) : z
end
# For vec support
function Distributions.loglikelihood(d::FlatPos, x::AbstractVector{<:Real})
lower = d.l
T = float(eltype(x))
return any(xi <= lower for xi in x) ? T(-Inf) : zero(T)
end
"""
BinomialLogit(n, logitp)
The *Binomial distribution* with logit parameterization characterizes the number of
successes in a sequence of independent trials.
##CHUNK 2
It has two parameters: `n`, the number of trials, and `logitp`, the logit of the probability
of success in an individual trial, with the distribution
```math
P(X = k) = {n \\choose k}{(\\text{logistic}(logitp))}^k (1 - \\text{logistic}(logitp))^{n-k}, \\quad \\text{ for } k = 0,1,2, \\ldots, n.
```
See also: [`Binomial`](@ref)
"""
struct BinomialLogit{T<:Real,S<:Real} <: DiscreteUnivariateDistribution
n::Int
logitp::T
logconstant::S
function BinomialLogit{T}(n::Int, logitp::T) where {T}
n >= 0 || error("parameter `n` has to be non-negative")
logconstant = -(log1p(n) + n * StatsFuns.log1pexp(logitp))
return new{T,typeof(logconstant)}(n, logitp, logconstant)
end
end
##CHUNK 3
return k
end
function Distributions.sampler(d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
return sampler(Categorical(ps))
end
# unsafe version without bounds checking
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
##CHUNK 4
end
# unsafe version without bounds checking
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
else
-StatsFuns.log1pexp(cutpoints[k - 1] - η)
end
end
return logp
end
"""
LogPoisson(logλ)
##CHUNK 5
function Distributions.logpdf(d::OrderedLogistic, k::Real)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
_insupport = insupport(d, k)
_k = _insupport ? round(Int, k) : 1
logp = unsafe_logpdf_ordered_logistic(η, cutpoints, K, _k)
return _insupport ? logp : oftype(logp, -Inf)
end
function Base.rand(rng::Random.AbstractRNG, d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
k = rand(rng, Categorical(ps))
#FILE: Turing.jl/test/stdlib/distributions.jl
##CHUNK 1
end
@testset "distributions functions" begin
d = OrderedLogistic(-2, [-1, 1])
n = 1_000_000
y = rand(rng, d, n)
K = length(d.cutpoints) + 1
p = [mean(==(k), y) for k in 1:K] # empirical probs
pmf = [exp(logpdf(d, k)) for k in 1:K]
@test all(((x, y),) -> abs(x - y) < 0.001, zip(p, pmf))
end
@testset "distribution functions" begin
d = OrderedLogistic(0, [1, 2, 3])
K = length(d.cutpoints) + 1
@test support(d) == 1:K
#FILE: Turing.jl/test/mcmc/is.jl
##CHUNK 1
function reference(n)
as = Vector{Float64}(undef, n)
bs = Vector{Float64}(undef, n)
logps = Vector{Float64}(undef, n)
for i in 1:n
as[i], bs[i], logps[i] = reference()
end
logevidence = logsumexp(logps) - log(n)
return (as=as, bs=bs, logps=logps, logevidence=logevidence)
end
function reference()
x = rand(Normal(4, 5))
y = rand(Normal(x, 1))
loglik = logpdf(Normal(x, 2), 3) + logpdf(Normal(y, 2), 1.5)
return x, y, loglik
end
#CURRENT FILE: Turing.jl/src/stdlib/RandomMeasures.jl
##CHUNK 1
# construct table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
!iszero(m[i]) && (table[i] = T(log(m[i] - d.d)))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.θ + d.d * d.t)
##CHUNK 2
function distribution(d::SizeBiasedSamplingProcess{<:PitmanYorProcess})
d_rpm = d.rpm
d_rpm_d = d.rpm.d
dist = Beta(one(d_rpm_d) - d_rpm_d, d_rpm.θ + d_rpm.t * d_rpm_d)
return LocationScale(zero(d_rpm_d), d.surplus, dist)
end
function _logpdf_table(d::PitmanYorProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# sanity check
@assert d.t == sum(!iszero, m)
# construct table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
##CHUNK 3
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
!iszero(m[i]) && (table[i] = T(log(m[i] - d.d)))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.θ + d.d * d.t)
return table
end
## ####### ##
## Exports ##
## ####### ##
export DirichletProcess, PitmanYorProcess
export SizeBiasedSamplingProcess, StickBreakingProcess, ChineseRestaurantProcess
| 218 | 233 | Turing.jl | 408 |
function stickbreak(v)
K = length(v) + 1
cumprod_one_minus_v = cumprod(1 .- v)
eta = [
if k == 1
v[1]
elseif k == K
cumprod_one_minus_v[K - 1]
else
v[k] * cumprod_one_minus_v[k - 1]
end for k in 1:K
]
return eta
end
|
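A quick check of the stick-breaking construction above: for breaking fractions in (0, 1), the returned weights are non-negative and sum to one. A self-contained sketch with illustrative values:

```julia
v = [0.3, 0.5, 0.2]        # illustrative breaking fractions, each in (0, 1)
K = length(v) + 1
cp = cumprod(1 .- v)
eta = map(1:K) do k
    if k == 1
        v[1]               # first stick piece
    elseif k == K
        cp[K - 1]          # whatever is left of the stick
    else
        v[k] * cp[k - 1]   # fraction of the remaining stick
    end
end
@assert isapprox(sum(eta), 1.0; atol=1e-12)   # weights form a simplex
# eta ≈ [0.3, 0.35, 0.07, 0.28]
```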
function stickbreak(v)
K = length(v) + 1
cumprod_one_minus_v = cumprod(1 .- v)
eta = [
if k == 1
v[1]
elseif k == K
cumprod_one_minus_v[K - 1]
else
v[k] * cumprod_one_minus_v[k - 1]
end for k in 1:K
]
return eta
end
|
[218, 233] |
function stickbreak(v)
K = length(v) + 1
cumprod_one_minus_v = cumprod(1 .- v)
eta = [
if k == 1
v[1]
elseif k == K
cumprod_one_minus_v[K - 1]
else
v[k] * cumprod_one_minus_v[k - 1]
end for k in 1:K
]
return eta
end
|
function stickbreak(v)
K = length(v) + 1
cumprod_one_minus_v = cumprod(1 .- v)
eta = [
if k == 1
v[1]
elseif k == K
cumprod_one_minus_v[K - 1]
else
v[k] * cumprod_one_minus_v[k - 1]
end for k in 1:K
]
return eta
end
|
stickbreak | 218 | 233 | src/stdlib/RandomMeasures.jl |
#FILE: Turing.jl/test/stdlib/RandomMeasures.jl
##CHUNK 1
# # Infinite (truncated) collection of breaking points on unit stick.
# v = tzeros(Float64, trunc)
# # Cluster locations.
# x = tzeros(Float64, trunc)
# # Draw weights and locations.
# for k in 1:trunc
# v[k] ~ StickBreakingProcess(rpm)
# x[k] ~ H
# end
# # Weights.
# w = vcat(v[1], v[2:end] .* cumprod(1 .- v[1:end-1]))
# # Normalize weights to ensure they sum exactly to one.
# # This is required by the Categorical distribution in Distributions.
# w ./= sum(w)
#FILE: Turing.jl/src/stdlib/distributions.jl
##CHUNK 1
return k
end
function Distributions.sampler(d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
return sampler(Categorical(ps))
end
# unsafe version without bounds checking
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
##CHUNK 2
end
# unsafe version without bounds checking
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
else
-StatsFuns.log1pexp(cutpoints[k - 1] - η)
end
end
return logp
end
"""
LogPoisson(logλ)
##CHUNK 3
function Distributions.logpdf(d::OrderedLogistic, k::Real)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
_insupport = insupport(d, k)
_k = _insupport ? round(Int, k) : 1
logp = unsafe_logpdf_ordered_logistic(η, cutpoints, K, _k)
return _insupport ? logp : oftype(logp, -Inf)
end
function Base.rand(rng::Random.AbstractRNG, d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
k = rand(rng, Categorical(ps))
##CHUNK 4
OrderedLogistic(η, c::AbstractVector)
The *ordered logistic distribution* with real-valued parameter `η` and cutpoints `c` has the
probability mass function
```math
P(X = k) = \\begin{cases}
1 - \\text{logistic}(\\eta - c_1) & \\text{if } k = 1, \\\\
\\text{logistic}(\\eta - c_{k-1}) - \\text{logistic}(\\eta - c_k) & \\text{if } 1 < k < K, \\\\
\\text{logistic}(\\eta - c_{K-1}) & \\text{if } k = K,
\\end{cases}
```
where `K = length(c) + 1`.
"""
struct OrderedLogistic{T1,T2<:AbstractVector} <: DiscreteUnivariateDistribution
η::T1
cutpoints::T2
function OrderedLogistic{T1,T2}(η::T1, cutpoints::T2) where {T1,T2}
issorted(cutpoints) || error("cutpoints are not sorted")
#FILE: Turing.jl/test/stdlib/distributions.jl
##CHUNK 1
# 1. UnivariateDistribution
# NOTE: Noncentral distributions are commented out because of
# AD incompatibility of their logpdf functions
dist_uni = [
Arcsine(1, 3),
Beta(2, 1),
# NoncentralBeta(2, 1, 1),
BetaPrime(1, 1),
Biweight(0, 1),
Chi(7),
Chisq(7),
# NoncentralChisq(7, 1),
Cosine(0, 1),
Epanechnikov(0, 1),
Erlang(2, 3),
Exponential(0.1),
FDist(7, 7),
# NoncentralF(7, 7, 1),
Frechet(2, 0.5),
Normal(0, 1),
##CHUNK 2
TriangularDist(1, 3, 2),
Triweight(0, 1),
Uniform(0, 1),
# VonMises(0, 1), WARNING: this is commented are because the
# test is broken
Weibull(2, 1),
# Cauchy(0, 1), # mean and variance are undefined for Cauchy
]
# 2. MultivariateDistribution
dist_multi = [
MvNormal(zeros(multi_dim), I),
MvNormal(zeros(2), [2.0 1.0; 1.0 4.0]),
Dirichlet(multi_dim, 2.0),
]
# 3. MatrixDistribution
dist_matrix = [
Wishart(7, [1.0 0.5; 0.5 1.0]), InverseWishart(7, [1.0 0.5; 0.5 1.0])
]
#FILE: Turing.jl/test/mcmc/hmc.jl
##CHUNK 1
r_mean = dropdims(mean(r; dims=1); dims=1)
@test isapprox(r_mean, mean(dist); atol=0.2)
end
@testset "multivariate support" begin
# Define NN flow
function nn(x, b1, w11, w12, w13, bo, wo)
h = tanh.([w11 w12 w13]' * x .+ b1)
return logistic(dot(wo, h) + bo)
end
# Generating training data
N = 20
M = N ÷ 4
x1s = rand(M) * 5
x2s = rand(M) * 5
xt1s = Array([[x1s[i]; x2s[i]] for i in 1:M])
append!(xt1s, Array([[x1s[i] - 6; x2s[i] - 6] for i in 1:M]))
xt0s = Array([[x1s[i]; x2s[i] - 6] for i in 1:M])
#CURRENT FILE: Turing.jl/src/stdlib/RandomMeasures.jl
##CHUNK 1
d_rpm = d.rpm
d_rpm_d = d.rpm.d
return Beta(one(d_rpm_d) - d_rpm_d, d_rpm.θ + d_rpm.t * d_rpm_d)
end
@doc raw"""
Stick-breaking function.
This function accepts a vector (`v`) of length $K - 1$ where each element
is assumed to be in the unit interval, and returns a simplex of length
$K$. If the supplied vector `v` is a vector of independent draws from
a Beta distribution (i.e., vⱼ | a ~ Beta(1, a), for j=1,...,K), then
the returned simplex is generated via a stick-breaking process where
the first element of the stick is w₁ = v₁, the last element w_K =
∏ⱼ (1 - vⱼ), and the other elements are wₖ = vₖ ∏ⱼ₌₁ᵏ⁻¹(1 - vⱼ).
As $K$ goes to infinity, w is a draw from the Chinese Restaurant process
with mass parameter a.
Arguments
=========
##CHUNK 2
$K$. If the supplied vector `v` is a vector of independent draws from
a Beta distribution (i.e., vⱼ | a ~ Beta(1, a), for j=1,...,K), then
the returned simplex is generated via a stick-breaking process where
the first element of the stick is w₁ = v₁, the last element w_K =
∏ⱼ (1 - vⱼ), and the other elements are wₖ = vₖ ∏ⱼ₌₁ᵏ⁻¹(1 - vⱼ).
As $K$ goes to infinity, w is a draw from the Chinese Restaurant process
with mass parameter a.
Arguments
=========
- `v`: A vector of length $K - 1$, where $K > 1$.
Return
======
- A simplex (w) of dimension $K$. Where ∑ₖ wₖ = 1, and each wₖ ≥ 0.
"""
function distribution(d::SizeBiasedSamplingProcess{<:PitmanYorProcess})
d_rpm = d.rpm
| 242 | 267 | Turing.jl | 409 |
function _logpdf_table(d::PitmanYorProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# sanity check
@assert d.t == sum(!iszero, m)
# construct table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
!iszero(m[i]) && (table[i] = T(log(m[i] - d.d)))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.θ + d.d * d.t)
return table
end
|
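The Pitman-Yor table above generalises the Chinese-restaurant rule: each occupied table is discounted by `d`, and the new-table weight grows with the number of occupied tables `t`. A standalone sketch of the normalised seating probabilities (names and values are illustrative):

```julia
# Seating probabilities implied by the table above: m[i] - d for occupied tables, θ + d*t for a new one.
function py_table_probs(m::AbstractVector{Int}, d::Real, θ::Real)
    t = count(!iszero, m)                   # number of occupied tables
    first_zero = findfirst(iszero, m)
    K = first_zero === nothing ? length(m) + 1 : length(m)
    w = zeros(K)
    for i in 1:(K - 1)
        iszero(m[i]) || (w[i] = m[i] - d)   # occupied tables are discounted by d
    end
    w[first_zero === nothing ? K : first_zero] = θ + d * t
    return w ./ sum(w)
end

py_table_probs([3, 1, 0], 0.25, 0.5)   # ≈ [0.611, 0.167, 0.222]
```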
function _logpdf_table(d::PitmanYorProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# sanity check
@assert d.t == sum(!iszero, m)
# construct table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
!iszero(m[i]) && (table[i] = T(log(m[i] - d.d)))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.θ + d.d * d.t)
return table
end
|
[242, 267] |
function _logpdf_table(d::PitmanYorProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# sanity check
@assert d.t == sum(!iszero, m)
# construct table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
!iszero(m[i]) && (table[i] = T(log(m[i] - d.d)))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.θ + d.d * d.t)
return table
end
|
function _logpdf_table(d::PitmanYorProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# sanity check
@assert d.t == sum(!iszero, m)
# construct table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
!iszero(m[i]) && (table[i] = T(log(m[i] - d.d)))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.θ + d.d * d.t)
return table
end
|
_logpdf_table | 242 | 267 | src/stdlib/RandomMeasures.jl |
#FILE: Turing.jl/src/stdlib/distributions.jl
##CHUNK 1
Base.maximum(::FlatPos) = Inf
Base.rand(rng::Random.AbstractRNG, d::FlatPos) = rand(rng) + d.l
function Distributions.logpdf(d::FlatPos, x::Real)
z = float(zero(x))
return x <= d.l ? oftype(z, -Inf) : z
end
# For vec support
function Distributions.loglikelihood(d::FlatPos, x::AbstractVector{<:Real})
lower = d.l
T = float(eltype(x))
return any(xi <= lower for xi in x) ? T(-Inf) : zero(T)
end
"""
BinomialLogit(n, logitp)
The *Binomial distribution* with logit parameterization characterizes the number of
successes in a sequence of independent trials.
##CHUNK 2
It has two parameters: `n`, the number of trials, and `logitp`, the logit of the probability
of success in an individual trial, with the distribution
```math
P(X = k) = {n \\choose k}{(\\text{logistic}(logitp))}^k (1 - \\text{logistic}(logitp))^{n-k}, \\quad \\text{ for } k = 0,1,2, \\ldots, n.
```
See also: [`Binomial`](@ref)
"""
struct BinomialLogit{T<:Real,S<:Real} <: DiscreteUnivariateDistribution
n::Int
logitp::T
logconstant::S
function BinomialLogit{T}(n::Int, logitp::T) where {T}
n >= 0 || error("parameter `n` has to be non-negative")
logconstant = -(log1p(n) + n * StatsFuns.log1pexp(logitp))
return new{T,typeof(logconstant)}(n, logitp, logconstant)
end
end
##CHUNK 3
return k
end
function Distributions.sampler(d::OrderedLogistic)
η, cutpoints = d.η, d.cutpoints
K = length(cutpoints) + 1
# evaluate probability mass function
ps = map(1:K) do k
exp(unsafe_logpdf_ordered_logistic(η, cutpoints, K, k))
end
return sampler(Categorical(ps))
end
# unsafe version without bounds checking
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
##CHUNK 4
end
# unsafe version without bounds checking
function unsafe_logpdf_ordered_logistic(η, cutpoints, K, k::Int)
@inbounds begin
logp = if k == 1
-StatsFuns.log1pexp(η - cutpoints[k])
elseif k < K
tmp = StatsFuns.log1pexp(cutpoints[k - 1] - η)
-tmp + StatsFuns.log1mexp(tmp - StatsFuns.log1pexp(cutpoints[k] - η))
else
-StatsFuns.log1pexp(cutpoints[k - 1] - η)
end
end
return logp
end
"""
LogPoisson(logλ)
##CHUNK 5
BinomialLogit(n::Int, logitp::Real) = BinomialLogit{typeof(logitp)}(n, logitp)
Base.minimum(::BinomialLogit) = 0
Base.maximum(d::BinomialLogit) = d.n
function Distributions.logpdf(d::BinomialLogit, k::Real)
n, logitp, logconstant = d.n, d.logitp, d.logconstant
_insupport = insupport(d, k)
_k = _insupport ? round(Int, k) : 0
result = logconstant + _k * logitp - SpecialFunctions.logbeta(n - _k + 1, _k + 1)
return _insupport ? result : oftype(result, -Inf)
end
function Base.rand(rng::Random.AbstractRNG, d::BinomialLogit)
return rand(rng, Binomial(d.n, logistic(d.logitp)))
end
Distributions.sampler(d::BinomialLogit) = sampler(Binomial(d.n, logistic(d.logitp)))
"""
#FILE: Turing.jl/test/stdlib/distributions.jl
##CHUNK 1
end
@testset "distributions functions" begin
d = OrderedLogistic(-2, [-1, 1])
n = 1_000_000
y = rand(rng, d, n)
K = length(d.cutpoints) + 1
p = [mean(==(k), y) for k in 1:K] # empirical probs
pmf = [exp(logpdf(d, k)) for k in 1:K]
@test all(((x, y),) -> abs(x - y) < 0.001, zip(p, pmf))
end
@testset "distribution functions" begin
d = OrderedLogistic(0, [1, 2, 3])
K = length(d.cutpoints) + 1
@test support(d) == 1:K
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
@model function dynamic_model_for_ess()
b ~ Bernoulli()
x_length = b ? 1 : 2
x = Vector{Float64}(undef, x_length)
for i in 1:x_length
x[i] ~ Normal(i, 1.0)
end
end
m = dynamic_model_for_ess()
chain = sample(m, Gibbs(:b => PG(10), :x => ESS()), 2000; discard_initial=100)
means = Dict(:b => 0.5, "x[1]" => 1.0, "x[2]" => 2.0)
stds = Dict(:b => 0.5, "x[1]" => 1.0, "x[2]" => 1.0)
for vn in keys(means)
@test isapprox(mean(skipmissing(chain[:, vn, 1])), means[vn]; atol=0.1)
@test isapprox(std(skipmissing(chain[:, vn, 1])), stds[vn]; atol=0.1)
end
end
@testset "dynamic model with dot tilde" begin
#CURRENT FILE: Turing.jl/src/stdlib/RandomMeasures.jl
##CHUNK 1
end
function _logpdf_table(d::DirichletProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# construct the table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
table[i] = T(log(m[i]))
end
##CHUNK 2
if iszero(m)
table[1] = T(0)
return table
end
# compute logpdf for each occupied table
@inbounds for i in 1:(K - 1)
table[i] = T(log(m[i]))
end
# logpdf for new table
k_new = first_zero === nothing ? K : first_zero
table[k_new] = log(d.α)
return table
end
"""
PitmanYorProcess(d, θ, t)
##CHUNK 3
end
function distribution(d::StickBreakingProcess{<:DirichletProcess})
α = d.rpm.α
return Beta(one(α), α)
end
function distribution(d::SizeBiasedSamplingProcess{<:DirichletProcess})
α = d.rpm.α
return LocationScale(zero(α), d.surplus, Beta(one(α), α))
end
function _logpdf_table(d::DirichletProcess{T}, m::AbstractVector{Int}) where {T<:Real}
# construct the table
first_zero = findfirst(iszero, m)
K = first_zero === nothing ? length(m) + 1 : length(m)
table = fill(T(-Inf), K)
# exit if m is empty or contains only zeros
| 16 | 27 | Turing.jl | 410 |
function ADVI(
samples_per_step::Int=1,
max_iters::Int=1000;
adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff(),
)
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
|
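Constructing `ADVI` now only emits a deprecation warning and forwards to the newer `vi` interface. A hedged sketch of the replacement call, following the pattern in the surrounding test and source chunks (the toy model and iteration count are illustrative, and the exact export path of `q_meanfield_gaussian` may differ between versions):

```julia
using Turing

@model demo() = x ~ Normal(0, 1)
model = demo()

# Old, deprecated style:
#   q = vi(model, ADVI(10, 1000))
# New style: supply an initial variational family and an iteration count.
q0 = q_meanfield_gaussian(model)
_, q_avg, _, _ = vi(model, q0, 1000)
```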
function ADVI(
samples_per_step::Int=1,
max_iters::Int=1000;
adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff(),
)
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
|
[16, 27] |
function ADVI(
samples_per_step::Int=1,
max_iters::Int=1000;
adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff(),
)
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
|
function ADVI(
samples_per_step::Int=1,
max_iters::Int=1000;
adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff(),
)
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
|
ADVI | 16 | 27 | src/variational/deprecated.jl |
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
return Unconstrained
end
"""
externalsampler(sampler::AbstractSampler; adtype=AutoForwardDiff(), unconstrained=true)
Wrap a sampler so it can be used as an inference algorithm.
# Arguments
- `sampler::AbstractSampler`: The sampler to wrap.
# Keyword Arguments
- `adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff()`: The automatic differentiation (AD) backend to use.
- `unconstrained::Bool=true`: Whether the sampler requires unconstrained space.
"""
function externalsampler(
sampler::AbstractSampler; adtype=Turing.DEFAULT_ADTYPE, unconstrained::Bool=true
)
return ExternalSampler(sampler, adtype, Val(unconstrained))
end
#FILE: Turing.jl/test/ad.jl
##CHUNK 1
"""
ADTYPES = [AutoForwardDiff(), AutoReverseDiff(; compile=false)]
if INCLUDE_MOONCAKE
push!(ADTYPES, AutoMooncake(; config=nothing))
end
# Check that ADTypeCheckContext itself works as expected.
@testset "ADTypeCheckContext" begin
@model test_model() = x ~ Normal(0, 1)
tm = test_model()
adtypes = (
AutoForwardDiff(),
AutoReverseDiff(),
# Don't need to test Mooncake as it doesn't use tracer types
)
for actual_adtype in adtypes
sampler = HMC(0.1, 5; adtype=actual_adtype)
for expected_adtype in adtypes
contextualised_tm = DynamicPPL.contextualize(
tm, ADTypeCheckContext(expected_adtype, tm.context)
##CHUNK 2
alg = HMC(0.1, 10; adtype=adtype)
m = DynamicPPL.contextualize(
gdemo_default, ADTypeCheckContext(adtype, gdemo_default.context)
)
# These will error if the adbackend being used is not the one set.
sample(StableRNG(seed), m, alg, 10)
maximum_likelihood(m; adtype=adtype)
maximum_a_posteriori(m; adtype=adtype)
end
end
@testset verbose = true "AD / SamplingContext" begin
# AD tests for gradient-based samplers need to be run with SamplingContext
# because samplers can potentially use this to define custom behaviour in
# the tilde-pipeline and thus change the code executed during model
# evaluation.
@testset "adtype=$adtype" for adtype in ADTYPES
@testset "alg=$alg" for alg in [
HMC(0.1, 10; adtype=adtype),
HMCDA(0.8, 0.75; adtype=adtype),
##CHUNK 3
@testset verbose = true "AD / SamplingContext" begin
# AD tests for gradient-based samplers need to be run with SamplingContext
# because samplers can potentially use this to define custom behaviour in
# the tilde-pipeline and thus change the code executed during model
# evaluation.
@testset "adtype=$adtype" for adtype in ADTYPES
@testset "alg=$alg" for alg in [
HMC(0.1, 10; adtype=adtype),
HMCDA(0.8, 0.75; adtype=adtype),
NUTS(1000, 0.8; adtype=adtype),
SGHMC(; learning_rate=0.02, momentum_decay=0.5, adtype=adtype),
SGLD(; stepsize=PolynomialStepsize(0.25), adtype=adtype),
]
@info "Testing AD for $alg"
@testset "model=$(model.f)" for model in DEMO_MODELS
rng = StableRNG(123)
ctx = DynamicPPL.SamplingContext(rng, DynamicPPL.Sampler(alg))
@test run_ad(model, adtype; context=ctx, test=true, benchmark=false) isa Any
#FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
rng,
make_logdensity(model),
objective,
q,
n_iterations;
show_progress=show_progress,
adtype,
optimizer,
averager,
operator,
#CURRENT FILE: Turing.jl/src/variational/deprecated.jl
##CHUNK 1
import DistributionsAD
export ADVI
Base.@deprecate meanfield(model) q_meanfield_gaussian(model)
struct ADVI{AD}
"Number of samples used to estimate the ELBO in each optimization step."
samples_per_step::Int
"Maximum number of gradient steps."
max_iters::Int
"AD backend used for automatic differentiation."
adtype::AD
end
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
##CHUNK 2
max_iters::Int
"AD backend used for automatic differentiation."
adtype::AD
end
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
##CHUNK 3
function vi(
model::DynamicPPL.Model,
alg::ADVI,
q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal};
kwargs...,
)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
##CHUNK 4
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
function vi(
model::DynamicPPL.Model,
alg::ADVI,
q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal};
kwargs...,
)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
##CHUNK 5
import DistributionsAD
export ADVI
Base.@deprecate meanfield(model) q_meanfield_gaussian(model)
struct ADVI{AD}
"Number of samples used to estimate the ELBO in each optimization step."
samples_per_step::Int
"Maximum number of gradient steps."
| 29 | 42 | Turing.jl | 411 |
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
|
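For reference, the deprecated method above is equivalent to calling the new `vi` with a reparameterisation-gradient ELBO, closed-form entropy, and an identity operator, as its body shows. A hedged sketch of the explicit equivalent (objective and operator names are taken from the surrounding code; the model is illustrative):

```julia
using Turing
using AdvancedVI

@model demo() = x ~ Normal(0, 1)
model = demo()

# What the deprecated vi(model, ADVI(10, 1000)) forwards to, spelled out.
q0 = q_meanfield_gaussian(model)
objective = AdvancedVI.RepGradELBO(10; entropy=AdvancedVI.ClosedFormEntropy())
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q0, 1000; objective, operator)
```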
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
|
[29, 42] |
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
|
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
|
vi | 29 | 42 | src/variational/deprecated.jl |
#FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
- `q_avg`: Variational distribution formed by the averaged iterates according to `averager`.
- `state`: Collection of states used for optimization. This can be used to resume from a past call to `vi`.
- `info`: Information generated during the optimization run.
"""
function vi(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
q,
n_iterations::Int;
objective=AdvancedVI.RepGradELBO(
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
##CHUNK 2
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
rng,
make_logdensity(model),
objective,
q,
n_iterations;
show_progress=show_progress,
adtype,
optimizer,
averager,
operator,
#FILE: Turing.jl/test/variational/advi.jl
##CHUNK 1
@testset "default interface" begin
for q0 in [q_meanfield_gaussian(gdemo_default), q_fullrank_gaussian(gdemo_default)]
_, q, _, _ = vi(gdemo_default, q0, 100; show_progress=Turing.PROGRESS[])
c1 = rand(q, 10)
end
end
@testset "custom interface $name" for (name, objective, operator, optimizer) in [
(
"ADVI with closed-form entropy",
AdvancedVI.RepGradELBO(10),
AdvancedVI.ProximalLocationScaleEntropy(),
AdvancedVI.DoG(),
),
(
"ADVI with proximal entropy",
AdvancedVI.RepGradELBO(10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()),
AdvancedVI.ClipScale(),
AdvancedVI.DoG(),
##CHUNK 2
),
(
"ADVI with proximal entropy",
RepGradELBO(10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()),
AdvancedVI.ClipScale(),
AdvancedVI.DoG(),
),
(
"ADVI with STL entropy",
AdvancedVI.RepGradELBO(10; entropy=AdvancedVI.StickingTheLandingEntropy()),
AdvancedVI.ClipScale(),
AdvancedVI.DoG(),
),
]
rng = StableRNG(0x517e1d9bf89bf94f)
T = 1000
q, q_avg, _, _ = vi(
rng,
gdemo_default,
##CHUNK 3
gdemo_default,
q_meanfield_gaussian(gdemo_default),
T;
objective,
optimizer,
operator,
show_progress=Turing.PROGRESS[],
)
N = 1000
c1 = rand(q_avg, N)
c2 = rand(q, N)
end
@testset "inference $name" for (name, objective, operator, optimizer) in [
(
"ADVI with closed-form entropy",
AdvancedVI.RepGradELBO(10),
AdvancedVI.ProximalLocationScaleEntropy(),
AdvancedVI.DoG(),
##CHUNK 4
),
(
"ADVI with STL entropy",
AdvancedVI.RepGradELBO(10; entropy=AdvancedVI.StickingTheLandingEntropy()),
AdvancedVI.ClipScale(),
AdvancedVI.DoG(),
),
]
T = 1000
q, q_avg, _, _ = vi(
gdemo_default,
q_meanfield_gaussian(gdemo_default),
T;
objective,
optimizer,
operator,
show_progress=Turing.PROGRESS[],
)
N = 1000
##CHUNK 5
c1 = rand(q_avg, N)
c2 = rand(q, N)
end
@testset "inference $name" for (name, objective, operator, optimizer) in [
(
"ADVI with closed-form entropy",
AdvancedVI.RepGradELBO(10),
AdvancedVI.ProximalLocationScaleEntropy(),
AdvancedVI.DoG(),
),
(
"ADVI with proximal entropy",
RepGradELBO(10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()),
AdvancedVI.ClipScale(),
AdvancedVI.DoG(),
),
(
"ADVI with STL entropy",
AdvancedVI.RepGradELBO(10; entropy=AdvancedVI.StickingTheLandingEntropy()),
#CURRENT FILE: Turing.jl/src/variational/deprecated.jl
##CHUNK 1
model::DynamicPPL.Model,
alg::ADVI,
q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal};
kwargs...,
)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
##CHUNK 2
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
function vi(
model::DynamicPPL.Model,
alg::ADVI,
q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal};
kwargs...,
)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
##CHUNK 3
max_iters::Int
"AD backend used for automatic differentiation."
adtype::AD
end
function ADVI(
samples_per_step::Int=1,
max_iters::Int=1000;
adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff(),
)
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
function vi(
|
44
| 61
|
Turing.jl
| 412
|
function vi(
model::DynamicPPL.Model,
alg::ADVI,
q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal};
kwargs...,
)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
|
function vi(
model::DynamicPPL.Model,
alg::ADVI,
q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal};
kwargs...,
)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
|
[
44,
61
] |
function vi(
model::DynamicPPL.Model,
alg::ADVI,
q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal};
kwargs...,
)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
|
function vi(
model::DynamicPPL.Model,
alg::ADVI,
q::Bijectors.TransformedDistribution{<:DistributionsAD.TuringDiagMvNormal};
kwargs...,
)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
|
vi
| 44
| 61
|
src/variational/deprecated.jl
|
#FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
- `q_avg`: Variational distribution formed by the averaged iterates according to `averager`.
- `state`: Collection of states used for optimization. This can be used to resume from a past call to `vi`.
- `info`: Information generated during the optimization run.
"""
function vi(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
q,
n_iterations::Int;
objective=AdvancedVI.RepGradELBO(
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
##CHUNK 2
10; entropy = AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool = Turing.PROGRESS[],
optimizer::Optimisers.AbstractRule = AdvancedVI.DoWG(),
averager::AdvancedVI.AbstractAverager = AdvancedVI.PolynomialAveraging(),
operator::AdvancedVI.AbstractOperator = AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType = Turing.DEFAULT_ADTYPE,
kwargs...
)
Approximate the target `model` via variational inference by optimizing `objective`, starting from the initial approximation `q`.
This is a thin wrapper around `AdvancedVI.optimize`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
- `q`: The initial variational approximation.
- `n_iterations`: Number of optimization steps.
# Keyword Arguments
- `objective`: Variational objective to be optimized.
##CHUNK 3
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
rng,
make_logdensity(model),
objective,
q,
n_iterations;
show_progress=show_progress,
adtype,
optimizer,
averager,
operator,
##CHUNK 4
return q_fullrank_gaussian(Random.default_rng(), model; kwargs...)
end
"""
vi(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
q,
n_iterations::Int;
objective::AdvancedVI.AbstractVariationalObjective = AdvancedVI.RepGradELBO(
10; entropy = AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool = Turing.PROGRESS[],
optimizer::Optimisers.AbstractRule = AdvancedVI.DoWG(),
averager::AdvancedVI.AbstractAverager = AdvancedVI.PolynomialAveraging(),
operator::AdvancedVI.AbstractOperator = AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType = Turing.DEFAULT_ADTYPE,
kwargs...
)
##CHUNK 5
- `show_progress`: Whether to show the progress bar.
- `optimizer`: Optimization algorithm.
- `averager`: Parameter averaging strategy.
- `operator`: Operator applied after each optimization step.
- `adtype`: Automatic differentiation backend.
See the docs of `AdvancedVI.optimize` for additional keyword arguments.
# Returns
- `q`: Variational distribution formed by the last iterate of the optimization run.
- `q_avg`: Variational distribution formed by the averaged iterates according to `averager`.
- `state`: Collection of states used for optimization. This can be used to resume from a past call to `vi`.
- `info`: Information generated during the optimization run.
"""
function vi(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
q,
n_iterations::Int;
objective=AdvancedVI.RepGradELBO(
#FILE: Turing.jl/test/variational/advi.jl
##CHUNK 1
),
(
"ADVI with STL entropy",
AdvancedVI.RepGradELBO(10; entropy=AdvancedVI.StickingTheLandingEntropy()),
AdvancedVI.ClipScale(),
AdvancedVI.DoG(),
),
]
T = 1000
q, q_avg, _, _ = vi(
gdemo_default,
q_meanfield_gaussian(gdemo_default),
T;
objective,
optimizer,
operator,
show_progress=Turing.PROGRESS[],
)
N = 1000
##CHUNK 2
@testset "default interface" begin
for q0 in [q_meanfield_gaussian(gdemo_default), q_fullrank_gaussian(gdemo_default)]
_, q, _, _ = vi(gdemo_default, q0, 100; show_progress=Turing.PROGRESS[])
c1 = rand(q, 10)
end
end
@testset "custom interface $name" for (name, objective, operator, optimizer) in [
(
"ADVI with closed-form entropy",
AdvancedVI.RepGradELBO(10),
AdvancedVI.ProximalLocationScaleEntropy(),
AdvancedVI.DoG(),
),
(
"ADVI with proximal entropy",
AdvancedVI.RepGradELBO(10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()),
AdvancedVI.ClipScale(),
AdvancedVI.DoG(),
#CURRENT FILE: Turing.jl/src/variational/deprecated.jl
##CHUNK 1
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
return q_avg
end
##CHUNK 2
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
##CHUNK 3
max_iters::Int
"AD backend used for automatic differentiation."
adtype::AD
end
function ADVI(
samples_per_step::Int=1,
max_iters::Int=1000;
adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff(),
)
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
|
130
| 170
|
Turing.jl
| 413
|
function q_locationscale(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular}=nothing,
meanfield::Bool=true,
basedist::Distributions.UnivariateDistribution=Normal(),
kwargs...,
)
varinfo = DynamicPPL.VarInfo(model)
# Use linked `varinfo` to determine the correct number of parameters.
# TODO: Replace with `length` once this is implemented for `VarInfo`.
varinfo_linked = DynamicPPL.link(varinfo, model)
num_params = length(varinfo_linked[:])
μ = if isnothing(location)
zeros(num_params)
else
        @assert length(location) == num_params "Length of the provided location vector, $(length(location)), does not match the dimension of the target distribution, $(num_params)."
location
end
L = if isnothing(scale)
if meanfield
q_initialize_scale(rng, model, μ, Diagonal(ones(num_params)), basedist; kwargs...)
else
L0 = LowerTriangular(Matrix{Float64}(I, num_params, num_params))
q_initialize_scale(rng, model, μ, L0, basedist; kwargs...)
end
else
        @assert size(scale) == (num_params, num_params) "Dimensions of the provided scale matrix, $(size(scale)), do not match the dimension of the target distribution, $(num_params)."
if meanfield
Diagonal(diag(scale))
else
LowerTriangular(Matrix(scale))
end
end
q = AdvancedVI.MvLocationScale(μ, L, basedist)
b = Bijectors.bijector(model; varinfo=varinfo)
return Bijectors.transformed(q, Bijectors.inverse(b))
end
|
function q_locationscale(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular}=nothing,
meanfield::Bool=true,
basedist::Distributions.UnivariateDistribution=Normal(),
kwargs...,
)
varinfo = DynamicPPL.VarInfo(model)
# Use linked `varinfo` to determine the correct number of parameters.
# TODO: Replace with `length` once this is implemented for `VarInfo`.
varinfo_linked = DynamicPPL.link(varinfo, model)
num_params = length(varinfo_linked[:])
μ = if isnothing(location)
zeros(num_params)
else
        @assert length(location) == num_params "Length of the provided location vector, $(length(location)), does not match the dimension of the target distribution, $(num_params)."
location
end
L = if isnothing(scale)
if meanfield
q_initialize_scale(rng, model, μ, Diagonal(ones(num_params)), basedist; kwargs...)
else
L0 = LowerTriangular(Matrix{Float64}(I, num_params, num_params))
q_initialize_scale(rng, model, μ, L0, basedist; kwargs...)
end
else
        @assert size(scale) == (num_params, num_params) "Dimensions of the provided scale matrix, $(size(scale)), do not match the dimension of the target distribution, $(num_params)."
if meanfield
Diagonal(diag(scale))
else
LowerTriangular(Matrix(scale))
end
end
q = AdvancedVI.MvLocationScale(μ, L, basedist)
b = Bijectors.bijector(model; varinfo=varinfo)
return Bijectors.transformed(q, Bijectors.inverse(b))
end
|
[
130,
170
] |
function q_locationscale(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular}=nothing,
meanfield::Bool=true,
basedist::Distributions.UnivariateDistribution=Normal(),
kwargs...,
)
varinfo = DynamicPPL.VarInfo(model)
# Use linked `varinfo` to determine the correct number of parameters.
# TODO: Replace with `length` once this is implemented for `VarInfo`.
varinfo_linked = DynamicPPL.link(varinfo, model)
num_params = length(varinfo_linked[:])
μ = if isnothing(location)
zeros(num_params)
else
        @assert length(location) == num_params "Length of the provided location vector, $(length(location)), does not match the dimension of the target distribution, $(num_params)."
location
end
L = if isnothing(scale)
if meanfield
q_initialize_scale(rng, model, μ, Diagonal(ones(num_params)), basedist; kwargs...)
else
L0 = LowerTriangular(Matrix{Float64}(I, num_params, num_params))
q_initialize_scale(rng, model, μ, L0, basedist; kwargs...)
end
else
        @assert size(scale) == (num_params, num_params) "Dimensions of the provided scale matrix, $(size(scale)), do not match the dimension of the target distribution, $(num_params)."
if meanfield
Diagonal(diag(scale))
else
LowerTriangular(Matrix(scale))
end
end
q = AdvancedVI.MvLocationScale(μ, L, basedist)
b = Bijectors.bijector(model; varinfo=varinfo)
return Bijectors.transformed(q, Bijectors.inverse(b))
end
|
function q_locationscale(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular}=nothing,
meanfield::Bool=true,
basedist::Distributions.UnivariateDistribution=Normal(),
kwargs...,
)
varinfo = DynamicPPL.VarInfo(model)
# Use linked `varinfo` to determine the correct number of parameters.
# TODO: Replace with `length` once this is implemented for `VarInfo`.
varinfo_linked = DynamicPPL.link(varinfo, model)
num_params = length(varinfo_linked[:])
μ = if isnothing(location)
zeros(num_params)
else
        @assert length(location) == num_params "Length of the provided location vector, $(length(location)), does not match the dimension of the target distribution, $(num_params)."
location
end
L = if isnothing(scale)
if meanfield
q_initialize_scale(rng, model, μ, Diagonal(ones(num_params)), basedist; kwargs...)
else
L0 = LowerTriangular(Matrix{Float64}(I, num_params, num_params))
q_initialize_scale(rng, model, μ, L0, basedist; kwargs...)
end
else
        @assert size(scale) == (num_params, num_params) "Dimensions of the provided scale matrix, $(size(scale)), do not match the dimension of the target distribution, $(num_params)."
if meanfield
Diagonal(diag(scale))
else
LowerTriangular(Matrix(scale))
end
end
q = AdvancedVI.MvLocationScale(μ, L, basedist)
b = Bijectors.bijector(model; varinfo=varinfo)
return Bijectors.transformed(q, Bijectors.inverse(b))
end
|
q_locationscale
| 130
| 170
|
src/variational/VariationalInference.jl
|
#CURRENT FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
function q_initialize_scale(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
location::AbstractVector,
scale::AbstractMatrix,
basedist::Distributions.UnivariateDistribution;
num_samples::Int=10,
num_max_trials::Int=10,
reduce_factor::Real=one(eltype(scale)) / 2,
)
prob = make_logdensity(model)
ℓπ = Base.Fix1(LogDensityProblems.logdensity, prob)
varinfo = DynamicPPL.VarInfo(model)
n_trial = 0
while true
q = AdvancedVI.MvLocationScale(location, scale, basedist)
b = Bijectors.bijector(model; varinfo=varinfo)
q_trans = Bijectors.transformed(q, Bijectors.inverse(b))
energy = mean(ℓπ, eachcol(rand(rng, q_trans, num_samples)))
##CHUNK 2
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
- `meanfield`: Whether to use the mean-field approximation. If `true`, `scale` is converted into a `Diagonal` matrix. Otherwise, it is converted into a `LowerTriangular` matrix.
- `basedist`: The base distribution of the location-scale family.
The remaining keywords are passed to `q_initialize_scale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_locationscale(model::DynamicPPL.Model; kwargs...)
return q_locationscale(Random.default_rng(), model; kwargs...)
end
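A hedged usage sketch of the constructor documented above follows; `my_model` is a hypothetical `@model` instance, and the full-rank call shown is the same one `q_fullrank_gaussian` forwards to in this file.
```julia
# Hedged sketch; `my_model` is a hypothetical @model instance.
using Turing

q_mf = q_locationscale(my_model)                                      # default: mean-field Gaussian
q_fr = q_locationscale(my_model; meanfield=false, basedist=Normal())  # what q_fullrank_gaussian calls
z = rand(q_fr)   # a single draw, pushed into the support of `my_model` by the bijector
```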
"""
q_meanfield_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:Diagonal} = nothing,
##CHUNK 3
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
The remaining keyword arguments are passed to `q_locationscale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_meanfield_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=true, basedist=Normal(), kwargs...
)
end
function q_meanfield_gaussian(model::DynamicPPL.Model; kwargs...)
##CHUNK 4
The remaining keyword arguments are passed to `q_locationscale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_fullrank_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:LowerTriangular}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=false, basedist=Normal(), kwargs...
)
end
function q_fullrank_gaussian(model::DynamicPPL.Model; kwargs...)
return q_fullrank_gaussian(Random.default_rng(), model; kwargs...)
##CHUNK 5
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=true, basedist=Normal(), kwargs...
)
end
function q_meanfield_gaussian(model::DynamicPPL.Model; kwargs...)
return q_meanfield_gaussian(Random.default_rng(), model; kwargs...)
end
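A hedged sketch tying the two convenience constructors above to `vi` (again, `my_model` is a hypothetical placeholder; the return order follows the `vi` docstring elsewhere in this file):
```julia
# Hedged sketch; `my_model` is a hypothetical @model instance.
using Turing

q0_mf = q_meanfield_gaussian(my_model)   # Diagonal scale
q0_fr = q_fullrank_gaussian(my_model)    # LowerTriangular scale

# Either initialization can be handed to `vi`.
_, q_avg, _, _ = vi(my_model, q0_fr, 500)
draws = rand(q_avg, 100)                 # 100 draws from the averaged approximation
```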
"""
q_fullrank_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:LowerTriangular} = nothing,
kwargs...
##CHUNK 6
```julia
u = rand(basedist, d)
z = scale * u + location
```
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
- `meanfield`: Whether to use the mean-field approximation. If `true`, `scale` is converted into a `Diagonal` matrix. Otherwise, it is converted into a `LowerTriangular` matrix.
- `basedist`: The base distribution of the location-scale family.
The remaining keywords are passed to `q_initialize_scale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
##CHUNK 7
weight = 1.0
ctx = DynamicPPL.MiniBatchContext(DynamicPPL.DefaultContext(), weight)
return DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model), ctx)
end
"""
q_initialize_scale(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model,
location::AbstractVector,
scale::AbstractMatrix,
basedist::Distributions.UnivariateDistribution;
num_samples::Int = 10,
num_max_trials::Int = 10,
reduce_factor::Real = one(eltype(scale)) / 2
)
Given an initial location-scale distribution `q` formed by `location`, `scale`, and `basedist`, shrink `scale` until the expectation of log-densities of `model` taken over `q` is finite.
If the log-densities are not finite even after `num_max_trials`, throw an error.
##CHUNK 8
scale::AbstractMatrix,
basedist::Distributions.UnivariateDistribution;
num_samples::Int = 10,
num_max_trials::Int = 10,
reduce_factor::Real = one(eltype(scale)) / 2
)
Given an initial location-scale distribution `q` formed by `location`, `scale`, and `basedist`, shrink `scale` until the expectation of log-densities of `model` taken over `q` is finite.
If the log-densities are not finite even after `num_max_trials`, throw an error.
For reference, a location-scale distribution \$q\$ formed by `location`, `scale`, and `basedist` is a distribution where its sampling process \$z \\sim q\$ can be represented as
```julia
u = rand(basedist, d)
z = scale * u + location
```
# Arguments
- `model`: The target `DynamicPPL.Model`.
- `location`: The location parameter of the initialization.
- `scale`: The scale parameter of the initialization.
##CHUNK 9
return q_meanfield_gaussian(Random.default_rng(), model; kwargs...)
end
"""
q_fullrank_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:LowerTriangular} = nothing,
kwargs...
)
Find a numerically non-degenerate Gaussian `q` with a scale with full-rank factors (traditionally referred to as a "full-rank family") for approximating the target `model`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
##CHUNK 10
- `basedist`: The base distribution of the location-scale family.
# Keyword Arguments
- `num_samples`: Number of samples used to compute the average log-density at each trial.
- `num_max_trials`: Number of trials until throwing an error.
- `reduce_factor`: Factor for shrinking the scale. After `n` trials, the scale is then `scale*reduce_factor^n`.
# Returns
- `scale_adj`: The adjusted scale matrix matching the type of `scale`.
"""
function q_initialize_scale(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
location::AbstractVector,
scale::AbstractMatrix,
basedist::Distributions.UnivariateDistribution;
num_samples::Int=10,
num_max_trials::Int=10,
reduce_factor::Real=one(eltype(scale)) / 2,
)
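The docstring above describes the initialization routine: repeatedly multiply `scale` by `reduce_factor` until the average log-density over samples from `q` becomes finite. Below is a small hedged standalone sketch of that idea; it uses a plain log-density function `logp` rather than the package's internal `make_logdensity`, and it is an illustration, not the package's implementation.
```julia
# Hedged standalone sketch of the scale-shrinking idea described above.
using Statistics: mean

function shrink_scale(logp, location, scale;
                      num_samples=10, num_max_trials=10, reduce_factor=0.5)
    for _ in 0:num_max_trials
        # Location-scale sampling: z = location + scale * u with u ~ standard normal.
        energy = mean(logp(location .+ scale * randn(length(location)))
                      for _ in 1:num_samples)
        isfinite(energy) && return scale   # finite average log-density: keep this scale
        scale = reduce_factor * scale      # otherwise shrink and retry
    end
    return error("No finite log-density found after $num_max_trials shrinkage steps")
end

# Usage: a deliberately huge scale for a target supported on the positive orthant.
logp(z) = all(z .> 0) ? -sum(z) : -Inf
shrink_scale(logp, fill(1.0, 2), 100.0 * [1.0 0.0; 0.0 1.0])
```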
|
199
| 209
|
Turing.jl
| 414
|
function q_meanfield_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=true, basedist=Normal(), kwargs...
)
end
|
function q_meanfield_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=true, basedist=Normal(), kwargs...
)
end
|
[
199,
209
] |
function q_meanfield_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=true, basedist=Normal(), kwargs...
)
end
|
function q_meanfield_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=true, basedist=Normal(), kwargs...
)
end
|
q_meanfield_gaussian
| 199
| 209
|
src/variational/VariationalInference.jl
|
#CURRENT FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
function q_locationscale(model::DynamicPPL.Model; kwargs...)
return q_locationscale(Random.default_rng(), model; kwargs...)
end
"""
q_meanfield_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:Diagonal} = nothing,
kwargs...
)
Find a numerically non-degenerate mean-field Gaussian `q` for approximating the target `model`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
##CHUNK 2
return q_meanfield_gaussian(Random.default_rng(), model; kwargs...)
end
"""
q_fullrank_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:LowerTriangular} = nothing,
kwargs...
)
Find a numerically non-degenerate Gaussian `q` with a scale with full-rank factors (traditionally referred to as a "full-rank family") for approximating the target `model`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
##CHUNK 3
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
The remaining keyword arguments are passed to `q_locationscale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_meanfield_gaussian(model::DynamicPPL.Model; kwargs...)
return q_meanfield_gaussian(Random.default_rng(), model; kwargs...)
end
"""
q_fullrank_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:LowerTriangular} = nothing,
kwargs...
##CHUNK 4
The remaining keyword arguments are passed to `q_locationscale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_fullrank_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:LowerTriangular}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=false, basedist=Normal(), kwargs...
)
end
function q_fullrank_gaussian(model::DynamicPPL.Model; kwargs...)
return q_fullrank_gaussian(Random.default_rng(), model; kwargs...)
##CHUNK 5
if meanfield
Diagonal(diag(scale))
else
LowerTriangular(Matrix(scale))
end
end
q = AdvancedVI.MvLocationScale(μ, L, basedist)
b = Bijectors.bijector(model; varinfo=varinfo)
return Bijectors.transformed(q, Bijectors.inverse(b))
end
function q_locationscale(model::DynamicPPL.Model; kwargs...)
return q_locationscale(Random.default_rng(), model; kwargs...)
end
"""
q_meanfield_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
##CHUNK 6
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
- `meanfield`: Whether to use the mean-field approximation. If `true`, `scale` is converted into a `Diagonal` matrix. Otherwise, it is converted into a `LowerTriangular` matrix.
- `basedist`: The base distribution of the location-scale family.
The remaining keywords are passed to `q_initialize_scale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_locationscale(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular}=nothing,
meanfield::Bool=true,
basedist::Distributions.UnivariateDistribution=Normal(),
kwargs...,
)
varinfo = DynamicPPL.VarInfo(model)
# Use linked `varinfo` to determine the correct number of parameters.
##CHUNK 7
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular}=nothing,
meanfield::Bool=true,
basedist::Distributions.UnivariateDistribution=Normal(),
kwargs...,
)
varinfo = DynamicPPL.VarInfo(model)
# Use linked `varinfo` to determine the correct number of parameters.
# TODO: Replace with `length` once this is implemented for `VarInfo`.
varinfo_linked = DynamicPPL.link(varinfo, model)
num_params = length(varinfo_linked[:])
μ = if isnothing(location)
zeros(num_params)
else
@assert length(location) == num_params "Length of the provided location vector, $(length(location)), does not match dimension of the target distribution, $(num_params)."
location
end
##CHUNK 8
scale::Union{Nothing,<:Diagonal} = nothing,
kwargs...
)
Find a numerically non-degenerate mean-field Gaussian `q` for approximating the target `model`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
The remaining keyword arguments are passed to `q_locationscale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_meanfield_gaussian(model::DynamicPPL.Model; kwargs...)
##CHUNK 9
end
"""
q_locationscale(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular} = nothing,
meanfield::Bool = true,
basedist::Distributions.UnivariateDistribution = Normal()
)
Find a numerically non-degenerate variational distribution `q` for approximating the target `model` within the location-scale variational family formed by the type of `scale` and `basedist`.
The distribution can be manually specified by setting `location`, `scale`, and `basedist`.
Otherwise, it chooses a standard Gaussian by default.
Whether the default choice is used or not, the `scale` may be adjusted via `q_initialize_scale` so that the log-densities of `model` are finite over the samples from `q`.
If `meanfield` is set as `true`, the scale of `q` is restricted to be a diagonal matrix and only the diagonal of `scale` is used.
For reference, a location-scale distribution \$q\$ formed by `location`, `scale`, and `basedist` is a distribution where its sampling process \$z \\sim q\$ can be represented as
##CHUNK 10
if isfinite(energy)
return scale
elseif n_trial == num_max_trials
            error("Could not find an initial scale matrix that yields finite log-densities")
end
scale = reduce_factor * scale
n_trial += 1
end
end
"""
q_locationscale(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular} = nothing,
meanfield::Bool = true,
basedist::Distributions.UnivariateDistribution = Normal()
|
238
| 248
|
Turing.jl
| 415
|
function q_fullrank_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:LowerTriangular}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=false, basedist=Normal(), kwargs...
)
end
|
function q_fullrank_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:LowerTriangular}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=false, basedist=Normal(), kwargs...
)
end
|
[
238,
248
] |
function q_fullrank_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:LowerTriangular}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=false, basedist=Normal(), kwargs...
)
end
|
function q_fullrank_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:LowerTriangular}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=false, basedist=Normal(), kwargs...
)
end
|
q_fullrank_gaussian
| 238
| 248
|
src/variational/VariationalInference.jl
|
#CURRENT FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=true, basedist=Normal(), kwargs...
)
end
function q_meanfield_gaussian(model::DynamicPPL.Model; kwargs...)
return q_meanfield_gaussian(Random.default_rng(), model; kwargs...)
end
"""
q_fullrank_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:LowerTriangular} = nothing,
##CHUNK 2
function q_meanfield_gaussian(model::DynamicPPL.Model; kwargs...)
return q_meanfield_gaussian(Random.default_rng(), model; kwargs...)
end
"""
q_fullrank_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:LowerTriangular} = nothing,
kwargs...
)
Find a numerically non-degenerate Gaussian `q` with a scale with full-rank factors (traditionally referred to as a "full-rank family") for approximating the target `model`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
##CHUNK 3
function q_locationscale(model::DynamicPPL.Model; kwargs...)
return q_locationscale(Random.default_rng(), model; kwargs...)
end
"""
q_meanfield_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:Diagonal} = nothing,
kwargs...
)
Find a numerically non-degenerate mean-field Gaussian `q` for approximating the target `model`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
##CHUNK 4
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
The remaining keyword arguments are passed to `q_locationscale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_meanfield_gaussian(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal}=nothing,
kwargs...,
)
return q_locationscale(
rng, model; location, scale, meanfield=true, basedist=Normal(), kwargs...
)
end
##CHUNK 5
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
- `meanfield`: Whether to use the mean-field approximation. If `true`, `scale` is converted into a `Diagonal` matrix. Otherwise, it is converted into a `LowerTriangular` matrix.
- `basedist`: The base distribution of the location-scale family.
The remaining keywords are passed to `q_initialize_scale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_locationscale(
rng::Random.AbstractRNG,
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector}=nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular}=nothing,
meanfield::Bool=true,
basedist::Distributions.UnivariateDistribution=Normal(),
kwargs...,
)
varinfo = DynamicPPL.VarInfo(model)
# Use linked `varinfo` to determine the correct number of parameters.
##CHUNK 6
scale::Union{Nothing,<:Diagonal} = nothing,
kwargs...
)
Find a numerically non-degenerate mean-field Gaussian `q` for approximating the target `model`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
The remaining keyword arguments are passed to `q_locationscale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_meanfield_gaussian(
rng::Random.AbstractRNG,
##CHUNK 7
if meanfield
Diagonal(diag(scale))
else
LowerTriangular(Matrix(scale))
end
end
q = AdvancedVI.MvLocationScale(μ, L, basedist)
b = Bijectors.bijector(model; varinfo=varinfo)
return Bijectors.transformed(q, Bijectors.inverse(b))
end
function q_locationscale(model::DynamicPPL.Model; kwargs...)
return q_locationscale(Random.default_rng(), model; kwargs...)
end
"""
q_meanfield_gaussian(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
##CHUNK 8
kwargs...
)
Find a numerically non-degenerate Gaussian `q` with a scale with full-rank factors (traditionally referred to as a "full-rank family") for approximating the target `model`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
# Keyword Arguments
- `location`: The location parameter of the initialization. If `nothing`, a vector of zeros is used.
- `scale`: The scale parameter of the initialization. If `nothing`, an identity matrix is used.
The remaining keyword arguments are passed to `q_locationscale`.
# Returns
- `q::Bijectors.TransformedDistribution`: An `AdvancedVI.LocationScale` distribution matching the support of `model`.
"""
function q_fullrank_gaussian(model::DynamicPPL.Model; kwargs...)
return q_fullrank_gaussian(Random.default_rng(), model; kwargs...)
##CHUNK 9
end
"""
q_locationscale(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular} = nothing,
meanfield::Bool = true,
basedist::Distributions.UnivariateDistribution = Normal()
)
Find a numerically non-degenerate variational distribution `q` for approximating the target `model` within the location-scale variational family formed by the type of `scale` and `basedist`.
The distribution can be manually specified by setting `location`, `scale`, and `basedist`.
Otherwise, it chooses a standard Gaussian by default.
Whether the default choice is used or not, the `scale` may be adjusted via `q_initialize_scale` so that the log-densities of `model` are finite over the samples from `q`.
If `meanfield` is set as `true`, the scale of `q` is restricted to be a diagonal matrix and only the diagonal of `scale` is used.
For reference, a location-scale distribution \$q\$ formed by `location`, `scale`, and `basedist` is a distribution where its sampling process \$z \\sim q\$ can be represented as
##CHUNK 10
if isfinite(energy)
return scale
elseif n_trial == num_max_trials
            error("Could not find an initial scale matrix that yields finite log-densities")
end
scale = reduce_factor * scale
n_trial += 1
end
end
"""
q_locationscale(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
location::Union{Nothing,<:AbstractVector} = nothing,
scale::Union{Nothing,<:Diagonal,<:LowerTriangular} = nothing,
meanfield::Bool = true,
basedist::Distributions.UnivariateDistribution = Normal()
|
295
| 323
|
Turing.jl
| 416
|
function vi(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
q,
n_iterations::Int;
objective=AdvancedVI.RepGradELBO(
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
rng,
make_logdensity(model),
objective,
q,
n_iterations;
show_progress=show_progress,
adtype,
optimizer,
averager,
operator,
kwargs...,
)
end
|
function vi(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
q,
n_iterations::Int;
objective=AdvancedVI.RepGradELBO(
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
rng,
make_logdensity(model),
objective,
q,
n_iterations;
show_progress=show_progress,
adtype,
optimizer,
averager,
operator,
kwargs...,
)
end
|
[
295,
323
] |
function vi(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
q,
n_iterations::Int;
objective=AdvancedVI.RepGradELBO(
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
rng,
make_logdensity(model),
objective,
q,
n_iterations;
show_progress=show_progress,
adtype,
optimizer,
averager,
operator,
kwargs...,
)
end
|
function vi(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
q,
n_iterations::Int;
objective=AdvancedVI.RepGradELBO(
10; entropy=AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool=PROGRESS[],
optimizer=AdvancedVI.DoWG(),
averager=AdvancedVI.PolynomialAveraging(),
operator=AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType=DEFAULT_ADTYPE,
kwargs...,
)
return AdvancedVI.optimize(
rng,
make_logdensity(model),
objective,
q,
n_iterations;
show_progress=show_progress,
adtype,
optimizer,
averager,
operator,
kwargs...,
)
end
|
vi
| 295
| 323
|
src/variational/VariationalInference.jl
|
#FILE: Turing.jl/test/variational/advi.jl
##CHUNK 1
),
(
"ADVI with STL entropy",
AdvancedVI.RepGradELBO(10; entropy=AdvancedVI.StickingTheLandingEntropy()),
AdvancedVI.ClipScale(),
AdvancedVI.DoG(),
),
]
T = 1000
q, q_avg, _, _ = vi(
gdemo_default,
q_meanfield_gaussian(gdemo_default),
T;
objective,
optimizer,
operator,
show_progress=Turing.PROGRESS[],
)
N = 1000
#FILE: Turing.jl/src/variational/deprecated.jl
##CHUNK 1
Base.depwarn(
"The type ADVI will be removed in future releases. Please refer to the new interface for `vi`",
:ADVI;
force=true,
)
return ADVI{typeof(adtype)}(samples_per_step, max_iters, adtype)
end
function vi(model::DynamicPPL.Model, alg::ADVI; kwargs...)
Base.depwarn(
"This specialization along with the type `ADVI` will be deprecated in future releases. Please refer to the new interface for `vi`.",
:vi;
force=true,
)
q = q_meanfield_gaussian(Random.default_rng(), model)
objective = AdvancedVI.RepGradELBO(
alg.samples_per_step; entropy=AdvancedVI.ClosedFormEntropy()
)
operator = AdvancedVI.IdentityOperator()
_, q_avg, _, _ = vi(model, q, alg.max_iters; objective, operator, kwargs...)
#FILE: Turing.jl/ext/TuringOptimExt.jl
##CHUNK 1
function Optim.optimize(
model::DynamicPPL.Model,
::Optimisation.MLE,
options::Optim.Options=Optim.Options();
kwargs...,
)
ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
f = Optimisation.OptimLogDensity(model, ctx)
init_vals = DynamicPPL.getparams(f.ldf)
optimizer = Optim.LBFGS()
return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end
function Optim.optimize(
model::DynamicPPL.Model,
::Optimisation.MLE,
init_vals::AbstractArray,
options::Optim.Options=Optim.Options();
kwargs...,
)
optimizer = Optim.LBFGS()
##CHUNK 2
return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end
function Optim.optimize(
model::DynamicPPL.Model,
::Optimisation.MLE,
init_vals::AbstractArray,
options::Optim.Options=Optim.Options();
kwargs...,
)
optimizer = Optim.LBFGS()
return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end
function Optim.optimize(
model::DynamicPPL.Model,
::Optimisation.MLE,
optimizer::Optim.AbstractOptimizer,
options::Optim.Options=Optim.Options();
kwargs...,
)
ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
##CHUNK 3
f = Optimisation.OptimLogDensity(model, ctx)
init_vals = DynamicPPL.getparams(f.ldf)
return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end
function Optim.optimize(
model::DynamicPPL.Model,
::Optimisation.MLE,
init_vals::AbstractArray,
optimizer::Optim.AbstractOptimizer,
options::Optim.Options=Optim.Options();
kwargs...,
)
return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
end
function _mle_optimize(model::DynamicPPL.Model, args...; kwargs...)
ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
return _optimize(Optimisation.OptimLogDensity(model, ctx), args...; kwargs...)
end
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
- `check_model::Bool=true`: If true, the model is checked for errors before
optimisation begins.
- `initial_params::Union{AbstractVector,Nothing}=nothing`: Initial value for the
optimization. Optional, unless non-box constraints are specified. If omitted it is
generated by either sampling from the prior distribution or uniformly from the box
constraints, if any.
- `adtype::AbstractADType=AutoForwardDiff()`: The automatic differentiation type to use.
- Keyword arguments `lb`, `ub`, `cons`, `lcons`, and `ucons` define constraints for the
optimization problem. Please see [`ModeEstimationConstraints`](@ref) for more details.
- Any extra keyword arguments are passed to `Optimization.solve`.
"""
function estimate_mode(
model::DynamicPPL.Model,
estimator::ModeEstimator,
solver=nothing;
check_model::Bool=true,
initial_params=nothing,
adtype=ADTypes.AutoForwardDiff(),
cons=nothing,
lcons=nothing,
#CURRENT FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
10; entropy = AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool = Turing.PROGRESS[],
optimizer::Optimisers.AbstractRule = AdvancedVI.DoWG(),
averager::AdvancedVI.AbstractAverager = AdvancedVI.PolynomialAveraging(),
operator::AdvancedVI.AbstractOperator = AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType = Turing.DEFAULT_ADTYPE,
kwargs...
)
Approximate the target `model` via variational inference by optimizing `objective`, starting from the initial approximation `q`.
This is a thin wrapper around `AdvancedVI.optimize`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
- `q`: The initial variational approximation.
- `n_iterations`: Number of optimization steps.
# Keyword Arguments
- `objective`: Variational objective to be optimized.
##CHUNK 2
return q_fullrank_gaussian(Random.default_rng(), model; kwargs...)
end
"""
vi(
[rng::Random.AbstractRNG,]
model::DynamicPPL.Model;
q,
n_iterations::Int;
objective::AdvancedVI.AbstractVariationalObjective = AdvancedVI.RepGradELBO(
10; entropy = AdvancedVI.ClosedFormEntropyZeroGradient()
),
show_progress::Bool = Turing.PROGRESS[],
optimizer::Optimisers.AbstractRule = AdvancedVI.DoWG(),
averager::AdvancedVI.AbstractAverager = AdvancedVI.PolynomialAveraging(),
operator::AdvancedVI.AbstractOperator = AdvancedVI.ProximalLocationScaleEntropy(),
adtype::ADTypes.AbstractADType = Turing.DEFAULT_ADTYPE,
kwargs...
)
##CHUNK 3
Approximate the target `model` via variational inference by optimizing `objective`, starting from the initial approximation `q`.
This is a thin wrapper around `AdvancedVI.optimize`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
- `q`: The initial variational approximation.
- `n_iterations`: Number of optimization steps.
# Keyword Arguments
- `objective`: Variational objective to be optimized.
- `show_progress`: Whether to show the progress bar.
- `optimizer`: Optimization algorithm.
- `averager`: Parameter averaging strategy.
- `operator`: Operator applied after each optimization step.
- `adtype`: Automatic differentiation backend.
See the docs of `AdvancedVI.optimize` for additional keyword arguments.
# Returns
- `q`: Variational distribution formed by the last iterate of the optimization run.
##CHUNK 4
- `show_progress`: Whether to show the progress bar.
- `optimizer`: Optimization algorithm.
- `averager`: Parameter averaging strategy.
- `operator`: Operator applied after each optimization step.
- `adtype`: Automatic differentiation backend.
See the docs of `AdvancedVI.optimize` for additional keyword arguments.
# Returns
- `q`: Variational distribution formed by the last iterate of the optimization run.
- `q_avg`: Variational distribution formed by the averaged iterates according to `averager`.
- `state`: Collection of states used for optimization. This can be used to resume from a past call to `vi`.
- `info`: Information generated during the optimization run.
"""
function vi(model::DynamicPPL.Model, q, n_iterations::Int; kwargs...)
return vi(Random.default_rng(), model, q, n_iterations; kwargs...)
end
end
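To close the loop on the `vi` docstring above, a minimal hedged end-to-end sketch (`my_model` is a hypothetical `@model` instance; the keyword defaults are the ones listed in the docstring):
```julia
# Hedged end-to-end sketch; `my_model` is a hypothetical @model instance.
using Turing

q0 = q_meanfield_gaussian(my_model)
# Returns the last iterate, the averaged iterate, the optimizer state, and info.
q_last, q_avg, state, info = vi(my_model, q0, 1_000; show_progress=false)

posterior_draws = rand(q_avg, 1_000)   # columns are draws in the support of `my_model`
```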
|