SciMLExpectations.jl
Gradient of chance constraint in tutorial
Opening an issue after asking on the #sciml-bridged Slack channel.
In the tutorial "Optimization Under Uncertainty with DiffEqUncertainty.jl", a chance constraint is introduced and its gradient is evaluated with ForwardDiff, as in the code snippet below. However, if I check the gradient at some input, say ForwardDiff.gradient(𝔼_constraint, [-1.0, 222.0, 50.0]), I get a vector of zeros. Is the tutorial out of date, or is there some other problem?
constraint_obs(sol) = sol[1,end] ≈ constraint[1] ? one(sol[1,end]) : zero(sol[1,end])
function 𝔼_constraint(θ)
    u0 = [θ[1], θ[2], θ[3], 0.0]
    expectation(constraint_obs, prob, u0, p_uncertain, Koopman(), Tsit5(),
                ireltol = 1e-9, iabstol = 1e-9, callback = constraint_cbs)[1]
end

function 𝔼_constraint_nlopt(x, ∇)
    length(∇) > 0 ? ForwardDiff.gradient!(∇, 𝔼_constraint, x) : nothing
    𝔼_constraint(x) - 0.01
end
Full example for convenience. At θ = [-0.05713109086715892, 2.4366742783682063, 49.99835957072762], a finite difference shows a nonzero gradient in θ[1], but ForwardDiff evaluates it to 0.0.
using OrdinaryDiffEq, Distributions
using DiffEqSensitivity, ForwardDiff, DiffEqUncertainty
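# Ball dynamics with state u = [x, ẋ, y, ẏ]: constant horizontal velocity,
# constant gravity p[1] acting on the vertical velocity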
function ball!(du, u, p, t)
    du[1] = u[2]
    du[2] = 0.0
    du[3] = u[4]
    du[4] = -p[1]
end
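# Bounce when the ball hits the ground (y = 0): flip the vertical velocity,
# damped by the coefficient of restitution p[2]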
ground_condition(u,t,integrator) = u[3]
ground_affect!(integrator) = integrator.u[4] = -integrator.p[2] * integrator.u[4]
ground_cb = ContinuousCallback(ground_condition, ground_affect!)
u0 = [0.0,2.0,50.0,0.0]
tspan = (0.0,50.0)
p = [9.807, 0.9]
prob = ODEProblem(ball!,u0,tspan,p)
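# Terminate the solve once the ball reaches x = 25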
stop_condition(u,t,integrator) = u[1] - 25.0
stop_cb = ContinuousCallback(stop_condition, terminate!)
cbs = CallbackSet(ground_cb, stop_cb)
prob = remake(prob, tspan = (0.0, 1500.0))  # rebuild the problem; reassigning tspan alone would not affect prob
sol = solve(prob,Tsit5(),callback=cbs)
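# Uncertain coefficient of restitution: Normal(0.9, 0.02) truncated to [0.84, 1.0]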
cor_dist = truncated(Normal(0.9, 0.02), 0.9-3*0.02, 1.0)
make_u0(θ) = [θ[1],θ[2],θ[3], 0.0]
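# Wall at x = constraint[1] with an opening above y = constraint[2];
# the solve terminates early if the ball hits the wall below the opening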
constraint = [20.0, 25.0]
constraint_condition(u,t,integrator) = u[1] - constraint[1]
constraint_affect!(integrator) = integrator.u[3] < constraint[2] ? terminate!(integrator) : nothing
constraint_cb = ContinuousCallback(constraint_condition, constraint_affect!, save_positions=(true,false))
constraint_cbs = CallbackSet(ground_cb, stop_cb, constraint_cb)
constraint_obs(sol) = sol[1,end] ≈ constraint[1] ? one(sol[1,end]) : zero(sol[1,end])
p_uncertain = [9.807, cor_dist]
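# Chance-constraint value: Koopman expectation of constraint_obs,
# i.e. the probability that a trajectory terminates at the wall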
function E_constraint(θ)
    u0 = make_u0(θ)
    expectation(constraint_obs, prob, u0, p_uncertain, Koopman(), Tsit5(),
                ireltol = 1e-9, iabstol = 1e-9, callback = constraint_cbs)[1]
end
function E_constraint_nlopt(x, ∇)
    length(∇) > 0 ? ForwardDiff.gradient!(∇, E_constraint, x) : nothing
    E_constraint(x) - 0.01
end
θ = [-0.05713109086715892, 2.4366742783682063, 49.99835957072762]
# Central finite difference shows a nonzero gradient in θ[1]
h = 1e-8
f_1 = [-0.05713109086715892 + h, 2.4366742783682063, 49.99835957072762]
f_2 = [-0.05713109086715892 - h, 2.4366742783682063, 49.99835957072762]
finite_diff = (E_constraint(f_1) - E_constraint(f_2)) / (2h)
ad = ForwardDiff.gradient(E_constraint, θ)[1]
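Comparing the two makes the mismatch explicit (the exact finite-difference value depends on h and the integrator tolerances, but it is nonzero, while the AD value is exactly zero):

@show finite_diff  # nonzero
@show ad           # 0.0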