NeuralPDE.jl
Second-order ODE, simply supported beam problem
I am trying to solve the simply supported beam problem with PINNs. The problem can be described by a second-order ODE as follows:

M''(x) = q(x),   M(x=0) = 0,   M(x=L) = 0,   0 <= x <= L

The code below works well, but when I change the unit of length to millimeters, the pde_losses become too large and convergence is not reached. Please help me understand the reason.
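For reference, with the sign convention used in the code below (M''(x) = -q), the exact solution is M(x) = q*x*(L - x)/2, so the midspan moment is q*L^2/8.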
##############################################################################
using NeuralPDE, Lux, ModelingToolkit
using Optimization, OptimizationOptimJL, OptimizationOptimisers
import ModelingToolkit: Interval, infimum, supremum
using Plots

# Define beam parameters
q = 1.0 :: Float64 # Uniform loading, Unit: kN/m or N/mm
L = 6.0 :: Float64 # Span, Unit: m --> if this changes to mm (L = 6000.0), it does not work: the pde_losses are too large

################################################################################
# Using NeuralPDE to solve the simply supported beam problem
################################################################################
@parameters x
@variables M(..)
D2x = Differential(x)^2

# 1D equation for the simply supported beam problem
eq = D2x(M(x)) ~ -q

# Boundary conditions
bcs = [M(0.0) ~ 0.0, M(L) ~ 0.0]

# Space domain
domains = [x ∈ Interval(0.0, L)]

# Neural network
dim = 1  # number of input dimensions
inn = 16 # neurons per hidden layer
chain = Lux.Chain(Dense(dim, inn, Lux.σ),
                  Dense(inn, inn, Lux.σ),
                  Dense(inn, dim))

# Discretization
dx = 0.1 # output grid spacing used for plotting below
strategy = QuasiRandomTraining(200)
discretization = PhysicsInformedNN(chain, strategy)

@named pde_system = PDESystem(eq, bcs, domains, [x], [M(x)])
prob = discretize(pde_system, discretization)
sym_prob = symbolic_discretize(pde_system, discretization)

# Optimizer
opt = OptimizationOptimJL.LBFGS()

# Callback function
pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions

callback = function (p, l)
    println("loss: ", l)
    println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
    println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
    return false
end

res = Optimization.solve(prob, opt, callback = callback, maxiters = 500)
phi = discretization.phi

xs = [infimum(d.domain):dx/10:supremum(d.domain) for d in domains][1]
u_predict = [first(phi(x, res.u)) for x in xs]
x_plot = collect(xs)
plot(x_plot, u_predict, title = "predict")
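To sanity-check the network, one can compare against the closed-form solution; a minimal sketch, assuming the variables from the block above (q, L, xs, u_predict, x_plot) are still in scope:

# Exact solution of M''(x) = -q with M(0) = M(L) = 0
u_exact = [q * x * (L - x) / 2 for x in xs]
println("max abs error: ", maximum(abs.(u_predict .- u_exact)))
plot!(x_plot, u_exact, label = "exact")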
Can you put it all in one code block, like this?
You should nondimensionalize your problem, though. There is nothing strange about it failing without that.
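Concretely, one way to make the problem fully dimensionless (a sketch with hypothetical names ξ and m, not code from this thread): set ξ = x/L and m(ξ) = M(L*ξ)/(q*L^2), so the ODE becomes m''(ξ) = -1 on [0, 1] with m(0) = m(1) = 0, and every quantity the network sees is O(1) whether L is in meters or millimeters:

# Dimensionless form: ξ = x / L, m(ξ) = M(L*ξ) / (q * L^2)  =>  m''(ξ) = -1
@parameters ξ
@variables m(..)
D2ξ = Differential(ξ)^2
eq = D2ξ(m(ξ)) ~ -1.0
bcs = [m(0.0) ~ 0.0, m(1.0) ~ 0.0]
domains = [ξ ∈ Interval(0.0, 1.0)]
# ...discretize and train exactly as in the code above, then undo the scaling:
# M(x) = q * L^2 * m(x / L)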
I have added some variables to make the problem "semi-dimensionless" and checked how good the answer is depending on the "semi-dimensionless" parameter (it is n in the code). You can see that the results depend strongly on this parameter, so a lot of care must be taken when we try to solve something using these (cool) libraries.

The code is here:
using NeuralPDE, Lux, ModelingToolkit
using Optimization, OptimizationOptimJL, OptimizationOptimisers
import ModelingToolkit: Interval, infimum, supremum
using DelimitedFiles
################################################################################
################################################################################
function calculate(q, L_1, L_2; fname = "out", opt = OptimizationOptimJL.LBFGS(), maxiters = 500, small = 1e-4, printi = 50)
    @parameters x
    @variables M(..)
    D2x = Differential(x)^2

    # Rescaled ODE: with x = L_1*ξ, the original M''(x) = -q becomes
    # d²M/dξ² = -L_1^2*q on the shrunken domain [0, L_2]
    eq = D2x(M(x)) ~ -L_1^2*q
    bcs = [M(0.0) ~ 0.0, M(L_2) ~ 0.0]
    domains = [x ∈ Interval(0.0, L_2)]

    dim = 1  # number of input dimensions
    inn = 16 # neurons per hidden layer
    chain = Lux.Chain(Dense(dim, inn, Lux.σ),
                      Dense(inn, inn, Lux.σ),
                      Dense(inn, dim))

    strategy = QuasiRandomTraining(200)
    discretization = PhysicsInformedNN(chain, strategy)

    @named pde_system = PDESystem(eq, bcs, domains, [x], [M(x)])
    prob = discretize(pde_system, discretization)
    sym_prob = symbolic_discretize(pde_system, discretization)

    # Callback function
    pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
    bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions

    cont = [0] # iteration counter (an array so the closure can mutate it)
    callback = function (p, l)
        if mod(cont[1], printi) == 0
            println("\n cont $(cont[1]) out of $(maxiters) ")
            println("loss: ", l)
            println("pde_losses: ", map(l_ -> l_(p), pde_inner_loss_functions))
            println("bcs_losses: ", map(l_ -> l_(p), bcs_inner_loss_functions))
        end
        cont[1] += 1
        if l < small # stop early once the loss is small enough
            return true
        end
        return false
    end

    res = Optimization.solve(prob, opt, callback = callback, maxiters = maxiters)

    # Evaluate the trained network on a grid and write it out next to the exact solution
    dx = 100 # number of evaluation points
    xs = [range(infimum(d.domain), supremum(d.domain), length = dx) for d in domains][1]
    phi = discretization.phi
    u_predict = [first(phi(x, res.u)) for x in xs]
    x_plot = collect(xs)
    u_real = [-(L_1^2*q*x^2)/2 + (q*L_1^2*L_2*x)/2 for x in xs]
    resi = hcat(x_plot, u_predict, u_real)
    writedlm(fname, resi)
    return res, resi
end
function main(q, L, n)
    # "Semi-dimensionless" rescaling: x = L_1*ξ with L_1 = L^n, so the domain
    # shrinks to [0, L_2] with L_2 = L / L_1 = L^(1 - n), while the load term
    # grows to L_1^2*q. The exponent n splits the overall scale L between the
    # equation coefficient and the domain length.
    L_1 = L^(n)
    L_2 = L / L_1
    return calculate(q, L_1, L_2; fname = "out_n_$(n)_L_$(L)")
end
q = 1.0 :: Float64;  # Uniform loading, Unit: kN/m or N/mm
L = 60.0 :: Float64; # Span, Unit: m --> if this changes to mm (L = 6000.0), the pde_losses are too large and it does not converge
n = [0.3, 0.5, 0.7];
for nn in n
    println("\n################\n############### doing n=$(nn)")
    main(q, L, nn)
end
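A quick check on the output files (a sketch using the fname pattern from main above; column 2 is the prediction, column 3 the exact solution): the exact midspan moment is q*L_1^2*L_2^2/8 = q*L^2/8 = 450 for q = 1 and L = 60 regardless of n, so n changes only the x-scaling and, with it, how well the training behaves:

# Compare prediction (column 2) against the exact solution (column 3)
for nn in n
    resi = readdlm("out_n_$(nn)_L_$(L)")
    println("n = $(nn): max exact = ", maximum(resi[:, 3]),
            ", max abs error = ", maximum(abs.(resi[:, 2] .- resi[:, 3])))
end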
I don't know what n means here; maybe this paper can help.
Thanks @gabo-di for the explanation.