
Help solving a complex DAE system with NeuralPDE.

HuynhTran0301 opened this issue 10 months ago · 32 comments

I am trying to solve a DAE system with NeuralPDE. Although the loss function becomes very small and the optimization solver reports success, the results do not match those from ODE/DAE numerical solvers. My code is:

using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, LineSearches
import ModelingToolkit: Interval
using CSV
using DataFrames
data = CSV.File("3gens.csv");
Y1 = CSV.read("Y_des.CSV", DataFrame, types=Complex{Float64}); #decreasing

#Input of the system.
E1 = 1.054;
E2 = 1.050;
E3 = 1.017;

#Define equation of the system
@variables delta1(..) delta2(..) delta3(..) omega1(..) omega2(..) omega3(..) TM1(..) TM2(..) TM3(..) Pe1(..) Pe2(..) Pe3(..) Psv1(..) Psv2(..) Psv3(..)
@parameters t 
D = Differential(t)
omega_s = 120*pi
function eq_norm(omegaN,deltaN,TMN,PeN,PsvN)
    eq1 = [
        D(delta1(t)) ~ (omega1(t) + omegaN - omega_s)/deltaN,
        #
        D(delta2(t)) ~ (omega2(t) + omegaN - omega_s)/deltaN,
        #
        D(delta3(t)) ~ (omega3(t) + omegaN - omega_s)/deltaN,
        #
        D(omega1(t)) ~ (TM1(t) + TMN[1] - Pe1(t) - PeN[1])/(2*data["H"][1])*omega_s,
        #
        D(omega2(t)) ~ (TM2(t) + TMN[2] - Pe2(t) - PeN[2])/(2*data["H"][2])*omega_s,
        #
        D(omega3(t)) ~ (TM3(t) + TMN[3] - Pe3(t) - PeN[3])/(2*data["H"][3])*omega_s,
        #
        0 ~ Pe1(t) + PeN[1] - (E1^2*Y1[1,1].re + E1*E2*sin(deltaN*(delta1(t)-delta2(t)))*Y1[1,2].im + E1*E2*cos(deltaN*(delta1(t)-delta2(t)))*Y1[1,2].re + E1*E3*sin(deltaN*(delta1(t)-delta3(t)))*Y1[1,3].im 
                + E1*E3*cos(deltaN*(delta1(t)-delta3(t)))*Y1[1,3].re),
        #
        0 ~ Pe2(t) + PeN[2] - (E2^2*Y1[2,2].re + E1*E2*sin(deltaN*(delta2(t)-delta1(t)))*Y1[2,1].im + E1*E2*cos(deltaN*(delta2(t)-delta1(t)))*Y1[2,1].re + E2*E3*sin(deltaN*(delta2(t)-delta3(t)))*Y1[2,3].im 
                + E2*E3*cos(deltaN*(delta2(t)-delta3(t)))*Y1[2,3].re),
        #
        0 ~ Pe3(t) + PeN[3] - (E3^2*Y1[3,3].re + E3*E1*sin(deltaN*(delta3(t)-delta1(t)))*Y1[3,1].im + E3*E1*cos(deltaN*(delta3(t)-delta1(t)))*Y1[3,1].re + E3*E2*sin(deltaN*(delta3(t)-delta2(t)))*Y1[3,2].im 
                + E3*E2*cos(deltaN*(delta3(t)-delta2(t)))*Y1[3,2].re)];
    
    eq2 = [D(TM1(t)) ~ (-TM1(t) - TMN[1] + Psv1(t) + PsvN[1]) / data["TCH"][1],
        #
        D(TM2(t)) ~ (-TM2(t) - TMN[2] + Psv2(t) + PsvN[2]) / data["TCH"][2],
        #
        D(TM3(t)) ~ (-TM3(t) - TMN[3] + Psv3(t) + PsvN[3]) / data["TCH"][3]]

    eq3 = [D(Psv1(t)) ~ (-Psv1(t) - PsvN[1] + 0.70945 + 0.33*(-0.0024) - ((omega1(t) + omegaN)/omega_s - 1)/data["RD"][1])/data["TSV"][1],
        #
        D(Psv2(t)) ~ (-Psv2(t) - PsvN[2] + 1.62342 + 0.334*(-0.0024) - ((omega2(t) + omegaN)/omega_s - 1)/data["RD"][2])/data["TSV"][2],
        #
        D(Psv3(t)) ~ (-Psv3(t) - PsvN[3] + 0.84843 + 0.33*(-0.0024) - ((omega3(t) + omegaN)/omega_s - 1)/data["RD"][3])/data["TSV"][3]];
    
    eqs = [eq1; eq2; eq3]
    return eqs
end;

omegaN = 120*pi;
deltaN = 1;
TMN = [0, 1.62342, 0];
PeN = [0, 1.62342, 0];
PsvN = [0, 1.62342, 0];
eqs = eq_norm(omegaN,deltaN,TMN,PeN,PsvN)
bcs = [delta1(0.0) ~ 0.03957/deltaN, delta2(0.0) ~ 0.3447/deltaN, delta3(0.0) ~ 0.23038/deltaN,
    omega1(0.0) ~ omega_s - omegaN, omega2(0.0) ~ omega_s - omegaN, omega3(0.0) ~ omega_s - omegaN,
    TM1(0.0) ~ 0.70945 - TMN[1], TM2(0.0) ~ 1.62342 - TMN[2], TM3(0.0) ~ 0.848433 - TMN[3],
    Pe1(0.0) ~ 0.70945 - PeN[1], Pe2(0.0) ~ 1.62342 - PeN[2], Pe3(0.0) ~ 0.848433 - PeN[3],
    Psv1(0.0) ~ 0.70945 - PsvN[1], Psv2(0.0) ~ 1.62342 - PsvN[2], Psv3(0.0) ~ 0.848433 - PsvN[3]]

domains = [t ∈ Interval(0.0,25.0)]

n = 10
chain = [Lux.Chain(Dense(1,n,Lux.tanh),Dense(n,n,Lux.tanh),Dense(n,n,Lux.tanh),Dense(n,n,Lux.tanh),Dense(n,1)) for _ in 1:15]
dvs = [delta1(t),delta2(t),delta3(t),omega1(t),omega2(t),omega3(t),TM1(t),TM2(t),TM3(t),Pe1(t),Pe2(t),Pe3(t),Psv1(t),Psv2(t),Psv3(t)]

@named pde_system = PDESystem(eqs,bcs,domains,[t],dvs)

strategy = NeuralPDE.QuadratureTraining()
discretization = PhysicsInformedNN(chain, strategy, adaptive_loss = NeuralPDE.GradientScaleAdaptiveLoss(10))

sym_prob = NeuralPDE.symbolic_discretize(pde_system, discretization)

pde_loss_functions = sym_prob.loss_functions.pde_loss_functions
bc_loss_functions = sym_prob.loss_functions.bc_loss_functions

iter = 0
maxiters = 10000
callback = function (p, l)
    global iter
    # Show the loss every 100 iterations.
    if iter % 100 == 0
        println("loss: ", l,"\t\t","Iterations: $iter")
    end
    iter += 1
    return false
end

loss_functions = [pde_loss_functions; bc_loss_functions]

function loss_function(θ, p)
    sum(l -> l(θ), loss_functions)
end

f_ = OptimizationFunction(loss_function, Optimization.AutoZygote())
prob = Optimization.OptimizationProblem(f_, sym_prob.flat_init_params);
phi = sym_prob.phi;
res = Optimization.solve(prob,OptimizationOptimJL.BFGS(linesearch=LineSearches.BackTracking()); callback = callback, maxiters = maxiters)

The solver output is:

* Status: success

 * Candidate solution
    Final objective value:     6.250256e-06

 * Found with
    Algorithm:     BFGS

 * Convergence measures
    |x - x'|               = 0.00e+00 ≤ 0.0e+00
    |x - x'|/|x'|          = 0.00e+00 ≤ 0.0e+00
    |f(x) - f(x')|         = 0.00e+00 ≤ 0.0e+00
    |f(x) - f(x')|/|f(x')| = 0.00e+00 ≤ 0.0e+00
    |g(x)|                 = 3.20e-01 ≰ 1.0e-08

 * Work counters
    Seconds run:   140  (vs limit Inf)
    Iterations:    323
    f(x) calls:    1541
    ∇f(x) calls:   323

The following are the results from NeuralPDE: 8ce2b3f431f3d43b31f2c9a3ed7a747bf10b80c3

And the results from the ODE solver (Tsit5): f489e8a7189a0ca8910f02547ad44c82cb475476
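
For completeness, a minimal sketch of how the numerical reference can be set up with ModelingToolkit and OrdinaryDiffEq; it assumes structural_simplify can eliminate the three algebraic Pe equations so an explicit solver like Tsit5 applies (otherwise a mass-matrix solver such as Rodas5 would be needed):

using OrdinaryDiffEq

@named sys = ODESystem(eqs, t)
simp = structural_simplify(sys)   # should eliminate the algebraic Pe equations
# Initial conditions for the differential states, matching bcs above
# (the Pe variables are determined by the algebraic constraints).
u0 = [delta1(t) => 0.03957/deltaN, delta2(t) => 0.3447/deltaN, delta3(t) => 0.23038/deltaN,
      omega1(t) => omega_s - omegaN, omega2(t) => omega_s - omegaN, omega3(t) => omega_s - omegaN,
      TM1(t) => 0.70945 - TMN[1], TM2(t) => 1.62342 - TMN[2], TM3(t) => 0.848433 - TMN[3],
      Psv1(t) => 0.70945 - PsvN[1], Psv2(t) => 1.62342 - PsvN[2], Psv3(t) => 0.848433 - PsvN[3]]
prob_ref = ODEProblem(simp, u0, (0.0, 25.0))
sol_ref = solve(prob_ref, Tsit5(), saveat = 0.1)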

The CSV files used in the code are attached below: 3gens.csv, Y_des.csv

HuynhTran0301 · Aug 17 '23 13:08