I want to decouple the ODE from which the time-series data is generated from the neural network embedded in an ODE that is trying to learn the structure of this data. In other words, I want to replicate the time-series extrapolation example provided in https://julialang.org/blog/2019/01/fluxdiffeq/, but with a different underlying function, i.e. I am using Lotka–Volterra to generate the data.
My workflow in Julia is the following (note that I am rather new to Julia, but I hope it's clear):
# Training configuration: `train_size` sample points over t ∈ [0, 4].
train_size = 32
tspan_train = (0.0f0,4.00f0)  # Float32 timespan (matches the blog example's style)
# NOTE(review): u0 and p are Float64 while tspan (and Flux's default weights)
# are Float32 — mixed precision; presumably fine, but verify it does not
# degrade gradients/performance in the NeuralODE training below.
u0 = [1.0,1.0]                # initial condition: [prey, predator]
p = [1.5,1.0,3.0,1.0]         # Lotka–Volterra parameters: α, β, δ, γ
"""
    lotka_volterra(du, u, p, t)

In-place Lotka–Volterra vector field. `u = [prey, predator]`,
`p = (α, β, δ, γ)`: prey growth rate, predation rate, predator death
rate, and predator conversion efficiency. Writes derivatives into `du`.
"""
function lotka_volterra(du, u, p, t)
    prey, predator = u[1], u[2]
    a, b, d, g = p[1], p[2], p[3], p[4]
    du[1] = a * prey - b * prey * predator
    # Last expression is the assignment, preserving the original's return value.
    du[2] = g * prey * predator - d * predator
end
# Generate the ground-truth trajectory by solving the Lotka–Volterra ODE
# and saving it at `train_size` evenly spaced times; this is the data the
# neural ODE will be trained against.
t_train = range(tspan_train[1],tspan_train[2],length = train_size)
prob = ODEProblem(lotka_volterra, u0, tspan_train,p)
# Array(...) converts the solution object into a 2×train_size matrix
# (state dimension × save points).
ode_data_train = Array(solve(prob, Tsit5(),saveat=t_train))
"""
    create_neural_ode(solver, tspan, t_saveat)

Build a NeuralODE whose right-hand side is a small 2→50→2 MLP with
`tanh` activation. `solver` is the ODE solver (e.g. `Tsit5()`), `tspan`
the integration span, and `t_saveat` the times at which to save output.
Returns the `NeuralODE` object; its trainable parameters are in `.p`.
"""
function create_neural_ode(solver, tspan, t_saveat)
    dudt = Chain(
        Dense(2, 50, tanh),
        Dense(50, 2))
    # NOTE: the original also computed `ps = Flux.params(dudt)` here but never
    # used it — NeuralODE collects its own flat parameter vector (`n_ode.p`),
    # which is what gets passed to sciml_train. The dead local is removed.
    return NeuralODE(dudt, tspan, solver, saveat = t_saveat, reltol = 1e-7, abstol = 1e-9)
end
# Run the (global) neural ODE forward from the global initial condition `u0`
# with parameter vector `ps`; returns the predicted trajectory.
predict_n_ode(ps) = n_ode(u0, ps)
# Sum-of-squares loss between the neural ODE's prediction and the training
# data; also returns the prediction so the training callback can plot it.
function loss_n_ode(ps)
    prediction = predict_n_ode(ps)
    sse = sum(abs2, ode_data_train .- prediction)
    return sse, prediction
end
# Instantiate the neural ODE and set up containers for the training history.
n_ode = create_neural_ode(Tsit5(), tspan_train, t_train)
final_p = Any[]        # parameter snapshots, one per callback invocation
# Typed element container (was the untyped `[]`, i.e. Vector{Any});
# scalar losses convert to Float64 on push!, and plotting/reductions stay fast.
losses = Float64[]
# Training callback: log progress, record history, and plot the current fit.
cb = function(p, loss, pred)
    display(loss)
    display(p)
    push!(final_p, copy(p))   # copy: the optimizer mutates `p` in place
    push!(losses, loss)
    pl = scatter(t_train, ode_data_train[1,:], label = "data")
    scatter!(pl, t_train, pred[1,:], label = "prediction")
    display(plot(pl))
    # sciml_train treats a `true` return from the callback as "halt training".
    # The original implicitly returned the result of `display(...)`; return
    # `false` explicitly so training always continues to maxiters.
    return false
end
# Train the neural ODE parameters with ADAM.
sol = DiffEqFlux.sciml_train!(loss_n_ode, n_ode.p, ADAM(0.05), cb = cb, maxiters = 100)
# Plot and save training results
# Bug fix: `solver_name` was never defined anywhere in the script, so the
# plot calls below raised UndefVarError. Define it from the solver used above.
solver_name = "Tsit5"
# Bug fix: `x = 1:100` hard-coded maxiters; follow the recorded history so the
# axis stays correct if maxiters changes or the callback fires a different
# number of times.
x = 1:length(losses)
plot_to_save = plot(x, losses, title = solver_name, label = "loss")
plot(x, losses, title = solver_name, label = "loss")
xlabel!("Epochs")
However, I observe that my NN is not learning much: it stagnates, and the loss stays at around 155 with Euler and Tsit5; it behaves a bit better with RK4 (loss ≈ 142).
I would be very thankful if someone could point out whether I'm making an error in my implementation or whether this behaviour is expected.
Increasing `maxiters` to 300 helped achieve better fits, but the training is extremely unstable.