diff --git a/docs/src/optimization_packages/optimization.md b/docs/src/optimization_packages/optimization.md
index e36728b11..ddd3bf062 100644
--- a/docs/src/optimization_packages/optimization.md
+++ b/docs/src/optimization_packages/optimization.md
@@ -4,28 +4,28 @@ There are some solvers that are available in the Optimization.jl package directl
 
 ## Methods
 
-- `LBFGS`: The popular quasi-Newton method that leverages limited memory BFGS approximation of the inverse of the Hessian. Through a wrapper over the [L-BFGS-B](https://users.iems.northwestern.edu/%7Enocedal/lbfgsb.html) fortran routine accessed from the [LBFGSB.jl](https://github.com/Gnimuc/LBFGSB.jl/) package. It directly supports box-constraints.
-  
-  This can also handle arbitrary non-linear constraints through a Augmented Lagrangian method with bounds constraints described in 17.4 of Numerical Optimization by Nocedal and Wright. Thus serving as a general-purpose nonlinear optimization solver available directly in Optimization.jl.
+  - `LBFGS`: The popular quasi-Newton method that uses a limited-memory BFGS approximation of the inverse of the Hessian, through a wrapper over the [L-BFGS-B](https://users.iems.northwestern.edu/%7Enocedal/lbfgsb.html) Fortran routine accessed from the [LBFGSB.jl](https://github.com/Gnimuc/LBFGSB.jl/) package. It directly supports box constraints.
+    
+    It can also handle arbitrary nonlinear constraints through an Augmented Lagrangian method with bound constraints, described in section 17.4 of Numerical Optimization by Nocedal and Wright, thus serving as a general-purpose nonlinear optimization solver available directly in Optimization.jl.
 
-- `Sophia`: Based on the recent paper https://arxiv.org/abs/2305.14342. It incorporates second order information in the form of the diagonal of the Hessian matrix hence avoiding the need to compute the complete hessian. It has been shown to converge faster than other first order methods such as Adam and SGD.
+  - `Sophia`: Based on the recent paper https://arxiv.org/abs/2305.14342. It incorporates second-order information in the form of the diagonal of the Hessian matrix, avoiding the need to compute the complete Hessian. It has been shown to converge faster than first-order methods such as Adam and SGD.
+
+    + `solve(problem, Sophia(; η, βs, ϵ, λ, k, ρ))`
 
-  + `solve(problem, Sophia(; η, βs, ϵ, λ, k, ρ))`
-
-  + `η` is the learning rate
-  + `βs` are the decay of momentums
-  + `ϵ` is the epsilon value
-  + `λ` is the weight decay parameter
-  + `k` is the number of iterations to re-compute the diagonal of the Hessian matrix
-  + `ρ` is the momentum
-  + Defaults:
-    
-    * `η = 0.001`
-    * `βs = (0.9, 0.999)`
-    * `ϵ = 1e-8`
-    * `λ = 0.1`
-    * `k = 10`
-    * `ρ = 0.04`
+    + `η` is the learning rate
+    + `βs` are the momentum decay parameters
+    + `ϵ` is the epsilon value
+    + `λ` is the weight decay parameter
+    + `k` is the number of iterations between recomputations of the Hessian diagonal
+    + `ρ` is the momentum
+    + Defaults:
+      
+      * `η = 0.001`
+      * `βs = (0.9, 0.999)`
+      * `ϵ = 1e-8`
+      * `λ = 0.1`
+      * `k = 10`
+      * `ρ = 0.04`
 
 ## Examples
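Note (editor, not part of the patch): the docs hunk above documents the `solve` signature but never shows a complete call. A minimal sketch of the documented interface follows, assuming the usual Optimization.jl workflow; the Rosenbrock objective, the `AutoZygote()` backend, and the bound values are illustrative choices, not taken from the docs page:

```julia
using Optimization, Zygote

# Classic 2D Rosenbrock test problem; any smooth objective works here.
rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2
optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote())
prob = OptimizationProblem(optf, zeros(2), [1.0, 100.0])

# Sophia, with the documented defaults written out explicitly.
sol = solve(prob,
    Optimization.Sophia(; η = 0.001, βs = (0.9, 0.999), ϵ = 1e-8,
        λ = 0.1, k = 10, ρ = 0.04);
    maxiters = 1000)

# LBFGS picks up box constraints attached to the problem.
prob_box = OptimizationProblem(optf, zeros(2), [1.0, 100.0];
    lb = [-1.0, -1.0], ub = [1.5, 1.5])
sol_box = solve(prob_box, Optimization.LBFGS(); maxiters = 100)
```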
diff --git a/lib/OptimizationNLPModels/Project.toml b/lib/OptimizationNLPModels/Project.toml
index f72e628b4..31bc2a94a 100644
--- a/lib/OptimizationNLPModels/Project.toml
+++ b/lib/OptimizationNLPModels/Project.toml
@@ -8,22 +8,24 @@ ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
 NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
 Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
 Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 
 [compat]
 ADTypes = "1.7"
 NLPModels = "0.21"
 Optimization = "4"
 Reexport = "1.2"
+SparseArrays = "1"
 julia = "1.9"
 
 [extras]
+Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
 NLPModelsTest = "7998695d-6960-4d3a-85c4-e1bceb8cd856"
+OptimizationMOI = "fd9f6733-72f4-499f-8506-86b2bdd0dea1"
 OptimizationOptimJL = "36348300-93cb-4f02-beb5-3c3902f8871e"
 ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
-Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
-OptimizationMOI = "fd9f6733-72f4-499f-8506-86b2bdd0dea1"
 
 [targets]
 test = ["Test", "NLPModelsTest", "OptimizationOptimJL", "ReverseDiff", "Zygote", "Ipopt", "OptimizationMOI"]
diff --git a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
index 65e67be20..a5513c581 100644
--- a/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
+++ b/lib/OptimizationNLPModels/src/OptimizationNLPModels.jl
@@ -1,6 +1,6 @@
 module OptimizationNLPModels
 
-using Reexport
+using Reexport, SparseArrays
 @reexport using NLPModels, Optimization, ADTypes
 
 """
@@ -21,9 +21,24 @@ function SciMLBase.OptimizationFunction(nlpmodel::AbstractNLPModel,
         cons(res, x, p) = NLPModels.cons!(nlpmodel, x, res)
         cons_j(J, x, p) = (J .= NLPModels.jac(nlpmodel, x))
         cons_jvp(Jv, v, x, p) = NLPModels.jprod!(nlpmodel, x, v, Jv)
+        function lag_h(h, θ, σ, λ, p)
+            # Lagrangian Hessian σ∇²f(θ) + Σᵢ λᵢ∇²cᵢ(θ), returned by
+            # NLPModels as a Symmetric wrapper over sparse storage.
+            H = NLPModels.hess(nlpmodel, θ, λ; obj_weight = σ)
+            k = 0
+            rows, cols, _ = findnz(H.data)
+            # Pack the stored entries on or above the diagonal into the
+            # flat buffer `h` expected by consumers of `lag_h`.
+            for (i, j) in zip(rows, cols)
+                if i <= j
+                    k += 1
+                    h[k] = H[i, j]
+                end
+            end
+        end
         return OptimizationFunction(
-            f, adtype; grad, hess, hv, cons, cons_j, cons_jvp, kwargs...)
+            f, adtype; grad, hess, hv, cons, cons_j, cons_jvp, lag_h, kwargs...)
     end
 
     return OptimizationFunction(f, adtype; grad, hess, hv, kwargs...)
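Note (editor): the `lag_h` addition above is what lets MOI-based interior-point solvers such as Ipopt request the Lagrangian Hessian from the NLPModel instead of rebuilding it with AD, which is presumably why `Ipopt` and `OptimizationMOI` join the test targets. A rough sketch of the path being exercised, assuming `ADNLPModels` for model construction and the package's `OptimizationProblem` converter; the model itself is made up for illustration:

```julia
using OptimizationNLPModels, ADNLPModels, OptimizationMOI, Ipopt

# Rosenbrock objective with one inequality constraint x₁² + x₂² ≤ 1,
# expressed as an NLPModel.
nlp = ADNLPModel(x -> (x[1] - 1.0)^2 + 100.0 * (x[2] - x[1]^2)^2,
    [0.5, 0.5],              # initial point
    x -> [x[1]^2 + x[2]^2],  # constraint function
    [-Inf],                  # lower constraint bound
    [1.0])                   # upper constraint bound

# The conversion wires up cons/cons_j/cons_jvp and, with this patch, lag_h.
prob = OptimizationProblem(nlp)
sol = solve(prob, Ipopt.Optimizer())
```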
diff --git a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
index 24de50d31..34a2ae679 100644
--- a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
+++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
@@ -38,13 +38,12 @@ function __map_optimizer_args(cache::OptimizationCache,
         abstol::Union{Number, Nothing} = nothing,
         reltol::Union{Number, Nothing} = nothing,
         kwargs...)
-    mapped_args = (; extended_trace = true, kwargs...)
 
     if !isnothing(abstol)
        mapped_args = (; mapped_args..., f_abstol = abstol)
     end
-
+
     if !isnothing(callback)
         mapped_args = (; mapped_args..., callback = callback)
     end
diff --git a/test/diffeqfluxtests.jl b/test/diffeqfluxtests.jl
index 6ec24e2cd..4a6a170c0 100644
--- a/test/diffeqfluxtests.jl
+++ b/test/diffeqfluxtests.jl
@@ -70,7 +70,8 @@ ode_data = Array(solve(prob_trueode, Tsit5(), saveat = tsteps))
 dudt2 = Lux.Chain(x -> x .^ 3,
     Lux.Dense(2, 50, tanh),
     Lux.Dense(50, 2))
-prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps, abstol = 1e-8, reltol = 1e-8)
+prob_neuralode = NeuralODE(
+    dudt2, tspan, Tsit5(), saveat = tsteps, abstol = 1e-8, reltol = 1e-8)
 
 pp, st = Lux.setup(rng, dudt2)
 pp = ComponentArray(pp)
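Note (editor): for reviewers unfamiliar with `__map_optimizer_args`, the hunk above leans on Julia's incremental NamedTuple-merge idiom, where each optional setting is splatted on top of the previous tuple and later entries win. A standalone illustration of the pattern follows (a simplified stand-in, not the package's actual function):

```julia
# Optional settings layer over whatever the caller passed via kwargs.
function build_args(; abstol = nothing, callback = nothing, kwargs...)
    mapped_args = (; kwargs...)
    if !isnothing(abstol)
        mapped_args = (; mapped_args..., f_abstol = abstol)
    end
    if !isnothing(callback)
        mapped_args = (; mapped_args..., callback = callback)
    end
    return mapped_args
end

build_args(abstol = 1e-8, iterations = 100)
# => (iterations = 100, f_abstol = 1.0e-8)
```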