diff --git a/docs/src/index.md b/docs/src/index.md
index 34f3edd07..9fbabead3 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -88,7 +88,7 @@ to add the specific wrapper packages.
- Second order
- Zeroth order
- Box Constraints
- - Constrained 🟡
+ - Constrained 🟡
- Global Methods
- Zeroth order
- Unconstrained
@@ -126,21 +126,21 @@ to add the specific wrapper packages.
- Zeroth order
- Unconstrained
- Box Constraints
- - Constrained 🟡
+ - Constrained 🟡
NLopt
- Local Methods
- First order
- Zeroth order
- - Second order 🟡
+ - Second order 🟡
- Box Constraints
- - Local Constrained 🟡
+ - Local Constrained 🟡
- Global Methods
- Zeroth order
- First order
- Unconstrained
- - Constrained 🟡
+ - Constrained 🟡
Optim
@@ -158,21 +158,21 @@ to add the specific wrapper packages.
PRIMA
- Local Methods
- - Derivative-Free: ✅
+ - Derivative-Free: ✅
- **Constraints**
- - Box Constraints: ✅
- - Local Constrained: ✅
+ - Box Constraints: ✅
+ - Local Constrained: ✅
QuadDIRECT
- **Constraints**
- - Box Constraints: ✅
+ - Box Constraints: ✅
- Global Methods
- - Unconstrained: ✅
+ - Unconstrained: ✅
```
-🟡 = supported in downstream library but not yet implemented in `Optimization.jl`; PR to add this functionality are welcome
+🟡 = supported in downstream library but not yet implemented in `Optimization.jl`; PRs to add this functionality are welcome
## Citation
diff --git a/docs/src/optimization_packages/optimization.md b/docs/src/optimization_packages/optimization.md
index e36728b11..ddd3bf062 100644
--- a/docs/src/optimization_packages/optimization.md
+++ b/docs/src/optimization_packages/optimization.md
@@ -4,28 +4,28 @@ There are some solvers that are available in the Optimization.jl package directl
## Methods
-- `LBFGS`: The popular quasi-Newton method that leverages limited memory BFGS approximation of the inverse of the Hessian. Through a wrapper over the [L-BFGS-B](https://users.iems.northwestern.edu/%7Enocedal/lbfgsb.html) fortran routine accessed from the [LBFGSB.jl](https://github.com/Gnimuc/LBFGSB.jl/) package. It directly supports box-constraints.
-
- This can also handle arbitrary non-linear constraints through a Augmented Lagrangian method with bounds constraints described in 17.4 of Numerical Optimization by Nocedal and Wright. Thus serving as a general-purpose nonlinear optimization solver available directly in Optimization.jl.
+ - `LBFGS`: The popular quasi-Newton method that leverages a limited-memory BFGS approximation of the inverse of the Hessian, provided through a wrapper over the [L-BFGS-B](https://users.iems.northwestern.edu/%7Enocedal/lbfgsb.html) Fortran routine accessed from the [LBFGSB.jl](https://github.com/Gnimuc/LBFGSB.jl/) package. It directly supports box constraints.
+
+ It can also handle arbitrary nonlinear constraints through an augmented Lagrangian method with bound constraints, as described in section 17.4 of Numerical Optimization by Nocedal and Wright, thus serving as a general-purpose nonlinear optimization solver available directly in Optimization.jl (see the constrained sketch after this list).
-- `Sophia`: Based on the recent paper https://arxiv.org/abs/2305.14342. It incorporates second order information in the form of the diagonal of the Hessian matrix hence avoiding the need to compute the complete hessian. It has been shown to converge faster than other first order methods such as Adam and SGD.
+ - `Sophia`: Based on the recent paper https://arxiv.org/abs/2305.14342. It incorporates second-order information in the form of the diagonal of the Hessian matrix, hence avoiding the need to compute the full Hessian. It has been shown to converge faster than first-order methods such as Adam and SGD (a minimal usage sketch is given after this list).
+
+ + `solve(problem, Sophia(; η, βs, ϵ, λ, k, ρ))`
- + `solve(problem, Sophia(; η, βs, ϵ, λ, k, ρ))`
-
- + `η` is the learning rate
- + `βs` are the decay of momentums
- + `ϵ` is the epsilon value
- + `λ` is the weight decay parameter
- + `k` is the number of iterations to re-compute the diagonal of the Hessian matrix
- + `ρ` is the momentum
- + Defaults:
-
- * `η = 0.001`
- * `βs = (0.9, 0.999)`
- * `ϵ = 1e-8`
- * `λ = 0.1`
- * `k = 10`
- * `ρ = 0.04`
+ + `η` is the learning rate
+ + `βs` are the exponential decay rates of the momentum and Hessian-diagonal estimates
+ + `ϵ` is a small constant for numerical stability
+ + `λ` is the weight decay parameter
+ + `k` is the interval, in iterations, at which the diagonal of the Hessian matrix is re-estimated
+ + `ρ` controls the clipping of the preconditioned update
+ + Defaults:
+
+ * `η = 0.001`
+ * `βs = (0.9, 0.999)`
+ * `ϵ = 1e-8`
+ * `λ = 0.1`
+ * `k = 10`
+ * `ρ = 0.04`
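+
+ For illustration, the constrained capability described for `LBFGS` above can be exercised as follows, mirroring the constrained test in `test/native.jl`; the Rosenbrock objective, the `con2_c` constraint, and the `AutoZygote()` backend are example choices rather than requirements:
+
+ ```julia
+ using Optimization, Zygote
+
+ rosenbrock(x, p = nothing) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2
+ function con2_c(res, x, p)
+     res .= [x[1]^2 + x[2]^2, (x[2] * sin(x[1]) + x[1]) - 5]
+ end
+
+ optf = OptimizationFunction(rosenbrock, AutoZygote(), cons = con2_c)
+ prob = OptimizationProblem(optf, zeros(2), lcons = [1.0, -Inf], ucons = [1.0, 0.0],
+     lb = [-1.0, -1.0], ub = [1.0, 1.0])
+ res = solve(prob, Optimization.LBFGS(), maxiters = 100)
+ ```
+
+ Similarly, a minimal `Sophia` sketch on a smooth unconstrained problem, relying on the default hyperparameters listed above, might look like (the objective and AD backend are again illustrative):
+
+ ```julia
+ using Optimization, Zygote
+
+ rosenbrock(x, p = nothing) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2
+ optf = OptimizationFunction(rosenbrock, AutoZygote())
+ prob = OptimizationProblem(optf, zeros(2))
+ sol = solve(prob, Optimization.Sophia(), maxiters = 1000)
+ ```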
## Examples
diff --git a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
index 24de50d31..34a2ae679 100644
--- a/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
+++ b/lib/OptimizationOptimJL/src/OptimizationOptimJL.jl
@@ -38,13 +38,12 @@ function __map_optimizer_args(cache::OptimizationCache,
abstol::Union{Number, Nothing} = nothing,
reltol::Union{Number, Nothing} = nothing,
kwargs...)
-
mapped_args = (; extended_trace = true, kwargs...)
if !isnothing(abstol)
mapped_args = (; mapped_args..., f_abstol = abstol)
end
-
+
if !isnothing(callback)
mapped_args = (; mapped_args..., callback = callback)
end
diff --git a/src/Optimization.jl b/src/Optimization.jl
index 4cfeead6e..8d0257dd1 100644
--- a/src/Optimization.jl
+++ b/src/Optimization.jl
@@ -24,6 +24,7 @@ include("utils.jl")
include("state.jl")
include("lbfgsb.jl")
include("sophia.jl")
+include("auglag.jl")
export solve
diff --git a/src/auglag.jl b/src/auglag.jl
new file mode 100644
index 000000000..f3a15036d
--- /dev/null
+++ b/src/auglag.jl
@@ -0,0 +1,181 @@
+@kwdef struct AugLag
+ inner::Any # inner optimizer used to solve each bound-constrained subproblem
+ τ = 0.5 # required factor of decrease in constraint violation before the penalty is increased
+ γ = 10.0 # growth factor for the penalty parameter ρ
+ λmin = -1e20 # lower/upper bounds for the equality-constraint multipliers λ
+ λmax = 1e20
+ μmin = 0.0 # lower/upper bounds for the inequality-constraint multipliers μ
+ μmax = 1e20
+ ϵ = 1e-8 # convergence tolerance on the constraint violation
+end
+
+SciMLBase.supports_opt_cache_interface(::AugLag) = true
+SciMLBase.allowsbounds(::AugLag) = true
+SciMLBase.requiresgradient(::AugLag) = true
+SciMLBase.allowsconstraints(::AugLag) = true
+SciMLBase.requiresconsjac(::AugLag) = true
+
+function __map_optimizer_args(cache::Optimization.OptimizationCache, opt::AugLag;
+ callback = nothing,
+ maxiters::Union{Number, Nothing} = nothing,
+ maxtime::Union{Number, Nothing} = nothing,
+ abstol::Union{Number, Nothing} = nothing,
+ reltol::Union{Number, Nothing} = nothing,
+ verbose::Bool = false,
+ kwargs...)
+ if !isnothing(abstol)
+ @warn "common abstol is currently not used by $(opt)"
+ end
+ if !isnothing(maxtime)
+ @warn "common abstol is currently not used by $(opt)"
+ end
+
+ mapped_args = (;)
+
+ if cache.lb !== nothing && cache.ub !== nothing
+ mapped_args = (; mapped_args..., lb = cache.lb, ub = cache.ub)
+ end
+
+ if !isnothing(maxiters)
+ mapped_args = (; mapped_args..., maxiter = maxiters)
+ end
+
+ if !isnothing(reltol)
+ mapped_args = (; mapped_args..., pgtol = reltol)
+ end
+
+ return mapped_args
+end
+
+function SciMLBase.__solve(cache::OptimizationCache{
+ F,
+ RC,
+ LB,
+ UB,
+ LC,
+ UC,
+ S,
+ O,
+ D,
+ P,
+ C
+}) where {
+ F,
+ RC,
+ LB,
+ UB,
+ LC,
+ UC,
+ S,
+ O <:
+ AugLag,
+ D,
+ P,
+ C
+}
+ maxiters = Optimization._check_and_convert_maxiters(cache.solver_args.maxiters)
+
+ local x
+
+ solver_kwargs = __map_optimizer_args(cache, cache.opt; maxiters, cache.solver_args...)
+
+ if !isnothing(cache.f.cons)
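+ # Split the constraints into equality (lcons == ucons) and inequality components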
+ eq_inds = [cache.lcons[i] == cache.ucons[i] for i in eachindex(cache.lcons)]
+ ineq_inds = (!).(eq_inds)
+
+ τ = cache.opt.τ
+ γ = cache.opt.γ
+ λmin = cache.opt.λmin
+ λmax = cache.opt.λmax
+ μmin = cache.opt.μmin
+ μmax = cache.opt.μmax
+ ϵ = cache.opt.ϵ
+
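+ # Lagrange multiplier estimates for the equality (λ) and inequality (μ) constraints, initialized to zero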
+ λ = zeros(eltype(cache.u0), sum(eq_inds))
+ μ = zeros(eltype(cache.u0), sum(ineq_inds))
+
+ cons_tmp = zeros(eltype(cache.u0), length(cache.lcons))
+ cache.f.cons(cons_tmp, cache.u0)
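+ # Initial penalty parameter: scaled ratio of the objective magnitude to the initial constraint violation, clamped to [1e-6, 10]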
+ ρ = max(1e-6,
+ min(10, 2 * (abs(cache.f(cache.u0, iterate(cache.p)[1]))) / norm(cons_tmp)))
+
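+ # Augmented Lagrangian objective: original objective plus multiplier and quadratic penalty terms for the shifted equality and inequality constraints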
+ _loss = function (θ, p = cache.p)
+ x = cache.f(θ, p)
+ cons_tmp .= zero(eltype(θ))
+ cache.f.cons(cons_tmp, θ)
+ cons_tmp[eq_inds] .= cons_tmp[eq_inds] - cache.lcons[eq_inds]
+ cons_tmp[ineq_inds] .= cons_tmp[ineq_inds] .- cache.ucons[ineq_inds]
+ opt_state = Optimization.OptimizationState(u = θ, objective = x[1])
+ if cache.callback(opt_state, x...)
+ error("Optimization halted by callback.")
+ end
+ return x[1] + sum(@. λ * cons_tmp[eq_inds] + ρ / 2 * (cons_tmp[eq_inds] .^ 2)) +
+ 1 / (2 * ρ) * sum((max.(Ref(0.0), μ .+ (ρ .* cons_tmp[ineq_inds]))) .^ 2)
+ end
+
+ prev_eqcons = zero(λ)
+ θ = cache.u0
+ β = max.(cons_tmp[ineq_inds], Ref(0.0))
+ prevβ = zero(β)
+ eqidxs = findall(eq_inds)
+ ineqidxs = findall(ineq_inds)
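+ # Gradient of the augmented Lagrangian: objective gradient plus constraint-Jacobian contributions from the multiplier and penalty terms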
+ function aug_grad(G, θ, p)
+ cache.f.grad(G, θ, p)
+ if !isnothing(cache.f.cons_jac_prototype)
+ J = Float64.(cache.f.cons_jac_prototype)
+ else
+ J = zeros((length(cache.lcons), length(θ)))
+ end
+ cache.f.cons_j(J, θ)
+ __tmp = zero(cons_tmp)
+ cache.f.cons(__tmp, θ)
+ __tmp[eq_inds] .= __tmp[eq_inds] .- cache.lcons[eq_inds]
+ __tmp[ineq_inds] .= __tmp[ineq_inds] .- cache.ucons[ineq_inds]
+ G .+= sum(
+ λ[i] .* J[idx, :] + ρ * (__tmp[idx] .* J[idx, :])
+ for (i, idx) in enumerate(eqidxs);
+ init = zero(G)) #should be jvp
+ G .+= sum(
+ 1 / ρ * (max.(Ref(0.0), μ[i] .+ (ρ .* __tmp[idx])) .* J[idx, :])
+ for (i, idx) in enumerate(ineqidxs);
+ init = zero(G)) #should be jvp
+ end
+
+ opt_ret = ReturnCode.MaxIters
+ n = length(cache.u0)
+
+ augprob = OptimizationProblem(
+ OptimizationFunction(_loss; grad = aug_grad), cache.u0, cache.p)
+
+ solver_kwargs = Base.structdiff(solver_kwargs, (; lb = nothing, ub = nothing))
+
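+ # Outer augmented Lagrangian loop: solve the bound-constrained subproblem with the inner optimizer, then update the multipliers and penalty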
+ for i in 1:(maxiters ÷ 10)
+ prev_eqcons .= cons_tmp[eq_inds] .- cache.lcons[eq_inds]
+ prevβ .= copy(β)
+ res = solve(augprob, cache.opt.inner, maxiters = maxiters ÷ 10)
+ θ = res.u
+ cons_tmp .= 0.0
+ cache.f.cons(cons_tmp, θ)
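+ # First-order multiplier updates, projected onto their bounds; the penalty grows by γ if the violation did not shrink by the factor τ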
+ λ = max.(min.(λmax, λ .+ ρ * (cons_tmp[eq_inds] .- cache.lcons[eq_inds])), λmin)
+ β = max.(cons_tmp[ineq_inds], -1 .* μ ./ ρ)
+ μ = min.(μmax, max.(μ .+ ρ * cons_tmp[ineq_inds], μmin))
+ if max(norm(cons_tmp[eq_inds] .- cache.lcons[eq_inds], Inf), norm(β, Inf)) >
+ τ * max(norm(prev_eqcons, Inf), norm(prevβ, Inf))
+ ρ = γ * ρ
+ end
+ if norm(
+ (cons_tmp[eq_inds] .- cache.lcons[eq_inds]) ./ cons_tmp[eq_inds], Inf) <
+ ϵ && norm(β, Inf) < ϵ
+ opt_ret = ReturnCode.Success
+ break
+ end
+ end
+ stats = Optimization.OptimizationStats(; iterations = maxiters,
+ time = 0.0, fevals = maxiters, gevals = maxiters)
+ return SciMLBase.build_solution(
+ cache, cache.opt, θ, x,
+ stats = stats, retcode = opt_ret)
+ end
+end
diff --git a/test/diffeqfluxtests.jl b/test/diffeqfluxtests.jl
index 6ec24e2cd..4a6a170c0 100644
--- a/test/diffeqfluxtests.jl
+++ b/test/diffeqfluxtests.jl
@@ -70,7 +70,8 @@ ode_data = Array(solve(prob_trueode, Tsit5(), saveat = tsteps))
dudt2 = Lux.Chain(x -> x .^ 3,
Lux.Dense(2, 50, tanh),
Lux.Dense(50, 2))
-prob_neuralode = NeuralODE(dudt2, tspan, Tsit5(), saveat = tsteps, abstol = 1e-8, reltol = 1e-8)
+prob_neuralode = NeuralODE(
+ dudt2, tspan, Tsit5(), saveat = tsteps, abstol = 1e-8, reltol = 1e-8)
pp, st = Lux.setup(rng, dudt2)
pp = ComponentArray(pp)
diff --git a/test/lbfgsb.jl b/test/lbfgsb.jl
deleted file mode 100644
index 2b5ec1691..000000000
--- a/test/lbfgsb.jl
+++ /dev/null
@@ -1,27 +0,0 @@
-using Optimization
-using ForwardDiff, Zygote, ReverseDiff, FiniteDiff
-using Test
-
-x0 = zeros(2)
-rosenbrock(x, p = nothing) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2
-l1 = rosenbrock(x0)
-
-optf = OptimizationFunction(rosenbrock, AutoForwardDiff())
-prob = OptimizationProblem(optf, x0)
-@time res = solve(prob, Optimization.LBFGS(), maxiters = 100)
-@test res.retcode == Optimization.SciMLBase.ReturnCode.Success
-
-prob = OptimizationProblem(optf, x0, lb = [-1.0, -1.0], ub = [1.0, 1.0])
-@time res = solve(prob, Optimization.LBFGS(), maxiters = 100)
-@test res.retcode == Optimization.SciMLBase.ReturnCode.Success
-
-function con2_c(res, x, p)
- res .= [x[1]^2 + x[2]^2, (x[2] * sin(x[1]) + x[1]) - 5]
-end
-
-optf = OptimizationFunction(rosenbrock, AutoZygote(), cons = con2_c)
-prob = OptimizationProblem(optf, x0, lcons = [1.0, -Inf],
- ucons = [1.0, 0.0], lb = [-1.0, -1.0],
- ub = [1.0, 1.0])
-@time res = solve(prob, Optimization.LBFGS(), maxiters = 100)
-@test res.retcode == SciMLBase.ReturnCode.Success
diff --git a/test/native.jl b/test/native.jl
new file mode 100644
index 000000000..1f54f47d0
--- /dev/null
+++ b/test/native.jl
@@ -0,0 +1,63 @@
+using Optimization
+using ForwardDiff, Zygote, ReverseDiff, FiniteDiff
+using Test
+
+x0 = zeros(2)
+rosenbrock(x, p = nothing) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2
+l1 = rosenbrock(x0)
+
+optf = OptimizationFunction(rosenbrock, AutoForwardDiff())
+prob = OptimizationProblem(optf, x0)
+@time res = solve(prob, Optimization.LBFGS(), maxiters = 100)
+@test res.retcode == Optimization.SciMLBase.ReturnCode.Success
+
+prob = OptimizationProblem(optf, x0, lb = [-1.0, -1.0], ub = [1.0, 1.0])
+@time res = solve(prob, Optimization.LBFGS(), maxiters = 100)
+@test res.retcode == Optimization.SciMLBase.ReturnCode.Success
+
+function con2_c(res, x, p)
+ res .= [x[1]^2 + x[2]^2, (x[2] * sin(x[1]) + x[1]) - 5]
+end
+
+optf = OptimizationFunction(rosenbrock, AutoZygote(), cons = con2_c)
+prob = OptimizationProblem(optf, x0, lcons = [1.0, -Inf],
+ ucons = [1.0, 0.0], lb = [-1.0, -1.0],
+ ub = [1.0, 1.0])
+@time res = solve(prob, Optimization.LBFGS(), maxiters = 100)
+@test res.retcode == SciMLBase.ReturnCode.Success
+
+using MLUtils, OptimizationOptimisers
+
+x0 = (-pi):0.001:pi
+y0 = sin.(x0)
+data = MLUtils.DataLoader((x0, y0), batchsize = 100)
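+# Least-squares loss for fitting a degree-4 polynomial (5 coefficients) to the (x, sin(x)) data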
+function loss(coeffs, data)
+ ypred = [evalpoly(data[1][i], coeffs) for i in eachindex(data[1])]
+ return sum(abs2, ypred .- data[2])
+end
+
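+# Nonlinear constraint coupling the first and fifth polynomial coefficients; its bounds are set via lcons/ucons below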
+function cons1(res, coeffs, p = nothing)
+ res[1] = coeffs[1] * coeffs[5] - 1
+ return nothing
+end
+
+optf = OptimizationFunction(loss, AutoSparseForwardDiff(), cons = cons1)
+callback = (st, l) -> (@show l; return false)
+
+initpars = rand(5)
+l0 = optf(initpars, (x0, y0))
+prob = OptimizationProblem(optf, initpars, (x0, y0), lcons = [-Inf], ucons = [0.5],
+ lb = [-10.0, -10.0, -10.0, -10.0, -10.0], ub = [10.0, 10.0, 10.0, 10.0, 10.0])
+opt1 = solve(prob, Optimization.LBFGS(), maxiters = 1000, callback = callback)
+@test opt1.objective < l0
+
+prob = OptimizationProblem(optf, initpars, data, lcons = [-Inf], ucons = [1],
+ lb = [-10.0, -10.0, -10.0, -10.0, -10.0], ub = [10.0, 10.0, 10.0, 10.0, 10.0])
+opt = solve(
+ prob, Optimization.AugLag(; inner = Adam()), maxiters = 10000, callback = callback)
+@test opt.objective < l0
+
+optf1 = OptimizationFunction(loss, AutoSparseForwardDiff())
+prob1 = OptimizationProblem(optf1, rand(5), data)
+sol1 = solve(prob1, OptimizationOptimisers.Adam(), maxiters = 1000, callback = callback)
+@test sol1.objective < l0
diff --git a/test/runtests.jl b/test/runtests.jl
index ba1714ca2..d15543484 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -36,7 +36,7 @@ end
include("AD_performance_regression.jl")
end
@safetestset "Optimization" begin
- include("lbfgsb.jl")
+ include("native.jl")
end
@safetestset "Mini batching" begin
include("minibatch.jl")