diff --git a/.github/workflows/Documentation.yml b/.github/workflows/Documentation.yml index b4fef6fde..4c072c373 100644 --- a/.github/workflows/Documentation.yml +++ b/.github/workflows/Documentation.yml @@ -21,7 +21,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # For authentication with GitHub Actions token DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} # For authentication with SSH deploy key - run: julia --project=docs/ --code-coverage=user docs/make.jl + run: julia --color=yes --project=docs/ --code-coverage=user docs/make.jl - uses: julia-actions/julia-processcoverage@v1 with: directories: src,lib/OptimizationBBO/src,lib/OptimizationCMAEvolutionStrategy/src,lib/OptimizationEvolutionary/src,lib/OptimizationGCMAES/src,lib/OptimizationMOI/src,lib/OptimizationMetaheuristics/src,lib/OptimizationMultistartOptimization/src,lib/OptimizationNLopt/src,lib/OptimizationNOMAD/src,lib/OptimizationOptimJL/src,lib/OptimizationOptimisers/src,lib/OptimizationPolyalgorithms/src,lib/OptimizationQuadDIRECT/src,lib/OptimizationSpeedMapping/src diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml index 70d072fe8..ebd5b0b78 100644 --- a/.github/workflows/Downgrade.yml +++ b/.github/workflows/Downgrade.yml @@ -25,7 +25,7 @@ jobs: version: ${{ matrix.julia-version }} - uses: julia-actions/julia-downgrade-compat@v2 with: - skip: Pkg,TOML + skip: Pkg,TOML,LinearAlgebra,Logging,Printf,Random,SparseArrays,Test - uses: julia-actions/julia-buildpkg@v1 - uses: julia-actions/julia-runtest@v1 with: diff --git a/.github/workflows/DowngradeSublibraries.yml b/.github/workflows/DowngradeSublibraries.yml index e36aad085..25aceaa71 100644 --- a/.github/workflows/DowngradeSublibraries.yml +++ b/.github/workflows/DowngradeSublibraries.yml @@ -44,14 +44,16 @@ jobs: - uses: julia-actions/setup-julia@v2 with: version: ${{ matrix.julia-version }} + - if: ${{ matrix.project == 'lib/OptimizationQuadDIRECT' }} + run: julia --project -e 'using Pkg; Pkg.Registry.add(RegistrySpec(url = "https://github.com/HolyLab/HolyLabRegistry.git"));' - uses: julia-actions/julia-downgrade-compat@v2 with: - project: ${{ matrix.project }} - skip: Pkg,TOML + projects: ${{ matrix.project }} + skip: Pkg,TOML,LinearAlgebra,Logging,Printf,Random,SparseArrays,Test - uses: julia-actions/julia-buildpkg@v1 with: project: ${{ matrix.project }} - uses: julia-actions/julia-runtest@v1 with: project: ${{ matrix.project }} - ALLOW_RERESOLVE: false \ No newline at end of file + ALLOW_RERESOLVE: false diff --git a/Project.toml b/Project.toml index 7bc2875ba..79434da61 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "Optimization" uuid = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -version = "5.0.0" +version = "5.1.0" [deps] ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" @@ -25,7 +25,7 @@ OptimizationOptimJL = {path = "lib/OptimizationOptimJL"} OptimizationOptimisers = {path = "lib/OptimizationOptimisers"} [compat] -ADTypes = "1.2" +ADTypes = "1.14" Aqua = "0.8" ArrayInterface = "7.10" BenchmarkTools = "1" @@ -50,18 +50,18 @@ Mooncake = "0.4.138" Optim = ">= 1.4.1" Optimisers = ">= 0.2.5" OptimizationBase = "4" -OptimizationLBFGSB = "1" -OptimizationMOI = "0.5" -OptimizationOptimJL = "0.4" -OptimizationOptimisers = "0.3" +OptimizationLBFGSB = "1.2" +OptimizationMOI = "0.5.9" +OptimizationOptimJL = "0.4.7" +OptimizationOptimisers = "0.3.14" OrdinaryDiffEqTsit5 = "1" Pkg = "1" Printf = "1.10" Random = "1.10" -Reexport = "1.2" +Reexport = "1.2.2" ReverseDiff = "1" SafeTestsets = "0.1" -SciMLBase = "2.104" +SciMLBase = 
"2.122.1" SciMLSensitivity = "7" SparseArrays = "1.10" Symbolics = "6" diff --git a/README.md b/README.md index 06b4c1882..a54b39d81 100644 --- a/README.md +++ b/README.md @@ -35,16 +35,30 @@ installation of dependencies. Below is the list of packages that need to be installed explicitly if you intend to use the specific optimization algorithms offered by them: + - OptimizationAuglag for augmented Lagrangian methods - OptimizationBBO for [BlackBoxOptim.jl](https://github.com/robertfeldt/BlackBoxOptim.jl) + - OptimizationCMAEvolutionStrategy for [CMAEvolutionStrategy.jl](https://github.com/jbrea/CMAEvolutionStrategy.jl) - OptimizationEvolutionary for [Evolutionary.jl](https://github.com/wildart/Evolutionary.jl) (see also [this documentation](https://wildart.github.io/Evolutionary.jl/dev/)) - OptimizationGCMAES for [GCMAES.jl](https://github.com/AStupidBear/GCMAES.jl) - - OptimizationMOI for [MathOptInterface.jl](https://github.com/jump-dev/MathOptInterface.jl) (usage of algorithm via MathOptInterface API; see also the API [documentation](https://jump.dev/MathOptInterface.jl/stable/)) + - OptimizationIpopt for [Ipopt.jl](https://github.com/jump-dev/Ipopt.jl) + - OptimizationLBFGSB for [LBFGSB.jl](https://github.com/Gnimuc/LBFGSB.jl) + - OptimizationMadNLP for [MadNLP.jl](https://github.com/MadNLP/MadNLP.jl) + - OptimizationManopt for [Manopt.jl](https://github.com/JuliaManifolds/Manopt.jl) (optimization on manifolds) - OptimizationMetaheuristics for [Metaheuristics.jl](https://github.com/jmejia8/Metaheuristics.jl) (see also [this documentation](https://jmejia8.github.io/Metaheuristics.jl/stable/)) + - OptimizationMOI for [MathOptInterface.jl](https://github.com/jump-dev/MathOptInterface.jl) (usage of algorithm via MathOptInterface API; see also the API [documentation](https://jump.dev/MathOptInterface.jl/stable/)) - OptimizationMultistartOptimization for [MultistartOptimization.jl](https://github.com/tpapp/MultistartOptimization.jl) (see also [this documentation](https://juliahub.com/docs/MultistartOptimization/cVZvi/0.1.0/)) - OptimizationNLopt for [NLopt.jl](https://github.com/JuliaOpt/NLopt.jl) (usage via the NLopt API; see also the available [algorithms](https://nlopt.readthedocs.io/en/latest/NLopt_Algorithms/)) + - OptimizationNLPModels for [NLPModels.jl](https://github.com/JuliaSmoothOptimizers/NLPModels.jl) - OptimizationNOMAD for [NOMAD.jl](https://github.com/bbopt/NOMAD.jl) (see also [this documentation](https://bbopt.github.io/NOMAD.jl/stable/)) - - OptimizationNonconvex for [Nonconvex.jl](https://github.com/JuliaNonconvex/Nonconvex.jl) (see also [this documentation](https://julianonconvex.github.io/Nonconvex.jl/stable/)) + - OptimizationODE for optimization of steady-state and time-dependent ODE problems + - OptimizationOptimJL for [Optim.jl](https://github.com/JuliaNLSolvers/Optim.jl) + - OptimizationOptimisers for [Optimisers.jl](https://github.com/FluxML/Optimisers.jl) (machine learning optimizers) + - OptimizationPolyalgorithms for polyalgorithm optimization strategies + - OptimizationPRIMA for [PRIMA.jl](https://github.com/libprima/PRIMA.jl) + - OptimizationPyCMA for Python's CMA-ES implementation via [PythonCall.jl](https://github.com/JuliaPy/PythonCall.jl) - OptimizationQuadDIRECT for [QuadDIRECT.jl](https://github.com/timholy/QuadDIRECT.jl) + - OptimizationSciPy for [SciPy](https://scipy.org/) optimization algorithms via [PythonCall.jl](https://github.com/JuliaPy/PythonCall.jl) + - OptimizationSophia for Sophia optimizer (second-order stochastic optimizer) - 
OptimizationSpeedMapping for [SpeedMapping.jl](https://github.com/NicolasL-S/SpeedMapping.jl) (see also [this documentation](https://nicolasl-s.github.io/SpeedMapping.jl/stable/)) ## Tutorials and Documentation @@ -72,9 +86,6 @@ prob = OptimizationProblem(rosenbrock, x0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0] sol = solve(prob, BBO_adaptive_de_rand_1_bin_radiuslimited()) ``` -Note that Optim.jl is a core dependency of Optimization.jl. However, BlackBoxOptim.jl -is not and must already be installed (see the list above). - *Warning:* The output of the second optimization task (`BBO_adaptive_de_rand_1_bin_radiuslimited()`) is currently misleading in the sense that it returns `Status: failure (reached maximum number of iterations)`. However, convergence is actually reached and the confusing message stems from the reliance on the Optim.jl output diff --git a/docs/Project.toml b/docs/Project.toml index 69af16b62..83d980220 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -19,12 +19,13 @@ NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6" NLPModelsTest = "7998695d-6960-4d3a-85c4-e1bceb8cd856" NLopt = "76087f3c-5699-56af-9a33-bf431cd00edd" Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" -OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" OptimizationBBO = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b" +OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" OptimizationCMAEvolutionStrategy = "bd407f91-200f-4536-9381-e4ba712f53f8" OptimizationEvolutionary = "cb963754-43f6-435e-8d4b-99009ff27753" OptimizationGCMAES = "6f0a0517-dbc2-4a7a-8a20-99ae7f27e911" OptimizationIpopt = "43fad042-7963-4b32-ab19-e2a4f9a67124" +OptimizationLBFGSB = "22f7324a-a79d-40f2-bebe-3af60c77bd15" OptimizationMOI = "fd9f6733-72f4-499f-8506-86b2bdd0dea1" OptimizationManopt = "e57b7fff-7ee7-4550-b4f0-90e9476e9fb6" OptimizationMetaheuristics = "3aafef2f-86ae-4776-b337-85a36adf0b55" @@ -49,12 +50,13 @@ Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" [sources] Optimization = {path = ".."} -OptimizationBase = {path = "../lib/OptimizationBase"} OptimizationBBO = {path = "../lib/OptimizationBBO"} +OptimizationBase = {path = "../lib/OptimizationBase"} OptimizationCMAEvolutionStrategy = {path = "../lib/OptimizationCMAEvolutionStrategy"} OptimizationEvolutionary = {path = "../lib/OptimizationEvolutionary"} OptimizationGCMAES = {path = "../lib/OptimizationGCMAES"} OptimizationIpopt = {path = "../lib/OptimizationIpopt"} +OptimizationLBFGSB = {path = "../lib/OptimizationLBFGSB"} OptimizationMOI = {path = "../lib/OptimizationMOI"} OptimizationManopt = {path = "../lib/OptimizationManopt"} OptimizationMetaheuristics = {path = "../lib/OptimizationMetaheuristics"} @@ -87,8 +89,8 @@ NLPModels = "0.21" NLPModelsTest = "0.10" NLopt = "0.6, 1" Optimization = "5" -OptimizationBase = "4" OptimizationBBO = "0.4" +OptimizationBase = "4" OptimizationCMAEvolutionStrategy = "0.3" OptimizationEvolutionary = "0.4" OptimizationGCMAES = "0.3" diff --git a/docs/src/API/ad.md b/docs/src/API/ad.md index c07254b04..7fc32ebe5 100644 --- a/docs/src/API/ad.md +++ b/docs/src/API/ad.md @@ -7,7 +7,7 @@ The choices for the auto-AD fill-ins with quick descriptions are: - `AutoTracker()`: Like ReverseDiff but GPU-compatible - `AutoZygote()`: The fastest choice for non-mutating array-based (BLAS) functions - `AutoFiniteDiff()`: Finite differencing, not optimal but always applicable - - `AutoModelingToolkit()`: The fastest choice for large scalar optimizations + - `AutoSymbolics()`: The fastest choice for large scalar optimizations - `AutoEnzyme()`: Highly 
performant AD choice for type stable and optimized code - `AutoMooncake()`: Like Zygote and ReverseDiff, but supports GPU and mutating code @@ -21,7 +21,7 @@ OptimizationBase.AutoFiniteDiff OptimizationBase.AutoReverseDiff OptimizationBase.AutoZygote OptimizationBase.AutoTracker -OptimizationBase.AutoModelingToolkit +OptimizationBase.AutoSymbolics OptimizationBase.AutoEnzyme ADTypes.AutoMooncake ``` diff --git a/docs/src/API/modelingtoolkit.md b/docs/src/API/modelingtoolkit.md index 904d42cb8..35293e394 100644 --- a/docs/src/API/modelingtoolkit.md +++ b/docs/src/API/modelingtoolkit.md @@ -7,7 +7,7 @@ optimization of code. Optimizers can better interface with the extra symbolic information provided by the system. There are two ways that the user interacts with ModelingToolkit.jl. -One can use `OptimizationFunction` with `AutoModelingToolkit` for +One can use `OptimizationFunction` with `AutoSymbolics` for automatically transforming numerical codes into symbolic codes. See the [OptimizationFunction documentation](@ref optfunction) for more details. diff --git a/docs/src/examples/rosenbrock.md b/docs/src/examples/rosenbrock.md index 05c4cfba0..09d8d697a 100644 --- a/docs/src/examples/rosenbrock.md +++ b/docs/src/examples/rosenbrock.md @@ -5,7 +5,7 @@ flexibility of Optimization.jl. This is a gauntlet of many solvers to get a feel for common workflows of the package and give copy-pastable starting points. !!! note - + This example uses many different solvers of Optimization.jl. Each solver subpackage needs to be installed separate. For example, for the details on the installation and usage of OptimizationOptimJL.jl package, see the @@ -14,12 +14,12 @@ for common workflows of the package and give copy-pastable starting points. The objective of this exercise is to determine the $(x, y)$ value pair that minimizes the result of a Rosenbrock function $f$ with some parameter values $a$ and $b$. The Rosenbrock function is useful for testing because it is known *a priori* to have a global minimum at $(a, a^2)$. ```math f(x,\,y;\,a,\,b) = \left(a - x\right)^2 + b \left(y - x^2\right)^2 -``` +``` The Optimization.jl interface expects functions to be defined with a vector of optimization arguments $\bar{x}$ and a vector of parameters $\bar{p}$, i.e.: ```math f(\bar{x},\,\bar{p}) = \left(p_1 - x_1\right)^2 + p_2 \left(x_2 - x_1^2\right)^2 -``` +``` Parameters $a$ and $b$ are captured in a vector $\bar{p}$ and assigned some arbitrary values to produce a particular Rosenbrock function to be minimized. 
```math @@ -164,7 +164,7 @@ sol = solve(prob, CMAEvolutionStrategyOpt()) ```@example rosenbrock using OptimizationNLopt, ModelingToolkit -optf = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit()) +optf = OptimizationFunction(rosenbrock, Optimization.AutoSymbolics()) prob = OptimizationProblem(optf, x0, _p) sol = solve(prob, Opt(:LN_BOBYQA, 2)) diff --git a/docs/src/getting_started.md b/docs/src/getting_started.md index 2b85e9a0f..266c61a60 100644 --- a/docs/src/getting_started.md +++ b/docs/src/getting_started.md @@ -14,7 +14,7 @@ The simplest copy-pasteable code using a quasi-Newton method (LBFGS) to solve th ```@example intro # Import the package and define the problem to optimize -using Optimization, Zygote +using Optimization, OptimizationLBFGSB, Zygote rosenbrock(u, p) = (p[1] - u[1])^2 + p[2] * (u[2] - u[1]^2)^2 u0 = zeros(2) p = [1.0, 100.0] @@ -22,7 +22,7 @@ p = [1.0, 100.0] optf = OptimizationFunction(rosenbrock, AutoZygote()) prob = OptimizationProblem(optf, u0, p) -sol = solve(prob, Optimization.LBFGS()) +sol = solve(prob, OptimizationLBFGSB.LBFGS()) ``` ```@example intro @@ -134,7 +134,7 @@ looks like: using ForwardDiff optf = OptimizationFunction(rosenbrock, Optimization.AutoForwardDiff()) prob = OptimizationProblem(optf, u0, p) -sol = solve(prob, BFGS()) +sol = solve(prob, OptimizationOptimJL.BFGS()) ``` We can inspect the `original` to see the statistics on the number of steps @@ -157,7 +157,7 @@ We can demonstrate this via: using Zygote optf = OptimizationFunction(rosenbrock, Optimization.AutoZygote()) prob = OptimizationProblem(optf, u0, p) -sol = solve(prob, BFGS()) +sol = solve(prob, OptimizationOptimJL.BFGS()) ``` ## Setting Box Constraints @@ -170,7 +170,7 @@ optimization with box constraints by rebuilding the OptimizationProblem: ```@example intro prob = OptimizationProblem(optf, u0, p, lb = [-1.0, -1.0], ub = [1.0, 1.0]) -sol = solve(prob, BFGS()) +sol = solve(prob, OptimizationOptimJL.BFGS()) ``` For more information on handling constraints, in particular equality and diff --git a/docs/src/optimization_packages/mathoptinterface.md b/docs/src/optimization_packages/mathoptinterface.md index 241ecbab4..1c26636a6 100644 --- a/docs/src/optimization_packages/mathoptinterface.md +++ b/docs/src/optimization_packages/mathoptinterface.md @@ -20,7 +20,7 @@ the `maxtime` common keyword argument. `OptimizationMOI` supports an argument `mtkize` which takes a boolean (default to `false`) that allows automatic symbolic expression generation, this allows using any AD backend with solvers or interfaces such as AmplNLWriter that require the expression graph of the objective -and constraints. This always happens automatically in the case of the `AutoModelingToolkit` +and constraints. This always happens automatically in the case of the `AutoSymbolics` `adtype`. An optimizer which supports the `MathOptInterface` API can be called @@ -94,7 +94,7 @@ The following shows how to use integer linear programming within `Optimization`. [Juniper documentation](https://github.com/lanl-ansi/Juniper.jl) for more detail. 
- The integer domain is inferred based on the bounds of the variable: - + + Setting the lower bound to zero and the upper bound to one corresponds to `MOI.ZeroOne()` or a binary decision variable + Providing other or no bounds corresponds to `MOI.Integer()` diff --git a/docs/src/optimization_packages/optim.md b/docs/src/optimization_packages/optim.md index 611b539ec..72ac17bd3 100644 --- a/docs/src/optimization_packages/optim.md +++ b/docs/src/optimization_packages/optim.md @@ -340,7 +340,7 @@ using Optimization, OptimizationOptimJL, ModelingToolkit rosenbrock(x, p) = (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2 x0 = zeros(2) p = [1.0, 100.0] -f = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit()) +f = OptimizationFunction(rosenbrock, Optimization.AutoSymbolics()) prob = Optimization.OptimizationProblem(f, x0, p) sol = solve(prob, Optim.Newton()) ``` diff --git a/docs/src/optimization_packages/optimization.md b/docs/src/optimization_packages/optimization.md index 28ef8fc5b..03c3381b2 100644 --- a/docs/src/optimization_packages/optimization.md +++ b/docs/src/optimization_packages/optimization.md @@ -5,7 +5,7 @@ There are some solvers that are available in the Optimization.jl package directl ## Methods - `LBFGS`: The popular quasi-Newton method that leverages limited memory BFGS approximation of the inverse of the Hessian. Through a wrapper over the [L-BFGS-B](https://users.iems.northwestern.edu/%7Enocedal/lbfgsb.html) fortran routine accessed from the [LBFGSB.jl](https://github.com/Gnimuc/LBFGSB.jl/) package. It directly supports box-constraints. - + This can also handle arbitrary non-linear constraints through a Augmented Lagrangian method with bounds constraints described in 17.4 of Numerical Optimization by Nocedal and Wright. Thus serving as a general-purpose nonlinear optimization solver available directly in Optimization.jl. ```@docs @@ -18,7 +18,7 @@ Optimization.Sophia ```@example L-BFGS -using Optimization, Zygote +using Optimization, OptimizationLBFGSB, Zygote rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2 x0 = zeros(2) @@ -26,7 +26,7 @@ p = [1.0, 100.0] optf = OptimizationFunction(rosenbrock, AutoZygote()) prob = Optimization.OptimizationProblem(optf, x0, p) -sol = solve(prob, Optimization.LBFGS()) +sol = solve(prob, LBFGS()) ``` ### With nonlinear and bounds constraints @@ -41,7 +41,7 @@ optf = OptimizationFunction(rosenbrock, AutoZygote(), cons = con2_c) prob = OptimizationProblem(optf, x0, p, lcons = [1.0, -Inf], ucons = [1.0, 0.0], lb = [-1.0, -1.0], ub = [1.0, 1.0]) -res = solve(prob, Optimization.LBFGS(), maxiters = 100) +res = solve(prob, LBFGS(), maxiters = 100) ``` ### Train NN with Sophia diff --git a/docs/src/tutorials/certification.md b/docs/src/tutorials/certification.md index 09132c2f3..133728c21 100644 --- a/docs/src/tutorials/certification.md +++ b/docs/src/tutorials/certification.md @@ -7,7 +7,7 @@ This works with the `structural_analysis` keyword argument to `OptimizationProbl We'll use a simple example to illustrate the convexity structure certification process. 
```@example symanalysis -using SymbolicAnalysis, Zygote, LinearAlgebra, Optimization +using SymbolicAnalysis, Zygote, LinearAlgebra, Optimization, OptimizationLBFGSB function f(x, p = nothing) return exp(x[1]) + x[1]^2 @@ -16,7 +16,7 @@ end optf = OptimizationFunction(f, Optimization.AutoForwardDiff()) prob = OptimizationProblem(optf, [0.4], structural_analysis = true) -sol = solve(prob, Optimization.LBFGS(), maxiters = 1000) +sol = solve(prob, LBFGS(), maxiters = 1000) ``` The result can be accessed as the `analysis_results` field of the solution. diff --git a/docs/src/tutorials/constraints.md b/docs/src/tutorials/constraints.md index b4fa0b166..5510954db 100644 --- a/docs/src/tutorials/constraints.md +++ b/docs/src/tutorials/constraints.md @@ -81,7 +81,7 @@ x_1 * x_2 = 0.5 ``` ```@example constraints -optprob = OptimizationFunction(rosenbrock, Optimization.AutoModelingToolkit(), cons = cons) +optprob = OptimizationFunction(rosenbrock, Optimization.AutoSymbolics(), cons = cons) prob = OptimizationProblem(optprob, x0, _p, lcons = [1.0, 0.5], ucons = [1.0, 0.5]) ``` diff --git a/docs/src/tutorials/linearandinteger.md b/docs/src/tutorials/linearandinteger.md index af3ed4af2..f25dcb756 100644 --- a/docs/src/tutorials/linearandinteger.md +++ b/docs/src/tutorials/linearandinteger.md @@ -91,7 +91,7 @@ objective = (u, p) -> (v = p[1:5]; dot(v, u)) cons = (res, u, p) -> (w = p[6:10]; res .= [sum(w[i] * u[i]^2 for i in 1:5)]) -optf = OptimizationFunction(objective, Optimization.AutoModelingToolkit(), cons = cons) +optf = OptimizationFunction(objective, Optimization.AutoSymbolics(), cons = cons) optprob = OptimizationProblem(optf, zeros(5), vcat(v, w); diff --git a/docs/src/tutorials/remakecomposition.md b/docs/src/tutorials/remakecomposition.md index d074dc51a..edeb79977 100644 --- a/docs/src/tutorials/remakecomposition.md +++ b/docs/src/tutorials/remakecomposition.md @@ -11,7 +11,7 @@ The SciML interface provides a `remake` function which allows you to recreate th Let's look at a 10 dimensional schwefel function in the hypercube $x_i \in [-500, 500]$. ```@example polyalg -using Optimization, Random +using Optimization, OptimizationLBFGSB, Random using OptimizationBBO, ReverseDiff Random.seed!(122333) @@ -24,7 +24,7 @@ function f_schwefel(x, p = [418.9829]) return result end -optf = OptimizationFunction(f_schwefel, Optimization.AutoReverseDiff(compile = true)) +optf = OptimizationFunction(f_schwefel, AutoReverseDiff(compile = true)) x0 = ones(10) .* 200.0 prob = OptimizationProblem( @@ -47,7 +47,7 @@ This is a good start can we converge to the global optimum? 
```@example polyalg prob = remake(prob, u0 = res1.minimizer) -res2 = solve(prob, Optimization.LBFGS(), maxiters = 100) +res2 = solve(prob, LBFGS(), maxiters = 100) @show res2.objective ``` diff --git a/lib/OptimizationAuglag/Project.toml b/lib/OptimizationAuglag/Project.toml index 56dd6d0f9..a50e51181 100644 --- a/lib/OptimizationAuglag/Project.toml +++ b/lib/OptimizationAuglag/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationAuglag" uuid = "2ea93f80-9333-43a1-a68d-1f53b957a421" authors = ["paramthakkar123 "] -version = "1.1.0" +version = "1.2.0" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" diff --git a/lib/OptimizationBBO/Project.toml b/lib/OptimizationBBO/Project.toml index 43db6c72e..c63beeaf5 100644 --- a/lib/OptimizationBBO/Project.toml +++ b/lib/OptimizationBBO/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationBBO" uuid = "3e6eede4-6085-4f62-9a71-46d9bc1eb92b" authors = ["Vaibhav Dixit and contributors"] -version = "0.4.3" +version = "0.4.4" [deps] BlackBoxOptim = "a134a8b2-14d6-55f6-9291-3336d3ab0209" OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" diff --git a/lib/OptimizationBase/Project.toml b/lib/OptimizationBase/Project.toml index bbc5ef39b..957e793be 100644 --- a/lib/OptimizationBase/Project.toml +++ b/lib/OptimizationBase/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationBase" uuid = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" authors = ["Vaibhav Dixit and contributors"] -version = "4.0.0" +version = "4.0.1" [deps] ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b" @@ -40,7 +40,7 @@ OptimizationSymbolicAnalysisExt = "SymbolicAnalysis" OptimizationZygoteExt = "Zygote" [compat] -ADTypes = "1.9" +ADTypes = "1.14" ArrayInterface = "7.6" DifferentiationInterface = "0.7" DocStringExtensions = "0.9" diff --git a/lib/OptimizationBase/ext/OptimizationMTKExt.jl b/lib/OptimizationBase/ext/OptimizationMTKExt.jl index 99194dab3..7538d8c3c 100644 --- a/lib/OptimizationBase/ext/OptimizationMTKExt.jl +++ b/lib/OptimizationBase/ext/OptimizationMTKExt.jl @@ -3,7 +3,7 @@ module OptimizationMTKExt import OptimizationBase, OptimizationBase.ArrayInterface import SciMLBase import SciMLBase: OptimizationFunction -import OptimizationBase.ADTypes: AutoModelingToolkit, AutoSymbolics, AutoSparse +import OptimizationBase.ADTypes: AutoSymbolics, AutoSparse using ModelingToolkit function OptimizationBase.instantiate_function( diff --git a/lib/OptimizationCMAEvolutionStrategy/Project.toml b/lib/OptimizationCMAEvolutionStrategy/Project.toml index 6086d2508..4be63b93d 100644 --- a/lib/OptimizationCMAEvolutionStrategy/Project.toml +++ b/lib/OptimizationCMAEvolutionStrategy/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationCMAEvolutionStrategy" uuid = "bd407f91-200f-4536-9381-e4ba712f53f8" authors = ["Vaibhav Dixit and contributors"] -version = "0.3.3" +version = "0.3.4" [deps] CMAEvolutionStrategy = "8d3b24bd-414e-49e0-94fb-163cc3a3e411" OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" diff --git a/lib/OptimizationEvolutionary/Project.toml b/lib/OptimizationEvolutionary/Project.toml index 996d5430e..9257f9764 100644 --- a/lib/OptimizationEvolutionary/Project.toml +++ b/lib/OptimizationEvolutionary/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationEvolutionary" uuid = "cb963754-43f6-435e-8d4b-99009ff27753" authors = ["Vaibhav Dixit and contributors"] -version = "0.4.3" +version = "0.4.4" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Evolutionary = "86b6b26d-c046-49b6-aa0b-5f0f74682bd6" diff --git a/lib/OptimizationGCMAES/Project.toml 
b/lib/OptimizationGCMAES/Project.toml index 6d600f509..0c5f70c94 100644 --- a/lib/OptimizationGCMAES/Project.toml +++ b/lib/OptimizationGCMAES/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationGCMAES" uuid = "6f0a0517-dbc2-4a7a-8a20-99ae7f27e911" authors = ["Vaibhav Dixit and contributors"] -version = "0.3.2" +version = "0.3.3" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" diff --git a/lib/OptimizationIpopt/Project.toml b/lib/OptimizationIpopt/Project.toml index 1f930e674..c03b7c0ca 100644 --- a/lib/OptimizationIpopt/Project.toml +++ b/lib/OptimizationIpopt/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationIpopt" uuid = "43fad042-7963-4b32-ab19-e2a4f9a67124" authors = ["Sebastian Micluța-Câmpeanu and contributors"] -version = "0.2.3" +version = "0.2.4" [deps] Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" diff --git a/lib/OptimizationLBFGSB/Project.toml b/lib/OptimizationLBFGSB/Project.toml index c232b0f18..7dcbbd3e6 100644 --- a/lib/OptimizationLBFGSB/Project.toml +++ b/lib/OptimizationLBFGSB/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationLBFGSB" uuid = "22f7324a-a79d-40f2-bebe-3af60c77bd15" authors = ["paramthakkar123 "] -version = "1.1.0" +version = "1.2.0" [deps] DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" LBFGSB = "5be7bae1-8223-5378-bac3-9e7378a2f6e6" diff --git a/lib/OptimizationMOI/Project.toml b/lib/OptimizationMOI/Project.toml index 8f4c57b2a..3e32fd297 100644 --- a/lib/OptimizationMOI/Project.toml +++ b/lib/OptimizationMOI/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationMOI" uuid = "fd9f6733-72f4-499f-8506-86b2bdd0dea1" authors = ["Vaibhav Dixit and contributors"] -version = "0.5.8" +version = "0.5.9" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Symbolics = "0c5d862f-8b57-4792-8d23-62f2024744c7" diff --git a/lib/OptimizationMOI/src/moi.jl b/lib/OptimizationMOI/src/moi.jl index 829d3277c..6f66fee6f 100644 --- a/lib/OptimizationMOI/src/moi.jl +++ b/lib/OptimizationMOI/src/moi.jl @@ -16,14 +16,14 @@ function MOIOptimizationCache(prob::OptimizationProblem, opt; kwargs...) f = prob.f reinit_cache = OptimizationBase.ReInitCache(prob.u0, prob.p) if isnothing(f.sys) - if f.adtype isa OptimizationBase.AutoModelingToolkit + if f.adtype isa OptimizationBase.AutoSymbolics num_cons = prob.ucons === nothing ? 0 : length(prob.ucons) f = OptimizationBase.instantiate_function(prob.f, reinit_cache, prob.f.adtype, num_cons) else - throw(ArgumentError("Expected an `OptimizationProblem` that was setup via an `OptimizationSystem`, or AutoModelingToolkit ad choice")) + throw(ArgumentError("Expected an `OptimizationProblem` that was setup via an `OptimizationSystem`, or AutoSymbolics ad choice")) end end @@ -35,16 +35,16 @@ function MOIOptimizationCache(prob::OptimizationProblem, opt; kwargs...) 
cons_expr = Vector{Expr}(undef, length(cons)) Threads.@sync for i in eachindex(cons) Threads.@spawn if prob.lcons[i] == prob.ucons[i] == 0 - cons_expr[i] = Expr(:call, :(==), + cons_expr[i] = Expr(:call, :(==), repl_getindex!(convert_to_expr(f.cons_expr[i], expr_map; - expand_expr = false)), 0) + expand_expr = false)), 0) else # MTK canonicalizes the expression form - cons_expr[i] = Expr(:call, :(<=), + cons_expr[i] = Expr(:call, :(<=), repl_getindex!(convert_to_expr(f.cons_expr[i], expr_map; - expand_expr = false)), 0) + expand_expr = false)), 0) end end diff --git a/lib/OptimizationMOI/test/runtests.jl b/lib/OptimizationMOI/test/runtests.jl index 6c134049e..186fd59f9 100644 --- a/lib/OptimizationMOI/test/runtests.jl +++ b/lib/OptimizationMOI/test/runtests.jl @@ -37,7 +37,7 @@ end _p = [1.0, 100.0] cons_circ = (res, x, p) -> res .= [x[1]^2 + x[2]^2] optprob = OptimizationFunction( - rosenbrock, OptimizationBase.AutoZygote(); + rosenbrock, AutoZygote(); cons = cons_circ) prob = OptimizationProblem(optprob, x0, _p, ucons = [Inf], lcons = [0.0]) evaluator = init(prob, Ipopt.Optimizer()).evaluator @@ -63,7 +63,7 @@ end _p = [1.0, 100.0] l1 = rosenbrock(x0, _p) - optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), OptimizationBase.AutoZygote()) + optprob = OptimizationFunction((x, p) -> -rosenbrock(x, p), AutoZygote()) prob = OptimizationProblem(optprob, x0, _p; sense = OptimizationBase.MaxSense) callback = function (state, l) @@ -79,7 +79,7 @@ end sol = solve!(cache) @test 10 * sol.objective < l1 - optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoZygote()) + optprob = OptimizationFunction(rosenbrock, AutoZygote()) prob = OptimizationProblem(optprob, x0, _p; sense = OptimizationBase.MinSense) opt = Ipopt.Optimizer() @@ -126,7 +126,7 @@ end cons_circ = (res, x, p) -> res .= [x[1]^2 + x[2]^2] optprob = OptimizationFunction( - rosenbrock, OptimizationBase.AutoModelingToolkit(true, true); + rosenbrock, AutoSparse(AutoSymbolics()); cons = cons_circ) prob = OptimizationProblem(optprob, x0, _p, ucons = [Inf], lcons = [0.0]) @@ -141,10 +141,8 @@ end @testset "backends" begin backends = ( - OptimizationBase.AutoModelingToolkit(false, false), - OptimizationBase.AutoModelingToolkit(true, false), - OptimizationBase.AutoModelingToolkit(false, true), - OptimizationBase.AutoModelingToolkit(true, true)) + AutoSymbolics(), + AutoSparse(AutoSymbolics())) for backend in backends @testset "$backend" begin _test_sparse_derivatives_hs071(backend, Ipopt.Optimizer()) @@ -167,7 +165,7 @@ end u0 = [0.0, 0.0, 0.0, 1.0] optfun = OptimizationFunction((u, p) -> -v'u, cons = (res, u, p) -> res .= w'u, - OptimizationBase.AutoForwardDiff()) + AutoForwardDiff()) optprob = OptimizationProblem(optfun, u0; lb = zero.(u0), ub = one.(u0), int = ones(Bool, length(u0)), @@ -185,7 +183,7 @@ end u0 = [1.0] optfun = OptimizationFunction((u, p) -> sum(abs2, x * u[1] .- y), - OptimizationBase.AutoForwardDiff()) + AutoForwardDiff()) optprob = OptimizationProblem(optfun, u0; lb = one.(u0), ub = 6.0 .* u0, int = ones(Bool, length(u0))) @@ -264,7 +262,7 @@ end cons(res, x, p) = (res .= [x[1]^2 + x[2]^2, x[1] * x[2]]) - optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoModelingToolkit(); + optprob = OptimizationFunction(rosenbrock, AutoSymbolics(); cons = cons) prob = OptimizationProblem(optprob, x0, _p, lcons = [1.0, 0.5], ucons = [1.0, 0.5]) sol = solve(prob, AmplNLWriter.Optimizer(Ipopt_jll.amplexe)) @@ -285,7 +283,7 @@ end end lag_hess_prototype = sparse([1 1; 0 1]) - optprob = 
OptimizationFunction(rosenbrock, OptimizationBase.AutoForwardDiff(); + optprob = OptimizationFunction(rosenbrock, AutoForwardDiff(); cons = cons, lag_h = lagh, lag_hess_prototype) prob = OptimizationProblem(optprob, x0, _p, lcons = [1.0, 0.5], ucons = [1.0, 0.5]) sol = solve(prob, Ipopt.Optimizer()) diff --git a/lib/OptimizationMadNLP/Project.toml b/lib/OptimizationMadNLP/Project.toml index d9ddb0587..aa11b80f6 100644 --- a/lib/OptimizationMadNLP/Project.toml +++ b/lib/OptimizationMadNLP/Project.toml @@ -19,7 +19,7 @@ LinearAlgebra = "1.10.0" MadNLP = "0.8.12" ModelingToolkit = "10.23" NLPModels = "0.21.5" -OptimizationBase = "4" +OptimizationBase = "4.0.1" SciMLBase = "2.122.1" SparseArrays = "1.10.0" SymbolicIndexingInterface = "0.3.40" diff --git a/lib/OptimizationManopt/Project.toml b/lib/OptimizationManopt/Project.toml index a1d111a0f..1e93b5c77 100644 --- a/lib/OptimizationManopt/Project.toml +++ b/lib/OptimizationManopt/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationManopt" uuid = "e57b7fff-7ee7-4550-b4f0-90e9476e9fb6" authors = ["Mateusz Baran ", "Ronny Bergmann "] -version = "1.1.0" +version = "1.2.0" [deps] Manopt = "0fc0a36d-df90-57f3-8f93-d78a9fc72bb5" OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" diff --git a/lib/OptimizationMetaheuristics/Project.toml b/lib/OptimizationMetaheuristics/Project.toml index 83d4c87da..8062ef581 100644 --- a/lib/OptimizationMetaheuristics/Project.toml +++ b/lib/OptimizationMetaheuristics/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationMetaheuristics" uuid = "3aafef2f-86ae-4776-b337-85a36adf0b55" authors = ["Vaibhav Dixit and contributors"] -version = "0.3.3" +version = "0.3.4" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Metaheuristics = "bcdb8e00-2c21-11e9-3065-2b553b22f898" diff --git a/lib/OptimizationNLPModels/Project.toml b/lib/OptimizationNLPModels/Project.toml index 8aa607830..f167f3815 100644 --- a/lib/OptimizationNLPModels/Project.toml +++ b/lib/OptimizationNLPModels/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationNLPModels" uuid = "064b21be-54cf-11ef-1646-cdfee32b588f" authors = ["Vaibhav Dixit and contributors"] -version = "1.0.0" +version = "1.1.0" [deps] NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6" diff --git a/lib/OptimizationNLopt/Project.toml b/lib/OptimizationNLopt/Project.toml index ea735abe0..38b1eea89 100644 --- a/lib/OptimizationNLopt/Project.toml +++ b/lib/OptimizationNLopt/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationNLopt" uuid = "4e6fcdb7-1186-4e1f-a706-475e75c168bb" authors = ["Vaibhav Dixit and contributors"] -version = "0.3.6" +version = "0.3.7" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" diff --git a/lib/OptimizationNOMAD/Project.toml b/lib/OptimizationNOMAD/Project.toml index a4fcfbe17..fba114926 100644 --- a/lib/OptimizationNOMAD/Project.toml +++ b/lib/OptimizationNOMAD/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationNOMAD" uuid = "2cab0595-8222-4775-b714-9828e6a9e01b" authors = ["Vaibhav Dixit and contributors"] -version = "0.3.3" +version = "0.3.4" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" NOMAD = "02130f1c-4665-5b79-af82-ff1385104aa0" diff --git a/lib/OptimizationODE/Project.toml b/lib/OptimizationODE/Project.toml index ff59572b1..f7bc975de 100644 --- a/lib/OptimizationODE/Project.toml +++ b/lib/OptimizationODE/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationODE" uuid = "dfa73e59-e644-4d8a-bf84-188d7ecb34e4" authors = ["Paras Puneet Singh "] -version = "0.1.2" +version = "0.1.3" [deps] DiffEqBase = 
"2b5f629d-d688-5b77-993f-72d75c75574e" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" diff --git a/lib/OptimizationOptimJL/Project.toml b/lib/OptimizationOptimJL/Project.toml index d176cce3c..31ecd9bc2 100644 --- a/lib/OptimizationOptimJL/Project.toml +++ b/lib/OptimizationOptimJL/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationOptimJL" uuid = "36348300-93cb-4f02-beb5-3c3902f8871e" authors = ["Vaibhav Dixit and contributors"] -version = "0.4.6" +version = "0.4.7" [deps] PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" diff --git a/lib/OptimizationOptimJL/test/runtests.jl b/lib/OptimizationOptimJL/test/runtests.jl index 801825019..6d45be085 100644 --- a/lib/OptimizationOptimJL/test/runtests.jl +++ b/lib/OptimizationOptimJL/test/runtests.jl @@ -85,7 +85,7 @@ end @test sol.original.iterations > 2 cons = (res, x, p) -> res .= [x[1]^2 + x[2]^2] - optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoModelingToolkit(); + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoSymbolics(); cons = cons) prob = OptimizationProblem(optprob, x0, _p, lcons = [-5.0], ucons = [10.0]) @@ -157,7 +157,7 @@ end sol = solve(prob, BFGS()) @test 10 * sol.objective < l1 - optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoModelingToolkit()) + optprob = OptimizationFunction(rosenbrock, OptimizationBase.AutoSymbolics()) prob = OptimizationProblem(optprob, x0, _p) sol = solve(prob, Optim.BFGS()) @test 10 * sol.objective < l1 diff --git a/lib/OptimizationOptimisers/Project.toml b/lib/OptimizationOptimisers/Project.toml index 05e07b123..6703c172c 100644 --- a/lib/OptimizationOptimisers/Project.toml +++ b/lib/OptimizationOptimisers/Project.toml @@ -1,12 +1,11 @@ name = "OptimizationOptimisers" uuid = "42dfb2eb-d2b4-4451-abcd-913932933ac1" authors = ["Vaibhav Dixit and contributors"] -version = "0.3.13" +version = "0.3.14" [deps] Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" Optimisers = "3bd65402-5787-11e9-1adc-39752487f4e2" -Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba" OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" diff --git a/lib/OptimizationPRIMA/test/runtests.jl b/lib/OptimizationPRIMA/test/runtests.jl index 0aeb3f870..fe923e25f 100644 --- a/lib/OptimizationPRIMA/test/runtests.jl +++ b/lib/OptimizationPRIMA/test/runtests.jl @@ -42,7 +42,7 @@ using Test function con2_c(res, x, p) res .= [x[2] * sin(x[1]) - x[1]] end - optprob = OptimizationFunction(rosenbrock, AutoModelingToolkit(), cons = con2_c) + optprob = OptimizationFunction(rosenbrock, AutoSymbolics(), cons = con2_c) prob = OptimizationProblem(optprob, x0, _p, lcons = [10], ucons = [50]) sol = OptimizationBase.solve(prob, COBYLA(), maxiters = 1000) @test 10 * sol.objective < l1 diff --git a/lib/OptimizationPolyalgorithms/Project.toml b/lib/OptimizationPolyalgorithms/Project.toml index fd7d7bbe1..02677751d 100644 --- a/lib/OptimizationPolyalgorithms/Project.toml +++ b/lib/OptimizationPolyalgorithms/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationPolyalgorithms" uuid = "500b13db-7e66-49ce-bda4-eed966be6282" authors = ["Vaibhav Dixit and contributors"] -version = "0.3.2" +version = "0.3.3" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" OptimizationOptimisers = "42dfb2eb-d2b4-4451-abcd-913932933ac1" diff --git a/lib/OptimizationPyCMA/Project.toml b/lib/OptimizationPyCMA/Project.toml index 8784572c0..7b9fd26f3 100644 
--- a/lib/OptimizationPyCMA/Project.toml +++ b/lib/OptimizationPyCMA/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationPyCMA" uuid = "fb0822aa-1fe5-41d8-99a6-e7bf6c238d3b" authors = ["Maximilian Pochapski <67759684+mxpoch@users.noreply.github.com>"] -version = "1.1.0" +version = "1.2.0" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab" diff --git a/lib/OptimizationQuadDIRECT/Project.toml b/lib/OptimizationQuadDIRECT/Project.toml index 96d23ec40..5bf769546 100644 --- a/lib/OptimizationQuadDIRECT/Project.toml +++ b/lib/OptimizationQuadDIRECT/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationQuadDIRECT" uuid = "842ac81e-713d-465f-80f7-84eddaced298" authors = ["Vaibhav Dixit and contributors"] -version = "0.3.2" +version = "0.3.3" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" QuadDIRECT = "dae52e8d-d666-5120-a592-9e15c33b8d7a" diff --git a/lib/OptimizationSciPy/Project.toml b/lib/OptimizationSciPy/Project.toml index 4e984b549..b0465f27e 100644 --- a/lib/OptimizationSciPy/Project.toml +++ b/lib/OptimizationSciPy/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationSciPy" uuid = "cce07bd8-c79b-4b00-aee8-8db9cce22837" authors = ["Aditya Pandey and contributors"] -version = "0.4.3" +version = "0.4.4" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" diff --git a/lib/OptimizationSophia/Project.toml b/lib/OptimizationSophia/Project.toml index 38215fb8b..3b063919d 100644 --- a/lib/OptimizationSophia/Project.toml +++ b/lib/OptimizationSophia/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationSophia" uuid = "892fee11-dca1-40d6-b698-84ba0d87399a" authors = ["paramthakkar123 "] -version = "1.1.0" +version = "1.2.0" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" diff --git a/lib/OptimizationSpeedMapping/Project.toml b/lib/OptimizationSpeedMapping/Project.toml index f96cad118..d99d67a38 100644 --- a/lib/OptimizationSpeedMapping/Project.toml +++ b/lib/OptimizationSpeedMapping/Project.toml @@ -1,7 +1,7 @@ name = "OptimizationSpeedMapping" uuid = "3d669222-0d7d-4eb9-8a9f-d8528b0d9b91" authors = ["Vaibhav Dixit and contributors"] -version = "0.2.1" +version = "0.2.2" [deps] OptimizationBase = "bca83a33-5cc9-4baa-983d-23429ab6bcbb" SpeedMapping = "f1835b91-879b-4a3f-a438-e4baacf14412" diff --git a/test/AD_performance_regression.jl b/test/AD_performance_regression.jl index fe1df569e..cf9515cff 100644 --- a/test/AD_performance_regression.jl +++ b/test/AD_performance_regression.jl @@ -1,4 +1,4 @@ -import Optimization +import Optimization, ADTypes using ReverseDiff, Enzyme, BenchmarkTools, Test lookup_pg = Dict(5 => 11, 4 => 13, 2 => 15, 3 => 17, 1 => 19) @@ -26,7 +26,7 @@ opf_objective = let lookup_pg = lookup_pg, ref_gen_idxs = ref_gen_idxs, end optprob = Optimization.OptimizationFunction(opf_objective, - Optimization.AutoReverseDiff(true)) + ADTypes.AutoReverseDiff(; compile = true)) test_u0 = [ 0.6292298794022337, @@ -134,21 +134,21 @@ res = zero(test_u0) _f = Optimization.instantiate_function(optprob, test_u0, - Optimization.AutoReverseDiff(false), + ADTypes.AutoReverseDiff(; compile = false), nothing; g = true) _f.f(test_u0, nothing) @test @ballocated($(_f.grad)($res, $test_u0)) > 0 _f2 = Optimization.instantiate_function(optprob, test_u0, - Optimization.AutoReverseDiff(true), + ADTypes.AutoReverseDiff(; compile = true), nothing; g = true) _f2.f(test_u0, nothing) @test @ballocated($(_f2.grad)($res, 
$test_u0)) > 0 _f3 = Optimization.instantiate_function(optprob, test_u0, - Optimization.AutoEnzyme(), + ADTypes.AutoEnzyme(), nothing; g = true) _f3.f(test_u0, nothing) @test @ballocated($(_f3.grad)($res, $test_u0)) == 0 diff --git a/test/ADtests.jl b/test/ADtests.jl index ab59fc0f5..5b48239ca 100644 --- a/test/ADtests.jl +++ b/test/ADtests.jl @@ -35,8 +35,8 @@ end @testset "No constraint" begin @testset "$adtype" for adtype in [AutoEnzyme(), AutoForwardDiff(), AutoZygote(), AutoReverseDiff(), - AutoFiniteDiff(), AutoModelingToolkit(), AutoSparseForwardDiff(), - AutoSparseReverseDiff(), AutoSparse(AutoZygote()), AutoModelingToolkit(true, true), AutoMooncake()] + AutoFiniteDiff(), AutoSymbolics(), AutoSparse(AutoForwardDiff()), + AutoSparse(AutoReverseDiff()), AutoSparse(AutoZygote()), AutoSparse(AutoSymbolics()), AutoMooncake()] optf = OptimizationFunction(rosenbrock, adtype) prob = OptimizationProblem(optf, x0) @@ -47,7 +47,7 @@ end @test sol.retcode == ReturnCode.Success end - # `Newton` requires Hession, which Mooncake doesn't support at the moment. + # `Newton` requires Hessian, which Mooncake doesn't support at the moment. if adtype != AutoMooncake() sol = solve(prob, Optim.Newton()) @test 10 * sol.objective < l1 @@ -56,7 +56,7 @@ end end end - # Requires Hession, which Mooncake doesn't support at the moment. + # Requires Hessian, which Mooncake doesn't support at the moment. # Enzyme Hessian-Free seems to have an issue that is hard to track down. # https://github.com/SciML/Optimization.jl/issues/1030 if adtype != AutoMooncake() && adtype != AutoEnzyme() @@ -75,8 +75,8 @@ end @testset "One constraint" begin @testset "$adtype" for adtype in [AutoEnzyme(), AutoForwardDiff(), AutoZygote(), AutoReverseDiff(), - AutoFiniteDiff(), AutoModelingToolkit(), AutoSparseForwardDiff(), - AutoSparseReverseDiff(), AutoSparse(AutoZygote()), AutoModelingToolkit(true, true), AutoMooncake()] + AutoFiniteDiff(), AutoSymbolics(), AutoSparse(AutoForwardDiff()), + AutoSparse(AutoReverseDiff()), AutoSparse(AutoZygote()), AutoSparse(AutoSymbolics()), AutoMooncake()] cons = (res, x, p) -> (res[1] = x[1]^2 + x[2]^2 - 1.0; return nothing) optf = OptimizationFunction(rosenbrock, adtype, cons = cons) @@ -86,7 +86,7 @@ end sol = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000) @test 10 * sol.objective < l1 - # Requires Hession, which Mooncake doesn't support at the moment. + # Requires Hessian, which Mooncake doesn't support at the moment. if adtype != AutoMooncake() sol = solve(prob, Ipopt.Optimizer(), max_iter = 1000; print_level = 0) @test 10 * sol.objective < l1 @@ -96,8 +96,8 @@ end @testset "Two constraints" begin @testset "$adtype" for adtype in [AutoForwardDiff(), AutoZygote(), AutoReverseDiff(), - AutoFiniteDiff(), AutoModelingToolkit(), AutoSparseForwardDiff(), - AutoSparseReverseDiff(), AutoSparse(AutoZygote()), AutoModelingToolkit(true, true), AutoMooncake()] + AutoFiniteDiff(), AutoSymbolics(), AutoSparseForwardDiff(), + AutoSparseReverseDiff(), AutoSparse(AutoZygote()), AutoSparse(AutoSymbolics()), AutoMooncake()] function con2_c(res, x, p) res[1] = x[1]^2 + x[2]^2 res[2] = x[2] * sin(x[1]) - x[1] @@ -111,7 +111,7 @@ end sol = solve(prob, OptimizationLBFGSB.LBFGSB(), maxiters = 1000) @test 10 * sol.objective < l1 - # Requires Hession, which Mooncake doesn't support at the moment. + # Requires Hessian, which Mooncake doesn't support at the moment.
if adtype != AutoMooncake() sol = solve(prob, Ipopt.Optimizer(), max_iter = 1000; print_level = 0) @test 10 * sol.objective < l1 diff --git a/test/minibatch.jl b/test/minibatch.jl index abd5a2610..6b53479dc 100644 --- a/test/minibatch.jl +++ b/test/minibatch.jl @@ -69,7 +69,7 @@ res1 = Optimization.solve(optprob, Optimisers.Adam(0.05), optfun = OptimizationFunction( (θ, p) -> loss_adjoint(θ, batch, time_batch), - Optimization.AutoModelingToolkit()) + AutoSymbolics()) optprob = OptimizationProblem(optfun, pp) using IterTools: ncycle @test_broken res1 = Optimization.solve(optprob, Optimisers.Adam(0.05), diff --git a/test/native.jl b/test/native.jl index 45bc73e1c..7d11884e3 100644 --- a/test/native.jl +++ b/test/native.jl @@ -16,13 +16,13 @@ function cons1(res, coeffs, p = nothing) return nothing end -optf = OptimizationFunction(loss, AutoSparseForwardDiff(), cons = cons1) +optf = OptimizationFunction(loss, AutoSparse(AutoForwardDiff()), cons = cons1) callback = (st, l) -> (@show l; return false) initpars = rand(5) l0 = optf(initpars, (x0, y0)) -optf1 = OptimizationFunction(loss, AutoSparseForwardDiff()) +optf1 = OptimizationFunction(loss, AutoSparse(AutoForwardDiff())) prob1 = OptimizationProblem(optf1, rand(5), data) sol1 = solve(prob1, OptimizationOptimisers.Adam(), maxiters = 1000, callback = callback) @test sol1.objective < l0
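The documentation changes in this diff move the quick-start examples from `Optimization.LBFGS()` to the solver exported by the OptimizationLBFGSB subpackage. Below is a minimal sketch of that pattern, mirroring the updated `getting_started.md` hunk above; it assumes Optimization, OptimizationLBFGSB, and Zygote are installed.

```julia
# Minimal sketch of the quick-start pattern the updated docs standardize on.
# Assumes Optimization, OptimizationLBFGSB, and Zygote are installed.
using Optimization, OptimizationLBFGSB, Zygote

rosenbrock(u, p) = (p[1] - u[1])^2 + p[2] * (u[2] - u[1]^2)^2
u0 = zeros(2)
p = [1.0, 100.0]

# AutoZygote() selects Zygote-based automatic differentiation for the gradient.
optf = OptimizationFunction(rosenbrock, AutoZygote())
prob = OptimizationProblem(optf, u0, p)

# LBFGS is now provided by the OptimizationLBFGSB subpackage rather than Optimization itself.
sol = solve(prob, OptimizationLBFGSB.LBFGS())
```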
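The docs and test hunks above also move away from the `AutoModelingToolkit` constructor and the positional `AutoReverseDiff(true)` form in favor of the ADTypes.jl names (`AutoSymbolics`, `AutoSparse`, and keyword-based `AutoReverseDiff`). A small sketch of the new spellings follows; only the constructor names change, the solver calls stay the same. It assumes ReverseDiff.jl (and Symbolics.jl for the symbolic backend) are available.

```julia
# Sketch of the ADTypes.jl constructors used after this change.
# Assumes ReverseDiff.jl and Symbolics.jl backends are installed.
import ADTypes
using Optimization

rosenbrock(x, p) = (p[1] - x[1])^2 + p[2] * (x[2] - x[1]^2)^2

# Old spellings: Optimization.AutoReverseDiff(true), AutoModelingToolkit(true, true)
ad_compiled = ADTypes.AutoReverseDiff(; compile = true)          # compiled ReverseDiff tape
ad_sparse_symbolic = ADTypes.AutoSparse(ADTypes.AutoSymbolics())  # sparse symbolic AD

optf1 = OptimizationFunction(rosenbrock, ad_compiled)
optf2 = OptimizationFunction(rosenbrock, ad_sparse_symbolic)
```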