|
| 1 | +import SciMLBase: @add_kwonly, AbstractNonlinearProblem, AbstractNonlinearFunction, |
| 2 | + AbstractODEFunction, AbstractODEProblem, warn_paramtype, ConstructionBase, |
| 3 | + NullParameters, StandardNonlinearProblem, @reset, updated_u0_p, |
| 4 | + remake_initialization_data, maybe_eager_initialize_problem |
| 5 | + |
1 | 6 | @inbounds function uniform_itr( |
2 | 7 | dim::Int, lb::AbstractArray{T}, ub::AbstractArray{T}) where {T} |
3 | 8 | (rand(T) * (ub[i] - lb[i]) + lb[i] for i in 1:dim) |
@@ -342,10 +347,12 @@ Based on the paper: Particle swarm optimization method for constrained optimizat |
342 | 347 | penalty |
343 | 348 | end |
344 | 349 |
|
| 350 | +#TODO: Possible migration to DifferentiationInterface.jl, |
| 351 | +#       however I cannot compile GPU-compatible gradients with Enzyme as of Mar 2025
"""
    instantiate_gradient(f, adtype::AutoForwardDiff)

Return a two-argument callable `(θ, p)` that evaluates the gradient of the
objective `θ -> f(θ, p)` at `θ` via forward-mode AD (ForwardDiff).

The parameter vector `p` is fixed into the objective with `Base.Fix2`
(`Fix2(f, p)(x) == f(x, p)`), so ForwardDiff differentiates only with
respect to `θ`.
"""
@inline function instantiate_gradient(f, adtype::AutoForwardDiff)
    (θ, p) -> ForwardDiff.gradient(Base.Fix2(f, p), θ)
end
348 | 355 |
|
"""
    instantiate_gradient(f, adtype::AutoEnzyme)

Return a two-argument callable `(θ, p)` that evaluates the gradient of the
objective `θ -> f(θ, p)` at `θ` via Enzyme reverse-mode AD.

`p` is fixed into the objective with `Base.Fix2` and the resulting callable
is marked `Const` (the objective itself is not differentiated), while `θ` is
marked `Active`. `autodiff_deferred` is used (rather than `autodiff`) for
GPU-kernel compatibility; the nested `first` unwraps the returned
derivative tuple down to the gradient with respect to `θ`.
"""
@inline function instantiate_gradient(f, adtype::AutoEnzyme)
    (θ, p) -> first(first(autodiff_deferred(
        Reverse, Const(Base.Fix2(f, p)), Active, Active(θ))))
end
0 commit comments