Commit fcb5e97

feat(jump): can use solver from jump
Integration from JuMP seems to work for a very simple example.
1 parent c0ba9bc commit fcb5e97

File tree

13 files changed (+764, -6 lines)

.vscode/settings.json

Lines changed: 2 additions & 0 deletions
@@ -24,6 +24,7 @@
     "CUDA",
     "CUSPARSE",
     "deploydocs",
+    "diagind",
     "eigmax",
     "ENDATA",
     "eprint",
@@ -48,6 +49,7 @@
     "mathbb",
     "mathbf",
     "mathengine",
+    "maxcut",
     "Mersenne",
     "Mohseni",
     "MOIU",

docs/src/reference/reference.md

Lines changed: 38 additions & 0 deletions
@@ -3,6 +3,7 @@ CurrentModule = AOCoptimizer
 DocTestSetup = quote
     import AOCoptimizer as AOC
     import AOCoptimizer.Solver as Solver
+    import AOCoptimizer.api as API
 end
 DocTestFilters = [r"AOCoptimizer|AOC"]
 ```
@@ -355,3 +356,40 @@ Solver._optimal_batch_size
 ```@docs
 Solver.Collector._default_best_assignment_collector
 ```
+
+## `API`
+
+The `AOCoptimizer.api` module contains simplified (and rigid) interfaces to the solver.
+They're easier to use than the normal interfaces, but they don't give access to most configuration parameters.
+
+```@docs
+api.adjust_inputs_to_engine
+```
+
+```@docs
+api.GraphCutResult
+```
+
+```@docs
+api.compute_max_cut
+```
+
+```@docs
+api.IsingResult
+```
+
+```@docs
+api.compute_ising
+```
+
+```@docs
+api.compute_mixed_ising
+```
+
+```@docs
+api.compute_qumo_positive
+```
+
+```@docs
+api.compute_qumo
+```
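
As a rough usage sketch of the new `api` module (not part of this commit): the only signature visible in this change set is the `compute_qumo` call made from `ext/JuMPExt/wrapper.jl` further below, so the concrete sense value, input encoding, and defaults here are assumptions for illustration, not documented behaviour.

```julia
import AOCoptimizer.api as API

# Hypothetical 2-variable QUMO instance; the argument order mirrors the call
# in ext/JuMPExt/wrapper.jl: (sense, quadratic, linear, continuous, seed,
# time_limit; work_dir, engine). Concrete values and types are assumptions.
quadratic  = [0.0 1.0; 1.0 0.0]   # symmetric interaction matrix
linear     = [0.5, -0.5]          # linear coefficients
continuous = [false, false]       # per-variable continuous flags (assumed encoding)
seed       = 42
time_limit = 10.0                 # seconds
sense      = :minimize            # placeholder; wrapper.jl derives this from optimizer.sense

result = API.compute_qumo(sense, quadratic, linear, continuous, seed, time_limit)

# wrapper.jl reads exactly these two fields from the result:
@show result.Objective
@show result.Assignment
```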

ext/CUDAExt/CUDAExt.jl

Lines changed: 26 additions & 0 deletions
@@ -157,4 +157,30 @@ AOCoptimizer.Solver._similar_vector(x::CuSparseMatrix, l) = CuVector{eltype(x)}(
 
 include("engine.jl")
 
+function AOCoptimizer.api.adjust_inputs_to_engine(
+    dev::AOCoptimizer.Solver.EngineCuda,
+    matrix::AbstractMatrix{T},
+    linear::Union{Nothing,AbstractVector{T}} = nothing,
+) where T<:Real
+    version = CUDA.capability(dev.dev)
+
+    if version < v"5.3" && T === Float16
+        @warn "Computing with Float16 on a GPU with compute capability less than 5.3 is not supported. Switching to Float32."
+        if linear !== nothing
+            linear = Float32.(linear)
+        end
+        return Float32, Float32.(matrix), linear
+    end
+
+    if version < v"8.0" && T === BFloat16
+        @warn "Computing with BFloat16 on a GPU with compute capability less than 8.0 is not supported. Switching to Float32."
+        if linear !== nothing
+            linear = Float32.(linear)
+        end
+        return Float32, Float32.(matrix), linear
+    end
+
+    return T, matrix, linear
+end
+
 end # module CUDAExt
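
A short usage sketch of the new method (the `EngineCuda` constructor and the assumption that its `dev` field holds the `CuDevice` are not shown in this diff; they are illustrative guesses):

```julia
using CUDA
import AOCoptimizer as AOC

# Assumed construction of the CUDA engine; only the `dev` field access
# appears in the method above.
engine = AOC.Solver.EngineCuda(CUDA.device())

matrix = rand(Float16, 8, 8)
linear = rand(Float16, 8)

T, m, l = AOC.api.adjust_inputs_to_engine(engine, matrix, linear)
# On devices with compute capability below 5.3, T === Float32 and both
# inputs have been converted; otherwise they pass through unchanged.
```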

ext/JuMPExt/JuMPExt.jl

Lines changed: 1 addition & 0 deletions
@@ -33,6 +33,7 @@ const SQT{T} = MOI.ScalarQuadraticTerm{T}
 const SQF{T} = MOI.ScalarQuadraticFunction{T}
 
 const Engine = AOC.Solver.Engine
+const aoc_api = AOC.api
 
 include("variables.jl")
 include("wrapper.jl")

ext/JuMPExt/wrapper.jl

Lines changed: 14 additions & 5 deletions
@@ -410,17 +410,26 @@ function MOI.optimize!(optimizer::Optimizer{T}) where {T<:Real}
         error("Unknown sense $(optimizer.sense)")
     end
 
-    @error "Solve here"
+    result = aoc_api.compute_qumo(
+        sense,
+        quadratic,
+        linear,
+        optimizer.continuous,
+        seed,
+        time_limit;
+        work_dir = work_dir,
+        engine = backend,
+    )
 
     if !(T === num_type)
         output = Dict(
-            "Objective" => convert(T, output["Objective"]),
-            "Assignment" => convert.(T, output["Assignment"]),
+            "Objective" => convert(T, result.Objective),
+            "Assignment" => convert.(T, result.Assignment),
        )
    else
        output = Dict(
-            "Objective" => output["Objective"],
-            "Assignment" => output["Assignment"],
+            "Objective" => result.Objective,
+            "Assignment" => result.Assignment,
        )
    end
 
notebooks/test-jump.jl

Lines changed: 6 additions & 0 deletions
@@ -17,3 +17,9 @@ model = Model(AOCoptimizer.MOI.Optimizer[])
 # The following will solve with default settings,
 # using 60sec as timeout value.
 optimize!(model)
+
+@show termination_status(model)
+@show objective_value(model)
+@show value(x)
+@show value(y)
+@show value(z)
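
For context, a self-contained variant of the notebook (the head of the file defining `x`, `y`, and `z` is outside this hunk, so the variable definitions and objective below are illustrative guesses, and the optimizer is assumed to be passed directly to `Model`):

```julia
using JuMP
import AOCoptimizer

model = Model(AOCoptimizer.MOI.Optimizer)

# Hypothetical reconstruction of the missing model head.
@variable(model, x, Bin)
@variable(model, y, Bin)
@variable(model, -1 <= z <= 1)
@objective(model, Min, x * y - 2 * x * z + 0.5 * z)

# Solves with default settings, using 60 sec as the timeout value.
optimize!(model)

@show termination_status(model)
@show objective_value(model)
@show value(x)
@show value(y)
@show value(z)
```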

src/AOCoptimizer.jl

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ include("FileFormats/FileFormats.jl")
 include("Algorithms/Algorithms.jl")
 
 include("Solver/Solver.jl")
+include("api/api.jl")
 
 include("precompile.jl")
 
src/Solver/core.jl

Lines changed: 9 additions & 0 deletions
@@ -562,10 +562,19 @@ The `dt` parameter specifies the time step for the simulation.
 """
 function solve_qumo end
 
+const __solvers_registered::Ref{Bool} = Ref(false)
+
 function __register_solvers()
     @info "Registering default solvers"
 
+    if __solvers_registered[]
+        @warn "Solvers already registered, skipping"
+        return
+    end
+
     @eval @make_solver(solve_mixed_ising, explore_mixed_ising)
     @eval @make_solver(solve_positive_qumo, explore_positive_qumo)
     @eval @make_solver(solve_qumo, explore_qumo)
+
+    __solvers_registered[] = true
 end

src/Solver/normalization.jl

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ function _calculate_normalization_factor(interactions::AbstractMatrix{T}) where
         λ = ( abs(λ_max) + abs(λ_min) ) / T(2.0)
     end
 
-    @assert λ > 0.0 "Largest eigenvalue is not positive"
+    @assert λ >= 0.0 "Largest eigenvalue is not positive"
     if λ <= 0.1
         @warn "Normalization factor is either negative or small; using 1.0 instead"
         λ = T(1.0)
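
A small worked illustration of what the relaxed bound presumably allows (values hypothetical; the eigenvalue computation is outside this hunk):

```julia
# Corner case: an all-zero interaction matrix.
λ_max, λ_min = 0.0, 0.0
λ = (abs(λ_max) + abs(λ_min)) / 2   # 0.0 — passes `@assert λ >= 0.0`, failed the old `λ > 0.0`
λ <= 0.1 && (λ = 1.0)               # falls through to the existing fallback
```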
