Skip to content

Commit 6b59bf8

Browse files
committed
Start building benchmarking structure
1 parent a2d19f2 commit 6b59bf8

File tree

17 files changed

+354
-178
lines changed

17 files changed

+354
-178
lines changed

Snakefile

Lines changed: 30 additions & 102 deletions
Original file line numberDiff line numberDiff line change
@@ -1,105 +1,33 @@
1-
from shutil import move
2-
import gdown
3-
from urllib.request import urlretrieve
4-
from pathlib import Path
5-
6-
CATS_GITHUB_URL = "https://raw.githubusercontent.com/WISPO-POP/CATS-CaliforniaTestSystem/f260d8bd89e68997bf12d24e767475b2f2b88a77/GIS/"
7-
8-
ENERGY_BENCHMARKS = Path("benchmarks/energy_model")
9-
PREPROCESS_DIR = ENERGY_BENCHMARKS / "data/preprocess"
10-
POSTPROCESS_DIR = ENERGY_BENCHMARKS / "data/postprocess"
11-
SCRIPTS_DIR = ENERGY_BENCHMARKS / "scripts"
1+
configfile: "benchmarks/config.yaml"


def generate_all_runs():
    """Enumerate every (problem, size, library, solver) benchmark combination.

    A problem whose config entry is empty or has no "size" key gets the
    placeholder size "0", so it still yields one run per library/solver.
    """
    import itertools

    # Pair each problem with each of its configured sizes (or "0").
    problem_size_pairs = []
    for problem, problem_data in config["problems"].items():
        if problem_data is None or "size" not in problem_data:
            problem_size_pairs.append((problem, "0"))
        else:
            for size in problem_data["size"]:
                problem_size_pairs.append((problem, size))

    return [
        (problem, size, library, solver)
        for (problem, size), library, solver in itertools.product(
            problem_size_pairs, config["libraries"], config["solvers"]
        )
    ]
1223

1324
rule all:
    input:
        # One benchmark result file per (problem, size, library, solver) run.
        [
            f"benchmarks/{problem}/results/{library}_{solver}_{size}.tsv"
            for problem, size, library, solver in generate_all_runs()
        ]
28+
29+
rule run_benchmark:
    benchmark:
        # repeat() re-runs this rule config["repeat"] times; Snakemake records
        # one timing row per repetition in the .tsv benchmark file.
        repeat(
            "benchmarks/{problem}/results/{library}_{solver}_{size}.tsv",
            config["repeat"],
        )
    shell:
        "python benchmarks/run_benchmarks {wildcards.problem}"
        " --library {wildcards.library}"
        " --solver {wildcards.solver}"
        " --size {wildcards.size}"

benchmarks/__init__.py

Whitespace-only changes.

benchmarks/config.yaml

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
# Number of timed repetitions per benchmark run (see Snakemake repeat()).
repeat: 3
problems:
  facility_problem:
    size:
      - 25
      - 50
      - 75
      - 100
  # energy_problem:

solvers:
  - gurobi
  # - highs
  # - mosek

libraries:
  - pyoframe
  # - pyomo
  # - cvxpy
  # - gurobipy
  # - jump
  # - linopy
  # - pyoptinterface

benchmarks/energy_model/Snakefile

Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
from shutil import move
import gdown
from urllib.request import urlretrieve
from pathlib import Path

# Pinned commit of the CATS (California Test System) GIS data on GitHub.
CATS_GITHUB_URL = "https://raw.githubusercontent.com/WISPO-POP/CATS-CaliforniaTestSystem/f260d8bd89e68997bf12d24e767475b2f2b88a77/GIS/"

ENERGY_BENCHMARKS = Path("benchmarks/energy_model")
PREPROCESS_DIR = ENERGY_BENCHMARKS / "data/preprocess"    # raw downloaded inputs
POSTPROCESS_DIR = ENERGY_BENCHMARKS / "data/postprocess"  # processed .parquet outputs
SCRIPTS_DIR = ENERGY_BENCHMARKS / "scripts"

# First rule = default target: request every final processed artifact.
rule all:
    input:
        loads=POSTPROCESS_DIR / "loads.parquet",
        lines=POSTPROCESS_DIR / "lines_simplified.parquet",
        generators=POSTPROCESS_DIR / "generators.parquet",
        yearly_limit=POSTPROCESS_DIR / "yearly_limits.parquet",
        vcf=POSTPROCESS_DIR / "variable_capacity_factors.parquet",
20+
21+
22+
rule fetch_load_data:
    """Downloads the load data from the Google Drive folder hosted by the CATS project (https://drive.google.com/drive/folders/1Zo6ZeZ1OSjHCOWZybbTd6PgO4DQFs8_K)"""
    output:
        PREPROCESS_DIR / "CATS_loads.csv",
    run:
        gdown.download(id="1Sz8st7g4Us6oijy1UYMPUvkA1XeZlIr8", output=output[0])


rule fetch_generation_data:
    """Downloads the generation data from the Google Drive folder hosted by the CATS project (https://drive.google.com/drive/folders/1Zo6ZeZ1OSjHCOWZybbTd6PgO4DQFs8_K)"""
    output:
        PREPROCESS_DIR / "CATS_generation.csv",
    run:
        gdown.download(id="1CxLlcwAEUy-JvJQdAfVydJ1p9Ecot-4d", output=output[0])


rule fetch_line_data:
    """Downloads the line data (.json) from the pinned CATS GitHub commit."""
    output:
        PREPROCESS_DIR / "CATS_lines.json",
    run:
        urlretrieve(CATS_GITHUB_URL + "CATS_lines.json", output[0])


rule fetch_generator_data:
    """Downloads the generator table (.csv) from the pinned CATS GitHub commit."""
    output:
        PREPROCESS_DIR / "CATS_generators.csv",
    run:
        urlretrieve(CATS_GITHUB_URL + "CATS_gens.csv", output[0])
50+
51+
52+
rule process_load_data:
    """Convert the load data to narrow format and keep only the active loads."""
    input:
        PREPROCESS_DIR / "CATS_loads.csv",
    output:
        POSTPROCESS_DIR / "loads.parquet",
    notebook:
        str(SCRIPTS_DIR / "process_load_data.py.ipynb")


rule process_line_data:
    """Convert from .json to .parquet and keep only relevant columns."""
    input:
        PREPROCESS_DIR / "CATS_lines.json",
    output:
        POSTPROCESS_DIR / "lines.parquet",
    notebook:
        str(SCRIPTS_DIR / "process_lines_json.py.ipynb")


rule process_generator_data:
    """Group the generators by type and bus."""
    input:
        PREPROCESS_DIR / "CATS_generators.csv",
    output:
        POSTPROCESS_DIR / "generators.parquet",
    notebook:
        str(SCRIPTS_DIR / "process_generator_data.py.ipynb")


rule compute_capacity_factors:
    """Use the hourly generation data to create capacity factors by fuel type."""
    input:
        gen_capacity=POSTPROCESS_DIR / "generators.parquet",
        gen_dispatch=PREPROCESS_DIR / "CATS_generation.csv",
    output:
        yearly_limit=POSTPROCESS_DIR / "yearly_limits.parquet",
        vcf=POSTPROCESS_DIR / "variable_capacity_factors.parquet",
    notebook:
        str(SCRIPTS_DIR / "compute_capacity_factors.py.ipynb")


rule simplify_network:
    """Simplify the line network using the generator and load tables (see scripts/simplify_network.py)."""
    input:
        lines=POSTPROCESS_DIR / "lines.parquet",
        generators=POSTPROCESS_DIR / "generators.parquet",
        loads=POSTPROCESS_DIR / "loads.parquet",
    output:
        POSTPROCESS_DIR / "lines_simplified.parquet",
    script:
        SCRIPTS_DIR / "simplify_network.py"
103+

benchmarks/facility_problem/README.md

Whitespace-only changes.

benchmarks/facility_problem/__init__.py

Whitespace-only changes.

benchmarks/facility_problem/bm_cvxpy.py

Whitespace-only changes.

benchmarks/facility_problem/bm_gurobipy.py

Whitespace-only changes.
Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
# Copyright (c) 2022: Miles Lubin and contributors
2+
#
3+
# Use of this source code is governed by an MIT-style license that can be found
4+
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.
5+
# See https://github.com/jump-dev/JuMPPaperBenchmarks
6+
7+
using JuMP
8+
import Gurobi
9+
10+
"""
    solve_facility(model, G, F)

Build and "solve" the facility-location benchmark on `model`: place `F`
facilities to minimize the maximum distance `d` from any point of a
(G+1)x(G+1) grid of customers to its assigned facility. Returns `model`.
"""
function solve_facility(model, G, F)
    # Zero time limit: the solver returns immediately, so the timing in
    # main() measures model construction, not optimization (benchmark intent).
    set_time_limit_sec(model, 0.0)
    set_optimizer_attribute(model, "Presolve", 0)
    @variables(model, begin
        0 <= y[1:F, 1:2] <= 1          # facility coordinates in the unit square
        s[0:G, 0:G, 1:F] >= 0          # slack: d + M(1 - z)
        z[0:G, 0:G, 1:F], Bin          # assignment of customer (i,j) to facility f
        r[0:G, 0:G, 1:F, 1:2]          # displacement customer - facility
        d                              # maximum assigned distance (objective)
    end)
    @objective(model, Min, d)
    # Each grid customer is assigned to exactly one facility.
    @constraint(model, [i in 0:G, j in 0:G], sum(z[i,j,f] for f in 1:F) == 1)
    # d is the maximum of distances between customers
    # and their facilities. The original constraint is d >= ||x - y|| - M(1-z)
    # where M = 1 for our data. Because Gurobi/CPLEX can't take SOCPs directly,
    # we need to rewrite as a set of constraints and auxiliary variables:
    #   s = d + M(1 - z) >= 0
    #   r = x - y
    #   r'r <= s^2
    M = 2 * sqrt(2)
    for i in 0:G, j in 0:G, f in 1:F
        @constraints(model, begin
            s[i, j, f] == d + M * (1 - z[i, j, f])
            r[i, j, f, 1] == i / G - y[f, 1]
            r[i, j, f, 2] == j / G - y[f, 2]
            # second-order cone: ||r|| <= s
            sum(r[i, j, f, k]^2 for k in 1:2) <= s[i, j, f]^2
        end)
    end
    optimize!(model)
    return model
end
41+
42+
"""
    get_model(arg)

Return a Gurobi-backed JuMP model: a direct-mode model when `arg == "direct"`,
otherwise a default (cached) model.
"""
function get_model(arg)
    return arg == "direct" ? direct_model(Gurobi.Optimizer()) : Model(Gurobi.Optimizer)
end
49+
50+
"""
    main(io::IO, Ns = [25, 50, 75, 100])

Time facility-model construction for both JuMP modes ("direct", "default")
and every grid size in `Ns`, writing one CSV row per run to `io`.
"""
function main(io::IO, Ns = [25, 50, 75, 100])
    for mode in ["direct", "default"], n in Ns
        t0 = time()
        model = solve_facility(get_model(mode), n, n)
        elapsed = round(time() - t0, digits=2)
        nvars = num_variables(model)
        println(io, "JuMP_$mode,fac-$n,$nvars,$elapsed")
    end
end
61+
62+
# Append one CSV row per benchmark run to benchmarks.csv beside this script.
open(joinpath(@__DIR__, "benchmarks.csv"), "a") do io
    main(io)
end

benchmarks/facility_problem/bm_linopy.py

Whitespace-only changes.

0 commit comments

Comments
 (0)