Commit b0a6ce4

Streamline plot making
1 parent 628218d commit b0a6ce4

18 files changed: +220 −189 lines changed

benchmarks/.gitignore

Lines changed: 1 addition & 2 deletions
@@ -1,3 +1,2 @@
 */data/**
-*/results/*.tsv
-*.csv
+*/results/*

benchmarks/Snakefile

Lines changed: 33 additions & 25 deletions
@@ -1,50 +1,58 @@
+import importlib
+import os
+from snakemake.utils import available_cpu_count
+
 configfile: "config.yaml"
 
-def generate_all_runs():
+def generate_all_runs(problem):
     import itertools
 
-    problem_size_pairs = []
-    for problem, problem_data in config["problems"].items():
-        if problem_data is None or "size" not in problem_data:
-            problem_size_pairs.append((problem, "0"))
-        else:
-            for size in problem_data["size"]:
-                problem_size_pairs.append((problem, size))
+    problem_data = config["problems"][problem]
+    if problem_data is None or "size" not in problem_data:
+        sizes = ["0"]
+    else:
+        sizes = problem_data["size"]
 
     runs = [
-        (problem, size, library, solver)
-        for (problem, size), library, solver in itertools.product(
-            problem_size_pairs,
+        (size, library, solver)
+        for size, library, solver in itertools.product(
+            sizes,
             config["libraries"],
             config["solvers"]
         )
     ]
+
+    benchmarks = [
+        importlib.import_module(f"{problem}.bm_{library.lower()}").Bench
+        for size, library, solver in runs
+    ]
+
+    runs = [
+        r for r, benchmark in zip(
+            runs, benchmarks
+        ) if benchmark.MAX_SIZE == None or r[0] <= benchmark.MAX_SIZE
+    ]
+
     return runs
 
 rule all:
     input:
-        expand("{problem}/results/{solver}.png", problem=config["problems"], solver=config["solvers"]),
-        "results.csv"
+        expand("{problem}/results/benchmark_results.png", problem=config["problems"])
 
 rule plot_results:
     input:
-        "results.csv"
+        lambda wildcards: [
+            f"{wildcards.problem}/results/{library}_{solver}_{size}.tsv"
+            for size, library, solver in generate_all_runs(wildcards.problem)
+        ]
     output:
-        mem_plot="{problem}/results/memory_{solver}.png",
-        time_plot="{problem}/results/time_{solver}.png"
+        "{problem}/results/benchmark_results.csv",
+        "{problem}/results/benchmark_results.png"
     script:
        "plot_results.py"
 
-rule collect_benchmarks:
-    input:
-        [f"{problem}/results/{library}_{solver}_{size}.tsv"
-         for problem, size, library, solver in generate_all_runs()]
-    output:
-        "results.csv"
-    script:
-        "collect_benchmarks.py"
-
 rule run_benchmark:
+    threads: available_cpu_count()  # Force to use all cores to avoid parallel execution
    benchmark:
        repeat("{problem}/results/{library}_{solver}_{size}.tsv", config["repeat"])
    shell:
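
Note: the reworked generate_all_runs imports {problem}.bm_{library.lower()} and filters runs on that module's Bench.MAX_SIZE. Those benchmark modules are not part of this commit, so the snippet below is only a minimal sketch of the interface the Snakefile appears to rely on; everything other than the MAX_SIZE attribute is hypothetical.

# Hypothetical benchmarks/facility_location/bm_pyoframe.py (illustration only).
class Bench:
    # Largest problem size this library/problem pair supports; None means no limit.
    # generate_all_runs keeps a run only if MAX_SIZE is None or size <= MAX_SIZE.
    MAX_SIZE = None

    def __init__(self, size):
        # Assumed constructor: build the model for the requested size.
        self.size = size

    def solve(self, solver):
        # Assumed entry point; the actual shell command used by run_benchmark
        # is not shown in this hunk.
        raise NotImplementedError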

benchmarks/collect_benchmarks.py

Lines changed: 0 additions & 34 deletions
This file was deleted.

benchmarks/config.yaml

Lines changed: 4 additions & 3 deletions
@@ -1,11 +1,12 @@
-repeat: 1
+repeat: 1 # TODO repeat
 problems:
-  facility_problem:
+  facility_location:
     size:
       - 25
       - 50
       - 75
       - 100
+      - 150
   # energy_problem:
 
 solvers:
@@ -15,7 +16,7 @@ solvers:
 
 libraries:
   - pyoframe
-  # - pyomo
+  - pyomo
   # - cvxpy
   - gurobipy
   # - jump
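
With this configuration, generate_all_runs (see the Snakefile above) enumerates one run per size, library, and solver, and rule plot_results reads one benchmark TSV per run. A rough sketch of the resulting input paths for facility_location follows; the solver name "gurobi" is an assumption, since the solvers block is collapsed in this hunk.

import itertools

# Sizes and libraries come from the config above; the solver list is assumed.
sizes = [25, 50, 75, 100, 150]
libraries = ["pyoframe", "pyomo", "gurobipy"]
solvers = ["gurobi"]  # hypothetical

inputs = [
    f"facility_location/results/{library}_{solver}_{size}.tsv"
    for size, library, solver in itertools.product(sizes, libraries, solvers)
]
# e.g. "facility_location/results/pyoframe_gurobi_25.tsv", ...
# (15 paths before any MAX_SIZE filtering in generate_all_runs)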

benchmarks/energy_planning/scripts/compute_capacity_factors.py.ipynb

Lines changed: 3 additions & 2 deletions
@@ -2,13 +2,14 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "id": "6211c84b",
    "metadata": {},
    "outputs": [],
    "source": [
     "import polars as pl\n",
-    "from util import mock_snakemake\n",
+    "\n",
+    "from benchmarks.util import mock_snakemake\n",
     "\n",
     "if \"snakemake\" not in globals() or hasattr(snakemake, \"mock\"): # noqa: F821\n",
     "    snakemake = mock_snakemake(\"compute_capacity_factors\")\n",

benchmarks/energy_planning/scripts/process_generator_data.py.ipynb

Lines changed: 2 additions & 1 deletion
@@ -8,7 +8,8 @@
    "outputs": [],
    "source": [
     "import polars as pl\n",
-    "from util import mock_snakemake\n",
+    "\n",
+    "from benchmarks.util import mock_snakemake\n",
     "\n",
     "if \"snakemake\" not in globals() or hasattr(snakemake, \"mock\"): # noqa: F821\n",
     "    snakemake = mock_snakemake(\"process_generator_data\")"

benchmarks/energy_planning/scripts/process_lines_json.py.ipynb

Lines changed: 2 additions & 1 deletion
@@ -11,7 +11,8 @@
     "\n",
     "import pandas as pd\n",
     "import polars as pl\n",
-    "from util import mock_snakemake\n",
+    "\n",
+    "from benchmarks.util import mock_snakemake\n",
     "\n",
     "if \"snakemake\" not in globals() or hasattr(snakemake, \"mock\"): # noqa: F821\n",
     "    snakemake = mock_snakemake(\"process_line_data\")"

benchmarks/energy_planning/scripts/process_load_data.py.ipynb

Lines changed: 3 additions & 2 deletions
@@ -2,13 +2,14 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "id": "372bc740",
    "metadata": {},
    "outputs": [],
    "source": [
     "import polars as pl\n",
-    "from util import mock_snakemake\n",
+    "\n",
+    "from benchmarks.util import mock_snakemake\n",
     "\n",
     "DEBUG = False\n",
     "\n",

benchmarks/energy_planning/scripts/simplify_network.py

Lines changed: 2 additions & 1 deletion
@@ -10,7 +10,8 @@
 """
 
 import polars as pl
-from util import mock_snakemake
+
+from benchmarks.util import mock_snakemake
 
 f_bus = pl.col("from_bus")
 t_bus = pl.col("to_bus")

benchmarks/energy_planning/scripts/util.py

Lines changed: 1 addition & 94 deletions
@@ -1,99 +1,6 @@
-"""
-`mock_snakemake` is inspired from PyPSA-Eur (MIT license, see https://github.com/PyPSA/pypsa-eur/blob/master/scripts/_helpers.py#L476)
-"""
-
 from __future__ import annotations
 
-from pathlib import Path
-
-
-def mock_snakemake(rulename, **wildcards):
-    """
-    This function is expected to be executed from the 'scripts'-directory of
-    the snakemake project. It returns a snakemake.script.Snakemake object,
-    based on the Snakefile.
-
-    If a rule has wildcards, you have to specify them in **wildcards.
-
-    **wildcards:
-        keyword arguments fixing the wildcards. Only necessary if wildcards are
-        needed.
-
-    Parameters
-    ----------
-    rulename: str
-        name of the rule for which the snakemake object should be generated
-    """
-    import os
-
-    import snakemake as sm
-    from snakemake.api import Workflow
-    from snakemake.common import SNAKEFILE_CHOICES
-    from snakemake.script import Snakemake
-    from snakemake.settings.types import (
-        ConfigSettings,
-        DAGSettings,
-        ResourceSettings,
-        StorageSettings,
-        WorkflowSettings,
-    )
-
-    script_dir = Path(__file__).parent.resolve()
-    root_dir = script_dir.parent.parent
-    current_dir = os.getcwd()
-    os.chdir(root_dir)
-
-    try:
-        for p in SNAKEFILE_CHOICES:
-            p = root_dir / p
-            if os.path.exists(p):
-                snakefile = p
-                break
-        else:
-            raise FileNotFoundError(
-                f"Could not find a Snakefile in {root_dir} or its subdirectories."
-            )
-        workflow = Workflow(
-            ConfigSettings(),
-            ResourceSettings(),
-            WorkflowSettings(),
-            StorageSettings(),
-            DAGSettings(rerun_triggers=[]),
-            storage_provider_settings=dict(),
-        )
-        workflow.include(snakefile)
-        workflow.global_resources = {}
-        rule = workflow.get_rule(rulename)
-        dag = sm.dag.DAG(workflow, rules=[rule])
-        job = sm.jobs.Job(rule, dag, wildcards)
-
-        def make_accessable(*ios):
-            for io in ios:
-                for i, _ in enumerate(io):
-                    io[i] = os.path.abspath(io[i])
-
-        make_accessable(job.input, job.output, job.log)
-        snakemake = Snakemake(
-            job.input,
-            job.output,
-            job.params,
-            job.wildcards,
-            job.threads,
-            job.resources,
-            job.log,
-            job.dag.workflow.config,
-            job.rule.name,
-            None,
-        )
-        # create log and output dir if not existent
-        for path in list(snakemake.log) + list(snakemake.output):
-            Path(path).parent.mkdir(parents=True, exist_ok=True)
-    finally:
-        os.chdir(current_dir)
-
-    snakemake.mock = True
-    return snakemake
-
+from benchmarks.util import mock_snakemake
 
 if __name__ == "__main__":
     # Example usage
