Skip to content

Commit 279b7d3

Browse files
authored
Update sweep bench (deprecating .jsonl support) (#289)
* Update sweep bench (deprecating .jsonl support) * Fix README.md
1 parent 98a264a commit 279b7d3

File tree

3 files changed

+58
-39
lines changed

3 files changed

+58
-39
lines changed

examples/sweep-bench/README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ in each ubatch-sized window. Only a single token sequence is used.
77
The benchmark steps are:
88

99
for each ubatch-sized window in context:
10+
1011
1. generate ubatch/4 tokens (not the whole window to save some time)
1112
2. measure generation performance
1213
3. remove generated tokens from KV cache

examples/sweep-bench/sweep-bench-plot.py

Lines changed: 43 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -9,27 +9,54 @@
99

1010
df = None
1111

12-
for jsonl_file in args.file:
13-
# Read JSONL file into DataFrame
14-
df_part = pd.read_json(jsonl_file, lines=True)
15-
df_part['label'] = jsonl_file
16-
if df is None:
17-
df = df_part
18-
else:
19-
df = pd.concat([df, df_part])
20-
21-
# Group by model and n_kv, calculate mean and std for both speed metrics
12+
#for jsonl_file in args.file:
13+
# # Read JSONL file into DataFrame
14+
# df_part = pd.read_json(jsonl_file, lines=True)
15+
# df_part['label'] = jsonl_file
16+
# if df is None:
17+
# df = df_part
18+
# else:
19+
# df = pd.concat([df, df_part])
20+
#
21+
22+
23+
24+
for md_file in args.file:
25+
# Read markdown table file into DataFrame
26+
df_part = pd.read_csv(md_file, sep=r'\s*\|\s*', engine='python',
27+
header=0, skiprows=[1])
28+
29+
# Clean up columns (remove empty columns from markdown formatting)
30+
df_part = df_part.iloc[:, 1:-1]
31+
df_part.columns = [col.strip() for col in df_part.columns]
32+
33+
# Rename columns to match expected names
34+
df_part = df_part.rename(columns={
35+
'N_KV': 'n_kv',
36+
'S_PP t/s': 'speed_pp',
37+
'S_TG t/s': 'speed_tg'
38+
})
39+
40+
# Convert to numeric types
41+
df_part['n_kv'] = pd.to_numeric(df_part['n_kv'])
42+
df_part['speed_pp'] = pd.to_numeric(df_part['speed_pp'])
43+
df_part['speed_tg'] = pd.to_numeric(df_part['speed_tg'])
44+
45+
# Add label and append to main DataFrame
46+
df_part['label'] = md_file
47+
df = pd.concat([df, df_part]) if df is not None else df_part
48+
49+
# Group by label and n_kv, calculate mean and std for both speed metrics
2250
df_grouped = df.groupby(['label', 'n_kv']).agg({
2351
'speed_pp': ['mean', 'std'],
2452
'speed_tg': ['mean', 'std']
2553
}).reset_index()
2654

2755
# Flatten multi-index columns
28-
df_grouped.columns = ['label', 'n_kv', 'speed_pp_mean', 'speed_pp_std',
56+
df_grouped.columns = ['label', 'n_kv', 'speed_pp_mean', 'speed_pp_std',
2957
'speed_tg_mean', 'speed_tg_std']
3058

3159
# Replace NaN with 0 (std for a single sample is NaN)
32-
3360
df_grouped['speed_pp_std'] = df_grouped['speed_pp_std'].fillna(0)
3461
df_grouped['speed_tg_std'] = df_grouped['speed_tg_std'].fillna(0)
3562

@@ -45,25 +72,20 @@
4572
# Create prompt processing plot
4673
plt.figure(figsize=(10, 6))
4774
ax1 = plt.gca()
48-
4975
plt.grid()
50-
5176
ax1.set_xticks(x_ticks)
5277

5378
# Plot each label's data
5479
for label, color in zip(labels, colors):
5580
label_data = df_grouped[df_grouped['label'] == label].sort_values('n_kv')
56-
57-
# Plot prompt processing
58-
pp = ax1.errorbar(label_data['n_kv'], label_data['speed_pp_mean'],
59-
yerr=label_data['speed_pp_std'], color=color,
81+
pp = ax1.errorbar(label_data['n_kv'], label_data['speed_pp_mean'],
82+
yerr=label_data['speed_pp_std'], color=color,
6083
marker='o', linestyle='-', label=label)
61-
84+
6285
# Add labels and title
6386
ax1.set_xlabel('Context Length (tokens)')
6487
ax1.set_ylabel('Prompt Processing Rate (t/s)')
6588
plt.title('Prompt Processing Performance Comparison')
66-
6789
ax1.legend(loc='upper right')
6890

6991
# Adjust layout and save
@@ -74,24 +96,20 @@
7496
# Create token generation plot
7597
plt.figure(figsize=(10, 6))
7698
ax1 = plt.gca()
77-
7899
plt.grid()
79100
ax1.set_xticks(x_ticks)
80101

81102
# Plot each model's data
82103
for label, color in zip(labels, colors):
83104
label_data = df_grouped[df_grouped['label'] == label].sort_values('n_kv')
84-
85-
# Plot token generation
86105
tg = ax1.errorbar(label_data['n_kv'], label_data['speed_tg_mean'],
87-
yerr=label_data['speed_tg_std'], color=color,
106+
yerr=label_data['speed_tg_std'], color=color,
88107
marker='s', linestyle='-', label=label)
89108

90109
# Add labels and title
91110
ax1.set_xlabel('Context Length (n_kv)')
92111
ax1.set_ylabel('Token Generation Rate (t/s)')
93112
plt.title('Token Generation Performance Comparison')
94-
95113
ax1.legend(loc='upper right')
96114

97115
# Adjust layout and save

examples/sweep-bench/sweep-bench.cpp

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,9 @@
1818
#include <vector>
1919

2020
static void print_usage(int, char ** argv) {
21-
LOG("\nexample usage:\n");
22-
LOG("\n %s -m model.gguf -c 8192 -b 2048 -ub 512\n", argv[0]);
23-
LOG("\n");
21+
LOG_TEE("\nexample usage:\n");
22+
LOG_TEE("\n %s -m model.gguf -c 8192 -b 2048 -ub 512\n", argv[0]);
23+
LOG_TEE("\n");
2424
}
2525

2626
int main(int argc, char ** argv) {
@@ -83,7 +83,7 @@ int main(int argc, char ** argv) {
8383

8484
const int ret = llama_decode(ctx, batch_view);
8585
if (ret != 0) {
86-
LOG("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret);
86+
LOG_TEE("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret);
8787
return false;
8888
}
8989

@@ -97,11 +97,11 @@ int main(int argc, char ** argv) {
9797
const unsigned int tg = params.n_ubatch / 4;
9898

9999
if (!params.sweep_bench_output_jsonl) {
100-
LOG("\n");
101-
LOG("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
102-
LOG("\n");
103-
LOG("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s");
104-
LOG("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------");
100+
LOG_TEE("\n");
101+
LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
102+
LOG_TEE("\n");
103+
LOG_TEE("|%6s | %6s | %6s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s");
104+
LOG_TEE("|%6s-|-%6s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "------", "--------", "--------", "--------", "--------");
105105
}
106106

107107
llama_batch batch = llama_batch_init(n_kv_max, 0, 1);
@@ -111,7 +111,7 @@ int main(int argc, char ** argv) {
111111
llama_batch_add(batch, bos, 0, { 0 }, false);
112112

113113
if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
114-
LOG("%s: llama_decode() failed\n", __func__);
114+
LOG_TEE("%s: llama_decode() failed\n", __func__);
115115
return 1;
116116
}
117117
}
@@ -131,7 +131,7 @@ int main(int argc, char ** argv) {
131131
llama_batch_add(batch, std::rand() % n_vocab, n_kv + i, { 0 }, true);
132132

133133
if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
134-
LOG("%s: llama_decode() failed\n", __func__);
134+
LOG_TEE("%s: llama_decode() failed\n", __func__);
135135
return 1;
136136
}
137137
}
@@ -153,7 +153,7 @@ int main(int argc, char ** argv) {
153153
const auto t_pp_start = ggml_time_us();
154154

155155
if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
156-
LOG("%s: llama_decode() failed\n", __func__);
156+
LOG_TEE("%s: llama_decode() failed\n", __func__);
157157
return 1;
158158
}
159159

@@ -167,14 +167,14 @@ int main(int argc, char ** argv) {
167167
const float speed_tg = tg / t_tg;
168168

169169
if(params.sweep_bench_output_jsonl) {
170-
LOG(
170+
LOG_TEE(
171171
"{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, "
172172
"\"pp\": %d, \"tg\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f }\n",
173173
n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch,
174174
pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg
175175
);
176176
} else {
177-
LOG("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg);
177+
LOG_TEE("|%6d | %6d | %6d | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, n_kv, t_pp, speed_pp, t_tg, speed_tg);
178178
}
179179
}
180180

0 commit comments

Comments
 (0)