Skip to content

Commit f9307d7

Browse files
ikawrakow (Iwan Kawrakow)
and one other authored
Improve DeepSeek batched processing speed (#282)
* Improve DeepSeek batched processing speed * Revert the commented out section in iqk_mul_mat.cpp It does have some benefit at long contexts. --------- Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
1 parent 5a4855e commit f9307d7

File tree

2 files changed

+15
-3
lines changed

2 files changed

+15
-3
lines changed

ggml/src/iqk/iqk_mul_mat.cpp

Lines changed: 14 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -17265,13 +17265,25 @@ template <int step_k, typename KHelper, typename VHelper>
1726517265
inline void iqk_deepseek_helper(KHelper& kh, VHelper& vh,
1726617266
int nq1, int nk1, int stride_q, int stride_m, int stride_qkv,
1726717267
const float * q, const char * mask, float scale, float softcap, float * qkv, float * M, float * S) {
17268-
if (nq1 % 8 == 0) {
17268+
if (nq1 >= 8) {
1726917269
FlashAttn<576, 512, 8, step_k> fa(scale, softcap);
1727017270
fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
17271-
} else {
17271+
}
17272+
else if (nq1 >= 4) {
17273+
FlashAttn<576, 512, 4, step_k> fa(scale, softcap);
17274+
fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
17275+
}
17276+
else {
1727217277
FlashAttn<576, 512, 1, step_k> fa(scale, softcap);
1727317278
fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
1727417279
}
17280+
//if (nq1 % 8 == 0) {
17281+
// FlashAttn<576, 512, 8, step_k> fa(scale, softcap);
17282+
// fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
17283+
//} else {
17284+
// FlashAttn<576, 512, 1, step_k> fa(scale, softcap);
17285+
// fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
17286+
//}
1727517287
}
1727617288

1727717289
template <int step_k>

src/llama.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13896,7 +13896,7 @@ struct llm_build_context {
1389613896

1389713897
// whether to use n_tokens as the matrix dimension during multiplication or n_head
1389813898
// n_tokens is higher during prompt processing, this allows to optimize for this case
13899-
bool pp_opt = n_tokens > n_head;
13899+
bool pp_opt = n_tokens >= 128; // Is it a fixed constant or is it somehow relared to n_head? original: n_tokens > n_head;
1390013900

1390113901
for (int il = 0; il < n_layer; ++il) {
1390213902
struct ggml_tensor * inpSA = inpL;

0 commit comments

Comments (0)