Skip to content

Commit bcdd303

Browse files
Authored by ikawrakow (Iwan Kawrakow) and a co-author
Attempt to fix #1014 (#1017)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
1 parent e60f718 commit bcdd303

File tree

1 file changed

+6
-2
lines changed

1 file changed

+6
-2
lines changed

ggml/src/ggml-cuda/fattn.cu

Lines changed: 6 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -18,6 +18,10 @@
1818

1919
#define FATTN_KQ_STRIDE 256
2020

21+
static inline bool mma_better_than_turing(const int cc) {
22+
return GGML_CUDA_CC_IS_NVIDIA(cc) && ggml_cuda_highest_compiled_arch(cc) > CC_TURING;
23+
}
24+
2125
void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
2226
const ggml_tensor * KQV = dst;
2327
const ggml_tensor * Q = dst->src[0];
@@ -102,7 +106,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
102106
// Hence, we use it only for DeepSeek with MLA enabled, where head sizes are 576, 512,
103107
// so no other implementation works.
104108
//
105-
if (new_mma_available(cc) && ((K->ne[0] == 576 && V->ne[0] == 512) || (K->ne[0] == 192 && V->ne[0] == 128))) {
109+
if (new_mma_available(cc) && ((K->ne[0] == 576 && V->ne[0] == 512) || (K->ne[0] == 192 && V->ne[0] == 128 && mma_better_than_turing(cc)))) {
106110
ggml_cuda_flash_attn_ext_mma_new(ctx, dst);
107111
return;
108112
}
@@ -172,7 +176,7 @@ bool ggml_cuda_fattn_is_supported(ggml_backend_cuda_context & ctx, const ggml_te
172176
return ggml_cuda_fattn_vec_f32_is_supported(ctx, dst);
173177
}
174178

175-
if (new_mma_available(cc) && (Q->ne[0] == 576 || (K->ne[0] == 192 && V->ne[0] == 128))) {
179+
if (new_mma_available(cc) && (Q->ne[0] == 576 || (K->ne[0] == 192 && V->ne[0] == 128 && mma_better_than_turing(cc)))) {
176180
return true;
177181
}
178182

0 commit comments

Comments (0)