
Commit 83bba6d

Merge branch 'ikawrakow:main' into main

2 parents d70a752 + 658ced0

File tree: 13 files changed, +155 −20 lines

common/common.cpp

Lines changed: 8 additions & 0 deletions
```diff
@@ -1394,6 +1394,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.merge_qkv = true;
         return true;
     }
+    if (arg == "-khad" || arg == "--k-cache-hadamard") {
+        params.k_cache_hadamard = true;
+        return true;
+    }
     if (arg == "--numa") {
         CHECK_ARG
         std::string value(argv[i]);
@@ -2074,6 +2078,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-gr, --graph-reuse", "enable graph reuse (default: %s)", params.graph_reuse ? "enabled" : "disabled" });
     options.push_back({ "*", "-ser, --smart-expert-reduction", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
+    options.push_back({ "*", "-khad, --k-cache-hadamard,", "use Hadamard transform for K-cache (default: %d)", params.k_cache_hadamard});
     options.push_back({ "*", "-vq, --validate-quants", "validate quantized data while loading the model (default: %d)", params.validate_quants});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                                                     "in conversation mode, this will be used as system prompt\n"
@@ -3063,9 +3068,10 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.fused_mmad = params.fused_mmad;
     cparams.rope_cache = params.rope_cache;
     cparams.graph_reuse = params.graph_reuse;
+    cparams.k_cache_hadamard = params.k_cache_hadamard;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;

     cparams.type_k = kv_cache_type_from_str(params.cache_type_k);
     cparams.type_v = kv_cache_type_from_str(params.cache_type_v);
@@ -4209,6 +4216,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "fused_mmad: %s # default: true\n", params.fused_mmad ? "true" : "false");
     fprintf(stream, "rope_cache: %s # default: false\n", params.rope_cache ? "true" : "false");
     fprintf(stream, "graph_reuse: %s # default: false\n", params.graph_reuse ? "true" : "false");
+    fprintf(stream, "k_cache_hadamard: %s # default: false\n", params.k_cache_hadamard ? "true" : "false");
     fprintf(stream, "ser: %d,%g # default: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);
```
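For orientation, a minimal sketch of how the new flag travels from user code into the context parameters. It mirrors the functions patched above, but the snippet itself is illustrative, not part of the commit:

```cpp
#include "common.h"

// Hypothetical programmatic equivalent of passing -khad on the command line.
// Assumes the gpt_params / llama_context_params fields added in this commit.
gpt_params params;
params.k_cache_hadamard = true;     // same effect as -khad / --k-cache-hadamard
params.cache_type_k     = "q8_0";   // the flag only makes sense with a quantized K-cache

// The helper patched above copies the flag into the context parameters:
llama_context_params cparams = llama_context_params_from_gpt_params(params);
// cparams.k_cache_hadamard == true, and llm_build_context will pick it up
```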

common/common.h

Lines changed: 1 addition & 0 deletions
```diff
@@ -276,6 +276,7 @@ struct gpt_params {
     bool validate_quants = false; // if true, check for NaNs while loading the model
     bool only_active_exps = true; // if true, offload only active experts (relevant only for hybrid CPU/GPU)
     bool merge_qkv = false;       // if true, merge separate Q, K, V tensors into a single, contiguous tensor
+    bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache)

     std::string cache_type_k = "f16"; // KV cache data type for the K
     std::string cache_type_v = "f16"; // KV cache data type for the V
```

ggml/include/ggml.h

Lines changed: 6 additions & 0 deletions
```diff
@@ -621,6 +621,7 @@ extern "C" {
         GGML_OP_FUSED_UP_GATE,
         GGML_OP_MOE_FUSED_UP_GATE,
         GGML_OP_MUL_MULTI_ADD,
+        GGML_OP_HADAMARD,

         GGML_OP_SCALE,
         GGML_OP_SET,
@@ -1092,6 +1093,11 @@ extern "C" {
             struct ggml_tensor  * a,
             struct ggml_tensor  * b);

+    GGML_API struct ggml_tensor * ggml_hadamard(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            int                   n);
+
     // dst = a
     // view(dst, nb1, nb2, nb3, offset) += b
     // return dst
```
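A hedged sketch of exercising the new op in isolation; the surrounding ggml boilerplate is standard, the sizes are arbitrary, and `ggml_graph_compute_with_ctx` is the usual single-context compute helper:

```cpp
#include "ggml.h"

int main() {
    struct ggml_init_params ip = { /*mem_size =*/ 16*1024*1024, /*mem_buffer =*/ NULL, /*no_alloc =*/ false };
    struct ggml_context * ctx = ggml_init(ip);

    // One row of 512 floats, transformed independently in blocks of n = 128
    // (n must be a power of 2 and must divide ne[0], per the asserts in ggml.c).
    struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 512);
    for (int i = 0; i < 512; ++i) ((float *)a->data)[i] = (float)i;

    struct ggml_tensor * h = ggml_hadamard(ctx, a, 128);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, h);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 4);

    ggml_free(ctx);
    return 0;
}
```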

ggml/src/ggml.c

Lines changed: 45 additions & 2 deletions
```diff
@@ -4223,6 +4223,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
     "FUSED_UP_GATE",
     "MOE_FUSED_UP_GATE",
     "MUL_MULTI_ADD",
+    "HADAMARD",

     "SCALE",
     "SET",
@@ -4292,7 +4293,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
     "GLU",
 };

-static_assert(GGML_OP_COUNT == 91, "GGML_OP_COUNT != 91");
+static_assert(GGML_OP_COUNT == 92, "GGML_OP_COUNT != 92");

 static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "none",
@@ -4330,6 +4331,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "X*Y1&X*Y2",
     "X*Y1&X*Y2",
     "x1*y1+x2*y2+...",
+    "hadamard(x)",

     "x*v",
     "y-\\>view(x)",
@@ -4399,7 +4401,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "glu(x)",
 };

-static_assert(GGML_OP_COUNT == 91, "GGML_OP_COUNT != 91");
+static_assert(GGML_OP_COUNT == 92, "GGML_OP_COUNT != 92");

 static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");

@@ -6147,6 +6149,38 @@ struct ggml_tensor * ggml_mul_multi_add(
     return result;
 }

+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#include <intrin.h>
+#include <ammintrin.h>
+#include <nmmintrin.h>
+#include <immintrin.h>
+#include <stdlib.h>
+inline int popcount(uint32_t x) { return __popcnt(x); }
+#else
+inline int popcount(uint32_t x) { return __builtin_popcount(x); }
+#endif
+
+struct ggml_tensor * ggml_hadamard(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a,
+        int                   n) {
+
+    GGML_ASSERT(a->type == GGML_TYPE_F32); // will not bother implementing for other data types
+    GGML_ASSERT(n > 1);                    // no point in Hadamard transforms with less than 2 elements
+    GGML_ASSERT(a->ne[0] % n == 0);
+    GGML_ASSERT(popcount(n) == 1);         // must be a power of 2
+
+    struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, a->ne);
+
+    result->op     = GGML_OP_HADAMARD;
+    result->src[0] = a;
+
+    result->op_params[0] = n;
+
+    return result;
+}
+
 // ggml_add_cast

 static struct ggml_tensor * ggml_add_cast_impl(
@@ -22660,6 +22694,10 @@ static int ggml_compute_forward(struct ggml_compute_params * params, struct ggml
             {
                 iqk_mul_multi_add(tensor, params->ith, params->nth);
             } break;
+        case GGML_OP_HADAMARD:
+            {
+                iqk_hadamard(tensor, params->ith, params->nth);
+            } break;
         case GGML_OP_ACC:
             {
                 ggml_compute_forward_acc(params, tensor);
@@ -23510,6 +23548,10 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
             {
                 GGML_ABORT("fatal error"); // TODO: implement
             }
+        case GGML_OP_HADAMARD:
+            {
+                GGML_ABORT("fatal error"); // TODO: implement
+            }
         case GGML_OP_CONCAT:
             {
                 GGML_ABORT("fatal error"); // TODO: implement
@@ -24625,6 +24667,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
         case GGML_OP_ACC:
         case GGML_OP_MULTI_ADD:
         case GGML_OP_MUL_MULTI_ADD:
+        case GGML_OP_HADAMARD:
            {
                n_tasks = n_threads;
            } break;
```

ggml/src/iqk/iqk_common.h

Lines changed: 19 additions & 0 deletions
```diff
@@ -922,3 +922,22 @@ static IQK_ALWAYS_INLINE void prepare_iq4_nl_quants_r8(const int8x16_t& values,
 #endif

 #endif
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#include <intrin.h>
+#include <ammintrin.h>
+#include <nmmintrin.h>
+#include <immintrin.h>
+#include <stdlib.h>
+inline int popcount(uint8_t x) { return __popcnt(x); }
+inline int popcount(uint16_t x) { return __popcnt(x); }
+inline int popcount(uint32_t x) { return __popcnt(x); }
+inline int popcount(uint64_t x) { return _mm_popcnt_u64(x); }
+#else
+constexpr int popcount(uint8_t x) { return __builtin_popcount(x); }
+constexpr int popcount(uint16_t x) { return __builtin_popcount(x); }
+constexpr int popcount(uint32_t x) { return __builtin_popcount(x); }
+constexpr int popcount(uint64_t x) { return __builtin_popcountll(x); }
+#endif
```
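These overloads back the power-of-two assert in `ggml_hadamard`: a power of two has exactly one set bit. A small illustration (GCC/clang evaluate `__builtin_popcount` in constant expressions, so these check at build time):

```cpp
// popcount(n) == 1 is equivalent to the classic (n & (n - 1)) == 0 test for n > 0.
static_assert(__builtin_popcount(128u) == 1, "128 = 2^7 has exactly one set bit");
static_assert(__builtin_popcount(96u)  == 2, "96 = 64 + 32 is not a power of 2");
```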

ggml/src/iqk/iqk_cpu_ops.cpp

Lines changed: 49 additions & 0 deletions
```diff
@@ -8,6 +8,7 @@

 #include "iqk_cpu_ops.h"
 #include "iqk_utils.h"
+#include "iqk_common.h"
 #include "ggml.h"

 #include <cstdint>
@@ -454,3 +455,51 @@ void iqk_mul_multi_add(struct ggml_tensor * dst, int ith, int nth) {
         }
     }
 }
+
+namespace {
+template <typename T>
+void fast_ht(int n, T * values) {
+    constexpr float ksqrt2 = 0.707106781f;
+    float scale = 1;
+    for (int h = 1; h < n; h <<= 1) {
+        for (int i = 0; i < n; i += 2*h) {
+            for (int j = i; j < i + h; ++j) {
+                T x = values[j], y = values[j + h];
+                values[j+0] = x + y;
+                values[j+h] = x - y;
+            }
+        }
+        scale *= ksqrt2;
+    }
+    for (int i = 0; i < n; ++i) values[i] *= scale;
+}
+}
+
+void iqk_hadamard(struct ggml_tensor * dst, int ith, int nth) {
+    auto src = dst->src[0];
+    GGML_ASSERT(src->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(ggml_are_same_shape(src, dst));
+    int nh = dst->op_params[0];
+    GGML_ASSERT(nh > 1 && popcount(uint32_t(nh)) == 1);
+    GGML_ASSERT(dst->ne[0] % nh == 0);
+
+    int nc = dst->ne[0]/nh;
+    int nr = ggml_nrows(dst) * nc;
+
+    int npt   = (nr + nth - 1)/nth;
+    int first = npt*ith;
+    int last  = std::min(first + npt, nr);
+
+    for (int ir = first; ir < last; ++ir) {
+        int i3 = ir / (dst->ne[1] * dst->ne[2] * nc);
+        int i2 = (ir - i3*dst->ne[1] * dst->ne[2] * nc)/(dst->ne[1] * nc);
+        int i1 = (ir - i3*dst->ne[1] * dst->ne[2] * nc - i2*dst->ne[1]*nc)/nc;
+        int ic = (ir - i3*dst->ne[1] * dst->ne[2] * nc - i2*dst->ne[1]*nc - i1*nc);
+
+        auto x = (const float *)((const char *)src->data + i3*src->nb[3] + i2*src->nb[2] + i1*src->nb[1]) + ic*nh;
+        auto y = (      float *)((      char *)dst->data + i3*dst->nb[3] + i2*dst->nb[2] + i1*dst->nb[1]) + ic*nh;
+        std::memcpy(y, x, nh*sizeof(float));
+        fast_ht(nh, y);
+    }
+}
```
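To make the normalization concrete: each butterfly stage grows magnitudes by up to a factor of 2, and the accumulated `ksqrt2` factors work out to an overall 1/sqrt(n) scale, making the transform orthonormal; applying it twice therefore reproduces the input. A self-contained demo using a copy of `fast_ht` from the diff above:

```cpp
#include <cstdio>

// Verbatim copy of fast_ht from iqk_cpu_ops.cpp, for a standalone check.
template <typename T>
void fast_ht(int n, T * values) {
    constexpr float ksqrt2 = 0.707106781f;
    float scale = 1;
    for (int h = 1; h < n; h <<= 1) {
        for (int i = 0; i < n; i += 2*h) {
            for (int j = i; j < i + h; ++j) {
                T x = values[j], y = values[j + h];
                values[j+0] = x + y;
                values[j+h] = x - y;
            }
        }
        scale *= ksqrt2;
    }
    for (int i = 0; i < n; ++i) values[i] *= scale;
}

int main() {
    float v[4] = {1, 0, 0, 0};
    fast_ht(4, v);                        // -> {0.5, 0.5, 0.5, 0.5}: a row of H_4 / sqrt(4)
    fast_ht(4, v);                        // orthonormal involution -> back to {1, 0, 0, 0}
    for (float x : v) std::printf("%g ", x);
    std::printf("\n");                    // prints: 1 0 0 0 (up to float rounding)
    return 0;
}
```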

ggml/src/iqk/iqk_cpu_ops.h

Lines changed: 2 additions & 0 deletions
```diff
@@ -28,6 +28,8 @@ void iqk_openai_experts(struct ggml_tensor * topk, struct ggml_tensor * softmax,

 void iqk_mul_multi_add(struct ggml_tensor * dst, int ith, int nth);

+void iqk_hadamard(struct ggml_tensor * dst, int ith, int nth);
+
 #ifdef __cplusplus
 }
 #endif
```

ggml/src/iqk/iqk_quantize.cpp

Lines changed: 0 additions & 18 deletions
```diff
@@ -32,24 +32,6 @@
 #include <string>
 #include <functional>

-#if defined(_MSC_VER)
-#pragma warning(disable: 4244 4267) // possible loss of data
-#include <intrin.h>
-#include <ammintrin.h>
-#include <nmmintrin.h>
-#include <immintrin.h>
-#include <stdlib.h>
-inline int popcount(uint8_t x) { return __popcnt(x); }
-inline int popcount(uint16_t x) { return __popcnt(x); }
-inline int popcount(uint32_t x) { return __popcnt(x); }
-inline int popcount(uint64_t x) { return _mm_popcnt_u64(x); }
-#else
-constexpr int popcount(uint8_t x) { return __builtin_popcount(x); }
-constexpr int popcount(uint16_t x) { return __builtin_popcount(x); }
-constexpr int popcount(uint32_t x) { return __builtin_popcount(x); }
-constexpr int popcount(uint64_t x) { return __builtin_popcountll(x); }
-#endif
-
 namespace {

 inline int nearest_int(float fval) {
```
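The deleted helpers are byte-for-byte the overloads just added to `ggml/src/iqk/iqk_common.h` above; this is a move to a shared header (now also needed by `iqk_cpu_ops.cpp`), not a functional change.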

include/llama.h

Lines changed: 1 addition & 0 deletions
```diff
@@ -440,6 +440,7 @@ extern "C" {
         int   min_experts;
         float thresh_experts;
         bool  only_active_experts;
+        bool  k_cache_hadamard; // if true, apply Hadamard transform to K-cache

         // Abort callback
         // if it returns true, execution of llama_decode() will be aborted
```

src/llama-build-context.cpp

Lines changed: 14 additions & 0 deletions
```diff
@@ -52,6 +52,7 @@ llm_build_context::llm_build_context(
     fused_up_gate    (cparams.fused_up_gate),
     fused_mmad       (cparams.fused_mmad),
     rope_cache       (cparams.rope_cache),
+    k_cache_hadamard (cparams.k_cache_hadamard),
     min_experts      (cparams.min_experts),
     thresh_experts   (cparams.thresh_experts),
     pooling_type     (cparams.pooling_type),
@@ -1466,6 +1467,13 @@ ggml_tensor * llm_build_context::llm_build_kv(
     const llama_hparams & hparams = lctx.model.hparams;
     const llama_cparams & cparams = lctx.cparams;

+    if (cparams.k_cache_hadamard) {
+        q_cur = ggml_hadamard(ctx, q_cur, hparams.n_embd_head_k);
+        k_cur = ggml_hadamard(ctx, k_cur, hparams.n_embd_head_k);
+        cb(q_cur, "Qcur_hadamard", il);
+        cb(k_cur, "Kcur_hadamard", il);
+    }
+
     // these nodes are added to the graph together so that they are not reordered
     // by doing so, the number of splits in the graph is reduced
     ggml_build_forward_expand(graph, q_cur);
@@ -9375,6 +9383,12 @@ ggml_tensor * llm_build_context::build_std_attention(ggml_cgraph * gf, ggml_tens
         Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
         cb(Qcur, "Qcur_temp_scaled", il_cb);
     }
+    if (cparams.k_cache_hadamard) {
+        Qcur = ggml_hadamard(ctx0, Qcur, hparams.n_embd_head_k);
+        Kcur = ggml_hadamard(ctx0, Kcur, hparams.n_embd_head_k);
+        cb(Qcur, "Qcur_hadamard", il_cb);
+        cb(Kcur, "Kcur_hadamard", il_cb);
+    }
     ggml_build_forward_expand(gf, Qcur);
     ggml_build_forward_expand(gf, Kcur);
     ggml_build_forward_expand(gf, Vcur);
```
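Worth spelling out the identity both call sites rely on: with the 1/sqrt(n) normalization used by `fast_ht`, the per-head Hadamard matrix H is orthogonal, so rotating Q and K by the same transform leaves every attention logit unchanged:

    (H q)^T (H k) = q^T H^T H k = q^T k,   since H^T H = I.

The payoff is purely on the quantization side: the rotation tends to spread per-channel outliers across the head dimension before K is stored, which is presumably why the comment in common.h notes the flag only makes sense with a quantized K-cache.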
