
Commit adb4fe2

Merge pull request #193 from menloresearch/update-dev-from-master-2025-08-04-00-14
Sync master with upstream release b6081
2 parents: a62f7d3 + d31192b

File tree

14 files changed: +156, -46 lines


common/arg.cpp

Lines changed: 9 additions & 0 deletions

```diff
@@ -2647,6 +2647,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.n_out_freq = value;
         }
     ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
+    add_opt(common_arg(
+        {"--output-format"}, "{gguf,dat}",
+        string_format("output format for imatrix file (default: %s)", params.imat_dat ? "dat" : "gguf"),
+        [](common_params & params, const std::string & value) {
+            /**/ if (value == "gguf") { params.imat_dat = false; }
+            else if (value == "dat") { params.imat_dat = true; }
+            else { throw std::invalid_argument("invalid output format"); }
+        }
+    ).set_examples({LLAMA_EXAMPLE_IMATRIX}));
     add_opt(common_arg(
         {"--save-frequency"}, "N",
         string_format("save an imatrix copy every N iterations (default: %d)", params.n_save_freq),
```

common/common.h

Lines changed: 1 addition & 0 deletions

```diff
@@ -439,6 +439,7 @@ struct common_params {
     int32_t n_out_freq  = 10; // output the imatrix every n_out_freq iterations
     int32_t n_save_freq = 0;  // save the imatrix every n_save_freq iterations
     int32_t i_chunk     = 0;  // start processing from this chunk
+    bool    imat_dat    = false; // whether the legacy imatrix.dat format should be output
 
     bool process_output = false; // collect data for the output tensor
     bool compute_ppl    = true;  // whether to compute perplexity
```

convert_hf_to_gguf.py

Lines changed: 11 additions & 0 deletions

```diff
@@ -852,6 +852,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "2085e1638f6c377a0aa4ead21b27bb4cb941bf800df86ed391011769c1758dfb":
             # ref: https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B
             res = "exaone4"
+        if chkhsh == "a1e163ecab2e718a4c829d1148b6e86824ec36163bb71941c3dca9cd5ac25756":
+            # ref: https://huggingface.co/JetBrains/Mellum-4b-base
+            res = "mellum"
 
         if res is None:
             logger.warning("\n")
@@ -6059,6 +6062,7 @@ def prepare_tensors(self):
 
 @ModelBase.register("DeepseekV2ForCausalLM")
 @ModelBase.register("DeepseekV3ForCausalLM")
+@ModelBase.register("KimiVLForConditionalGeneration")
 class DeepseekV2Model(TextModel):
     model_arch = gguf.MODEL_ARCH.DEEPSEEK2
 
@@ -6161,6 +6165,13 @@ def set_gguf_parameters(self):
     _experts: list[dict[str, Tensor]] | None = None
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # skip vision tensors and remove "language_model." for Kimi-VL
+        if "vision_tower" in name or "multi_modal_projector" in name:
+            return []
+
+        if name.startswith("language_model."):
+            name = name.replace("language_model.", "")
+
         # rename e_score_correction_bias tensors
         if name.endswith("e_score_correction_bias"):
             name = name.replace("e_score_correction_bias", "e_score_correction.bias")
```

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions

```diff
@@ -138,6 +138,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "midm-2.0", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/K-intelligence/Midm-2.0-Base-Instruct", },
     {"name": "lfm2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LiquidAI/LFM2-Tokenizer"},
     {"name": "exaone4", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B", },
+    {"name": "mellum", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/JetBrains/Mellum-4b-base", },
 ]
 
 # some models are known to be broken upstream, so we will skip them as exceptions
```
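This registry entry pairs with the new `chkhsh` branch in convert_hf_to_gguf.py above: convert_hf_to_gguf_update.py is the script that regenerates those pre-tokenizer hash checks from the listed repos, here adding `mellum`.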

ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 12 additions & 1 deletion

```diff
@@ -3096,6 +3096,10 @@ static void ggml_vk_load_shaders(vk_device& device) {
     uint32_t conv2d_SHMEM_PAD = 4;
     bool conv2d_UNROLL = true;
 
+    if (device->coopmat2) {
+        conv2d_SHMEM_PAD = 8; // 8 float16_t
+    }
+
     if (device->vendor_id == VK_VENDOR_ID_INTEL) {
         conv2d_SHMEM_PAD = 0;
         conv2d_UNROLL = false;
@@ -3154,7 +3158,14 @@ static void ggml_vk_load_shaders(vk_device& device) {
     std::array<uint32_t, 3> wg_denoms = { conv2d_BS_K, conv2d_BS_NPQ, 1 };
     std::vector<uint32_t> spec_constants = { conv2d_WG_SIZE, conv2d_BS_K, conv2d_BS_CRS, conv2d_BS_NPQ, conv2d_TS_K, use_collectives, conv2d_SHMEM_PAD };
 
-    if (conv2d_UNROLL) {
+    if (device->coopmat2) {
+        ggml_vk_create_pipeline(
+            device, device->pipeline_conv2d_f32[s], "conv2d_f32", conv2d_f32_cm2_len, conv2d_f32_cm2_data, "main", 3,
+            sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives);
+        ggml_vk_create_pipeline(
+            device, device->pipeline_conv2d_f16_f32[s], "conv2d_f16_f32", conv2d_f16_f32_cm2_len, conv2d_f16_f32_cm2_data, "main", 3,
+            sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives);
+    } else if (conv2d_UNROLL) {
         ggml_vk_create_pipeline(
             device, device->pipeline_conv2d_f32[s], "conv2d_f32", conv2d_f32_unroll_len, conv2d_f32_unroll_data, "main", 3,
             sizeof(vk_op_conv2d_push_constants), wg_denoms, spec_constants, 1, true, use_collectives);
```
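Pipeline selection now prefers the coopmat2 conv2d variants whenever `device->coopmat2` is set, falling back to the unrolled and then the baseline shaders. The SHMEM_PAD bump to 8 on such devices is 8 `float16_t` elements (16 bytes), presumably so the padded shared-memory row stride suits `coopMatLoad`.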

ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp

Lines changed: 54 additions & 8 deletions

```diff
@@ -1,6 +1,11 @@
 #version 450
 
 #extension GL_EXT_control_flow_attributes : enable
+#ifdef COOPMAT2
+#extension GL_NV_cooperative_matrix2 : enable
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#extension GL_KHR_memory_scope_semantics : enable
+#endif
 
 #ifdef USE_COLLECTIVES
 # extension GL_KHR_shader_subgroup_shuffle : enable
@@ -91,6 +96,12 @@ uint32_t n_elems_out = K * NPQ;
 // Number of blocktiles per input
 uint32_t NB_CRS = splitWork(CRS, BS_CRS);
 
+#ifdef COOPMAT2
+#define SHMEM_TYPE float16_t
+#else
+#define SHMEM_TYPE float
+#endif
+
 const uint32_t Ash_stride = BS_CRS + SHMEM_PAD;
 const uint32_t Bsh_stride = BS_NPQ + SHMEM_PAD;
 
@@ -100,8 +111,8 @@ const uint32_t Bsh_numel = BS_CRS * BS_NPQ;
 const uint32_t Ash_len = BS_K * Ash_stride;
 const uint32_t Bsh_len = BS_CRS * Bsh_stride;
 
-shared float Ash[Ash_len]; // K x CRS
-shared float Bsh[Bsh_len]; // CRS x NPQ
+shared SHMEM_TYPE Ash[Ash_len]; // K x CRS
+shared SHMEM_TYPE Bsh[Bsh_len]; // CRS x NPQ
 
 // Threadtile sizes
 const uint32_t TS_NPQ = BS_K * BS_NPQ / WG_SIZE / TS_K;
@@ -110,10 +121,6 @@ const uint32_t TS_NPQ = BS_K * BS_NPQ / WG_SIZE / TS_K;
 const uint32_t NT_K = BS_K / TS_K;
 const uint32_t NT_NPQ = BS_NPQ / TS_NPQ;
 
-float regA[TS_K];
-float regB[TS_NPQ];
-float regC[TS_K][TS_NPQ];
-
 /*
 Compute
 KxCRS @ CRSxNPQ = K x NPQ
@@ -145,12 +152,36 @@ uint fastdiv(uint n, uint mp, uint L) {
     return (msbs + n) >> L;
 }
 
+#ifdef COOPMAT2
+#define ACC_TYPE float16_t
+
+ACC_TYPE perElemOpStore(const in uint32_t r, const in uint32_t c, const in ACC_TYPE elem)
+{
+    uint32_t K_idx   = B_idx_K * BS_K + r;
+    uint32_t NPQ_idx = B_idx_NPQ * BS_NPQ + c;
+    uint32_t N_idx   = fastdiv(NPQ_idx, p.OWOHmp, p.OWOHL); // divide by p.OH * p.OW
+    uint32_t OH_idx  = fastdiv(NPQ_idx - N_idx * p.OH * p.OW, p.OWmp, p.OWL); // divide by p.OW
+    uint32_t OW_idx  = NPQ_idx - N_idx * p.OH * p.OW - OH_idx * p.OW;
+    uint32_t dst_idx = OW_idx + OH_idx * p.nb1 + K_idx * p.nb2 + N_idx * p.nb3;
+    if (K_idx < K && NPQ_idx < NPQ) {
+        dst_data[dst_idx] = D_TYPE(elem);
+    }
+    return elem;
+}
+#endif
+
 void main() {
+#ifdef COOPMAT2
+    coopmat<ACC_TYPE, gl_ScopeWorkgroup, BS_K, BS_NPQ, gl_MatrixUseAccumulator> matC;
+    matC = coopmat<ACC_TYPE, gl_ScopeWorkgroup, BS_K, BS_NPQ, gl_MatrixUseAccumulator>(0.0);
+#else
+    float regC[TS_K][TS_NPQ];
     for (uint32_t T_ly = 0; T_ly < TS_K; T_ly++) {
         for (uint32_t T_lx = 0; T_lx < TS_NPQ; T_lx++) {
             regC[T_ly][T_lx] = 0.0;
         }
     }
+#endif
     /* Advance block in CRS dim */
     for (uint32_t B_idx_CRS = 0; B_idx_CRS < NB_CRS; B_idx_CRS++) {
         uint32_t CRS_idx_a;
@@ -199,7 +230,7 @@ void main() {
             if (K_idx >= K || CRS_idx_a >= CRS) {
                 val = 0.0;
             }
-            Ash[B_ly * Ash_stride + B_lx] = val;
+            Ash[B_ly * Ash_stride + B_lx] = SHMEM_TYPE(val);
         }
         /* Load input to B_block: (BS_CRS x BS_NPQ) */
         UNROLL for (uint32_t r_offset = 0; r_offset < BS_CRS; r_offset += BrpWg) {
@@ -244,11 +275,21 @@ void main() {
             if (CRS_idx_b >= CRS || NPQ_idx >= NPQ || H_idx < 0 || H_idx >= p.H || W_idx < 0 || W_idx >= p.W) {
                 val = 0.0;
             }
-            Bsh[B_ly * Bsh_stride + B_lx] = val;
+            Bsh[B_ly * Bsh_stride + B_lx] = SHMEM_TYPE(val);
         }
         barrier();
+#ifdef COOPMAT2
+        coopmat<float16_t, gl_ScopeWorkgroup, BS_K, BS_CRS, gl_MatrixUseA> matA;
+        coopmat<float16_t, gl_ScopeWorkgroup, BS_CRS, BS_NPQ, gl_MatrixUseB> matB;
+
+        coopMatLoad(matA, Ash, 0, Ash_stride, gl_CooperativeMatrixLayoutRowMajor);
+        coopMatLoad(matB, Bsh, 0, Bsh_stride, gl_CooperativeMatrixLayoutRowMajor);
+        matC = coopMatMulAdd(matA, matB, matC);
+#else
         if (T_y * TS_K < K) {
             UNROLL for (uint32_t CRS_lidx = 0; CRS_lidx < BS_CRS; CRS_lidx++) {
+                float regA[TS_K];
+                float regB[TS_NPQ];
                 for (uint32_t T_ly = 0; T_ly < TS_K; T_ly++) {
                     regA[T_ly] = Ash[(T_y * TS_K + T_ly) * Ash_stride + CRS_lidx];
                 }
@@ -262,9 +303,13 @@ void main() {
                 }
             }
         }
+#endif
         barrier();
     }
     /* Save C* */
+#ifdef COOPMAT2
+    coopMatPerElementNV(matC, matC, perElemOpStore);
+#else
    if (T_y * TS_K < K) {
         for (uint32_t T_ly = 0; T_ly < TS_K; T_ly++) {
             for (uint32_t T_lx = 0; T_lx < TS_NPQ; T_lx++) {
@@ -280,4 +325,5 @@ void main() {
             }
         }
     }
+#endif
 }
```
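`perElemOpStore` recovers the (N, OH, OW) coordinates from the flat NPQ index with `fastdiv`, a division-by-invariant-integer trick: the host precomputes a magic multiplier `mp` and shift `L` per divisor so the shader can replace an integer division with a high multiply and a shift. A CPU-side sketch of that scheme; the `init_fastdiv_values` helper is modeled on the classic round-up variant and is an assumption here, not necessarily ggml's exact host code:

```cpp
#include <cassert>
#include <cstdint>

// Precompute (mp, L) such that n / d == (mulhi(n, mp) + n) >> L for 32-bit n.
// This is the kind of setup that would feed p.OWmp/p.OWL and p.OWOHmp/p.OWOHL.
static void init_fastdiv_values(uint32_t d, uint32_t & mp, uint32_t & L) {
    // L = ceil(log2(d))
    L = 0;
    while (L < 32 && (uint32_t{1} << L) < d) {
        L++;
    }
    mp = (uint32_t) (((uint64_t{1} << 32) * ((uint64_t{1} << L) - d)) / d + 1);
}

// CPU reference for the shader's fastdiv(): umulExtended keeps the high
// 32 bits of the 64-bit product, i.e. mulhi(n, mp).
static uint32_t fastdiv(uint32_t n, uint32_t mp, uint32_t L) {
    uint32_t msbs = (uint32_t) (((uint64_t) n * mp) >> 32);
    return (msbs + n) >> L;
}

int main() {
    for (uint32_t d : { 3u, 7u, 28u, 224u }) {
        uint32_t mp, L;
        init_fastdiv_values(d, mp, L);
        for (uint32_t n : { 0u, 1u, d - 1, d, 12345u, 1u << 20 }) {
            assert(fastdiv(n, mp, L) == n / d); // matches true division
        }
    }
    return 0;
}
```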

ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp

Lines changed: 3 additions & 0 deletions

```diff
@@ -661,6 +661,9 @@ void process_shaders() {
     string_to_spv("conv2d_f32", "conv2d_mm.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", ""}});
     string_to_spv("conv2d_f16_f32", "conv2d_mm.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", ""}});
 
+    string_to_spv("conv2d_f32", "conv2d_mm.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", "[[unroll]]"}, {"COOPMAT2", "1"}}, true, false, true);
+    string_to_spv("conv2d_f16_f32", "conv2d_mm.comp", {{"A_TYPE", "float16_t"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"USE_COLLECTIVES", "1"}, {"UNROLL", "[[unroll]]"}, {"COOPMAT2", "1"}}, true, false, true);
+
     string_to_spv("conv2d_dw_whcn_f32", "conv2d_dw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"WHCN", "1"}}));
     string_to_spv("conv2d_dw_cwhn_f32", "conv2d_dw.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"CWHN", "1"}}));
```
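The coopmat2 shader variants are compiled with `COOPMAT2` defined and `UNROLL` set to `[[unroll]]`; the extra trailing arguments to `string_to_spv` presumably route these builds through the coopmat2-enabled compile path so they are only loaded on devices exposing `GL_NV_cooperative_matrix2`.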

gguf-py/gguf/vocab.py

Lines changed: 5 additions & 1 deletion

```diff
@@ -312,7 +312,11 @@ def _try_load_from_config_json(self, path: Path) -> bool:
         with open(config_file, encoding = 'utf-8') as f:
             config = json.load(f)
         for typ in self.special_token_types:
-            self._set_special_token(typ, config.get(f'{typ}_token_id'))
+            token_id = config.get(f'{typ}_token_id')
+            # If not found at root, check in text_config (for multimodal models like Kimi-VL)
+            if token_id is None and 'text_config' in config:
+                token_id = config['text_config'].get(f'{typ}_token_id')
+            self._set_special_token(typ, token_id)
         return True
```
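Multimodal checkpoints such as Kimi-VL nest the language model's special-token ids (e.g. `eos_token_id`) under a `text_config` object in config.json rather than at the root, so the loader now falls back to that sub-object before giving up.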

src/llama-memory-hybrid.cpp

Lines changed: 2 additions & 1 deletion

```diff
@@ -25,6 +25,7 @@ llama_memory_hybrid::llama_memory_hybrid(
     /* common */
     uint32_t n_seq_max,
     bool offload,
+    bool unified,
     /* layer filters */
     layer_filter_cb && filter_attn,
     layer_filter_cb && filter_recr) :
@@ -38,7 +39,7 @@ llama_memory_hybrid::llama_memory_hybrid(
     type_v,
     v_trans,
     offload,
-    1,
+    unified,
     kv_size,
     n_seq_max,
     n_pad,
```

src/llama-memory-hybrid.h

Lines changed: 1 addition & 0 deletions

```diff
@@ -39,6 +39,7 @@ class llama_memory_hybrid : public llama_memory_i {
     /* common */
     uint32_t n_seq_max,
     bool offload,
+    bool unified,
     /* layer filters */
     layer_filter_cb && filter_attn = nullptr,
     layer_filter_cb && filter_recr = nullptr);
```
