diff --git a/.devops/intel.Dockerfile b/.devops/intel.Dockerfile
index 9ce80a71eb9..cd2f9aa79bd 100644
--- a/.devops/intel.Dockerfile
+++ b/.devops/intel.Dockerfile
@@ -1,8 +1,8 @@
-ARG ONEAPI_VERSION=2025.1.1-0-devel-ubuntu24.04
+ARG ONEAPI_VERSION=2025.2.2-0-devel-ubuntu24.04
## Build Image
-FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build
+FROM intel/deep-learning-essentials:$ONEAPI_VERSION AS build
ARG GGML_SYCL_F16=OFF
RUN apt-get update && \
@@ -31,7 +31,7 @@ RUN mkdir -p /app/full \
&& cp requirements.txt /app/full \
&& cp .devops/tools.sh /app/full/tools.sh
-FROM intel/oneapi-basekit:$ONEAPI_VERSION AS base
+FROM intel/deep-learning-essentials:$ONEAPI_VERSION AS base
RUN apt-get update \
&& apt-get install -y libgomp1 curl\
diff --git a/.devops/rocm.Dockerfile b/.devops/rocm.Dockerfile
index 0596d518ac3..df9058d946a 100644
--- a/.devops/rocm.Dockerfile
+++ b/.devops/rocm.Dockerfile
@@ -16,7 +16,7 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build
# gfx803, gfx900, gfx906, gfx1032, gfx1101, gfx1102,not officialy supported
# check https://rocm.docs.amd.com/projects/install-on-linux/en/docs-6.4.1/reference/system-requirements.html
-ARG ROCM_DOCKER_ARCH='gfx803;gfx900;gfx906;gfx1010;gfx1030;gfx1032;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx1151'
+ARG ROCM_DOCKER_ARCH='gfx803;gfx900;gfx906;gfx908;gfx90a;gfx942;gfx1010;gfx1030;gfx1032;gfx1100;gfx1101;gfx1102;gfx1200;gfx1201;gfx1151'
#ARG ROCM_DOCKER_ARCH='gfx1151'
# Set ROCm architectures
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ffd81ae5e07..2fb35a82d40 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -362,11 +362,11 @@ jobs:
id: checkout
uses: actions/checkout@v4
- - name: ccache
- uses: ggml-org/ccache-action@v1.2.16
- with:
- key: ubuntu-latest-cmake-rpc
- evict-old-files: 1d
+ # - name: ccache
+ # uses: ggml-org/ccache-action@v1.2.16
+ # with:
+ # key: ubuntu-latest-cmake-rpc
+ # evict-old-files: 1d
- name: Dependencies
id: depends
@@ -387,8 +387,8 @@ jobs:
cd build
ctest -L main --verbose
- ubuntu-22-cmake-vulkan:
- runs-on: ubuntu-22.04
+ ubuntu-24-cmake-vulkan:
+ runs-on: ubuntu-24.04
steps:
- name: Clone
@@ -398,20 +398,40 @@ jobs:
- name: ccache
uses: ggml-org/ccache-action@v1.2.16
with:
- key: ubuntu-22-cmake-vulkan
+ key: ubuntu-24-cmake-vulkan
evict-old-files: 1d
- name: Dependencies
id: depends
run: |
- wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add -
- sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
+ sudo add-apt-repository -y ppa:kisak/kisak-mesa
sudo apt-get update -y
- sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk libcurl4-openssl-dev
+ sudo apt-get install -y build-essential mesa-vulkan-drivers libxcb-xinput0 libxcb-xinerama0 libxcb-cursor-dev libcurl4-openssl-dev
+
+ - name: Get latest Vulkan SDK version
+ id: vulkan_sdk_version
+ run: |
+ echo "VULKAN_SDK_VERSION=$(curl https://vulkan.lunarg.com/sdk/latest/linux.txt)" >> "$GITHUB_ENV"
+
+ - name: Cache Vulkan SDK
+ id: cache_vulkan_sdk
+ uses: actions/cache@v4
+ with:
+ path: ./vulkan_sdk
+ key: vulkan-sdk-${{ env.VULKAN_SDK_VERSION }}-${{ runner.os }}
+
+ - name: Install Vulkan SDK
+ if: steps.cache_vulkan_sdk.outputs.cache-hit != 'true'
+ id: vulkan_sdk_install
+ run: |
+ mkdir -p vulkan_sdk
+ cd vulkan_sdk
+ curl --no-progress-meter https://sdk.lunarg.com/sdk/download/latest/linux/vulkan_sdk.tar.xz | tar -Jx --strip-components=1
- name: Build
id: cmake_build
run: |
+ source ./vulkan_sdk/setup-env.sh
cmake -B build \
-DGGML_VULKAN=ON
cmake --build build --config Release -j $(nproc)
@@ -421,6 +441,7 @@ jobs:
run: |
cd build
export GGML_VK_VISIBLE_DEVICES=0
+ export GGML_VK_DISABLE_F16=1
# This is using llvmpipe and runs slower than other backends
ctest -L main --verbose --timeout 4200
@@ -1059,7 +1080,7 @@ jobs:
shell: bash
env:
- WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/7cd9bba0-7aab-4e30-b3ae-2221006a4a05/intel-oneapi-base-toolkit-2025.1.1.34_offline.exe
+ WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe
WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"
steps:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 91fd43a32eb..e28958fcdb2 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -462,7 +462,7 @@ jobs:
shell: bash
env:
- WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/7cd9bba0-7aab-4e30-b3ae-2221006a4a05/intel-oneapi-base-toolkit-2025.1.1.34_offline.exe
+ WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/24751ead-ddc5-4479-b9e6-f9fe2ff8b9f2/intel-deep-learning-essentials-2025.2.1.25_offline.exe
WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel
ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI"
@@ -505,6 +505,7 @@ jobs:
cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin
+ cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero_v2.dll" ./build/bin
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin
@@ -513,10 +514,15 @@ jobs:
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin
cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin
+ cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl-ls.exe" ./build/bin
cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin
cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin
+ cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/tcm.dll" ./build/bin
+ cp "${{ env.ONEAPI_ROOT }}/tcm/latest/bin/libhwloc-15.dll" ./build/bin
+ cp "${{ env.ONEAPI_ROOT }}/umf/latest/bin/umf.dll" ./build/bin
+
echo "cp oneAPI running time dll files to ./build/bin done"
7z a llama-bin-win-sycl-x64.zip ./build/bin/*
diff --git a/CODEOWNERS b/CODEOWNERS
index 89b84ce8506..15e3559fda5 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -59,6 +59,9 @@
/ggml/src/ggml-cuda/mmq.* @JohannesGaessler
/ggml/src/ggml-cuda/mmvf.* @JohannesGaessler
/ggml/src/ggml-cuda/mmvq.* @JohannesGaessler
+/ggml/src/ggml-cuda/fattn-wmma* @IMbackK
+/ggml/src/ggml-hip/ @IMbackK
+/ggml/src/ggml-cuda/vendors/hip.h @IMbackK
/ggml/src/ggml-impl.h @ggerganov @slaren
/ggml/src/ggml-metal/ @ggerganov
/ggml/src/ggml-opencl/ @lhez @max-krasnyansky
diff --git a/ci/run.sh b/ci/run.sh
index b0af51723bc..c6e31fa0b81 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -34,9 +34,9 @@ mkdir -p "$2"
OUT=$(realpath "$1")
MNT=$(realpath "$2")
-rm -f "$OUT/*.log"
-rm -f "$OUT/*.exit"
-rm -f "$OUT/*.md"
+rm -f $OUT/*.log
+rm -f $OUT/*.exit
+rm -f $OUT/*.md
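+# note: the patterns are intentionally left unquoted so the shell expands the globs;
+# quoting them (as before) made rm look for files literally named "*.log" etc.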
sd=`dirname $0`
cd $sd/../
@@ -607,6 +607,7 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then
fi
ret=0
+
test $ret -eq 0 && gg_run ctest_debug
test $ret -eq 0 && gg_run ctest_release
@@ -624,4 +625,6 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then
test $ret -eq 0 && gg_run ctest_with_model_release
fi
+cat $OUT/README.md
+
exit $ret
diff --git a/common/chat-parser.cpp b/common/chat-parser.cpp
index 96ba8f533ef..b3362519a68 100644
--- a/common/chat-parser.cpp
+++ b/common/chat-parser.cpp
@@ -75,6 +75,35 @@ bool common_chat_msg_parser::add_tool_calls(const json & arr) {
}
return true;
}
+
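+// Example of the short form handled below (hypothetical tool and arguments):
+//   { "get_weather": { "location": "Paris" } }
+// becomes a tool call named "get_weather" with an empty id and arguments '{"location": "Paris"}'.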
+bool common_chat_msg_parser::add_tool_call_short_form(const json & tool_call) {
+ if (!tool_call.is_object() || tool_call.size() != 1) {
+ return false;
+ }
+
+ // Get the tool name (the single key in the object)
+ auto it = tool_call.begin();
+ std::string name = it.key();
+
+ if (name.empty()) {
+ return false;
+ }
+
+ // Get the arguments (the nested object)
+ const json & args_json = it.value();
+ std::string arguments = "";
+
+ if (args_json.is_object()) {
+ arguments = args_json.dump();
+ } else if (args_json.is_string()) {
+ arguments = args_json;
+ } else if (!args_json.is_null()) {
+ // For other types, convert to string representation
+ arguments = args_json.dump();
+ }
+
+ return add_tool_call(name, "", arguments);
+}
void common_chat_msg_parser::finish() {
if (!is_partial_ && pos_ != input_.size()) {
throw std::runtime_error("Unexpected content at end of input");// + input_.substr(pos_));
diff --git a/common/chat-parser.h b/common/chat-parser.h
index 0e64c341a50..c8cdc63fb50 100644
--- a/common/chat-parser.h
+++ b/common/chat-parser.h
@@ -64,6 +64,9 @@ class common_chat_msg_parser {
// Adds an array of tool calls using their "name", "id" and "arguments" fields.
bool add_tool_calls(const nlohmann::ordered_json & arr);
+ // Adds a tool call using the short form: { "tool_name": { "arg1": val, "arg2": val } }
+ bool add_tool_call_short_form(const nlohmann::ordered_json & tool_call);
+
void finish();
bool consume_spaces();
diff --git a/common/chat.cpp b/common/chat.cpp
index e2bacdcf527..87212322ec2 100644
--- a/common/chat.cpp
+++ b/common/chat.cpp
@@ -638,6 +638,7 @@ const char * common_chat_format_name(common_chat_format format) {
case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
case COMMON_CHAT_FORMAT_NEMOTRON_V2: return "Nemotron V2";
+ case COMMON_CHAT_FORMAT_APERTUS: return "Apertus";
default:
throw std::runtime_error("Unknown chat format");
}
@@ -801,6 +802,7 @@ static std::string apply(
}
tmpl_inputs.add_generation_prompt = inputs.add_generation_prompt;
tmpl_inputs.extra_context = inputs.extra_context;
+ tmpl_inputs.extra_context["enable_thinking"] = inputs.enable_thinking;
if (additional_context) {
tmpl_inputs.extra_context.merge_patch(*additional_context);
}
@@ -1264,6 +1266,75 @@ static common_chat_params common_chat_params_init_nemotron_v2(const common_chat_
}
return data;
}
+
+static common_chat_params common_chat_params_init_apertus(const common_chat_template & tmpl, const struct templates_params & inputs) {
+ common_chat_params data;
+
+ // Generate the prompt using the apply() function with the template
+ data.prompt = apply(tmpl, inputs);
+ data.format = COMMON_CHAT_FORMAT_APERTUS;
+
+ // Handle thinking tags appropriately based on inputs.enable_thinking
+ if (string_ends_with(data.prompt, "<|inner_prefix|>")) {
+ if (!inputs.enable_thinking) {
+ data.prompt += "<|inner_suffix|>";
+ } else {
+ data.thinking_forced_open = true;
+ }
+ }
+
+ // When tools are present, build grammar for the <|tools_prefix|> format
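+    // The expected model output looks like (illustrative):
+    //   <|tools_prefix|>[{"tool_name": {"arg": "value"}}]<|tools_suffix|>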
+ if (!inputs.tools.is_null() && inputs.tools.is_array() && !inputs.tools.empty()) {
+ data.grammar_lazy = true;
+ data.grammar = build_grammar([&](const common_grammar_builder & builder) {
+ auto schemas = json::array();
+ foreach_function(inputs.tools, [&](const json & tool) {
+ const auto & function = tool.at("function");
+ schemas.push_back({
+ { "type", "object" },
+ { "properties",
+ {
+ { function.at("name"), function.at("parameters") }
+ } },
+ { "required", json::array({ function.at("name") }) },
+ });
+ });
+ auto schema = json{
+ { "type", "array" },
+ { "items", schemas.size() == 1 ? schemas[0] : json{ { "anyOf", schemas } } },
+ { "minItems", 1 },
+ };
+ if (!inputs.parallel_tool_calls) {
+ schema["maxItems"] = 1;
+ }
+ builder.add_rule("root",
+ std::string(data.thinking_forced_open ? "( \"<|inner_suffix|>\" space )? " : "") +
+ "\"<|tools_prefix|>\"" + builder.add_schema("tool_calls", schema) + "\"<|tools_suffix|>\"");
+ });
+ data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
+ // If thinking_forced_open, then we capture the <|inner_suffix|> tag in the grammar,
+ // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
+ std::string(data.thinking_forced_open ?
+ "[\\s\\S]*?(<\\|inner_suffix\\|>\\s*)" :
+ "(?:<\\|inner_prefix\\|>[\\s\\S]*?<\\|inner_suffix\\|>\\s*)?") +
+ "(<\\|tools_prefix\\|>)[\\s\\S]*" });
+ data.preserved_tokens = {
+ "<|system_start|>",
+ "<|system_end|>",
+ "<|developer_start|>",
+ "<|developer_end|>",
+ "<|user_start|>",
+ "<|user_end|>",
+ "<|assistant_start|>",
+ "<|assistant_end|>",
+ "<|inner_prefix|>",
+ "<|inner_suffix|>",
+ "<|tools_prefix|>",
+ "<|tools_suffix|>",
+ };
+ }
+ return data;
+}
static void common_chat_parse_llama_3_1(common_chat_msg_parser & builder, bool with_builtin_tools = false) {
if (!builder.syntax().parse_tool_calls) {
builder.add_content(builder.consume_rest());
@@ -2323,6 +2394,37 @@ static void common_chat_parse_nemotron_v2(common_chat_msg_parser & builder) {
builder.add_content(builder.consume_rest());
}
+static void common_chat_parse_apertus(common_chat_msg_parser & builder) {
+ // Parse thinking tags
+ builder.try_parse_reasoning("<|inner_prefix|>", "<|inner_suffix|>");
+ if (!builder.syntax().parse_tool_calls) {
+ builder.add_content(builder.consume_rest());
+ return;
+ }
+
+ // Look for tool calls
+ static const common_regex tool_call_regex(regex_escape("<|tools_prefix|>"));
+ if (auto res = builder.try_find_regex(tool_call_regex)) {
+ builder.move_to(res->groups[0].end);
+
+ auto tool_calls_data = builder.consume_json();
+ if (tool_calls_data.json.is_array()) {
+ builder.consume_spaces();
+ if (!builder.try_consume_literal("<|tools_suffix|>")) {
+ throw common_chat_msg_partial_exception("Incomplete tool call");
+ }
+ for (const auto & value : tool_calls_data.json) {
+ if (value.is_object()) {
+ builder.add_tool_call_short_form(value);
+ }
+ }
+ } else {
+ throw common_chat_msg_partial_exception("Incomplete tool call");
+ }
+ }
+ builder.add_content(builder.consume_rest());
+}
+
static void common_chat_parse_seed_oss(common_chat_msg_parser & builder) {
// Parse thinking tags first - this handles the main reasoning content
builder.try_parse_reasoning("", "");
@@ -2567,6 +2669,11 @@ static common_chat_params common_chat_templates_apply_jinja(
return common_chat_params_init_nemotron_v2(tmpl, params);
}
+ // Apertus format detection
+ if (src.find("<|system_start|>") != std::string::npos && src.find("<|tools_prefix|>") != std::string::npos) {
+ return common_chat_params_init_apertus(tmpl, params);
+ }
+
// Use generic handler when mixing tools + JSON schema.
// TODO: support that mix in handlers below.
if ((params.tools.is_array() && params.json_schema.is_object())) {
@@ -2734,6 +2841,9 @@ static void common_chat_parse(common_chat_msg_parser & builder) {
case COMMON_CHAT_FORMAT_NEMOTRON_V2:
common_chat_parse_nemotron_v2(builder);
break;
+ case COMMON_CHAT_FORMAT_APERTUS:
+ common_chat_parse_apertus(builder);
+ break;
default:
throw std::runtime_error(std::string("Unsupported format: ") + common_chat_format_name(builder.syntax().format));
}
diff --git a/common/chat.h b/common/chat.h
index 5170fc14f4e..3c277e15eba 100644
--- a/common/chat.h
+++ b/common/chat.h
@@ -114,6 +114,7 @@ enum common_chat_format {
COMMON_CHAT_FORMAT_GPT_OSS,
COMMON_CHAT_FORMAT_SEED_OSS,
COMMON_CHAT_FORMAT_NEMOTRON_V2,
+ COMMON_CHAT_FORMAT_APERTUS,
COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
};
diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index ae0079d1878..2fc04173aa2 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -8945,6 +8945,43 @@ def prepare_tensors(self):
raise ValueError(f"Unprocessed experts: {experts}")
+@ModelBase.register("ApertusForCausalLM")
+class ApertusModel(LlamaModel):
+ model_arch = gguf.MODEL_ARCH.APERTUS
+ undo_permute = False
+
+ _alpha_n = {}
+ _alpha_p = {}
+ _beta = {}
+ _eps = {}
+
+ def modify_tensors(self, data_torch, name, bid):
+ # Handle xIELU activation parameters
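+        # Each xIELU parameter is stored as one scalar tensor per layer; collect them here
+        # and emit a single per-layer array once values for all layers have been seen.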
+ n_layers = self.hparams["num_hidden_layers"]
+ if name.endswith(".act_fn.alpha_n"):
+ self._alpha_n[bid] = data_torch.to("cpu").float().item()
+ if (len(self._alpha_n) == n_layers):
+ self.gguf_writer.add_xielu_alpha_n([self._alpha_n[k] for k in sorted(self._alpha_n)])
+ return []
+ if name.endswith(".act_fn.alpha_p"):
+ self._alpha_p[bid] = data_torch.to("cpu").float().item()
+ if (len(self._alpha_p) == n_layers):
+ self.gguf_writer.add_xielu_alpha_p([self._alpha_p[k] for k in sorted(self._alpha_p)])
+ return []
+ if name.endswith(".act_fn.beta"):
+ self._beta[bid] = data_torch.to("cpu").float().item()
+ if (len(self._beta) == n_layers):
+ self.gguf_writer.add_xielu_beta([self._beta[k] for k in sorted(self._beta)])
+ return []
+ if name.endswith(".act_fn.eps"):
+ self._eps[bid] = data_torch.to("cpu").float().item()
+ if (len(self._eps) == n_layers):
+ self.gguf_writer.add_xielu_eps([self._eps[k] for k in sorted(self._eps)])
+ return []
+
+ return super().modify_tensors(data_torch, name, bid)
+
+
class MistralModel(LlamaModel):
model_arch = gguf.MODEL_ARCH.LLAMA
model_name = "Mistral"
@@ -9112,7 +9149,7 @@ def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -
def from_safetensors_slice(cls, st_slice: Any) -> Tensor:
dtype = cls._dtype_str_map[st_slice.get_dtype()]
shape: tuple[int, ...] = tuple(st_slice.get_shape())
- lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:])
+ lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[...] if len(s.get_shape()) == 0 else s[:])
return cast(torch.Tensor, lazy)
@classmethod
diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md
index 6e9b88935da..92ab27066b4 100644
--- a/docs/backend/SYCL.md
+++ b/docs/backend/SYCL.md
@@ -145,12 +145,13 @@ The docker build option is currently limited to *Intel GPU* targets.
```sh
# Using FP16
docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" --target light -f .devops/intel.Dockerfile .
+
+# Using FP32
+docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=OFF" --target light -f .devops/intel.Dockerfile .
```
*Notes*:
-To build in default FP32 *(Slower than FP16 alternative)*, set `--build-arg="GGML_SYCL_F16=OFF"` in the previous command.
-
You can also use the `.devops/llama-server-intel.Dockerfile`, which builds the *"server"* alternative.
Check the [documentation for Docker](../docker.md) to see the available images.
@@ -160,7 +161,7 @@ Check the [documentation for Docker](../docker.md) to see the available images.
# First, find all the DRI cards
ls -la /dev/dri
# Then, pick the card that you want to use (here for e.g. /dev/dri/card1).
-docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-sycl -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33
+docker run -it --rm -v "/path/to/models:/models" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card0:/dev/dri/card0 llama-cpp-sycl -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -c 4096 -s 0
```
*Notes:*
@@ -215,9 +216,19 @@ To target AMD GPUs with SYCL, the ROCm stack must be installed first.
2. **Install Intel® oneAPI Base toolkit**
+The SYCL backend depends on:
+ - Intel® oneAPI DPC++/C++ compiler and runtime.
+ - Intel® oneAPI DPC++ Library (oneDPL).
+ - Intel® oneAPI Deep Neural Network Library (oneDNN).
+ - Intel® oneAPI Math Kernel Library (oneMKL).
+
- **For Intel GPU**
-The base toolkit can be obtained from the official [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) page.
+All of the above are included in both the **Intel® oneAPI Base Toolkit** and the **Intel® Deep Learning Essentials** packages.
+
+Installing **Intel® Deep Learning Essentials** is recommended, as it provides only the required libraries and is a smaller download.
+
+Both the **Intel® oneAPI Base Toolkit** and **Intel® Deep Learning Essentials** can be obtained from the official [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) page.
Please follow the instructions for downloading and installing the Toolkit for Linux, and preferably keep the default installation values unchanged, notably the installation path *(`/opt/intel/oneapi` by default)*.
@@ -225,6 +236,12 @@ Following guidelines/code snippets assume the default installation values. Other
Upon a successful installation, SYCL is enabled for the available intel devices, along with relevant libraries such as oneAPI oneDNN for Intel GPUs.
+|Verified release|
+|-|
+|2025.2.1|
+|2025.1|
+|2024.1|
+
- **Adding support to Nvidia GPUs**
**oneAPI Plugin**: In order to enable SYCL support on Nvidia GPUs, please install the [Codeplay oneAPI Plugin for Nvidia GPUs](https://developer.codeplay.com/products/oneapi/nvidia/download). User should also make sure the plugin version matches the installed base toolkit one *(previous step)* for a seamless "oneAPI on Nvidia GPU" setup.
@@ -255,10 +272,11 @@ sycl-ls
When targeting an intel GPU, the user should expect one or more devices among the available SYCL devices. Please make sure that at least one GPU is present via `sycl-ls`, for instance `[level_zero:gpu]` in the sample output below:
```
-[opencl:acc][opencl:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000]
-[opencl:cpu][opencl:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i7-13700K OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000]
-[opencl:gpu][opencl:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [23.30.26918.50]
-[level_zero:gpu][level_zero:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26918]
+[level_zero:gpu][level_zero:0] Intel(R) oneAPI Unified Runtime over Level-Zero, Intel(R) Arc(TM) A770 Graphics 12.55.8 [1.3.29735+27]
+[level_zero:gpu][level_zero:1] Intel(R) oneAPI Unified Runtime over Level-Zero, Intel(R) UHD Graphics 730 12.2.0 [1.3.29735+27]
+[opencl:cpu][opencl:0] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i5-13400 OpenCL 3.0 (Build 0) [2025.20.8.0.06_160000]
+[opencl:gpu][opencl:1] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [24.39.31294]
+[opencl:gpu][opencl:2] Intel(R) OpenCL Graphics, Intel(R) UHD Graphics 730 OpenCL 3.0 NEO [24.39.31294]
```
- **Nvidia GPU**
@@ -353,7 +371,7 @@ cmake --build build --config Release -j -v
#### Retrieve and prepare model
-You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model preparation, or download an already quantized model like [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) or [Meta-Llama-3-8B-Instruct-Q4_0.gguf](https://huggingface.co/aptha/Meta-Llama-3-8B-Instruct-Q4_0-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_0.gguf).
+You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model preparation, or download an already quantized model like [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/resolve/main/llama-2-7b.Q4_0.gguf?download=true) or [Meta-Llama-3-8B-Instruct-Q4_0.gguf](https://huggingface.co/aptha/Meta-Llama-3-8B-Instruct-Q4_0-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_0.gguf).
##### Check device
@@ -466,7 +484,17 @@ If you already have a recent version of Microsoft Visual Studio, you can skip th
3. Install Intel® oneAPI Base toolkit
-The base toolkit can be obtained from the official [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) page.
+The SYCL backend depends on:
+ - Intel® oneAPI DPC++/C++ compiler and runtime.
+ - Intel® oneAPI DPC++ Library (oneDPL).
+ - Intel® oneAPI Deep Neural Network Library (oneDNN).
+ - Intel® oneAPI Math Kernel Library (oneMKL).
+
+All of the above are included in both the **Intel® oneAPI Base Toolkit** and the **Intel® Deep Learning Essentials** packages.
+
+Installing **Intel® Deep Learning Essentials** is recommended, as it provides only the required libraries and is a smaller download.
+
+Both the **Intel® oneAPI Base Toolkit** and **Intel® Deep Learning Essentials** can be obtained from the official [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) page.
Please follow the instructions for downloading and installing the Toolkit for Windows, and preferably keep the default installation values unchanged, notably the installation path *(`C:\Program Files (x86)\Intel\oneAPI` by default)*.
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 5028a9cebf2..60c6b63d059 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -576,6 +576,7 @@ extern "C" {
GGML_UNARY_OP_HARDSIGMOID,
GGML_UNARY_OP_EXP,
GGML_UNARY_OP_GELU_ERF,
+ GGML_UNARY_OP_XIELU,
GGML_UNARY_OP_COUNT,
};
@@ -1150,6 +1151,18 @@ extern "C" {
struct ggml_context * ctx,
struct ggml_tensor * a);
+ // xIELU activation function
+ // x = x * (c_a(alpha_n) + c_b(alpha_p, beta) * sigmoid(beta * x)) + eps * (x > 0)
+ // where c_a = softplus and c_b(a, b) = softplus(a) + b are constraining functions
+    // that constrain the negative and positive source alpha values, respectively
+ GGML_API struct ggml_tensor * ggml_xielu(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ float alpha_n,
+ float alpha_p,
+ float beta,
+ float eps);
+
// gated linear unit ops
// A: n columns, r rows,
// result is n / 2 columns, r rows,
@@ -1617,6 +1630,13 @@ extern "C" {
float scale,
float max_bias);
+ GGML_API struct ggml_tensor * ggml_soft_max_ext_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * mask,
+ float scale,
+ float max_bias);
+
GGML_API void ggml_soft_max_add_sinks(
struct ggml_tensor * a,
struct ggml_tensor * sinks);
diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index dbc07301b29..eded6eb77ed 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -2187,6 +2187,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
case GGML_UNARY_OP_GELU_ERF:
case GGML_UNARY_OP_GELU_QUICK:
case GGML_UNARY_OP_SILU:
+ case GGML_UNARY_OP_XIELU:
{
n_tasks = n_threads;
} break;
diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp
index 14f7dcf4f41..6275c8305a9 100644
--- a/ggml/src/ggml-cpu/ops.cpp
+++ b/ggml/src/ggml-cpu/ops.cpp
@@ -8637,7 +8637,7 @@ static void ggml_compute_forward_ssm_scan_f32(
// n_head
for (int h = ih0; h < ih1; ++h) {
// ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16
- const float dt_soft_plus = dt[h] <= 20.0f ? log1pf(expf(dt[h])) : dt[h];
+ const float dt_soft_plus = ggml_softplus(dt[h]);
const float dA = expf(dt_soft_plus * A[h]);
const int g = h / (nh / ng); // repeat_interleave
@@ -8734,7 +8734,7 @@ static void ggml_compute_forward_ssm_scan_f32(
// n_head
for (int h = ih0; h < ih1; ++h) {
// ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16
- const float dt_soft_plus = dt[h] <= 20.0f ? log1pf(expf(dt[h])) : dt[h];
+ const float dt_soft_plus = ggml_softplus(dt[h]);
const int g = h / (nh / ng); // repeat_interleave
// dim
@@ -8997,6 +8997,10 @@ void ggml_compute_forward_unary(
{
ggml_compute_forward_exp(params, dst);
} break;
+ case GGML_UNARY_OP_XIELU:
+ {
+ ggml_compute_forward_xielu(params, dst);
+ } break;
default:
{
GGML_ABORT("fatal error");
diff --git a/ggml/src/ggml-cpu/unary-ops.cpp b/ggml/src/ggml-cpu/unary-ops.cpp
index 4fce569b3bf..cf1a4615d04 100644
--- a/ggml/src/ggml-cpu/unary-ops.cpp
+++ b/ggml/src/ggml-cpu/unary-ops.cpp
@@ -52,6 +52,15 @@ static inline float op_sqrt(float x) {
return sqrtf(x);
}
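+// xIELU: quadratic branch for x > 0 (alpha_p * x^2 + beta * x); for x <= 0,
+// alpha_n * (expm1(min(x, eps)) - x) + beta * x.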
+static inline float op_xielu(float x, float alpha_n, float alpha_p, float beta, float eps) {
+ if (x > 0.0f) {
+ return alpha_p * x * x + beta * x;
+ } else {
+ const float min_x_eps = fminf(x, eps);
+ return (expm1f(min_x_eps) - x) * alpha_n + beta * x;
+ }
+}
+
static inline float op_sin(float x) {
return sinf(x);
}
@@ -121,6 +130,86 @@ static void unary_op(const ggml_compute_params * params, ggml_tensor * dst) {
}
}
+template <float (*op)(float)>
+static void unary_op_params(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    /* */ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32
+        apply_unary_op<op, float, float>(params, dst);
+    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16
+        apply_unary_op<op, ggml_fp16_t, ggml_fp16_t>(params, dst);
+    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16
+        apply_unary_op<op, ggml_bf16_t, ggml_bf16_t>(params, dst);
+    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) {
+        apply_unary_op<op, ggml_bf16_t, float>(params, dst);
+    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
+        apply_unary_op<op, ggml_fp16_t, float>(params, dst);
+    } else {
+        fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__,
+            ggml_type_name(dst->type), ggml_type_name(src0->type));
+        GGML_ABORT("fatal error");
+    }
+}
+
+// Extend vec_unary_op to support functors
+template <typename src0_t, typename dst_t, typename Op>
+static inline void vec_unary_op_functor(int64_t n, dst_t * y, const src0_t * x, Op op) {
+    constexpr auto src0_to_f32 = type_conversion_table<src0_t>::to_f32;
+    constexpr auto f32_to_dst  = type_conversion_table<dst_t>::from_f32;
+
+ for (int i = 0; i < n; i++) {
+ y[i] = f32_to_dst(op(src0_to_f32(x[i])));
+ }
+}
+
+// Extend apply_unary_op to support functors
+template <typename src0_t, typename dst_t, typename Op>
+static void apply_unary_op_functor(const ggml_compute_params * params, ggml_tensor * dst, Op op) {
+ const ggml_tensor * src0 = dst->src[0];
+
+ GGML_ASSERT(ggml_is_contiguous_1(src0) && ggml_is_contiguous_1(dst) && ggml_are_same_shape(src0, dst));
+
+ GGML_TENSOR_UNARY_OP_LOCALS
+
+ GGML_ASSERT( nb0 == sizeof(dst_t));
+ GGML_ASSERT(nb00 == sizeof(src0_t));
+
+ const auto [ir0, ir1] = get_thread_range(params, src0);
+
+ for (int64_t ir = ir0; ir < ir1; ++ir) {
+ const int64_t i03 = ir/(ne02*ne01);
+ const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
+ const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);
+
+ dst_t * dst_ptr = (dst_t *) ((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
+ const src0_t * src0_ptr = (const src0_t *) ((const char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01);
+
+ vec_unary_op_functor(ne0, dst_ptr, src0_ptr, op);
+ }
+}
+
+// Generic dispatcher for functors
+template <typename Op>
+static void unary_op_functor(const ggml_compute_params * params, ggml_tensor * dst, Op op) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    /* */ if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) { // all f32
+        apply_unary_op_functor<float, float>(params, dst, op);
+    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) { // all f16
+        apply_unary_op_functor<ggml_fp16_t, ggml_fp16_t>(params, dst, op);
+    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_BF16) { // all bf16
+        apply_unary_op_functor<ggml_bf16_t, ggml_bf16_t>(params, dst, op);
+    } else if (src0->type == GGML_TYPE_BF16 && dst->type == GGML_TYPE_F32) {
+        apply_unary_op_functor<ggml_bf16_t, float>(params, dst, op);
+    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
+        apply_unary_op_functor<ggml_fp16_t, float>(params, dst, op);
+ } else {
+ fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s\n", __func__,
+ ggml_type_name(dst->type), ggml_type_name(src0->type));
+ GGML_ABORT("fatal error");
+ }
+}
+
void ggml_compute_forward_abs(const ggml_compute_params * params, ggml_tensor * dst) {
unary_op(params, dst);
}
@@ -184,3 +273,17 @@ void ggml_compute_forward_cos(const ggml_compute_params * params, ggml_tensor *
void ggml_compute_forward_log(const ggml_compute_params * params, ggml_tensor * dst) {
unary_op(params, dst);
}
+
+void ggml_compute_forward_xielu(const ggml_compute_params * params, ggml_tensor * dst) {
+ const float alpha_n = ggml_get_op_params_f32(dst, 1);
+ const float alpha_p = ggml_get_op_params_f32(dst, 2);
+ const float beta = ggml_get_op_params_f32(dst, 3);
+ const float eps = ggml_get_op_params_f32(dst, 4);
+
+ const auto xielu_op_params = [alpha_n, alpha_p, beta, eps](float f) {
+ return op_xielu(f, alpha_n, alpha_p, beta, eps);
+ };
+
+ unary_op_functor(params, dst, xielu_op_params);
+}
+
diff --git a/ggml/src/ggml-cpu/unary-ops.h b/ggml/src/ggml-cpu/unary-ops.h
index b1ade2c8e34..697c1e0da0a 100644
--- a/ggml/src/ggml-cpu/unary-ops.h
+++ b/ggml/src/ggml-cpu/unary-ops.h
@@ -22,6 +22,7 @@ void ggml_compute_forward_sqrt(const struct ggml_compute_params * params, struct
void ggml_compute_forward_sin(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_cos(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_log(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+void ggml_compute_forward_xielu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
#ifdef __cplusplus
}
diff --git a/ggml/src/ggml-cuda/fattn-vec.cuh b/ggml/src/ggml-cuda/fattn-vec.cuh
index 59c62553b01..89ab0f1638b 100644
--- a/ggml/src/ggml-cuda/fattn-vec.cuh
+++ b/ggml/src/ggml-cuda/fattn-vec.cuh
@@ -535,8 +535,6 @@ void ggml_cuda_flash_attn_ext_vec_case(ggml_backend_cuda_context & ctx, ggml_ten
float logit_softcap;
memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float));
- const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
-
if (Q->ne[1] == 1) {
constexpr int cols_per_block = 1;
if (logit_softcap == 0.0f) {
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index b7e81b21bcb..26e72bbc2b9 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -2334,6 +2334,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
case GGML_UNARY_OP_ELU:
ggml_cuda_op_elu(ctx, dst);
break;
+ case GGML_UNARY_OP_XIELU:
+ ggml_cuda_op_xielu(ctx, dst);
+ break;
default:
return false;
}
diff --git a/ggml/src/ggml-cuda/topk-moe.cu b/ggml/src/ggml-cuda/topk-moe.cu
index 039f2847196..afe4aee2403 100644
--- a/ggml/src/ggml-cuda/topk-moe.cu
+++ b/ggml/src/ggml-cuda/topk-moe.cu
@@ -13,7 +13,7 @@
It is intended as fusion of softmax->top-k->get_rows pipeline for MoE models
*/
-template
+template
__launch_bounds__(4 * WARP_SIZE, 1) __global__ void topk_moe_cuda(const float * logits,
float * weights,
int32_t * ids,
@@ -204,8 +204,6 @@ void ggml_cuda_op_topk_moe(ggml_backend_cuda_context & ctx,
GGML_ASSERT(ids->nb[1] / ggml_type_size(ids->type) == (size_t) n_experts);
- cudaStream_t stream = ctx.stream();
-
const int n_expert_used = weights->ne[1];
if (with_norm) {
diff --git a/ggml/src/ggml-cuda/unary.cu b/ggml/src/ggml-cuda/unary.cu
index 5aff8a876af..3c564566a51 100644
--- a/ggml/src/ggml-cuda/unary.cu
+++ b/ggml/src/ggml-cuda/unary.cu
@@ -1,4 +1,5 @@
#include "unary.cuh"
+#include "convert.cuh"
static __device__ __forceinline__ float op_abs(float x) {
return fabsf(x);
@@ -375,6 +376,59 @@ void ggml_cuda_op_swiglu_oai(ggml_backend_cuda_context & ctx, ggml_tensor * dst)
swiglu_oai_cuda(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), alpha, limit, stream);
}
+/* CUDA kernel + launcher for xIELU */
+
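+// Mirrors op_xielu in ggml-cpu/unary-ops.cpp; the positive/negative branches are blended
+// with a 0/1 gate instead of an if/else so every thread follows the same path.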
+template <typename T>
+static __global__ void xielu_kernel(const T * x, T * dst, const int k, float alpha_n, float alpha_p, float beta, float eps) {
+ const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+ if (i >= k) {
+ return;
+ }
+
+    const float xi = ggml_cuda_cast<float>(x[i]);
+
+ const float gate_pos = (xi > 0.0f);
+ const float y_pos = alpha_p * xi * xi + beta * xi;
+ const float min_v_eps = fminf(xi, eps);
+ const float y_neg = (expm1f(min_v_eps) - xi) * alpha_n + beta * xi;
+ const float out = gate_pos * y_pos + (1.0f - gate_pos) * y_neg;
+
+    dst[i] = ggml_cuda_cast<T>(out);
+}
+
+template <typename T>
+static void xielu_cuda(const T * x, T * dst, const int k, float alpha_n, float alpha_p, float beta, float eps, cudaStream_t stream) {
+    const int num_blocks = (k + CUDA_XIELU_BLOCK_SIZE - 1) / CUDA_XIELU_BLOCK_SIZE;
+    xielu_kernel<<<num_blocks, CUDA_XIELU_BLOCK_SIZE, 0, stream>>>(x, dst, k, alpha_n, alpha_p, beta, eps);
+}
+
+void ggml_cuda_op_xielu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const void * src0_d = src0->data;
+ void * dst_d = dst->data;
+ cudaStream_t stream = ctx.stream();
+
+ GGML_ASSERT(ggml_is_contiguous(src0));
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+ GGML_ASSERT(src0->type == dst->type);
+
+ const float alpha_n = ggml_get_op_params_f32(dst, 1);
+ const float alpha_p = ggml_get_op_params_f32(dst, 2);
+ const float beta = ggml_get_op_params_f32(dst, 3);
+ const float eps = ggml_get_op_params_f32(dst, 4);
+
+ if (src0->type == GGML_TYPE_F16) {
+ xielu_cuda((const half *)src0_d, (half *)dst_d, ggml_nelements(src0), alpha_n, alpha_p, beta, eps, stream);
+ } else {
+ xielu_cuda((const float *)src0_d, (float *)dst_d, ggml_nelements(src0), alpha_n, alpha_p, beta, eps, stream);
+ }
+}
+
+
+
/* silu_back */
static __device__ __forceinline__ float op_silu_back(float grad, float x) {
diff --git a/ggml/src/ggml-cuda/unary.cuh b/ggml/src/ggml-cuda/unary.cuh
index da3caf1d896..8e7644fcd9a 100644
--- a/ggml/src/ggml-cuda/unary.cuh
+++ b/ggml/src/ggml-cuda/unary.cuh
@@ -16,6 +16,7 @@
#define CUDA_SIN_BLOCK_SIZE 256
#define CUDA_COS_BLOCK_SIZE 256
#define CUDA_GLU_BLOCK_SIZE 256
+#define CUDA_XIELU_BLOCK_SIZE 256
void ggml_cuda_op_abs(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -72,3 +73,5 @@ void ggml_cuda_op_swiglu_oai(ggml_backend_cuda_context & ctx, ggml_tensor * dst)
void ggml_cuda_op_geglu_erf(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
void ggml_cuda_op_geglu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void ggml_cuda_op_xielu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h
index 86a1ebf62b7..d0fb3bccad2 100644
--- a/ggml/src/ggml-impl.h
+++ b/ggml/src/ggml-impl.h
@@ -102,6 +102,9 @@ static bool ggml_op_is_empty(enum ggml_op op) {
}
}
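+
+// softplus(x) = log(1 + exp(x)); for x > 20 the result is indistinguishable from x in
+// float precision, so return x directly (this also avoids overflow of expf for large x)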
+static inline float ggml_softplus(float input) {
+ return (input > 20.0f) ? input : logf(1 + expf(input));
+}
//
// logging
//
diff --git a/ggml/src/ggml-musa/CMakeLists.txt b/ggml/src/ggml-musa/CMakeLists.txt
index cdb3818c786..f8477a2ef35 100644
--- a/ggml/src/ggml-musa/CMakeLists.txt
+++ b/ggml/src/ggml-musa/CMakeLists.txt
@@ -56,7 +56,7 @@ if (MUSAToolkit_FOUND)
set_source_files_properties(${GGML_SOURCES_MUSA} PROPERTIES LANGUAGE CXX)
foreach(SOURCE ${GGML_SOURCES_MUSA})
- set(COMPILE_FLAGS "-fsigned-char -x musa -mtgpu")
+ set(COMPILE_FLAGS "-Od3 -fno-strict-aliasing -ffast-math -fsigned-char -x musa -mtgpu -fmusa-flush-denormals-to-zero")
foreach(ARCH ${MUSA_ARCHITECTURES})
set(COMPILE_FLAGS "${COMPILE_FLAGS} --cuda-gpu-arch=mp_${ARCH}")
endforeach()
diff --git a/ggml/src/ggml-webgpu/ggml-webgpu.cpp b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
index 93200a4d29f..de68c5689bb 100644
--- a/ggml/src/ggml-webgpu/ggml-webgpu.cpp
+++ b/ggml/src/ggml-webgpu/ggml-webgpu.cpp
@@ -28,6 +28,7 @@
/* Constants */
#define WEBGPU_COMMAND_SUBMIT_BATCH_SIZE 16
+#define WEBGPU_WAIT_ANY_BATCH_SIZE 64
#define WEBGPU_MUL_MAT_WG_SIZE 64
#define WEBGPU_NUM_PARAM_BUFS 100
#define WEBGPU_PARAMS_BUF_SIZE_BYTES 128 // enough for 32 parameters
@@ -35,6 +36,9 @@
#define WEBGPU_SET_ROWS_ERROR_BUF_SIZE_BYTES 4
#define WEBGPU_STORAGE_BUF_BINDING_MULT 4 // a storage buffer binding size must be a multiple of 4
+// For operations which process a row in parallel, this seems like a reasonable default
+#define WEBGPU_ROW_SPLIT_WG_SIZE 64
+
/* End Constants */
// This is a "fake" base pointer, since WebGPU buffers do not have pointers to their locations.
@@ -130,15 +134,16 @@ struct webgpu_context_struct {
wgpu::ComputePipeline set_rows_pipeline;
wgpu::ComputePipeline get_rows_pipeline[30];
wgpu::ComputePipeline get_rows_f32_no_vec_pipeline;
- wgpu::ComputePipeline cpy_pipeline[2][2]; // src type, dst type
- wgpu::ComputePipeline add_pipeline[2][2]; // type, inplace
- wgpu::ComputePipeline sub_pipeline[2][2]; // type, inplace
- wgpu::ComputePipeline mul_pipeline[2][2]; // type, inplace
- wgpu::ComputePipeline div_pipeline[2][2]; // type, inplace
- wgpu::ComputePipeline rms_norm_pipeline[2]; // inplace
- wgpu::ComputePipeline rope_pipeline[2][2][2]; // type, ff, inplace
- wgpu::ComputePipeline glu_pipeline[7][2][2]; // glu-op, type, split
- wgpu::ComputePipeline scale_pipeline[2]; // inplace
+ wgpu::ComputePipeline cpy_pipeline[2][2]; // src type, dst type
+ wgpu::ComputePipeline add_pipeline[2][2]; // type, inplace
+ wgpu::ComputePipeline sub_pipeline[2][2]; // type, inplace
+ wgpu::ComputePipeline mul_pipeline[2][2]; // type, inplace
+ wgpu::ComputePipeline div_pipeline[2][2]; // type, inplace
+ wgpu::ComputePipeline rms_norm_pipeline[2]; // inplace
+ wgpu::ComputePipeline rope_pipeline[2][2][2]; // type, ff, inplace
+ wgpu::ComputePipeline glu_pipeline[7][2][2]; // glu-op, type, split
+ wgpu::ComputePipeline scale_pipeline[2]; // inplace
+ wgpu::ComputePipeline soft_max_pipeline[3][2][2]; // (no_mask, f32_mask, f16_mask), has_sink, inplace
size_t memset_bytes_per_thread;
@@ -256,8 +261,12 @@ static void ggml_backend_webgpu_wait_on_submission(webgpu_context & ctx) {
}),
UINT64_MAX);
} else {
- // existing callbacks, wait on them
- ctx->instance.WaitAny(ctx->callback_futures.size(), ctx->callback_futures.data(), UINT64_MAX);
+ // WebGPU implementations may limit the number of futures that can be waited on at once,
+ // so wait in batches (64 is what Dawn supports).
+ for (size_t i = 0; i < ctx->callback_futures.size(); i += WEBGPU_WAIT_ANY_BATCH_SIZE) {
+ size_t end = std::min(i + WEBGPU_WAIT_ANY_BATCH_SIZE, ctx->callback_futures.size());
+ ctx->instance.WaitAny(end - i, ctx->callback_futures.data() + i, UINT64_MAX);
+ }
ctx->callback_futures.clear();
}
}
@@ -726,9 +735,7 @@ static void ggml_webgpu_rms_norm(webgpu_context & ctx, ggml_tensor * src, ggml_t
.size = ggml_webgpu_tensor_binding_size(ctx, dst) });
}
- size_t max_wg_size = ctx->max_wg_size_x;
- uint32_t wg_x = (src->ne[1] * src->ne[2] * src->ne[3] + max_wg_size - 1) / max_wg_size;
- ggml_backend_webgpu_build_and_enqueue(ctx, ctx->rms_norm_pipeline[inplace], params, entries, wg_x,
+ ggml_backend_webgpu_build_and_enqueue(ctx, ctx->rms_norm_pipeline[inplace], params, entries, ggml_nrows(src),
ggml_op_name(dst->op));
}
@@ -912,6 +919,79 @@ static void ggml_webgpu_scale(webgpu_context & ctx, ggml_tensor * src, ggml_tens
ggml_op_name(dst->op));
}
+static void ggml_webgpu_soft_max(webgpu_context & ctx,
+ ggml_tensor * src0,
+ ggml_tensor * src1,
+ ggml_tensor * src2,
+ ggml_tensor * dst) {
+ const int inplace = ggml_webgpu_tensor_equal(src0, dst);
+ const int mask_type = (src1 != nullptr) ? src1->type : 2; // use 2 for no mask here
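+    // relies on GGML_TYPE_F32 == 0 and GGML_TYPE_F16 == 1, so the mask type doubles as the
+    // first index into soft_max_pipeline; 2 selects the no-mask variants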
+ const int has_sink = (src2 != nullptr);
+ float max_bias;
+ memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));
+ float n_head_log2 = float(1u << (uint32_t) floor(log2(src0->ne[2])));
+ float m0 = powf(2.0f, -(max_bias) / n_head_log2);
+ float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+    std::vector<uint32_t> params = {
+ (uint32_t) (ggml_webgpu_tensor_misalignment(ctx, src0) / ggml_type_size(src0->type)),
+ mask_type < 2 ? (uint32_t) (ggml_webgpu_tensor_misalignment(ctx, src1) / ggml_type_size(src1->type)) : 0,
+ has_sink ? (uint32_t) (ggml_webgpu_tensor_misalignment(ctx, src2) / ggml_type_size(src2->type)) : 0,
+ (uint32_t) (ggml_webgpu_tensor_misalignment(ctx, dst) / ggml_type_size(dst->type)),
+ (uint32_t) (src0->nb[1] / ggml_type_size(src0->type)),
+ (uint32_t) (src0->nb[2] / ggml_type_size(src0->type)),
+ (uint32_t) (src0->nb[3] / ggml_type_size(src0->type)),
+ mask_type < 2 ? (uint32_t) (src1->nb[1] / ggml_type_size(src1->type)) : 0,
+ mask_type < 2 ? (uint32_t) (src1->nb[2] / ggml_type_size(src1->type)) : 0,
+ mask_type < 2 ? (uint32_t) (src1->nb[3] / ggml_type_size(src1->type)) : 0,
+ (uint32_t) (dst->nb[1] / ggml_type_size(dst->type)),
+ (uint32_t) (dst->nb[2] / ggml_type_size(dst->type)),
+ (uint32_t) (dst->nb[3] / ggml_type_size(dst->type)),
+ (uint32_t) ggml_nelements(dst),
+ (uint32_t) src0->ne[0],
+ (uint32_t) src0->ne[1],
+ (uint32_t) src0->ne[2],
+ mask_type < 2 ? (uint32_t) src1->ne[2] : 0,
+ mask_type < 2 ? (uint32_t) src1->ne[3] : 0,
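+        // the float parameters below are bit-cast into the u32 params buffer and
+        // reinterpreted on the shader side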
+ *(uint32_t *) dst->op_params, // scale
+ *(uint32_t *) &max_bias,
+ *(uint32_t *) &n_head_log2,
+ *(uint32_t *) &m0,
+ *(uint32_t *) &m1
+ };
+
+    std::vector<wgpu::BindGroupEntry> entries = {
+ { .binding = 0,
+ .buffer = ggml_webgpu_tensor_buf(src0),
+ .offset = ggml_webgpu_tensor_align_offset(ctx, src0),
+ .size = ggml_webgpu_tensor_binding_size(ctx, src0) }
+ };
+ uint32_t binding_num = 1;
+ if (mask_type < 2) {
+ entries.push_back({ .binding = binding_num,
+ .buffer = ggml_webgpu_tensor_buf(src1),
+ .offset = ggml_webgpu_tensor_align_offset(ctx, src1),
+ .size = ggml_webgpu_tensor_binding_size(ctx, src1) });
+ binding_num++;
+ }
+ if (has_sink) {
+ entries.push_back({ .binding = binding_num,
+ .buffer = ggml_webgpu_tensor_buf(src2),
+ .offset = ggml_webgpu_tensor_align_offset(ctx, src2),
+ .size = ggml_webgpu_tensor_binding_size(ctx, src2) });
+ binding_num++;
+ }
+ if (!inplace) {
+ entries.push_back({ .binding = binding_num,
+ .buffer = ggml_webgpu_tensor_buf(dst),
+ .offset = ggml_webgpu_tensor_align_offset(ctx, dst),
+ .size = ggml_webgpu_tensor_binding_size(ctx, dst) });
+ }
+
+ ggml_backend_webgpu_build_and_enqueue(ctx, ctx->soft_max_pipeline[mask_type][has_sink][inplace], params, entries,
+ ggml_nrows(dst), ggml_op_name(dst->op));
+}
+
// Returns true if node has enqueued work into the queue, false otherwise
static bool ggml_webgpu_encode_node(webgpu_context ctx, ggml_tensor * node) {
if (ggml_is_empty(node)) {
@@ -1237,11 +1317,11 @@ static ggml_guid_t ggml_backend_webgpu_guid(void) {
return reinterpret_cast<ggml_guid_t>((void *) guid_str);
}
-// The max workgroup size is a common constant
-static std::vector<wgpu::ConstantEntry> ggml_webgpu_max_wg_size_entry(webgpu_context & webgpu_ctx) {
+// Workgroup size is a common constant
+static std::vector<wgpu::ConstantEntry> ggml_webgpu_wg_size_entry(uint32_t wg_size) {
    std::vector<wgpu::ConstantEntry> constants(1);
constants[0].key = "wg_size";
- constants[0].value = webgpu_ctx->max_wg_size_x;
+ constants[0].value = wg_size;
return constants;
}
@@ -1309,11 +1389,11 @@ static void ggml_webgpu_init_mul_mat_pipeline(webgpu_context & webgpu_ctx) {
static void ggml_webgpu_init_set_rows_pipeline(webgpu_context & webgpu_ctx) {
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->set_rows_pipeline, wgsl_set_rows, "set_rows",
- ggml_webgpu_max_wg_size_entry(webgpu_ctx));
+ ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x));
}
static void ggml_webgpu_init_get_rows_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->get_rows_pipeline[GGML_TYPE_F32], wgsl_get_rows_f32_vec,
"get_rows_f32_vec", constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->get_rows_f32_no_vec_pipeline, wgsl_get_rows_f32,
@@ -1363,7 +1443,7 @@ static void ggml_webgpu_init_get_rows_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_cpy_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->cpy_pipeline[GGML_TYPE_F32][GGML_TYPE_F32],
wgsl_cpy_f32_f32, "cpy_f32_f32", constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->cpy_pipeline[GGML_TYPE_F32][GGML_TYPE_F16],
@@ -1375,7 +1455,7 @@ static void ggml_webgpu_init_cpy_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_add_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->add_pipeline[GGML_TYPE_F32][0], wgsl_add_f32, "add_f32",
constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->add_pipeline[GGML_TYPE_F16][0], wgsl_add_f16, "add_f16",
@@ -1387,7 +1467,7 @@ static void ggml_webgpu_init_add_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_sub_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->sub_pipeline[GGML_TYPE_F32][0], wgsl_sub_f32, "sub_f32",
constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->sub_pipeline[GGML_TYPE_F16][0], wgsl_sub_f16, "sub_f16",
@@ -1399,7 +1479,7 @@ static void ggml_webgpu_init_sub_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_mul_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->mul_pipeline[GGML_TYPE_F32][0], wgsl_mul_f32, "mul_f32",
constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->mul_pipeline[GGML_TYPE_F16][0], wgsl_mul_f16, "mul_f16",
@@ -1411,7 +1491,7 @@ static void ggml_webgpu_init_mul_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_div_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->div_pipeline[GGML_TYPE_F32][0], wgsl_div_f32, "div_f32",
constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->div_pipeline[GGML_TYPE_F16][0], wgsl_div_f16, "div_f16",
@@ -1423,7 +1503,7 @@ static void ggml_webgpu_init_div_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_rms_norm_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(WEBGPU_ROW_SPLIT_WG_SIZE);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->rms_norm_pipeline[0], wgsl_rms_norm, "rms_norm",
constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->rms_norm_pipeline[1], wgsl_rms_norm_inplace,
@@ -1431,7 +1511,7 @@ static void ggml_webgpu_init_rms_norm_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_rope_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->rope_pipeline[GGML_TYPE_F32][0][0], wgsl_rope_f32,
"rope_f32", constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->rope_pipeline[GGML_TYPE_F32][0][1],
@@ -1451,7 +1531,7 @@ static void ggml_webgpu_init_rope_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_glu_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
// reglu
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->glu_pipeline[GGML_GLU_OP_REGLU][GGML_TYPE_F32][0],
wgsl_reglu_f32, "reglu_f32", constants);
@@ -1505,13 +1585,43 @@ static void ggml_webgpu_init_glu_pipeline(webgpu_context & webgpu_ctx) {
}
static void ggml_webgpu_init_scale_pipeline(webgpu_context & webgpu_ctx) {
-    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_max_wg_size_entry(webgpu_ctx);
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(webgpu_ctx->max_wg_size_x);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->scale_pipeline[0], wgsl_scale_f32, "scale_f32",
constants);
ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->scale_pipeline[1], wgsl_scale_f32_inplace,
"scale_f32_inplace", constants);
}
+static void ggml_webgpu_init_soft_max_pipeline(webgpu_context & webgpu_ctx) {
+    std::vector<wgpu::ConstantEntry> constants = ggml_webgpu_wg_size_entry(WEBGPU_ROW_SPLIT_WG_SIZE);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[2][0][0], wgsl_soft_max_f32,
+ "soft_max_f32", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[2][0][1], wgsl_soft_max_f32_inplace,
+ "soft_max_f32_inplace", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[2][1][0], wgsl_soft_max_f32_sink,
+ "soft_max_f32_sink", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[2][1][1],
+ wgsl_soft_max_f32_sink_inplace, "soft_max_f32_sink_inplace", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[0][0][0], wgsl_soft_max_f32_mask_f32,
+ "soft_max_f32_mask_f32", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[0][0][1],
+ wgsl_soft_max_f32_mask_f32_inplace, "soft_max_f32_mask_f32_inplace", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[1][0][0], wgsl_soft_max_f32_mask_f16,
+ "soft_max_f32_mask_f16", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[1][0][1],
+ wgsl_soft_max_f32_mask_f16_inplace, "soft_max_f32_mask_f16_inplace", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[0][1][0],
+ wgsl_soft_max_f32_mask_f32_sink, "soft_max_f32_mask_f32_sink", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[0][1][1],
+ wgsl_soft_max_f32_mask_f32_sink_inplace, "soft_max_f32_mask_f32_sink_inplace",
+ constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[1][1][0],
+ wgsl_soft_max_f32_mask_f16_sink, "soft_max_f32_mask_f16_sink", constants);
+ ggml_webgpu_create_pipeline(webgpu_ctx->device, webgpu_ctx->soft_max_pipeline[1][1][1],
+ wgsl_soft_max_f32_mask_f16_sink_inplace, "soft_max_f32_mask_f16_sink_inplace",
+ constants);
+}
+
static ggml_backend_t ggml_backend_webgpu_device_init(ggml_backend_dev_t dev, const char * params) {
GGML_UNUSED(params);
@@ -1593,6 +1703,7 @@ static bool ggml_backend_webgpu_device_supports_op(ggml_backend_dev_t dev, const
ggml_tensor * src0 = op->src[0];
ggml_tensor * src1 = op->src[1];
+ ggml_tensor * src2 = op->src[2];
// on smaller devices (or CI), tensors may be larger than the max storage buffer size
if (ggml_nbytes(op) > webgpu_ctx->limits.maxStorageBufferBindingSize ||
@@ -1623,7 +1734,7 @@ static bool ggml_backend_webgpu_device_supports_op(ggml_backend_dev_t dev, const
(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
break;
case GGML_OP_SET_ROWS:
- supports_op = (op->type == GGML_TYPE_F16 && op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_I64);
+ supports_op = (op->type == GGML_TYPE_F16 && src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_I64);
break;
case GGML_OP_GET_ROWS:
if (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_I32 ||
@@ -1698,13 +1809,25 @@ static bool ggml_backend_webgpu_device_supports_op(ggml_backend_dev_t dev, const
default:
break;
}
-#ifdef GGML_WEBGPU_DEBUG
+ if (ggml_nbytes(op) > webgpu_ctx->limits.maxStorageBufferBindingSize ||
+ (src0 != nullptr && ggml_nbytes(src0) > webgpu_ctx->limits.maxStorageBufferBindingSize) ||
+ (src1 != nullptr && ggml_nbytes(src1) > webgpu_ctx->limits.maxStorageBufferBindingSize) ||
+ (src2 != nullptr && ggml_nbytes(src2) > webgpu_ctx->limits.maxStorageBufferBindingSize)) {
+ supports_op = false;
+ WEBGPU_LOG_DEBUG("ggml_webgpu op not supported due to size: ");
+ }
+
if (!supports_op) {
- WEBGPU_LOG_DEBUG("not supported: " << ggml_op_name(op->op) << " with types dst: " << ggml_type_name(op->type)
- << ", src0: " << (op->src[0] ? ggml_type_name(op->src[0]->type) : "null")
- << ", src1: " << (op->src[1] ? ggml_type_name(op->src[1]->type) : "null"));
+ WEBGPU_LOG_DEBUG("ggml_webgpu op not supported: "
+ << ggml_op_name(op->op) << " with types dst: " << ggml_type_name(op->type)
+ << ", src0: " << (op->src[0] ? ggml_type_name(op->src[0]->type) : "null")
+ << ", src1: " << (op->src[1] ? ggml_type_name(op->src[1]->type) : "null"));
+ } else {
+ WEBGPU_LOG_DEBUG("ggml_webgpu op supported: "
+ << ggml_op_name(op->op) << " with types dst: " << ggml_type_name(op->type)
+ << ", src0: " << (op->src[0] ? ggml_type_name(op->src[0]->type) : "null")
+ << ", src1: " << (op->src[1] ? ggml_type_name(op->src[1]->type) : "null"));
}
-#endif
return supports_op;
}
diff --git a/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl b/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl
index a275eeb9783..4f72bb1c851 100644
--- a/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl
+++ b/ggml/src/ggml-webgpu/wgsl-shaders/rms_norm.wgsl
@@ -71,14 +71,14 @@ var src: array;
DECLS
override wg_size: u32;
+var<workgroup> scratch: array<f32, wg_size>;
+
@compute @workgroup_size(wg_size)
-fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
- if (gid.x >= params.ne1 * params.ne2 * params.ne3) {
- return;
- }
+fn main(@builtin(workgroup_id) wid: vec3<u32>,
+ @builtin(local_invocation_id) lid: vec3<u32>) {
// one thread per row
- var i = gid.x;
+ var i = wid.x;
let i3 = i / (params.ne2 * params.ne1);
i = i % (params.ne2 * params.ne1);
let i2 = i / params.ne1;
@@ -86,13 +86,38 @@ fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
let i_src_row = params.offset_src + i3 * params.stride_src3 + i2 * params.stride_src2 + i1 * params.stride_src1;
let i_dst_row = params.offset_src + i3 * params.stride_dst3 + i2 * params.stride_dst2 + i1 * params.stride_dst1;
+ let elems = (params.ne0 + wg_size - 1) / wg_size;
+
var sum = 0.0f;
- for (var j: u32 = 0; j < params.ne0; j++) {
- sum += src[i_src_row + j] * src[i_src_row + j];
+ var col = lid.x;
+ for (var j: u32 = 0; j < elems; j++) {
+ if (col >= params.ne0) {
+ break;
+ }
+ sum += pow(src[i_src_row + col], 2.0);
+ col += wg_size;
}
+
+ scratch[lid.x] = sum;
+ workgroupBarrier();
+ var offset = wg_size / 2;
+ while (offset > 0) {
+ if (lid.x < offset) {
+ scratch[lid.x] += scratch[lid.x + offset];
+ }
+ offset = offset / 2;
+ workgroupBarrier();
+ }
+ sum = scratch[0];
+
let scale = 1.0/sqrt(sum/f32(params.ne0) + params.eps);
- for (var j: u32 = 0; j < params.ne0; j++) {
- update(i_src_row + j, i_dst_row + j, scale);
+ col = lid.x;
+ for (var j: u32 = 0; j < elems; j++) {
+ if (col >= params.ne0) {
+ break;
+ }
+ update(i_src_row + col, i_dst_row + col, scale);
+ col += wg_size;
}
}
#end(SHADER)
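
For readers unfamiliar with the pattern, the rms_norm change above switches from one thread per row to one workgroup per row: each invocation accumulates a strided partial sum of squares, and the partials are then combined with a binary tree reduction through the shared scratch array (which assumes a power-of-two workgroup size, as the shader does). A scalar C++ sketch of the same idea, with the threads simulated serially and hypothetical sizes:

    // Mirrors the reduction pattern of the rms_norm shader above: strided
    // per-"thread" partial sums of squares, then a tree reduction over the
    // scratch array, then the RMS scale factor.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        const unsigned wg_size = 8;                     // hypothetical, power of two
        const float eps = 1e-6f;
        std::vector<float> row = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};

        // per-thread strided partial sums (threads simulated serially)
        std::vector<float> scratch(wg_size, 0.0f);
        for (unsigned lid = 0; lid < wg_size; ++lid) {
            for (size_t col = lid; col < row.size(); col += wg_size) {
                scratch[lid] += row[col] * row[col];
            }
        }
        // tree reduction, mirroring the workgroupBarrier() loop in the shader
        for (unsigned offset = wg_size / 2; offset > 0; offset /= 2) {
            for (unsigned lid = 0; lid < offset; ++lid) {
                scratch[lid] += scratch[lid + offset];
            }
        }
        const float scale = 1.0f / std::sqrt(scratch[0] / (float) row.size() + eps);
        std::printf("rms scale = %f\n", scale);
        return 0;
    }
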
diff --git a/ggml/src/ggml-webgpu/wgsl-shaders/soft_max.tmpl.wgsl b/ggml/src/ggml-webgpu/wgsl-shaders/soft_max.tmpl.wgsl
new file mode 100644
index 00000000000..64ab576c083
--- /dev/null
+++ b/ggml/src/ggml-webgpu/wgsl-shaders/soft_max.tmpl.wgsl
@@ -0,0 +1,344 @@
+#define(VARIANTS)
+[
+ {
+ "SHADER_NAME": "soft_max_f32",
+ "DECLS": ["BASE_BINDINGS", "NOT_INPLACE", "NO_MASK", "NO_SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_inplace",
+ "DECLS": ["BASE_BINDINGS_INPLACE", "INPLACE", "NO_MASK", "NO_SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_sink",
+ "DECLS": ["SINK_BINDINGS", "NOT_INPLACE", "NO_MASK", "SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_sink_inplace",
+ "DECLS": ["SINK_BINDINGS_INPLACE", "INPLACE", "NO_MASK", "SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_mask_f32",
+ "REPLS": {
+ "MASK_TYPE" : "f32",
+ },
+ "DECLS": ["MASK_BINDINGS", "NOT_INPLACE", "MASK", "NO_SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_mask_f32_inplace",
+ "REPLS": {
+ "MASK_TYPE" : "f32",
+ },
+ "DECLS": ["MASK_BINDINGS_INPLACE", "INPLACE", "MASK", "NO_SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_mask_f16",
+ "REPLS": {
+ "MASK_TYPE" : "f16",
+ },
+ "DECLS": ["MASK_BINDINGS", "NOT_INPLACE", "MASK", "NO_SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_mask_f16_inplace",
+ "REPLS": {
+ "MASK_TYPE" : "f16",
+ },
+ "DECLS": ["MASK_BINDINGS_INPLACE", "INPLACE", "MASK", "NO_SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_mask_f32_sink",
+ "REPLS": {
+ "MASK_TYPE" : "f32",
+ },
+ "DECLS": ["MASK_SINK_BINDINGS", "NOT_INPLACE", "MASK", "SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_mask_f32_sink_inplace",
+ "REPLS": {
+ "MASK_TYPE" : "f32",
+ },
+ "DECLS": ["MASK_SINK_BINDINGS_INPLACE", "INPLACE", "MASK", "SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_mask_f16_sink",
+ "REPLS": {
+ "MASK_TYPE" : "f16",
+ },
+ "DECLS": ["MASK_SINK_BINDINGS", "NOT_INPLACE", "MASK", "SINK"]
+ },
+ {
+ "SHADER_NAME": "soft_max_f32_mask_f16_sink_inplace",
+ "REPLS": {
+ "MASK_TYPE" : "f16",
+ },
+ "DECLS": ["MASK_SINK_BINDINGS_INPLACE", "INPLACE", "MASK", "SINK"]
+ }
+]
+#end(VARIANTS)
+
+#define(DECLS)
+
+#decl(BASE_BINDINGS)
+@group(0) @binding(1)
+var<storage, read_write> dst: array<f32>;
+
+@group(0) @binding(2)
+var<uniform> params: Params;
+#enddecl(BASE_BINDINGS)
+
+#decl(BASE_BINDINGS_INPLACE)
+@group(0) @binding(1)
+var<uniform> params: Params;
+#enddecl(BASE_BINDINGS_INPLACE)
+
+#decl(SINK_BINDINGS)
+@group(0) @binding(1)
+var<storage, read_write> sinks: array<f32>;
+
+@group(0) @binding(2)
+var<storage, read_write> dst: array<f32>;
+
+@group(0) @binding(3)
+var<uniform> params: Params;
+#enddecl(SINK_BINDINGS)
+
+#decl(SINK_BINDINGS_INPLACE)
+@group(0) @binding(1)
+var<storage, read_write> sinks: array<f32>;
+
+@group(0) @binding(2)
+var<uniform> params: Params;
+#enddecl(SINK_BINDINGS_INPLACE)
+
+#decl(MASK_BINDINGS)
+@group(0) @binding(1)
+var<storage, read_write> mask: array<{{MASK_TYPE}}>;
+
+@group(0) @binding(2)
+var<storage, read_write> dst: array<f32>;
+
+@group(0) @binding(3)
+var<uniform> params: Params;
+#enddecl(MASK_BINDINGS)
+
+#decl(MASK_BINDINGS_INPLACE)
+@group(0) @binding(1)
+var<storage, read_write> mask: array<{{MASK_TYPE}}>;
+
+@group(0) @binding(2)
+var<uniform> params: Params;
+#enddecl(MASK_BINDINGS_INPLACE)
+
+#decl(MASK_SINK_BINDINGS)
+@group(0) @binding(1)
+var<storage, read_write> mask: array<{{MASK_TYPE}}>;
+
+@group(0) @binding(2)
+var<storage, read_write> sinks: array<f32>;
+
+@group(0) @binding(3)
+var<storage, read_write> dst: array<f32>;
+
+@group(0) @binding(4)
+var<uniform> params: Params;
+#enddecl(MASK_SINK_BINDINGS)
+
+#decl(MASK_SINK_BINDINGS_INPLACE)
+@group(0) @binding(1)
+var<storage, read_write> mask: array<{{MASK_TYPE}}>;
+
+@group(0) @binding(2)
+var<storage, read_write> sinks: array<f32>;
+
+@group(0) @binding(3)
+var<uniform> params: Params;
+#enddecl(MASK_SINK_BINDINGS_INPLACE)
+
+#decl(NOT_INPLACE)
+fn inter_value(i: u32) -> f32 {
+ return dst[i];
+}
+
+fn update(i: u32, val: f32) {
+ dst[i] = val;
+}
+#enddecl(NOT_INPLACE)
+
+#decl(INPLACE)
+fn inter_value(i: u32) -> f32 {
+ return src[i];
+}
+
+fn update(i: u32, val: f32) {
+ src[i] = val;
+}
+#enddecl(INPLACE)
+
+#decl(NO_MASK)
+fn mask_val(i: u32) -> f32 {
+ return 0.0;
+}
+#enddecl(NO_MASK)
+
+#decl(MASK)
+fn mask_val(i: u32) -> f32 {
+ return f32(mask[i]);
+}
+#enddecl(MASK)
+
+#decl(NO_SINK)
+fn lower_max_bound(i2: u32) -> f32 {
+ return -1e30;
+}
+
+fn add_sinks(val: f32, i2: u32, max_val: f32) -> f32 {
+ return val;
+}
+#enddecl(NO_SINK)
+
+#decl(SINK)
+fn lower_max_bound(i2: u32) -> f32 {
+ return sinks[params.offset_sinks + i2];
+}
+
+fn add_sinks(val: f32, i2: u32, max_val: f32) -> f32 {
+ return val + exp(sinks[params.offset_sinks + i2] - max_val);
+}
+#enddecl(SINK)
+
+#end(DECLS)
+
+#define(SHADER)
+enable f16;
+
+struct Params {
+ offset_src0: u32,
+ offset_src1: u32,
+ offset_sinks: u32,
+ offset_dst: u32,
+
+ // Strides (in elements)
+ stride_src01: u32,
+ stride_src02: u32,
+ stride_src03: u32,
+
+ stride_src11: u32,
+ stride_src12: u32,
+ stride_src13: u32,
+
+ stride_dst1: u32,
+ stride_dst2: u32,
+ stride_dst3: u32,
+
+ // shape of src0/dst
+ ne: u32,
+ ne0: u32,
+ ne1: u32,
+ ne2: u32,
+
+ // shape of src1
+ ne12: u32,
+ ne13: u32,
+
+ scale: f32,
+ max_bias: f32,
+ n_head_log2: f32,
+ m0: f32,
+ m1: f32,
+};
+
+@group(0) @binding(0)
+var<storage, read_write> src: array<f32>;
+
+DECLS
+
+const CACHE_SIZE: u32 = 16;
+
+override wg_size: u32;
+var<workgroup> scratch: array<f32, wg_size>;
+
+@compute @workgroup_size(wg_size)
+fn main(@builtin(workgroup_id) wid: vec3<u32>,
+ @builtin(local_invocation_id) lid: vec3<u32>) {
+
+ var i = wid.x;
+ let i3 = i / (params.ne2 * params.ne1);
+ i = i % (params.ne2 * params.ne1);
+ let i2 = i / params.ne1;
+ let i1 = i % params.ne1;
+ let i_src0_row = params.offset_src0 + i3 * params.stride_src03 + i2 * params.stride_src02 + i1 * params.stride_src01;
+ let i_src1_row = params.offset_src1 + (i3 % params.ne13) * params.stride_src13 + (i2 % params.ne12) * params.stride_src12 + i1 * params.stride_src11;
+ let i_dst_row = params.offset_dst + i3 * params.stride_dst3 + i2 * params.stride_dst2 + i1 * params.stride_dst1;
+ let elems = (params.ne0 + wg_size - 1) / wg_size;
+
+ let head = f32(i2);
+ let slope = select(1, select(pow(params.m1, 2 * (head - params.n_head_log2) + 1), pow(params.m0, head + 1), head < params.n_head_log2), params.max_bias > 0);
+
+ var cache: array<f32, CACHE_SIZE>;
+
+ var max_val = lower_max_bound(i2);
+ var col = lid.x;
+ for (var j: u32 = 0; j < elems; j++) {
+ if (col >= params.ne0) {
+ break;
+ }
+ let val = src[i_src0_row + col] * params.scale + slope * mask_val(i_src1_row + col);
+ max_val = max(max_val, val);
+ if (col < CACHE_SIZE) {
+ cache[col] = val;
+ }
+ col += wg_size;
+ }
+
+ scratch[lid.x] = max_val;
+ workgroupBarrier();
+ var offset = wg_size / 2;
+ while (offset > 0) {
+ if (lid.x < offset) {
+ scratch[lid.x] = max(scratch[lid.x], scratch[lid.x + offset]);
+ }
+ offset = offset / 2;
+ workgroupBarrier();
+ }
+ let row_max = scratch[0];
+
+ var sum = 0.0f;
+ col = lid.x;
+ for (var j: u32 = 0; j < elems; j++) {
+ if (col >= params.ne0) {
+ break;
+ }
+ let val = select(src[i_src0_row + col] * params.scale + slope * mask_val(i_src1_row + col),
+ cache[col], col < CACHE_SIZE);
+ let ex = exp(val - row_max);
+ sum += ex;
+ if (col < CACHE_SIZE) {
+ cache[col] = ex;
+ } else {
+ update(i_dst_row + col, ex);
+ }
+ col += wg_size;
+ }
+
+ scratch[lid.x] = sum;
+ workgroupBarrier();
+ offset = wg_size / 2;
+ while (offset > 0) {
+ if (lid.x < offset) {
+ scratch[lid.x] += scratch[lid.x + offset];
+ }
+ offset = offset / 2;
+ workgroupBarrier();
+ }
+ let row_sum = add_sinks(scratch[0], i2, row_max);
+
+ let sum_recip = 1.0 / row_sum;
+ col = lid.x;
+ for (var j: u32 = 0; j < elems; j++) {
+ if (col >= params.ne0) {
+ break;
+ }
+ update(i_dst_row + col, select(inter_value(i_dst_row + col), cache[col], col < CACHE_SIZE) * sum_recip);
+ col += wg_size;
+ }
+}
+#end(SHADER)
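
The shader template above computes a numerically stable softmax in three passes over the row (max, exp-sum, normalize), keeps a small per-thread cache of the first columns each invocation touches to avoid re-reading them, and folds attention sinks in by lower-bounding the row max with the sink value and adding exp(sink - max) to the denominator. A scalar C++ reference of that math on one row; all inputs below are hypothetical:

    // One-row reference for the soft_max math above:
    //   val = src*scale + slope*mask, max lower-bounded by the sink,
    //   denominator = sum(exp(val - max)) + exp(sink - max) when a sink exists.
    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<float> src  = {0.5f, -1.0f, 2.0f, 0.0f};
        std::vector<float> mask = {0.0f,  0.0f, -INFINITY, 0.0f};
        const float scale = 0.1f, slope = 1.0f;
        const bool  has_sink = true;
        const float sink = 0.25f;

        float max_val = has_sink ? sink : -1e30f;
        std::vector<float> val(src.size());
        for (size_t i = 0; i < src.size(); ++i) {
            val[i] = src[i] * scale + slope * mask[i];
            max_val = std::max(max_val, val[i]);
        }
        float sum = 0.0f;
        for (float v : val) sum += std::exp(v - max_val);
        if (has_sink) sum += std::exp(sink - max_val);

        for (size_t i = 0; i < src.size(); ++i) {
            std::printf("dst[%zu] = %f\n", i, std::exp(val[i] - max_val) / sum);
        }
        return 0;
    }
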
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index aecbdad5a3d..2bce1375ba3 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1143,10 +1143,10 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
"HARDSIGMOID",
"EXP",
"GELU_ERF",
+ "XIELU",
};
-static_assert(GGML_UNARY_OP_COUNT == 15, "GGML_UNARY_OP_COUNT != 15");
-
+static_assert(GGML_UNARY_OP_COUNT == 16, "GGML_UNARY_OP_COUNT != 16");
static const char * GGML_GLU_OP_NAME[GGML_GLU_OP_COUNT] = {
"REGLU",
@@ -2652,6 +2652,29 @@ struct ggml_tensor * ggml_silu_inplace(
return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SILU);
}
+// ggml_xielu
+
+struct ggml_tensor * ggml_xielu(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ float alpha_n,
+ float alpha_p,
+ float beta,
+ float eps) {
+ struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
+
+ ggml_set_op_params_i32(result, 0, (int32_t) GGML_UNARY_OP_XIELU);
+ ggml_set_op_params_f32(result, 1, beta + ggml_softplus(alpha_n));
+ ggml_set_op_params_f32(result, 2, ggml_softplus(alpha_p));
+ ggml_set_op_params_f32(result, 3, beta);
+ ggml_set_op_params_f32(result, 4, eps);
+
+ result->op = GGML_OP_UNARY;
+ result->src[0] = a;
+
+ return result;
+}
+
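
Worth noting: ggml_xielu() does not store the raw alpha parameters. Slots 1 and 2 of op_params hold beta + softplus(alpha_n) and softplus(alpha_p), so backend kernels read already-transformed coefficients. A small sketch of the packed values, assuming ggml_softplus is the usual stabilized log(1 + exp(x)); the parameter values are hypothetical:

    // Prints the values ggml_xielu() would pack into op_params slots 1..4,
    // assuming ggml_softplus(x) == log1p(exp(x)) with a large-x shortcut.
    #include <cmath>
    #include <cstdio>

    static float softplus(float x) { return x > 20.0f ? x : std::log1p(std::exp(x)); }

    int main() {
        const float alpha_n = 0.8f, alpha_p = 0.8f, beta = 0.5f, eps = -1e-6f; // hypothetical
        std::printf("param[1] = beta + softplus(alpha_n) = %f\n", beta + softplus(alpha_n));
        std::printf("param[2] = softplus(alpha_p)        = %f\n", softplus(alpha_p));
        std::printf("param[3] = beta                     = %f\n", beta);
        std::printf("param[4] = eps                      = %f\n", eps);
        return 0;
    }
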
// ggml_silu_back
struct ggml_tensor * ggml_silu_back(
@@ -3829,6 +3852,15 @@ struct ggml_tensor * ggml_soft_max_ext(
return ggml_soft_max_impl(ctx, a, mask, scale, max_bias, false);
}
+struct ggml_tensor * ggml_soft_max_ext_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * mask,
+ float scale,
+ float max_bias) {
+ return ggml_soft_max_impl(ctx, a, mask, scale, max_bias, true);
+}
+
void ggml_soft_max_add_sinks(
struct ggml_tensor * a,
struct ggml_tensor * sinks) {
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 88ea9f32f8c..1600405ea86 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -297,6 +297,13 @@ class Projector:
class Diffusion:
SHIFT_LOGITS = "diffusion.shift_logits"
+ class xIELU:
+ ALPHA_P = "xielu.alpha_p"
+ ALPHA_N = "xielu.alpha_n"
+ BETA = "xielu.beta"
+ EPS = "xielu.eps"
+
+
#
# recommended mapping of model tensor names for storage in gguf
#
@@ -405,6 +412,7 @@ class MODEL_ARCH(IntEnum):
LLADA_MOE = auto()
SEED_OSS = auto()
GROVEMOE = auto()
+ APERTUS = auto()
class VISION_PROJECTOR_TYPE(IntEnum):
@@ -746,6 +754,7 @@ class MODEL_TENSOR(IntEnum):
MODEL_ARCH.LLADA_MOE: "llada-moe",
MODEL_ARCH.SEED_OSS: "seed_oss",
MODEL_ARCH.GROVEMOE: "grovemoe",
+ MODEL_ARCH.APERTUS: "apertus",
}
VISION_PROJECTOR_TYPE_NAMES: dict[VISION_PROJECTOR_TYPE, str] = {
@@ -2706,6 +2715,24 @@ class MODEL_TENSOR(IntEnum):
MODEL_TENSOR.FFN_DOWN_EXP,
MODEL_TENSOR.FFN_UP_EXP,
],
+ MODEL_ARCH.APERTUS: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.ATTN_Q_NORM,
+ MODEL_TENSOR.ATTN_K_NORM,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
MODEL_ARCH.LLADA_MOE: [
MODEL_TENSOR.TOKEN_EMBD,
MODEL_TENSOR.OUTPUT_NORM,
diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py
index 3152a30d7b2..30fc1a05ec0 100644
--- a/gguf-py/gguf/gguf_writer.py
+++ b/gguf-py/gguf/gguf_writer.py
@@ -1084,6 +1084,18 @@ def add_audio_num_mel_bins(self, value: int) -> None:
def add_audio_stack_factor(self, value: int) -> None:
self.add_uint32(Keys.ClipAudio.Projector.STACK_FACTOR, value)
+ def add_xielu_alpha_p(self, values: Sequence[float]):
+ self.add_array(Keys.xIELU.ALPHA_P, values)
+
+ def add_xielu_alpha_n(self, values: Sequence[float]):
+ self.add_array(Keys.xIELU.ALPHA_N, values)
+
+ def add_xielu_beta(self, values: Sequence[float]):
+ self.add_array(Keys.xIELU.BETA, values)
+
+ def add_xielu_eps(self, values: Sequence[float]):
+ self.add_array(Keys.xIELU.EPS, values)
+
# diffusion models
def add_diffusion_shift_logits(self, value: bool) -> None:
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index c533b55c012..67b27413405 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -148,6 +148,7 @@ class TensorNameMap:
"model.layers.{bid}.operator_norm", # lfm2
"model.transformer.blocks.{bid}.attn_norm", # llada
"layers.{bid}.input_layernorm", # qwen3-embedding
+ "model.layers.{bid}.attention_layernorm" # apertus
),
# Attention norm 2
@@ -325,6 +326,7 @@ class TensorNameMap:
"model.layers.layers.{bid}.pre_mlp_norm", # plamo2
"model.transformer.blocks.{bid}.ff_norm", # llada
"layers.{bid}.post_attention_layernorm", # qwen3-embedding
+ "model.layers.{bid}.feedforward_layernorm", # apertus
),
# Post feed-forward norm
@@ -547,6 +549,7 @@ class TensorNameMap:
"transformer.layers.{bid}.attn.q_norm", # openelm
"model.layers.layers.{bid}.mixer.q", # plamo2
"layers.{bid}.self_attn.q_norm", # qwen3-embedding
+ "model.layers.{bid}.attention.query_layernorm", # apertus
),
MODEL_TENSOR.ATTN_K_NORM: (
@@ -560,6 +563,7 @@ class TensorNameMap:
"transformer.layers.{bid}.attn.k_norm", # openelm
"model.layers.layers.{bid}.mixer.k", # plamo2
"layers.{bid}.self_attn.k_norm", # qwen3-embedding
+ "model.layers.{bid}.attention.key_layernorm", # apertus
),
MODEL_TENSOR.ROPE_FREQS: (
diff --git a/models/templates/Apertus-8B-Instruct.jinja b/models/templates/Apertus-8B-Instruct.jinja
new file mode 100644
index 00000000000..10826ff6901
--- /dev/null
+++ b/models/templates/Apertus-8B-Instruct.jinja
@@ -0,0 +1,327 @@
+{%- macro render_typescript_type(param_spec, required_params, is_nullable=false) -%}
+ {%- if param_spec.type == "array" -%}
+ {%- if param_spec['items'] -%}
+ {%- if param_spec['items']['type'] == "string" -%}
+ {{- "string[]" }}
+ {%- elif param_spec['items']['type'] == "number" -%}
+ {{- "number[]" }}
+ {%- elif param_spec['items']['type'] == "integer" -%}
+ {{- "number[]" }}
+ {%- elif param_spec['items']['type'] == "boolean" -%}
+ {{- "boolean[]" }}
+ {%- else -%}
+ {%- set inner_type = render_typescript_type(param_spec['items'], required_params) -%}
+ {%- if inner_type == "object | object" or inner_type|length > 50 -%}
+ {{- "any[]" }}
+ {%- else -%}
+ {{- inner_type + "[]" }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- if param_spec.nullable -%}
+ {{- " | null" }}
+ {%- endif -%}
+ {%- else -%}
+ {{- "any[]" }}
+ {%- if param_spec.nullable -%}
+ {{- " | null" }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- elif param_spec.type is defined and param_spec.type is iterable and param_spec.type is not string and param_spec.type is not mapping and param_spec.type[0] is defined -%}
+ {#- Handle array of types like ["object", "object"] from Union[dict, list] #}
+ {%- if param_spec.type | length > 1 -%}
+ {{- param_spec.type | join(" | ") }}
+ {%- else -%}
+ {{- param_spec.type[0] }}
+ {%- endif -%}
+ {%- elif param_spec.oneOf -%}
+ {#- Handle oneOf schemas - check for complex unions and fallback to any #}
+ {%- set has_object_variants = false -%}
+ {%- for variant in param_spec.oneOf -%}
+ {%- if variant.type == "object" -%}
+ {%- set has_object_variants = true -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- if has_object_variants and param_spec.oneOf|length > 1 -%}
+ {{- "any" }}
+ {%- else -%}
+ {%- for variant in param_spec.oneOf -%}
+ {{- render_typescript_type(variant, required_params) -}}
+ {%- if variant.description %}
+ {{- "// " + variant.description }}
+ {%- endif -%}
+ {%- if variant.default is defined %}
+ {{ "// default: " + variant.default|tojson }}
+ {%- endif -%}
+ {%- if not loop.last %}
+ {{- " | " }}
+ {% endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- elif param_spec.type == "string" -%}
+ {%- if param_spec.enum -%}
+ {{- '"' + param_spec.enum|join('" | "') + '"' -}}
+ {%- else -%}
+ {{- "string" }}
+ {%- if param_spec.nullable %}
+ {{- " | null" }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- elif param_spec.type == "number" -%}
+ {{- "number" }}
+ {%- elif param_spec.type == "integer" -%}
+ {{- "number" }}
+ {%- elif param_spec.type == "boolean" -%}
+ {{- "boolean" }}
+ {%- elif param_spec.type == "object" -%}
+ {%- if param_spec.properties -%}
+ {{- "{\n" }}
+ {%- for prop_name, prop_spec in param_spec.properties.items() -%}
+ {{- prop_name -}}
+ {%- if prop_name not in (param_spec.required or []) -%}
+ {{- "?" }}
+ {%- endif -%}
+ {{- ": " }}
+ {{ render_typescript_type(prop_spec, param_spec.required or []) }}
+ {%- if not loop.last -%}
+ {{-", " }}
+ {%- endif -%}
+ {%- endfor -%}
+ {{- "}" }}
+ {%- else -%}
+ {{- "object" }}
+ {%- endif -%}
+ {%- else -%}
+ {{- "any" }}
+ {%- endif -%}
+{%- endmacro -%}
+
+{%- macro render_tools(tools) -%}
+ {%- for tool in tools %}
+ {{- "// " + tool.description + "\n" }}
+ {{- "type "+ tool.name + " = " }}
+ {%- if tool.parameters and tool.parameters.properties %}
+ {{- "(_: {\n" }}
+ {%- for param_name, param_spec in tool.parameters.properties.items() %}
+ {%- if param_spec.description %}
+ {{- "// " + param_spec.description + "\n" }}
+ {%- endif %}
+ {{- param_name }}
+ {%- if param_name not in (tool.parameters.required or []) -%}
+ {{- "?" }}
+ {%- endif -%}
+ {{- ": " }}
+ {{- render_typescript_type(param_spec, tool.parameters.required or []) }}
+ {%- if param_spec.default is defined -%}
+ {%- if param_spec.enum %}
+ {{- ", // default: " + param_spec.default }}
+ {%- elif param_spec.oneOf %}
+ {{- "// default: " + param_spec.default }}
+ {%- else %}
+ {{- ", // default: " + param_spec.default|tojson }}
+ {%- endif -%}
+ {%- endif -%}
+ {%- if not loop.last %}
+ {{- ",\n" }}
+ {%- else %}
+ {{- "\n" }}
+ {%- endif -%}
+ {%- endfor %}
+ {{- "}) => any;" }}
+ {%- else -%}
+ {{- "() => any;" }}
+ {%- endif -%}
+ {%- if not loop.last -%}
+ {{- "\n" }}
+ {%- endif -%}
+ {%- endfor %}
+{%- endmacro -%}
+
+{{ bos_token }}
+
+{%- set system_token = '<|system_start|>' -%}
+{%- set end_system_token = '<|system_end|>' -%}
+{%- set developer_token = '<|developer_start|>' -%}
+{%- set end_developer_token = '<|developer_end|>' -%}
+{%- set user_token = '<|user_start|>' -%}
+{%- set end_user_token = '<|user_end|>' -%}
+{%- set assistant_token = '<|assistant_start|>' -%}
+{%- set end_assistant_token = '<|assistant_end|>' -%}
+{%- set inner_token = '<|inner_prefix|>' -%}
+{%- set outer_token = '<|inner_suffix|>' -%}
+{%- set tool_calls_token = '<|tools_prefix|>' -%}
+{%- set end_tool_calls_token = '<|tools_suffix|>' -%}
+
+{%- set ns = namespace(in_assistant=false, in_tool=false, in_inner=false, assistant_format=none) -%}
+
+{%- if messages and messages[0].role == 'system' -%}
+ {%- if "content" in messages[0] -%}
+ {%- if messages[0].content is string -%}
+ {{ system_token + messages[0].content + end_system_token }}
+ {%- elif messages[0].content is mapping and "text" in messages[0].content -%}
+ {{ system_token + messages[0].content.text + end_system_token }}
+ {%- else -%}
+ {{- raise_exception("Invalid system message") -}}
+ {%- endif -%}
+ {%- else -%}
+ {{- raise_exception("Invalid system message") -}}
+ {%- endif -%}
+ {%- set loop_messages = messages[1:] -%}
+{%- else -%}
+ {{ system_token + 'You are Apertus, a helpful assistant created by the SwissAI initiative.\nKnowledge cutoff: 2024-04\nCurrent date: ' + strftime_now('%Y-%m-%d') + end_system_token }}
+ {%- set loop_messages = messages -%}
+{%- endif -%}
+
+{{ developer_token + 'Deliberation: ' }}
+{%- if enable_thinking is defined and enable_thinking -%}
+ {{ 'enabled\n' }}
+{%- else -%}
+ {{ 'disabled\n' }}
+{%- endif -%}
+{%- if tools is defined and tools -%}
+ {{ 'Tool Capabilities:\n' + render_tools(tools) }}
+{%- else -%}
+ {{ 'Tool Capabilities: disabled' }}
+{%- endif -%}
+{{ end_developer_token }}
+
+{%- for message in loop_messages -%}
+ {%- if message.role == 'user' -%}
+ {%- set ns.in_inner = false -%}
+ {%- if ns.in_tool -%}
+ {{ ']' }}
+ {%- set ns.in_tool = false -%}
+ {%- endif -%}
+ {%- if ns.in_assistant -%}
+ {{ end_assistant_token }}
+ {%- set ns.in_assistant = false -%}
+ {%- endif -%}
+ {%- if "content" in message -%}
+ {{ user_token }}
+ {%- if message.content is string -%}
+ {{ message.content }}
+ {%- elif message.content is mapping and "parts" in message.content -%}
+ {%- set parts = message.content.parts -%}
+ {%- for part in parts -%}
+ {%- if part.type == "text" -%}
+ {{ part.text }}
+ {%- else -%}
+ {{- raise_exception("Invalid user part: " + part.type) -}}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{- raise_exception("Invalid user message: " + message.role) -}}
+ {%- endif -%}
+ {{ end_user_token }}
+ {%- endif -%}
+ {%- elif message.role == 'assistant' -%}
+ {%- if not ns.in_assistant -%}
+ {{ assistant_token }}
+ {%- set ns.in_assistant = true -%}
+ {%- endif -%}
+ {%- if "content" in message and message.content is not none -%}
+ {%- if message.content is string and (ns.assistant_format is none or ns.assistant_format == "string") -%}
+ {%- if ns.in_tool -%}
+ {{ ']' }}
+ {%- set ns.in_tool = false -%}
+ {%- endif -%}
+ {%- set ns.assistant_format = "string" -%}
+ {{ message.content }}
+ {%- elif message.content is mapping and "blocks" in message.content and (ns.assistant_format is none or ns.assistant_format == "mapping") -%}
+ {%- set ns.assistant_format = "mapping" -%}
+ {%- set blocks = message.content.blocks -%}
+ {%- for block in blocks -%}
+ {%- if block.type == 'thoughts' -%}
+ {%- if ns.in_tool -%}
+ {{ ']' }}
+ {%- set ns.in_tool = false -%}
+ {%- endif -%}
+ {%- if not ns.in_inner -%}
+ {%- set ns.in_inner = true -%}
+ {{ inner_token }}
+ {%- endif -%}
+ {{ block.text }}
+ {%- elif block.type == 'tool_calls' -%}
+ {%- if ns.in_tool -%}
+ {{ ']' }}
+ {%- set ns.in_tool = false -%}
+ {%- endif -%}
+ {%- if ns.in_inner and not loop.first and block.calls|length == 1 and block.calls[0].name == 'display_answers' -%}
+ {%- set ns.in_inner = false -%}
+ {{ outer_token }}
+ {%- endif -%}
+ {{ tool_calls_token + '[' }}
+ {%- for tool_call in block.calls -%}
+ {{- '{"' + tool_call.name + '": ' + tool_call.arguments + '}' }}
+ {%- if not loop.last -%}
+ {{- ", " }}
+ {%- endif -%}
+ {%- endfor -%}
+ {{ ']' + end_tool_calls_token }}
+ {%- elif block.type == 'tool_outputs' -%}
+ {%- if ns.in_tool -%}
+ {{- raise_exception("Cannot have both tool outputs as separate messages and tool outputs as blocks") -}}
+ {%- endif -%}
+ {{ '[' }}
+ {%- for tool_output in block.outputs -%}
+ {{- tool_output.output }}
+ {%- if not loop.last -%}
+ {{- ", " }}
+ {%- endif -%}
+ {%- endfor -%}
+ {{- ']' }}
+ {%- elif block.type == 'response' -%}
+ {%- if ns.in_tool -%}
+ {{ ']' }}
+ {%- set ns.in_tool = false -%}
+ {%- endif -%}
+ {%- if (not loop.first and ns.in_inner) or (ns.in_assistant and ns.in_inner) -%}
+ {%- set ns.in_inner = false -%}
+ {{ outer_token }}
+ {%- endif -%}
+ {{ block.text }}
+ {%- else -%}
+ {{- raise_exception("Invalid assistant block type: " + block.type) -}}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{- raise_exception("Invalid assistant content '" + message.content + "', expected " + ns.assistant_format) -}}
+ {%- endif -%}
+ {%- elif "tool_calls" not in message -%}
+ {{- raise_exception("Invalid assistant message " + message) -}}
+ {%- endif -%}
+ {%- if "tool_calls" in message and message.tool_calls -%}
+ {{ tool_calls_token + '[' }}
+ {%- for tool_call in message.tool_calls -%}
+ {%- if tool_call.type == 'function' -%}
+ {%- set function = tool_call.function -%}
+ {{- '{"' + function.name + '": ' + function.arguments + '}' }}
+ {%- if not loop.last -%}
+ {{- ", " }}
+ {%- endif -%}
+ {%- else -%}
+ {{- raise_exception("Invalid tool call type: " + tool_call.type) -}}
+ {%- endif -%}
+ {%- endfor -%}
+ {{ ']' + end_tool_calls_token }}
+ {%- endif -%}
+ {%- elif message.role == 'tool' -%}
+ {%- if not ns.in_assistant -%}
+ {{- raise_exception("Tool message outside of assistant") -}}
+ {%- endif -%}
+ {%- if not ns.in_tool -%}
+ {{ '[' }}
+ {%- set ns.in_tool = true -%}
+ {%- else -%}
+ {{ ", "}}
+ {%- endif -%}
+ {{ message.content }}
+ {%- else -%}
+ {{- raise_exception("Invalid message role") -}}
+ {%- endif -%}
+{%- endfor -%}
+{%- if ns.in_tool -%}
+ {{ ']' }}
+{%- endif -%}
+{%- if add_generation_prompt -%}
+ {{ assistant_token }}
+{%- endif -%}
\ No newline at end of file
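
For orientation, rendering this template for a minimal conversation (one user turn, no tools, thinking disabled, add_generation_prompt set) should produce roughly the following, with the BOS token omitted here and the date supplied by strftime_now:

    <|system_start|>You are Apertus, a helpful assistant created by the SwissAI initiative.
    Knowledge cutoff: 2024-04
    Current date: 2025-01-01<|system_end|><|developer_start|>Deliberation: disabled
    Tool Capabilities: disabled<|developer_end|><|user_start|>Hi<|user_end|><|assistant_start|>
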
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 4e8d54c4193..4fd083aa048 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -99,6 +99,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
{ LLM_ARCH_LLADA_MOE, "llada-moe" },
{ LLM_ARCH_SEED_OSS, "seed_oss" },
{ LLM_ARCH_GROVEMOE, "grovemoe" },
+ { LLM_ARCH_APERTUS, "apertus" },
{ LLM_ARCH_UNKNOWN, "(unknown)" },
};
@@ -256,6 +257,11 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
{ LLM_KV_ADAPTER_LORA_PROMPT_PREFIX, "adapter.lora.prompt_prefix" },
{ LLM_KV_ADAPTER_ALORA_INVOCATION_TOKENS, "adapter.alora.invocation_tokens" },
+ { LLM_KV_XIELU_ALPHA_N, "xielu.alpha_n" },
+ { LLM_KV_XIELU_ALPHA_P, "xielu.alpha_p" },
+ { LLM_KV_XIELU_BETA, "xielu.beta" },
+ { LLM_KV_XIELU_EPS, "xielu.eps" },
+
// deprecated
{ LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" },
{ LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" },
@@ -2119,6 +2125,25 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
{ LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }
},
},
+ {
+ LLM_ARCH_APERTUS,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
{
LLM_ARCH_DREAM,
{
diff --git a/src/llama-arch.h b/src/llama-arch.h
index b5c6f3d76a6..bc4b04bb4e0 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -103,6 +103,7 @@ enum llm_arch {
LLM_ARCH_LLADA_MOE,
LLM_ARCH_SEED_OSS,
LLM_ARCH_GROVEMOE,
+ LLM_ARCH_APERTUS,
LLM_ARCH_UNKNOWN,
};
@@ -260,6 +261,11 @@ enum llm_kv {
LLM_KV_SHORTCONV_L_CACHE,
+ LLM_KV_XIELU_ALPHA_N,
+ LLM_KV_XIELU_ALPHA_P,
+ LLM_KV_XIELU_BETA,
+ LLM_KV_XIELU_EPS,
+
// deprecated:
LLM_KV_TOKENIZER_PREFIX_ID,
LLM_KV_TOKENIZER_SUFFIX_ID,
diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index 132cf3ac76a..f29b23eeffe 100644
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -169,6 +169,12 @@ struct llama_hparams {
uint32_t laurel_rank = 64;
uint32_t n_embd_altup = 256;
+ // xIELU
+ std::array<float, LLAMA_MAX_LAYERS> xielu_alpha_n;
+ std::array<float, LLAMA_MAX_LAYERS> xielu_alpha_p;
+ std::array<float, LLAMA_MAX_LAYERS> xielu_beta;
+ std::array<float, LLAMA_MAX_LAYERS> xielu_eps;
+
// needed by encoder-decoder models (e.g. T5, FLAN-T5)
// ref: https://github.com/ggerganov/llama.cpp/pull/8141
llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index 8182a9adf53..aa3a65f87a5 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -465,6 +465,8 @@ namespace GGUFMeta {
// TODO: this is not very clever - figure out something better
template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required);
template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required);
+ template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required);
+
llama_model_loader::llama_model_loader(
const std::string & fname,
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index a3c3e4dd78a..19643d60353 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -512,9 +512,13 @@ void llama_model::load_hparams(llama_model_loader & ml) {
llm_arch_is_recurrent(ml.get_arch()));
std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
-
std::fill(hparams.swa_layers.begin(), hparams.swa_layers.end(), 0);
+ std::fill(hparams.xielu_alpha_n.begin(), hparams.xielu_alpha_n.end(), 0.0f);
+ std::fill(hparams.xielu_alpha_p.begin(), hparams.xielu_alpha_p.end(), 0.0f);
+ std::fill(hparams.xielu_beta.begin(), hparams.xielu_beta.end(), 0.0f);
+ std::fill(hparams.xielu_eps.begin(), hparams.xielu_eps.end(), 0.0f);
+
ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer, false);
ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
@@ -2033,6 +2037,19 @@ void llama_model::load_hparams(llama_model_loader & ml) {
default: type = LLM_TYPE_UNKNOWN;
}
} break;
+ case LLM_ARCH_APERTUS:
+ {
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+ ml.get_key_or_arr(LLM_KV_XIELU_ALPHA_N, hparams.xielu_alpha_n, hparams.n_layer);
+ ml.get_key_or_arr(LLM_KV_XIELU_ALPHA_P, hparams.xielu_alpha_p, hparams.n_layer);
+ ml.get_key_or_arr(LLM_KV_XIELU_BETA, hparams.xielu_beta, hparams.n_layer);
+ ml.get_key_or_arr(LLM_KV_XIELU_EPS, hparams.xielu_eps, hparams.n_layer);
+
+ switch (hparams.n_layer) {
+ case 32: type = LLM_TYPE_8B; break;
+ default: type = LLM_TYPE_UNKNOWN;
+ }
+ } break;
default: throw std::runtime_error("unsupported model architecture");
}
@@ -5915,6 +5932,48 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
layer.ffn_up_chexps = create_tensor(tn(LLM_TENSOR_FFN_UP_CHEXPS, "weight", i), { n_embd, n_ff_chexp, n_chunk_expert}, 0);
}
} break;
+ case LLM_ARCH_APERTUS:
+ {
+ tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
+
+ // output
+ output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
+ output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, 0);
+
+ for (int i = 0; i < n_layer; ++i) {
+ auto & layer = layers[i];
+
+ layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
+
+ if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
+ layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+ layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+ } else {
+ layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), { n_rot/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+ }
+
+ layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd_head_k * n_head }, 0);
+ layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
+ layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
+ layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd_head_k * n_head, n_embd }, 0);
+
+ // optional bias tensors
+ layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), { n_embd }, TENSOR_NOT_REQUIRED);
+ layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), { n_embd_gqa }, TENSOR_NOT_REQUIRED);
+ layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), { n_embd_gqa }, TENSOR_NOT_REQUIRED);
+ layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, TENSOR_NOT_REQUIRED);
+
+ layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
+ layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
+ layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
+
+ // Q and K layernorms for Apertus
+ layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), { n_embd_head_k }, 0);
+ layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), { n_embd_head_k }, TENSOR_NOT_REQUIRED);
+ layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), { n_embd_head_k }, 0);
+ layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), { n_embd_head_k }, TENSOR_NOT_REQUIRED);
+ }
+ } break;
default:
throw std::runtime_error("unknown architecture");
}
@@ -19099,6 +19158,141 @@ struct llm_build_grovemoe : public llm_graph_context {
}
};
+struct llm_build_apertus : public llm_graph_context {
+ llm_build_apertus(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+ const int64_t n_embd_head = hparams.n_embd_head_v;
+
+ GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+ GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+ ggml_tensor * cur;
+ ggml_tensor * inpL;
+
+ inpL = build_inp_embd(model.tok_embd);
+
+ ggml_tensor * inp_pos = build_inp_pos();
+ auto * inp_attn = build_attn_inp_kv();
+
+ const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f / sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
+
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+ for (int il = 0; il < n_layer; ++il) {
+ ggml_tensor * inpSA = inpL;
+
+ cur = build_norm(inpL,
+ model.layers[il].attn_norm, nullptr,
+ LLM_NORM_RMS, il);
+ cb(cur, "attn_norm", il);
+
+ // self-attention
+ {
+ ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
+
+ // compute Q and K and RoPE them
+ ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+ cb(Qcur, "Qcur", il);
+
+ ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+ cb(Kcur, "Kcur", il);
+
+ ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+ cb(Vcur, "Vcur", il);
+
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
+ cb(Qcur, "Qcur_normed", il);
+
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+ Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
+ cb(Kcur, "Kcur_normed", il);
+
+ Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+ Qcur = ggml_rope_ext(
+ ctx0, Qcur, inp_pos, rope_factors,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+
+ Kcur = ggml_rope_ext(
+ ctx0, Kcur, inp_pos, rope_factors,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+
+ cb(Qcur, "Qcur_pos", il);
+ cb(Kcur, "Kcur_pos", il);
+ cb(Vcur, "Vcur_pos", il);
+
+ cur = build_attn(inp_attn,
+ model.layers[il].wo, model.layers[il].bo,
+ Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
+ cb(cur, "attn_out", il);
+ }
+
+ if (il == n_layer - 1 && inp_out_ids) {
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+ }
+
+ ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+ cb(ffn_inp, "ffn_inp", il);
+
+ // feed-forward network with xIELU activation
+ {
+ cur = build_norm(ffn_inp,
+ model.layers[il].ffn_norm, nullptr,
+ LLM_NORM_RMS, il);
+ cb(cur, "ffn_norm", il);
+
+ // Up projection
+ ggml_tensor * up = build_lora_mm(model.layers[il].ffn_up, cur);
+ cb(up, "ffn_up", il);
+
+ float alpha_n_val = hparams.xielu_alpha_n[il];
+ float alpha_p_val = hparams.xielu_alpha_p[il];
+ float beta_val = hparams.xielu_beta[il];
+ float eps_val = hparams.xielu_eps[il];
+
+ // Apply xIELU activation
+ ggml_tensor * activated = ggml_xielu(ctx0, up, alpha_n_val, alpha_p_val, beta_val, eps_val);
+ cb(activated, "ffn_xielu", il);
+
+ // Down projection
+ cur = build_lora_mm(model.layers[il].ffn_down, activated);
+ cb(cur, "ffn_down", il);
+ }
+
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "ffn_out", il);
+
+ cur = build_cvec(cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ cur = inpL;
+
+ cur = build_norm(cur,
+ model.output_norm, nullptr,
+ LLM_NORM_RMS, -1);
+
+ cb(cur, "result_norm", -1);
+ res->t_embd = cur;
+
+ // lm_head
+ cur = build_lora_mm(model.output, cur);
+
+ cb(cur, "result_output", -1);
+ res->t_logits = cur;
+
+ ggml_build_forward_expand(gf, cur);
+ }
+};
+
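
One small detail in llm_build_apertus above: when hparams.f_attention_scale is zero, the attention scale falls back to 1/sqrt(n_embd_head). A trivial illustration of that fallback; the head size here is a hypothetical value:

    // Reproduces the kq_scale fallback used in llm_build_apertus above.
    #include <cmath>
    #include <cstdio>

    int main() {
        const float f_attention_scale = 0.0f;   // assumed unset in the GGUF
        const int   n_embd_head       = 128;    // hypothetical head size
        const float kq_scale = f_attention_scale == 0.0f
                                   ? 1.0f / std::sqrt((float) n_embd_head)
                                   : f_attention_scale;
        std::printf("kq_scale = %f\n", kq_scale);   // ~0.088388 for 128
        return 0;
    }
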
llama_memory_i * llama_model::create_memory(const llama_memory_params & params, llama_cparams & cparams) const {
llama_memory_i * res;
@@ -19629,6 +19823,10 @@ ggml_cgraph * llama_model::build_graph(const llm_graph_params & params) const {
{
llm = std::make_unique<llm_build_grovemoe>(*this, params);
} break;
+ case LLM_ARCH_APERTUS:
+ {
+ llm = std::make_unique<llm_build_apertus>(*this, params);
+ } break;
default:
GGML_ABORT("fatal error");
}
@@ -19835,6 +20033,7 @@ llama_rope_type llama_model_rope_type(const llama_model * model) {
case LLM_ARCH_GLM4_MOE:
case LLM_ARCH_SEED_OSS:
case LLM_ARCH_GROVEMOE:
+ case LLM_ARCH_APERTUS:
return LLAMA_ROPE_TYPE_NEOX;
case LLM_ARCH_QWEN2VL:
diff --git a/src/llama-model.h b/src/llama-model.h
index d73ce969323..eec564e70b6 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -380,6 +380,12 @@ struct llama_layer {
// openai-moe
struct ggml_tensor * attn_sinks = nullptr;
+ // xIELU activation parameters for Apertus
+ struct ggml_tensor * ffn_act_alpha_n = nullptr;
+ struct ggml_tensor * ffn_act_alpha_p = nullptr;
+ struct ggml_tensor * ffn_act_beta = nullptr;
+ struct ggml_tensor * ffn_act_eps = nullptr;
+
struct llama_layer_posnet posnet;
struct llama_layer_convnext convnext;
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index 62d815cc268..c1e45972e54 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -3752,9 +3752,10 @@ struct test_soft_max : public test_case {
const std::array<int64_t, 2> nr23; // broadcast only dims 2 and 3
const float scale;
const float max_bias;
+ const bool inplace;
std::string vars() override {
- return VARS_TO_STR8(type, ne, mask, sinks, m_prec, nr23, scale, max_bias);
+ return VARS_TO_STR9(type, ne, mask, sinks, m_prec, nr23, scale, max_bias, inplace);
}
// the 1024 test with bias occasionally fails:
@@ -3770,8 +3771,9 @@ struct test_soft_max : public test_case {
ggml_type m_prec = GGML_TYPE_F32,
std::array<int64_t, 2> nr23 = {1, 1},
float scale = 1.0f,
- float max_bias = 0.0f)
- : type(type), ne(ne), mask(mask), sinks(sinks), m_prec(m_prec), nr23(nr23), scale(scale), max_bias(max_bias) {}
+ float max_bias = 0.0f,
+ bool inplace = false)
+ : type(type), ne(ne), mask(mask), sinks(sinks), m_prec(m_prec), nr23(nr23), scale(scale), max_bias(max_bias), inplace(inplace) {}
ggml_tensor * build_graph(ggml_context * ctx) override {
ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2]*nr23[0], ne[3]*nr23[1]);
@@ -3790,7 +3792,12 @@ struct test_soft_max : public test_case {
ggml_set_name(sinks, "sinks");
}
- ggml_tensor * out = ggml_soft_max_ext(ctx, a, mask, scale, max_bias);
+ ggml_tensor * out;
+ if (inplace) {
+ out = ggml_soft_max_ext_inplace(ctx, a, mask, scale, max_bias);
+ } else {
+ out = ggml_soft_max_ext(ctx, a, mask, scale, max_bias);
+ }
ggml_soft_max_add_sinks(out, sinks);
ggml_set_name(out, "out");
@@ -6562,6 +6569,9 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
}
}
}
+ // inplace tests
+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, mask, sinks, GGML_TYPE_F32, {1, 1}, 0.1f, 0.0f, true));
+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, mask, sinks, GGML_TYPE_F16, {1, 1}, 0.1f, 0.0f, true));
}
}
test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, true, GGML_TYPE_F32, {1, 1}, 0.1f, 0.0f));
diff --git a/tests/test-barrier.cpp b/tests/test-barrier.cpp
index d85bf912b22..04c27761dc8 100644
--- a/tests/test-barrier.cpp
+++ b/tests/test-barrier.cpp
@@ -1,6 +1,5 @@
#include "ggml.h"
#include "ggml-cpu.h"
-#include "ggml-backend.h"
#include
#include
@@ -8,12 +7,13 @@
#include
#include
#include
+#include
#define MAX_NARGS 2
int main(int argc, char *argv[]) {
- int n_threads = 4;
+ int n_threads = std::max(1, std::min(4, (int) std::thread::hardware_concurrency()));
int n_rounds = 100;
if (argc > 1) {
diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp
index ce0f4b0a2a9..9cd67e3ef49 100644
--- a/tests/test-chat.cpp
+++ b/tests/test-chat.cpp
@@ -2054,6 +2054,79 @@ static void test_template_output_parsers() {
/* .parse_tool_calls = */ true,
}));
}
+ {
+ auto tmpls = read_templates("models/templates/Apertus-8B-Instruct.jinja");
+ std::vector<std::string> end_tokens{ "<|assistant_end|>" };
+
+ assert_equals(COMMON_CHAT_FORMAT_APERTUS, common_chat_templates_apply(tmpls.get(), inputs_no_tools).format);
+ assert_equals(COMMON_CHAT_FORMAT_APERTUS, common_chat_templates_apply(tmpls.get(), inputs_tools).format);
+
+ // Test parsing regular content
+ assert_msg_equals(message_assist,
+ common_chat_parse(
+ "Hello, world!\nWhat's up?",
+ /* is_partial= */ false,
+ {COMMON_CHAT_FORMAT_APERTUS}));
+
+ // Test parsing content with thinking
+ assert_msg_equals(message_assist_thoughts,
+ common_chat_parse(
+ "<|inner_prefix|>I'm\nthinking<|inner_suffix|>Hello, world!\nWhat's up?",
+ /* is_partial= */ false,
+ {
+ /* .format = */ COMMON_CHAT_FORMAT_APERTUS,
+ /* .reasoning_format = */ COMMON_REASONING_FORMAT_DEEPSEEK,
+ }));
+
+ // Test parsing tool calls
+ assert_msg_equals(message_assist_call,
+ common_chat_parse(
+ "<|tools_prefix|>[{\"special_function\": {\"arg1\": 1}}]<|tools_suffix|>",
+ /* is_partial= */ false,
+ {COMMON_CHAT_FORMAT_APERTUS}));
+
+ // Test parsing tool calls with thinking
+ assert_msg_equals(message_assist_call_thoughts,
+ common_chat_parse(
+ "<|inner_prefix|>I'm\nthinking<|inner_suffix|><|tools_prefix|>[{\"special_function\": {\"arg1\": 1}}]<|tools_suffix|>",
+ /* is_partial= */ false,
+ {
+ /* .format = */ COMMON_CHAT_FORMAT_APERTUS,
+ /* .reasoning_format = */ COMMON_REASONING_FORMAT_DEEPSEEK
+ }));
+
+ // Test tool calls with extra content
+ assert_msg_equals(message_assist_call_content,
+ common_chat_parse(
+ "<|tools_prefix|>[{\"special_function\": {\"arg1\": 1}}]<|tools_suffix|>Hello, world!\nWhat's up?",
+ /* is_partial= */ false,
+ {COMMON_CHAT_FORMAT_APERTUS}
+ ));
+
+ // Test tool calls with extra content AND thinking
+ assert_msg_equals(message_assist_call_thoughts_content,
+ common_chat_parse(
+ "<|inner_prefix|>I'm\nthinking<|inner_suffix|><|tools_prefix|>[{\"special_function\": {\"arg1\": 1}}]<|tools_suffix|>Hello, world!\nWhat's up?",
+ /* is_partial= */ false,
+ {
+ /* .format = */ COMMON_CHAT_FORMAT_APERTUS,
+ /* .reasoning_format = */ COMMON_REASONING_FORMAT_DEEPSEEK
+ }));
+
+ // Test template generation for regular content
+ test_templates(tmpls.get(), end_tokens, message_assist, tools,
+ "Hello, world!\nWhat's up?",
+ /* expect_grammar_triggered= */ false);
+
+ // Test template generation for tool calls
+ test_templates(tmpls.get(), end_tokens, message_assist_call, tools,
+ "<|tools_prefix|>[{\"special_function\": {\"arg1\": 1}}]<|tools_suffix|>",
+ /* expect_grammar_triggered= */ true
+ );
+
+ assert_equals(true, common_chat_templates_support_enable_thinking(tmpls.get()));
+ }
+
}
static void test_msg_diffs_compute() {
diff --git a/tools/tts/convert_pt_to_hf.py b/tools/tts/convert_pt_to_hf.py
index 8909a65fd1e..ebd55d9657b 100644
--- a/tools/tts/convert_pt_to_hf.py
+++ b/tools/tts/convert_pt_to_hf.py
@@ -12,7 +12,7 @@
from safetensors.torch import save_file
# default
-model_path = './model.pt';
+model_path = './model.pt'
# read from CLI
if len(sys.argv) > 1: