Commit 893e6af

Remove extra code and format
1 parent 6b93b37 commit 893e6af

4 files changed: +14 −32 lines

ggml/include/ggml.h

Lines changed: 0 additions & 6 deletions

@@ -225,13 +225,7 @@
 # define GGML_MAX_NAME 64
 #endif
 
-// For single-thread WASM builds, only use 1 thread
-#if !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)
 #define GGML_DEFAULT_N_THREADS 4
-#else
-#define GGML_DEFAULT_N_THREADS 1
-#endif
-
 #define GGML_DEFAULT_GRAPH_SIZE 2048
 
 #if UINTPTR_MAX == 0xFFFFFFFF
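
The deleted preprocessor branch is what forced GGML_DEFAULT_N_THREADS down to 1 for Emscripten builds without pthreads. With it gone, the default is always 4, so a single-threaded WASM embedder that still wants one thread would pick the count at runtime instead. A minimal, hedged sketch of that caller-side choice (the configure_cpu_backend helper and the exact header are illustrative, not part of this commit):

#include "ggml-cpu.h"   // declares ggml_backend_cpu_set_n_threads()

// Hypothetical host-side helper: choose the thread count at runtime rather
// than relying on a compile-time GGML_DEFAULT_N_THREADS override.
static void configure_cpu_backend(ggml_backend_t cpu_backend) {
#if defined(__EMSCRIPTEN__) && !defined(__EMSCRIPTEN_PTHREADS__)
    ggml_backend_cpu_set_n_threads(cpu_backend, 1);   // no pthreads: stay single-threaded
#else
    ggml_backend_cpu_set_n_threads(cpu_backend, GGML_DEFAULT_N_THREADS);
#endif
}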

ggml/src/ggml-cpu/ggml-cpu.cpp

Lines changed: 0 additions & 7 deletions

@@ -246,11 +246,8 @@ bool ggml_backend_is_cpu(ggml_backend_t backend) {
 void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
     GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));
 
-    // For single-thread WASM builds, do not allow changing the number of threads
-#if !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)
     struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context;
     ctx->n_threads = n_threads;
-#endif
 }
 
 void ggml_backend_cpu_set_threadpool(ggml_backend_t backend_cpu, ggml_threadpool_t threadpool) {

@@ -625,14 +622,10 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r
 }
 
 static void * ggml_backend_cpu_get_proc_address(ggml_backend_reg_t reg, const char * name) {
-
-    // For single-thread WASM builds, do not expose a set_n_threads function
-#if !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)
     if (strcmp(name, "ggml_backend_set_n_threads") == 0) {
         ggml_backend_set_n_threads_t fct = ggml_backend_cpu_set_n_threads;
         return (void *)fct;
     }
-#endif
     if (strcmp(name, "ggml_backend_dev_get_extra_bufts") == 0) {
         ggml_backend_dev_get_extra_bufts_t fct = ggml_backend_cpu_device_get_extra_buffers_type;
         return (void *)fct;
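
With these guards removed, ggml_backend_set_n_threads is exposed through the CPU backend's proc-address table on every build, including single-threaded WASM. For context, a hedged sketch of how a frontend typically reaches it through the backend registry (only the ggml calls are existing API; the helper name is illustrative):

#include "ggml-backend.h"

// Look up the optional set_n_threads entry point for a device's backend
// registry and call it if the backend provides one.
static void set_threads_if_supported(ggml_backend_dev_t dev, ggml_backend_t backend, int n_threads) {
    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
    auto set_n_threads = (ggml_backend_set_n_threads_t)
        ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
    if (set_n_threads != nullptr) {
        set_n_threads(backend, n_threads);   // no longer compiled out on WASM builds
    }
}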

ggml/src/ggml-webgpu/ggml-webgpu.cpp

Lines changed: 14 additions & 13 deletions

@@ -1005,19 +1005,20 @@ static webgpu_command ggml_webgpu_unary_op(webgpu_context & ctx, ggml_tensor * s
     };
 
     switch (unary_op) {
-        case GGML_UNARY_OP_XIELU: {
-            // Get float parameters and reinterpret their bit patterns as uint32_t
-            // for passing through the params buffer
-            float alpha_n = ggml_get_op_params_f32(dst, 1);
-            float alpha_p = ggml_get_op_params_f32(dst, 2);
-            float beta = ggml_get_op_params_f32(dst, 3);
-            float eps = ggml_get_op_params_f32(dst, 4);
-            params.push_back(*reinterpret_cast<const uint32_t *>(&alpha_n));
-            params.push_back(*reinterpret_cast<const uint32_t *>(&alpha_p));
-            params.push_back(*reinterpret_cast<const uint32_t *>(&beta));
-            params.push_back(*reinterpret_cast<const uint32_t *>(&eps));
-            break;
-        }
+        case GGML_UNARY_OP_XIELU:
+            {
+                // Get float parameters and reinterpret their bit patterns as uint32_t
+                // for passing through the params buffer
+                float alpha_n = ggml_get_op_params_f32(dst, 1);
+                float alpha_p = ggml_get_op_params_f32(dst, 2);
+                float beta = ggml_get_op_params_f32(dst, 3);
+                float eps = ggml_get_op_params_f32(dst, 4);
+                params.push_back(*reinterpret_cast<const uint32_t *>(&alpha_n));
+                params.push_back(*reinterpret_cast<const uint32_t *>(&alpha_p));
+                params.push_back(*reinterpret_cast<const uint32_t *>(&beta));
+                params.push_back(*reinterpret_cast<const uint32_t *>(&eps));
+                break;
+            }
         default:
             break;
     }
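
Only the brace placement of the XIELU case changes here; the case still forwards its float parameters through the uint32_t params vector by reinterpreting their bit patterns. As a side note, an equivalent way to express that packing is memcpy, which performs the same bit-level copy without type punning through pointers. A small illustrative helper, not part of the commit:

#include <cstdint>
#include <cstring>

// Return the raw IEEE-754 bit pattern of a float as a uint32_t,
// equivalent to the reinterpret_cast lines in the diff above.
static uint32_t pack_f32_bits(float v) {
    uint32_t bits;
    std::memcpy(&bits, &v, sizeof(bits));
    return bits;
}

// e.g. params.push_back(pack_f32_bits(alpha_n));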

tests/test-backend-ops.cpp

Lines changed: 0 additions & 6 deletions

@@ -47,12 +47,6 @@
 # define N_THREADS std::thread::hardware_concurrency()
 #endif
 
-#ifdef __EMSCRIPTEN__
-# define N_THREADS 1
-#else
-# define N_THREADS std::thread::hardware_concurrency()
-#endif
-
 static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) {
     size_t nels = ggml_nelements(tensor);
     std::vector<float> data(nels);
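
The removed lines were an exact duplicate of the N_THREADS selection that already appears just above this hunk. One detail about the surviving definition: std::thread::hardware_concurrency() may return 0 when the value cannot be determined, so a harness wanting a strictly positive count could clamp it. A hedged sketch, not part of this commit:

#include <algorithm>
#include <thread>

// Fall back to 1 when hardware_concurrency() reports 0 (unknown).
static unsigned test_thread_count() {
    return std::max(1u, std::thread::hardware_concurrency());
}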
