/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c
Line | Count | Source |
1 | | #define _CRT_SECURE_NO_DEPRECATE // Disables "unsafe" warnings on Windows |
2 | | #define _USE_MATH_DEFINES // For M_PI on MSVC |
3 | | |
4 | | #include "ggml-backend-impl.h" |
5 | | #include "ggml-backend.h" |
6 | | #include "traits.h" |
7 | | #include "ggml-cpu-impl.h" |
8 | | #include "ggml-cpu.h" |
9 | | #include "ggml-impl.h" |
10 | | #include "quants.h" |
11 | | #include "ggml-threading.h" |
12 | | #include "unary-ops.h" |
13 | | #include "binary-ops.h" |
14 | | #include "vec.h" |
15 | | #include "ops.h" |
16 | | #include "ggml.h" |
17 | | |
18 | | #if defined(_MSC_VER) || defined(__MINGW32__) |
19 | | #include <malloc.h> // using malloc.h with MSC/MINGW |
20 | | #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) |
21 | | #include <alloca.h> |
22 | | #endif |
23 | | |
24 | | #include <assert.h> |
25 | | #include <errno.h> |
26 | | #include <time.h> |
27 | | #include <math.h> |
28 | | #include <stdlib.h> |
29 | | #include <string.h> |
30 | | #include <stdint.h> |
31 | | #include <inttypes.h> |
32 | | #include <stdio.h> |
33 | | #include <float.h> |
34 | | #include <limits.h> |
35 | | #include <stdarg.h> |
36 | | #include <signal.h> |
37 | | #if defined(__gnu_linux__) |
38 | | #include <syscall.h> |
39 | | #endif |
40 | | |
41 | | #ifdef GGML_USE_OPENMP |
42 | | #include <omp.h> |
43 | | #endif |
44 | | |
45 | | #if defined(__ARM_FEATURE_SVE) || defined(__ARM_FEATURE_MATMUL_INT8) |
46 | | #undef GGML_USE_LLAMAFILE |
47 | | #endif |
48 | | |
49 | | #ifdef GGML_USE_LLAMAFILE |
50 | | #include "llamafile/sgemm.h" |
51 | | #endif |
52 | | |
53 | | // Note: once we move threading into a separate C++ file, we will use |
54 | | // std::hardware_destructive_interference_size instead of hardcoding the cache-line size here, |
55 | | // and we'll use C++ attribute syntax. |
56 | | #define GGML_CACHE_LINE 64 |
57 | | |
58 | | #if defined(__clang__) || defined(__GNUC__) |
59 | | #define GGML_CACHE_ALIGN __attribute__((aligned(GGML_CACHE_LINE))) |
60 | | #endif |
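| | // [editor's sketch, not part of the original file] GGML_CACHE_ALIGN exists to keep |
| | // frequently-written atomics on separate cache lines, so threads hammering one |
| | // counter do not keep invalidating the line that holds another (false sharing). |
| | // A hypothetical use, mirroring the threadpool struct further down: |
| | // |
| | //     struct counters { |
| | //         GGML_CACHE_ALIGN atomic_int a; // each member starts its own 64-byte line |
| | //         GGML_CACHE_ALIGN atomic_int b; |
| | //     }; |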
61 | | |
62 | | #if defined(__has_feature) |
63 | | #if __has_feature(thread_sanitizer) |
64 | | #define GGML_TSAN_ENABLED 1 |
65 | | #endif |
66 | | #else // __has_feature |
67 | | #if defined(__SANITIZE_THREAD__) |
68 | | #define GGML_TSAN_ENABLED 1 |
69 | | #endif |
70 | | #endif // __has_feature |
71 | | |
72 | 2 | #define UNUSED GGML_UNUSED |
73 | | #define SWAP(x, y, T) do { T SWAP = x; (x) = y; (y) = SWAP; } while (0) |
74 | | |
75 | | // precomputed f32 table for f16 (256 KB) (simd-mappings.h) |
76 | | float ggml_table_f32_f16[1 << 16]; |
77 | | |
78 | | #if defined(__ARM_ARCH) |
79 | | struct ggml_arm_arch_features_type { |
80 | | int sve_cnt; |
81 | | } ggml_arm_arch_features = { 0 }; |
82 | | #endif |
83 | | |
84 | | #if defined(__riscv) |
85 | | struct ggml_riscv_arch_features_type { |
86 | | int rvv_vlen; |
87 | | } ggml_riscv_arch_features = { 0 }; |
88 | | #endif |
89 | | |
90 | | #if defined(_WIN32) |
91 | | |
92 | | #define WIN32_LEAN_AND_MEAN |
93 | | #ifndef NOMINMAX |
94 | | #define NOMINMAX |
95 | | #endif |
96 | | #include <windows.h> |
97 | | |
98 | | #if defined(_MSC_VER) && !defined(__clang__) |
99 | | #define GGML_CACHE_ALIGN __declspec(align(GGML_CACHE_LINE)) |
100 | | |
101 | | typedef volatile LONG atomic_int; |
102 | | typedef atomic_int atomic_bool; |
103 | | typedef atomic_int atomic_flag; |
104 | | |
105 | | #define ATOMIC_FLAG_INIT 0 |
106 | | |
107 | | typedef enum { |
108 | | memory_order_relaxed, |
109 | | memory_order_consume, |
110 | | memory_order_acquire, |
111 | | memory_order_release, |
112 | | memory_order_acq_rel, |
113 | | memory_order_seq_cst |
114 | | } memory_order; |
115 | | |
116 | | static void atomic_store(atomic_int * ptr, LONG val) { |
117 | | InterlockedExchange(ptr, val); |
118 | | } |
119 | | static void atomic_store_explicit(atomic_int * ptr, LONG val, memory_order mo) { |
120 | | // TODO: add support for explicit memory order |
121 | | InterlockedExchange(ptr, val); |
122 | | } |
123 | | static LONG atomic_load(atomic_int * ptr) { |
124 | | return InterlockedCompareExchange(ptr, 0, 0); |
125 | | } |
126 | | static LONG atomic_load_explicit(atomic_int * ptr, memory_order mo) { |
127 | | // TODO: add support for explicit memory order |
128 | | return InterlockedCompareExchange(ptr, 0, 0); |
129 | | } |
130 | | static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) { |
131 | | return InterlockedExchangeAdd(ptr, inc); |
132 | | } |
133 | | static LONG atomic_fetch_add_explicit(atomic_int * ptr, LONG inc, memory_order mo) { |
134 | | // TODO: add support for explicit memory order |
135 | | return InterlockedExchangeAdd(ptr, inc); |
136 | | } |
137 | | static atomic_bool atomic_flag_test_and_set(atomic_flag * ptr) { |
138 | | return InterlockedExchange(ptr, 1); |
139 | | } |
140 | | static void atomic_flag_clear(atomic_flag * ptr) { |
141 | | InterlockedExchange(ptr, 0); |
142 | | } |
143 | | static void atomic_thread_fence(memory_order mo) { |
144 | | MemoryBarrier(); |
145 | | } |
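| | // [editor's note] the shims above ignore the requested memory_order and always issue |
| | // full-barrier Interlocked* operations, i.e. they are at least as strong as seq_cst; |
| | // a call like atomic_store_explicit(&x, 1, memory_order_relaxed) is therefore |
| | // correct on MSVC, just stronger (and slower) than a true relaxed store. |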
146 | | #else // clang |
147 | | #include <stdatomic.h> |
148 | | #endif |
149 | | |
150 | | typedef HANDLE pthread_t; |
151 | | |
152 | | typedef DWORD thread_ret_t; |
153 | | static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) { |
154 | | (void) unused; |
155 | | HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL); |
156 | | if (handle == NULL) |
157 | | { |
158 | | return EAGAIN; |
159 | | } |
160 | | |
161 | | *out = handle; |
162 | | return 0; |
163 | | } |
164 | | |
165 | | static int pthread_join(pthread_t thread, void * unused) { |
166 | | (void) unused; |
167 | | int ret = (int) WaitForSingleObject(thread, INFINITE); |
168 | | CloseHandle(thread); |
169 | | return ret; |
170 | | } |
171 | | |
172 | | static int sched_yield (void) { |
173 | | Sleep (0); |
174 | | return 0; |
175 | | } |
176 | | #else |
177 | | |
178 | | #include <pthread.h> |
179 | | #include <stdatomic.h> |
180 | | #include <sched.h> |
181 | | #if defined(__FreeBSD__) |
182 | | #include <pthread_np.h> |
183 | | #endif |
184 | | |
185 | | typedef void * thread_ret_t; |
186 | | |
187 | | #include <sys/types.h> |
188 | | #include <sys/stat.h> |
189 | | #include <unistd.h> |
190 | | |
191 | | #endif |
192 | | |
193 | | typedef pthread_t ggml_thread_t; |
194 | | |
195 | 0 | #define GGML_THREADPOOL_N_THREADS_MASK (0xffffU) |
196 | 0 | #define GGML_THREADPOOL_N_THREADS_BITS (16) |
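| | // [editor's inference, from how ggml_barrier() decodes n_graph below] the threadpool |
| | // appears to pack the active thread count into the low 16 bits of n_graph, with a |
| | // graph sequence counter above it: |
| | // |
| | //     value     = (graph_seq << GGML_THREADPOOL_N_THREADS_BITS) | n_threads; |
| | //     n_threads = value & GGML_THREADPOOL_N_THREADS_MASK; |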
197 | | |
198 | | #if defined(__APPLE__) |
199 | | #include <unistd.h> |
200 | | #include <mach/mach.h> |
201 | | #include <TargetConditionals.h> |
202 | | #endif |
203 | | |
204 | | static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = { |
205 | | [GGML_TYPE_F32] = { |
206 | | .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp32, |
207 | | .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32, |
208 | | .vec_dot_type = GGML_TYPE_F32, |
209 | | .nrows = 1, |
210 | | }, |
211 | | [GGML_TYPE_F16] = { |
212 | | .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp16, |
213 | | .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16, |
214 | | .vec_dot_type = GGML_TYPE_F16, |
215 | | .nrows = 1, |
216 | | }, |
217 | | [GGML_TYPE_Q4_0] = { |
218 | | .from_float = quantize_row_q4_0, |
219 | | .vec_dot = ggml_vec_dot_q4_0_q8_0, |
220 | | .vec_dot_type = GGML_TYPE_Q8_0, |
221 | | #if defined (__ARM_FEATURE_MATMUL_INT8) |
222 | | .nrows = 2, |
223 | | #else |
224 | | .nrows = 1, |
225 | | #endif |
226 | | }, |
227 | | [GGML_TYPE_Q4_1] = { |
228 | | .from_float = quantize_row_q4_1, |
229 | | .vec_dot = ggml_vec_dot_q4_1_q8_1, |
230 | | .vec_dot_type = GGML_TYPE_Q8_1, |
231 | | #if defined (__ARM_FEATURE_MATMUL_INT8) |
232 | | .nrows = 2, |
233 | | #else |
234 | | .nrows = 1, |
235 | | #endif |
236 | | }, |
237 | | [GGML_TYPE_Q5_0] = { |
238 | | .from_float = quantize_row_q5_0, |
239 | | .vec_dot = ggml_vec_dot_q5_0_q8_0, |
240 | | .vec_dot_type = GGML_TYPE_Q8_0, |
241 | | .nrows = 1, |
242 | | }, |
243 | | [GGML_TYPE_Q5_1] = { |
244 | | .from_float = quantize_row_q5_1, |
245 | | .vec_dot = ggml_vec_dot_q5_1_q8_1, |
246 | | .vec_dot_type = GGML_TYPE_Q8_1, |
247 | | .nrows = 1, |
248 | | }, |
249 | | [GGML_TYPE_Q8_0] = { |
250 | | .from_float = quantize_row_q8_0, |
251 | | .vec_dot = ggml_vec_dot_q8_0_q8_0, |
252 | | .vec_dot_type = GGML_TYPE_Q8_0, |
253 | | #if defined (__ARM_FEATURE_MATMUL_INT8) |
254 | | .nrows = 2, |
255 | | #else |
256 | | .nrows = 1, |
257 | | #endif |
258 | | }, |
259 | | [GGML_TYPE_Q8_1] = { |
260 | | .from_float = quantize_row_q8_1, |
261 | | .vec_dot_type = GGML_TYPE_Q8_1, |
262 | | .nrows = 1, |
263 | | }, |
264 | | [GGML_TYPE_MXFP4] = { |
265 | | .from_float = quantize_row_mxfp4, |
266 | | .vec_dot = ggml_vec_dot_mxfp4_q8_0, |
267 | | .vec_dot_type = GGML_TYPE_Q8_0, |
268 | | .nrows = 1, |
269 | | }, |
270 | | [GGML_TYPE_Q2_K] = { |
271 | | .from_float = quantize_row_q2_K, |
272 | | .vec_dot = ggml_vec_dot_q2_K_q8_K, |
273 | | .vec_dot_type = GGML_TYPE_Q8_K, |
274 | | .nrows = 1, |
275 | | }, |
276 | | [GGML_TYPE_Q3_K] = { |
277 | | .from_float = quantize_row_q3_K, |
278 | | .vec_dot = ggml_vec_dot_q3_K_q8_K, |
279 | | .vec_dot_type = GGML_TYPE_Q8_K, |
280 | | .nrows = 1, |
281 | | }, |
282 | | [GGML_TYPE_Q4_K] = { |
283 | | .from_float = quantize_row_q4_K, |
284 | | .vec_dot = ggml_vec_dot_q4_K_q8_K, |
285 | | .vec_dot_type = GGML_TYPE_Q8_K, |
286 | | #if defined (__ARM_FEATURE_MATMUL_INT8) |
287 | | .nrows = 2, |
288 | | #else |
289 | | .nrows = 1, |
290 | | #endif |
291 | | }, |
292 | | [GGML_TYPE_Q5_K] = { |
293 | | .from_float = quantize_row_q5_K, |
294 | | .vec_dot = ggml_vec_dot_q5_K_q8_K, |
295 | | .vec_dot_type = GGML_TYPE_Q8_K, |
296 | | .nrows = 1, |
297 | | }, |
298 | | [GGML_TYPE_Q6_K] = { |
299 | | .from_float = quantize_row_q6_K, |
300 | | .vec_dot = ggml_vec_dot_q6_K_q8_K, |
301 | | .vec_dot_type = GGML_TYPE_Q8_K, |
302 | | #if defined (__ARM_FEATURE_MATMUL_INT8) |
303 | | .nrows = 2, |
304 | | #else |
305 | | .nrows = 1, |
306 | | #endif |
307 | | }, |
308 | | [GGML_TYPE_IQ2_XXS] = { |
309 | | .from_float = NULL, |
310 | | .vec_dot = ggml_vec_dot_iq2_xxs_q8_K, |
311 | | .vec_dot_type = GGML_TYPE_Q8_K, |
312 | | .nrows = 1, |
313 | | }, |
314 | | [GGML_TYPE_IQ2_XS] = { |
315 | | .from_float = NULL, |
316 | | .vec_dot = ggml_vec_dot_iq2_xs_q8_K, |
317 | | .vec_dot_type = GGML_TYPE_Q8_K, |
318 | | .nrows = 1, |
319 | | }, |
320 | | [GGML_TYPE_IQ3_XXS] = { |
321 | | // NOTE: from_float for iq3 and iq2_s was removed because these quants require initialization in ggml_quantize_init |
322 | | //.from_float = quantize_row_iq3_xxs, |
323 | | .vec_dot = ggml_vec_dot_iq3_xxs_q8_K, |
324 | | .vec_dot_type = GGML_TYPE_Q8_K, |
325 | | .nrows = 1, |
326 | | }, |
327 | | [GGML_TYPE_IQ3_S] = { |
328 | | //.from_float = quantize_row_iq3_s, |
329 | | .vec_dot = ggml_vec_dot_iq3_s_q8_K, |
330 | | .vec_dot_type = GGML_TYPE_Q8_K, |
331 | | .nrows = 1, |
332 | | }, |
333 | | [GGML_TYPE_IQ2_S] = { |
334 | | //.from_float = quantize_row_iq2_s, |
335 | | .vec_dot = ggml_vec_dot_iq2_s_q8_K, |
336 | | .vec_dot_type = GGML_TYPE_Q8_K, |
337 | | .nrows = 1, |
338 | | }, |
339 | | [GGML_TYPE_IQ1_S] = { |
340 | | .from_float = NULL, |
341 | | .vec_dot = ggml_vec_dot_iq1_s_q8_K, |
342 | | .vec_dot_type = GGML_TYPE_Q8_K, |
343 | | .nrows = 1, |
344 | | }, |
345 | | [GGML_TYPE_IQ1_M] = { |
346 | | .from_float = NULL, |
347 | | .vec_dot = ggml_vec_dot_iq1_m_q8_K, |
348 | | .vec_dot_type = GGML_TYPE_Q8_K, |
349 | | .nrows = 1, |
350 | | }, |
351 | | [GGML_TYPE_IQ4_NL] = { |
352 | | .from_float = quantize_row_iq4_nl, |
353 | | .vec_dot = ggml_vec_dot_iq4_nl_q8_0, |
354 | | .vec_dot_type = GGML_TYPE_Q8_0, |
355 | | .nrows = 1, |
356 | | }, |
357 | | [GGML_TYPE_IQ4_XS] = { |
358 | | .from_float = quantize_row_iq4_xs, |
359 | | .vec_dot = ggml_vec_dot_iq4_xs_q8_K, |
360 | | .vec_dot_type = GGML_TYPE_Q8_K, |
361 | | .nrows = 1, |
362 | | }, |
363 | | [GGML_TYPE_Q8_K] = { |
364 | | .from_float = quantize_row_q8_K, |
365 | | }, |
366 | | [GGML_TYPE_BF16] = { |
367 | | .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_bf16, |
368 | | .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16, |
369 | | .vec_dot_type = GGML_TYPE_BF16, |
370 | | .nrows = 1, |
371 | | }, |
372 | | [GGML_TYPE_TQ1_0] = { |
373 | | .from_float = quantize_row_tq1_0, |
374 | | .vec_dot = ggml_vec_dot_tq1_0_q8_K, |
375 | | .vec_dot_type = GGML_TYPE_Q8_K, |
376 | | .nrows = 1, |
377 | | }, |
378 | | [GGML_TYPE_TQ2_0] = { |
379 | | .from_float = quantize_row_tq2_0, |
380 | | .vec_dot = ggml_vec_dot_tq2_0_q8_K, |
381 | | .vec_dot_type = GGML_TYPE_Q8_K, |
382 | | .nrows = 1, |
383 | | }, |
384 | | [GGML_TYPE_I32] = { |
385 | | .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_i32, |
386 | | }, |
387 | | }; |
388 | | |
389 | 0 | const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type) { |
390 | 0 | return &type_traits_cpu[type]; |
391 | 0 | } |
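| | // [editor's usage sketch] ggml_compute_forward_mul_mat() below consumes this table in |
| | // two steps: quantize src1 rows with the from_float of the *vec_dot_type* entry, then |
| | // call the src0 entry's kernel. E.g. for a Q4_0 x F32 mat-mul: |
| | // |
| | //     const struct ggml_type_traits_cpu * tt = ggml_get_type_traits_cpu(GGML_TYPE_Q4_0); |
| | //     // tt->vec_dot_type == GGML_TYPE_Q8_0: quantize the f32 row to q8_0 first |
| | //     type_traits_cpu[tt->vec_dot_type].from_float(src1_row_f32, src1_row_q8, ne00); |
| | //     tt->vec_dot(ne00, &out, 0, q4_0_row, 0, src1_row_q8, 0, 1); // 1 row at a time |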
392 | | |
393 | | // |
394 | | // Threading defs |
395 | | // |
396 | | |
397 | | typedef pthread_t ggml_thread_t; |
398 | | |
399 | | #if defined(_WIN32) |
400 | | |
401 | | typedef CONDITION_VARIABLE ggml_cond_t; |
402 | | typedef SRWLOCK ggml_mutex_t; |
403 | | |
404 | | #define ggml_mutex_init(m) InitializeSRWLock(m) |
405 | | #define ggml_mutex_destroy(m) |
406 | | #define ggml_mutex_lock(m) AcquireSRWLockExclusive(m) |
407 | | #define ggml_mutex_unlock(m) ReleaseSRWLockExclusive(m) |
408 | | #define ggml_mutex_lock_shared(m) AcquireSRWLockShared(m) |
409 | | #define ggml_mutex_unlock_shared(m) ReleaseSRWLockShared(m) |
410 | | |
411 | | #define ggml_cond_init(c) InitializeConditionVariable(c) |
412 | | #define ggml_cond_destroy(c) |
413 | | #define ggml_cond_wait(c, m) SleepConditionVariableSRW(c, m, INFINITE, CONDITION_VARIABLE_LOCKMODE_SHARED) |
414 | | #define ggml_cond_broadcast(c) WakeAllConditionVariable(c) |
415 | | |
416 | | #define ggml_thread_create pthread_create |
417 | | #define ggml_thread_join pthread_join |
418 | | |
419 | | #else |
420 | | |
421 | | typedef pthread_cond_t ggml_cond_t; |
422 | | typedef pthread_mutex_t ggml_mutex_t; |
423 | | |
424 | 0 | #define ggml_mutex_init(m) pthread_mutex_init(m, NULL) |
425 | 0 | #define ggml_mutex_destroy(m) pthread_mutex_destroy(m) |
426 | 0 | #define ggml_mutex_lock(m) pthread_mutex_lock(m) |
427 | 0 | #define ggml_mutex_unlock(m) pthread_mutex_unlock(m) |
428 | 0 | #define ggml_mutex_lock_shared(m) pthread_mutex_lock(m) |
429 | 0 | #define ggml_mutex_unlock_shared(m) pthread_mutex_unlock(m) |
430 | | |
431 | | #define ggml_lock_init(x) UNUSED(x) |
432 | | #define ggml_lock_destroy(x) UNUSED(x) |
433 | | #if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64)) |
434 | | #define ggml_lock_lock(x) _mm_pause() |
435 | | #else |
436 | | #define ggml_lock_lock(x) UNUSED(x) |
437 | | #endif |
438 | | #define ggml_lock_unlock(x) UNUSED(x) |
439 | | |
440 | | #define GGML_LOCK_INITIALIZER 0 |
441 | 0 | #define ggml_cond_init(c) pthread_cond_init(c, NULL) |
442 | 0 | #define ggml_cond_destroy(c) pthread_cond_destroy(c) |
443 | 0 | #define ggml_cond_wait(c, m) pthread_cond_wait(c, m) |
444 | 0 | #define ggml_cond_broadcast(c) pthread_cond_broadcast(c) |
445 | | |
446 | 0 | #define ggml_thread_create pthread_create |
447 | 0 | #define ggml_thread_join pthread_join |
448 | | |
449 | | #endif |
450 | | |
451 | | // Threadpool def |
452 | | struct ggml_threadpool { |
453 | | ggml_mutex_t mutex; // mutex for cond.var |
454 | | ggml_cond_t cond; // cond.var for waiting for new work |
455 | | |
456 | | struct ggml_cgraph * cgraph; |
457 | | struct ggml_cplan * cplan; |
458 | | |
459 | | // synchronization primitives |
460 | | atomic_int n_graph; // updated when there is work to be done (i.e. per graph); holds the graph sequence number and active thread count. |
461 | | atomic_int GGML_CACHE_ALIGN n_barrier; |
462 | | atomic_int GGML_CACHE_ALIGN n_barrier_passed; |
463 | | atomic_int GGML_CACHE_ALIGN current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads. |
464 | | |
465 | | // these are atomic as an annotation for thread-sanitizer |
466 | | atomic_bool stop; // Used for stopping the threadpool altogether |
467 | | atomic_bool pause; // Used for pausing the threadpool or individual threads |
468 | | atomic_int abort; // Used for aborting processing of a graph |
469 | | |
470 | | struct ggml_compute_state * workers; // per thread state |
471 | | int n_threads; // Number of threads in the pool |
472 | | int32_t prio; // Scheduling priority |
473 | | uint32_t poll; // Polling level (0 - no polling) |
474 | | |
475 | | enum ggml_status ec; |
476 | | }; |
477 | | |
478 | | // Per-thread state |
479 | | struct ggml_compute_state { |
480 | | #ifndef GGML_USE_OPENMP |
481 | | ggml_thread_t thrd; |
482 | | int last_graph; |
483 | | bool pending; |
484 | | #endif |
485 | | bool cpumask[GGML_MAX_N_THREADS]; |
486 | | struct ggml_threadpool * threadpool; |
487 | | int ith; |
488 | | }; |
489 | | |
490 | | // Helpers for polling loops |
491 | | #if defined(__aarch64__) && ( defined(__clang__) || defined(__GNUC__) ) |
492 | | static inline void ggml_thread_cpu_relax(void) { |
493 | | __asm__ volatile("yield" ::: "memory"); |
494 | | } |
495 | | #elif defined(__x86_64__) |
496 | 0 | static inline void ggml_thread_cpu_relax(void) { |
497 | 0 | _mm_pause(); |
498 | 0 | } |
499 | | #elif defined(__riscv) |
500 | | static inline void ggml_thread_cpu_relax(void) { |
501 | | #ifdef __riscv_zihintpause |
502 | | __asm__ __volatile__ ("pause"); |
503 | | #else |
504 | | /* Encoding of the pause instruction */ |
505 | | __asm__ __volatile__ (".4byte 0x100000F"); |
506 | | #endif |
507 | | } |
508 | | #else |
509 | | static inline void ggml_thread_cpu_relax(void) {;} |
510 | | #endif |
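| | // [editor's note] these are spin-wait hints, not sleeps: yield/pause (or the raw |
| | // 0x0100000F encoding of RISC-V "pause") tell the core that a sibling hardware thread |
| | // may run, reducing power and memory traffic while ggml_barrier() busy-waits below. |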
511 | | |
512 | | // |
513 | | // NUMA support |
514 | | // |
515 | | |
516 | 0 | #define GGML_NUMA_MAX_NODES 8 |
517 | 0 | #define GGML_NUMA_MAX_CPUS 512 |
518 | | |
519 | | struct ggml_numa_node { |
520 | | uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node |
521 | | uint32_t n_cpus; |
522 | | }; |
523 | | |
524 | | struct ggml_numa_nodes { |
525 | | enum ggml_numa_strategy numa_strategy; |
526 | | struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES]; |
527 | | uint32_t n_nodes; |
528 | | uint32_t total_cpus; // hardware threads on system |
529 | | uint32_t current_node; // node on which main process is executing |
530 | | #if defined(__gnu_linux__) |
531 | | cpu_set_t cpuset; // cpuset from numactl |
532 | | #else |
533 | | uint32_t cpuset; // no NUMA support outside of Linux at this time. Use a portable datatype |
534 | | #endif |
535 | | }; |
536 | | |
537 | | // |
538 | | // ggml state |
539 | | // |
540 | | |
541 | | struct ggml_state { |
542 | | struct ggml_numa_nodes numa; |
543 | | }; |
544 | | |
545 | | static struct ggml_state g_state = {0}; |
546 | | |
547 | 0 | void ggml_barrier(struct ggml_threadpool * tp) { |
548 | 0 | int n_threads = atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK; |
549 | 0 | if (n_threads == 1) { |
550 | 0 | return; |
551 | 0 | } |
552 | | |
553 | | #ifdef GGML_USE_OPENMP |
554 | | #pragma omp barrier |
555 | | #else |
556 | 0 | int n_passed = atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed); |
557 | | |
558 | | // enter barrier (full seq-cst fence) |
559 | 0 | int n_barrier = atomic_fetch_add_explicit(&tp->n_barrier, 1, memory_order_seq_cst); |
560 | |
561 | 0 | if (n_barrier == (n_threads - 1)) { |
562 | | // last thread |
563 | 0 | atomic_store_explicit(&tp->n_barrier, 0, memory_order_relaxed); |
564 | | |
565 | | // exit barrier (full seq-cst fence) |
566 | 0 | atomic_fetch_add_explicit(&tp->n_barrier_passed, 1, memory_order_seq_cst); |
567 | 0 | return; |
568 | 0 | } |
569 | | |
570 | | // wait for other threads |
571 | 0 | while (atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed) == n_passed) { |
572 | 0 | ggml_thread_cpu_relax(); |
573 | 0 | } |
574 | | |
575 | | // exit barrier (full seq-cst fence) |
576 | | // TSAN doesn't support standalone fence yet, we use a dummy read-modify-write instead |
577 | | #ifdef GGML_TSAN_ENABLED |
578 | | atomic_fetch_add_explicit(&tp->n_barrier_passed, 0, memory_order_seq_cst); |
579 | | #else |
580 | 0 | atomic_thread_fence(memory_order_seq_cst); |
581 | 0 | #endif |
582 | 0 | #endif |
583 | 0 | } |
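| | // [editor's usage sketch, hypothetical loop] every worker executes the same node and |
| | // then synchronizes here, so no thread starts node i+1 before node i's output is |
| | // fully written: |
| | // |
| | //     for (int i = 0; i < cgraph->n_nodes; i++) { |
| | //         compute_node(&params, cgraph->nodes[i]); // hypothetical helper |
| | //         ggml_barrier(params.threadpool); |
| | //     } |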
584 | | |
585 | 0 | void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value) { |
586 | 0 | atomic_store_explicit(&tp->current_chunk, value, memory_order_relaxed); |
587 | 0 | } |
588 | | |
589 | 0 | int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value) { |
590 | 0 | return atomic_fetch_add_explicit(&tp->current_chunk, value, memory_order_relaxed); |
591 | 0 | } |
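| | // [editor's note] these two helpers expose current_chunk as a shared work-queue |
| | // cursor: one thread seeds it, then each worker claims the next unit of work with a |
| | // relaxed fetch-add, e.g.: |
| | // |
| | //     int chunk = ggml_threadpool_chunk_add(tp, 1); // returns the claimed chunk index |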
592 | | |
593 | | #if defined(__gnu_linux__) |
594 | 0 | static cpu_set_t ggml_get_numa_affinity(void) { |
595 | 0 | cpu_set_t cpuset; |
596 | 0 | pthread_t thread; |
597 | 0 | thread = pthread_self(); |
598 | 0 | CPU_ZERO(&cpuset); |
599 | 0 | pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset); |
600 | 0 | return cpuset; |
601 | 0 | } |
602 | | #else |
603 | | static uint32_t ggml_get_numa_affinity(void) { |
604 | | return 0; // no NUMA support |
605 | | } |
606 | | #endif |
607 | | |
608 | 0 | void ggml_numa_init(enum ggml_numa_strategy numa_flag) { |
609 | 0 | if (g_state.numa.n_nodes > 0) { |
610 | 0 | fprintf(stderr, "ggml_numa_init: NUMA already initialized\n"); |
611 | |
612 | 0 | return; |
613 | 0 | } |
614 | | |
615 | 0 | #if defined(__gnu_linux__) |
616 | 0 | struct stat st; |
617 | 0 | char path[256]; |
618 | 0 | int rv; |
619 | | |
620 | | // set numa scheme |
621 | 0 | g_state.numa.numa_strategy = numa_flag; |
622 | |
623 | 0 | GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy); |
624 | |
625 | 0 | g_state.numa.cpuset = ggml_get_numa_affinity(); |
626 | | |
627 | | // enumerate nodes |
628 | 0 | while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) { |
629 | 0 | rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes); |
630 | 0 | GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); |
631 | 0 | if (stat(path, &st) != 0) { break; } |
632 | 0 | ++g_state.numa.n_nodes; |
633 | 0 | } |
634 | | |
635 | | // enumerate CPUs |
636 | 0 | while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) { |
637 | 0 | rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus); |
638 | 0 | GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); |
639 | 0 | if (stat(path, &st) != 0) { break; } |
640 | 0 | ++g_state.numa.total_cpus; |
641 | 0 | } |
642 | |
643 | 0 | GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus); |
644 | | |
645 | | // figure out which node we're on |
646 | 0 | uint current_cpu; |
647 | 0 | int getcpu_ret = 0; |
648 | | #if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 33) || defined(__COSMOPOLITAN__) |
649 | | getcpu_ret = getcpu(¤t_cpu, &g_state.numa.current_node); |
650 | | #else |
651 | | // old glibc doesn't have a wrapper for this call. Fall back on direct syscall |
652 | | # if !defined(SYS_getcpu) && defined(SYS_get_cpu) |
653 | | # define SYS_getcpu SYS_get_cpu // some older glibc versions use this name |
654 | | # endif |
655 | 0 | getcpu_ret = syscall(SYS_getcpu, ¤t_cpu, &g_state.numa.current_node); |
656 | 0 | #endif |
657 | |
658 | 0 | if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) { |
659 | 0 | g_state.numa.n_nodes = 0; |
660 | 0 | return; |
661 | 0 | } |
662 | | |
663 | 0 | GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu); |
664 | |
665 | 0 | for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) { |
666 | 0 | struct ggml_numa_node * node = &g_state.numa.nodes[n]; |
667 | 0 | GGML_PRINT_DEBUG("CPUs on node %u:", n); |
668 | 0 | node->n_cpus = 0; |
669 | 0 | for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) { |
670 | 0 | rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c); |
671 | 0 | GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path)); |
672 | 0 | if (stat(path, &st) == 0) { |
673 | 0 | node->cpus[node->n_cpus++] = c; |
674 | 0 | GGML_PRINT_DEBUG(" %u", c); |
675 | 0 | } |
676 | 0 | } |
677 | 0 | GGML_PRINT_DEBUG("\n"); |
678 | 0 | } |
679 | |
680 | 0 | if (ggml_is_numa()) { |
681 | 0 | FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r"); |
682 | 0 | if (fptr != NULL) { |
683 | 0 | char buf[42]; |
684 | 0 | if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) { |
685 | 0 | GGML_LOG_WARN("/proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n"); |
686 | 0 | } |
687 | 0 | fclose(fptr); |
688 | 0 | } |
689 | 0 | } |
690 | | #else |
691 | | UNUSED(numa_flag); |
692 | | // TODO |
693 | | #endif |
694 | 0 | } |
695 | | |
696 | 0 | bool ggml_is_numa(void) { |
697 | 0 | return g_state.numa.n_nodes > 1; |
698 | 0 | } |
699 | | |
700 | | #if defined(__ARM_ARCH) |
701 | | #if defined(__aarch64__) && defined(__ARM_FEATURE_SVE) |
702 | | #include <arm_sve.h> |
703 | | static void ggml_init_arm_arch_features(void) { |
704 | | ggml_arm_arch_features.sve_cnt = svcntb(); |
705 | | } |
706 | | #else |
707 | | static void ggml_init_arm_arch_features(void) {} |
708 | | #endif |
709 | | #endif // __ARM_ARCH |
710 | | |
711 | | #if defined(__riscv) && defined(__riscv_v_intrinsic) |
712 | | #include <riscv_vector.h> |
713 | | static void ggml_init_riscv_arch_features(void) { |
714 | | ggml_riscv_arch_features.rvv_vlen = __riscv_vlenb(); |
715 | | } |
716 | | #else |
717 | 0 | static void ggml_init_riscv_arch_features(void) {} |
718 | | #endif |
719 | | |
720 | 0 | struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) { |
721 | 0 | GGML_ASSERT(!ggml_get_no_alloc(ctx)); |
722 | |
723 | 0 | struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1); |
724 | |
725 | 0 | ggml_set_i32(result, value); |
726 | |
727 | 0 | return result; |
728 | 0 | } |
729 | | |
730 | 0 | struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) { |
731 | 0 | GGML_ASSERT(!ggml_get_no_alloc(ctx)); |
732 | |
733 | 0 | struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1); |
734 | |
735 | 0 | ggml_set_f32(result, value); |
736 | |
737 | 0 | return result; |
738 | 0 | } |
739 | | |
740 | 0 | struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) { |
741 | 0 | const int n = ggml_nrows(tensor); |
742 | 0 | const int nc = tensor->ne[0]; |
743 | 0 | const size_t n1 = tensor->nb[1]; |
744 | |
745 | 0 | char * const data = tensor->data; |
746 | |
747 | 0 | switch (tensor->type) { |
748 | 0 | case GGML_TYPE_I8: |
749 | 0 | { |
750 | 0 | assert(tensor->nb[0] == sizeof(int8_t)); |
751 | 0 | for (int i = 0; i < n; i++) { |
752 | 0 | ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); |
753 | 0 | } |
754 | 0 | } break; |
755 | 0 | case GGML_TYPE_I16: |
756 | 0 | { |
757 | 0 | assert(tensor->nb[0] == sizeof(int16_t)); |
758 | 0 | for (int i = 0; i < n; i++) { |
759 | 0 | ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); |
760 | 0 | } |
761 | 0 | } break; |
762 | 0 | case GGML_TYPE_I32: |
763 | 0 | { |
764 | 0 | assert(tensor->nb[0] == sizeof(int32_t)); |
765 | 0 | for (int i = 0; i < n; i++) { |
766 | 0 | ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); |
767 | 0 | } |
768 | 0 | } break; |
769 | 0 | case GGML_TYPE_F16: |
770 | 0 | { |
771 | 0 | assert(tensor->nb[0] == sizeof(ggml_fp16_t)); |
772 | 0 | for (int i = 0; i < n; i++) { |
773 | 0 | ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); |
774 | 0 | } |
775 | 0 | } break; |
776 | 0 | case GGML_TYPE_BF16: |
777 | 0 | { |
778 | 0 | assert(tensor->nb[0] == sizeof(ggml_fp16_t)); |
779 | 0 | for (int i = 0; i < n; i++) { |
780 | 0 | ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value)); |
781 | 0 | } |
782 | 0 | } break; |
783 | 0 | case GGML_TYPE_F32: |
784 | 0 | { |
785 | 0 | assert(tensor->nb[0] == sizeof(float)); |
786 | 0 | for (int i = 0; i < n; i++) { |
787 | 0 | ggml_vec_set_f32(nc, (float *)(data + i*n1), value); |
788 | 0 | } |
789 | 0 | } break; |
790 | 0 | default: |
791 | 0 | { |
792 | 0 | GGML_ABORT("fatal error"); |
793 | 0 | } |
794 | 0 | } |
795 | | |
796 | 0 | return tensor; |
797 | 0 | } |
798 | | |
799 | 0 | struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) { |
800 | 0 | const int n = ggml_nrows(tensor); |
801 | 0 | const int nc = tensor->ne[0]; |
802 | 0 | const size_t n1 = tensor->nb[1]; |
803 | |
804 | 0 | char * const data = tensor->data; |
805 | |
806 | 0 | switch (tensor->type) { |
807 | 0 | case GGML_TYPE_I8: |
808 | 0 | { |
809 | 0 | assert(tensor->nb[0] == sizeof(int8_t)); |
810 | 0 | for (int i = 0; i < n; i++) { |
811 | 0 | ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value); |
812 | 0 | } |
813 | 0 | } break; |
814 | 0 | case GGML_TYPE_I16: |
815 | 0 | { |
816 | 0 | assert(tensor->nb[0] == sizeof(int16_t)); |
817 | 0 | for (int i = 0; i < n; i++) { |
818 | 0 | ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value); |
819 | 0 | } |
820 | 0 | } break; |
821 | 0 | case GGML_TYPE_I32: |
822 | 0 | { |
823 | 0 | assert(tensor->nb[0] == sizeof(int32_t)); |
824 | 0 | for (int i = 0; i < n; i++) { |
825 | 0 | ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value); |
826 | 0 | } |
827 | 0 | } break; |
828 | 0 | case GGML_TYPE_F16: |
829 | 0 | { |
830 | 0 | assert(tensor->nb[0] == sizeof(ggml_fp16_t)); |
831 | 0 | for (int i = 0; i < n; i++) { |
832 | 0 | ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value)); |
833 | 0 | } |
834 | 0 | } break; |
835 | 0 | case GGML_TYPE_BF16: |
836 | 0 | { |
837 | 0 | assert(tensor->nb[0] == sizeof(ggml_bf16_t)); |
838 | 0 | for (int i = 0; i < n; i++) { |
839 | 0 | ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value)); |
840 | 0 | } |
841 | 0 | } break; |
842 | 0 | case GGML_TYPE_F32: |
843 | 0 | { |
844 | 0 | assert(tensor->nb[0] == sizeof(float)); |
845 | 0 | for (int i = 0; i < n; i++) { |
846 | 0 | ggml_vec_set_f32(nc, (float *)(data + i*n1), value); |
847 | 0 | } |
848 | 0 | } break; |
849 | 0 | default: |
850 | 0 | { |
851 | 0 | GGML_ABORT("fatal error"); |
852 | 0 | } |
853 | 0 | } |
854 | | |
855 | 0 | return tensor; |
856 | 0 | } |
857 | | |
858 | 0 | int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) { |
859 | 0 | if (!ggml_is_contiguous(tensor)) { |
860 | 0 | int64_t id[4] = { 0, 0, 0, 0 }; |
861 | 0 | ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); |
862 | 0 | return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]); |
863 | 0 | } |
864 | 0 | switch (tensor->type) { |
865 | 0 | case GGML_TYPE_I8: |
866 | 0 | { |
867 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); |
868 | 0 | return ((int8_t *)(tensor->data))[i]; |
869 | 0 | } |
870 | 0 | case GGML_TYPE_I16: |
871 | 0 | { |
872 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); |
873 | 0 | return ((int16_t *)(tensor->data))[i]; |
874 | 0 | } |
875 | 0 | case GGML_TYPE_I32: |
876 | 0 | { |
877 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); |
878 | 0 | return ((int32_t *)(tensor->data))[i]; |
879 | 0 | } |
880 | 0 | case GGML_TYPE_F16: |
881 | 0 | { |
882 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); |
883 | 0 | return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); |
884 | 0 | } |
885 | 0 | case GGML_TYPE_BF16: |
886 | 0 | { |
887 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); |
888 | 0 | return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]); |
889 | 0 | } |
890 | 0 | case GGML_TYPE_F32: |
891 | 0 | { |
892 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(float)); |
893 | 0 | return ((float *)(tensor->data))[i]; |
894 | 0 | } |
895 | 0 | default: |
896 | 0 | { |
897 | 0 | GGML_ABORT("fatal error"); |
898 | 0 | } |
899 | 0 | } |
900 | 0 | } |
901 | | |
902 | 0 | void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) { |
903 | 0 | if (!ggml_is_contiguous(tensor)) { |
904 | 0 | int64_t id[4] = { 0, 0, 0, 0 }; |
905 | 0 | ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); |
906 | 0 | ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value); |
907 | 0 | return; |
908 | 0 | } |
909 | 0 | switch (tensor->type) { |
910 | 0 | case GGML_TYPE_I8: |
911 | 0 | { |
912 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(int8_t)); |
913 | 0 | ((int8_t *)(tensor->data))[i] = value; |
914 | 0 | } break; |
915 | 0 | case GGML_TYPE_I16: |
916 | 0 | { |
917 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(int16_t)); |
918 | 0 | ((int16_t *)(tensor->data))[i] = value; |
919 | 0 | } break; |
920 | 0 | case GGML_TYPE_I32: |
921 | 0 | { |
922 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(int32_t)); |
923 | 0 | ((int32_t *)(tensor->data))[i] = value; |
924 | 0 | } break; |
925 | 0 | case GGML_TYPE_F16: |
926 | 0 | { |
927 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t)); |
928 | 0 | ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); |
929 | 0 | } break; |
930 | 0 | case GGML_TYPE_BF16: |
931 | 0 | { |
932 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t)); |
933 | 0 | ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value); |
934 | 0 | } break; |
935 | 0 | case GGML_TYPE_F32: |
936 | 0 | { |
937 | 0 | GGML_ASSERT(tensor->nb[0] == sizeof(float)); |
938 | 0 | ((float *)(tensor->data))[i] = value; |
939 | 0 | } break; |
940 | 0 | default: |
941 | 0 | { |
942 | 0 | GGML_ABORT("fatal error"); |
943 | 0 | } |
944 | 0 | } |
945 | 0 | } |
946 | | |
947 | 0 | int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) { |
948 | 0 | void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; |
949 | 0 | switch (tensor->type) { |
950 | 0 | case GGML_TYPE_I8: |
951 | 0 | return ((int8_t *) data)[0]; |
952 | 0 | case GGML_TYPE_I16: |
953 | 0 | return ((int16_t *) data)[0]; |
954 | 0 | case GGML_TYPE_I32: |
955 | 0 | return ((int32_t *) data)[0]; |
956 | 0 | case GGML_TYPE_F16: |
957 | 0 | return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); |
958 | 0 | case GGML_TYPE_BF16: |
959 | 0 | return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); |
960 | 0 | case GGML_TYPE_F32: |
961 | 0 | return ((float *) data)[0]; |
962 | 0 | default: |
963 | 0 | GGML_ABORT("fatal error"); |
964 | 0 | } |
965 | 0 | } |
966 | | |
967 | 0 | void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) { |
968 | 0 | void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; |
969 | 0 | switch (tensor->type) { |
970 | 0 | case GGML_TYPE_I8: |
971 | 0 | { |
972 | 0 | ((int8_t *)(data))[0] = value; |
973 | 0 | } break; |
974 | 0 | case GGML_TYPE_I16: |
975 | 0 | { |
976 | 0 | ((int16_t *)(data))[0] = value; |
977 | 0 | } break; |
978 | 0 | case GGML_TYPE_I32: |
979 | 0 | { |
980 | 0 | ((int32_t *)(data))[0] = value; |
981 | 0 | } break; |
982 | 0 | case GGML_TYPE_F16: |
983 | 0 | { |
984 | 0 | ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); |
985 | 0 | } break; |
986 | 0 | case GGML_TYPE_BF16: |
987 | 0 | { |
988 | 0 | ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value); |
989 | 0 | } break; |
990 | 0 | case GGML_TYPE_F32: |
991 | 0 | { |
992 | 0 | ((float *)(data))[0] = value; |
993 | 0 | } break; |
994 | 0 | default: |
995 | 0 | { |
996 | 0 | GGML_ABORT("fatal error"); |
997 | 0 | } |
998 | 0 | } |
999 | 0 | } |
1000 | | |
1001 | 0 | float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) { |
1002 | 0 | if (!ggml_is_contiguous(tensor)) { |
1003 | 0 | int64_t id[4] = { 0, 0, 0, 0 }; |
1004 | 0 | ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); |
1005 | 0 | return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]); |
1006 | 0 | } |
1007 | 0 | switch (tensor->type) { |
1008 | 0 | case GGML_TYPE_I8: |
1009 | 0 | { |
1010 | 0 | return ((int8_t *)(tensor->data))[i]; |
1011 | 0 | } |
1012 | 0 | case GGML_TYPE_I16: |
1013 | 0 | { |
1014 | 0 | return ((int16_t *)(tensor->data))[i]; |
1015 | 0 | } |
1016 | 0 | case GGML_TYPE_I32: |
1017 | 0 | { |
1018 | 0 | return ((int32_t *)(tensor->data))[i]; |
1019 | 0 | } |
1020 | 0 | case GGML_TYPE_F16: |
1021 | 0 | { |
1022 | 0 | return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]); |
1023 | 0 | } |
1024 | 0 | case GGML_TYPE_BF16: |
1025 | 0 | { |
1026 | 0 | return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]); |
1027 | 0 | } |
1028 | 0 | case GGML_TYPE_F32: |
1029 | 0 | { |
1030 | 0 | return ((float *)(tensor->data))[i]; |
1031 | 0 | } |
1032 | 0 | default: |
1033 | 0 | { |
1034 | 0 | GGML_ABORT("fatal error"); |
1035 | 0 | } |
1036 | 0 | } |
1037 | 0 | } |
1038 | | |
1039 | 0 | void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) { |
1040 | 0 | if (!ggml_is_contiguous(tensor)) { |
1041 | 0 | int64_t id[4] = { 0, 0, 0, 0 }; |
1042 | 0 | ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]); |
1043 | 0 | ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value); |
1044 | 0 | return; |
1045 | 0 | } |
1046 | 0 | switch (tensor->type) { |
1047 | 0 | case GGML_TYPE_I8: |
1048 | 0 | { |
1049 | 0 | ((int8_t *)(tensor->data))[i] = value; |
1050 | 0 | } break; |
1051 | 0 | case GGML_TYPE_I16: |
1052 | 0 | { |
1053 | 0 | ((int16_t *)(tensor->data))[i] = value; |
1054 | 0 | } break; |
1055 | 0 | case GGML_TYPE_I32: |
1056 | 0 | { |
1057 | 0 | ((int32_t *)(tensor->data))[i] = value; |
1058 | 0 | } break; |
1059 | 0 | case GGML_TYPE_F16: |
1060 | 0 | { |
1061 | 0 | ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value); |
1062 | 0 | } break; |
1063 | 0 | case GGML_TYPE_BF16: |
1064 | 0 | { |
1065 | 0 | ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value); |
1066 | 0 | } break; |
1067 | 0 | case GGML_TYPE_F32: |
1068 | 0 | { |
1069 | 0 | ((float *)(tensor->data))[i] = value; |
1070 | 0 | } break; |
1071 | 0 | default: |
1072 | 0 | { |
1073 | 0 | GGML_ABORT("fatal error"); |
1074 | 0 | } |
1075 | 0 | } |
1076 | 0 | } |
1077 | | |
1078 | 0 | float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) { |
1079 | 0 | void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; |
1080 | 0 | switch (tensor->type) { |
1081 | 0 | case GGML_TYPE_I8: |
1082 | 0 | return ((int8_t *) data)[0]; |
1083 | 0 | case GGML_TYPE_I16: |
1084 | 0 | return ((int16_t *) data)[0]; |
1085 | 0 | case GGML_TYPE_I32: |
1086 | 0 | return ((int32_t *) data)[0]; |
1087 | 0 | case GGML_TYPE_F16: |
1088 | 0 | return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]); |
1089 | 0 | case GGML_TYPE_BF16: |
1090 | 0 | return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]); |
1091 | 0 | case GGML_TYPE_F32: |
1092 | 0 | return ((float *) data)[0]; |
1093 | 0 | default: |
1094 | 0 | GGML_ABORT("fatal error"); |
1095 | 0 | } |
1096 | 0 | } |
1097 | | |
1098 | 0 | void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) { |
1099 | 0 | void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3]; |
1100 | 0 | switch (tensor->type) { |
1101 | 0 | case GGML_TYPE_I8: |
1102 | 0 | { |
1103 | 0 | ((int8_t *)(data))[0] = value; |
1104 | 0 | } break; |
1105 | 0 | case GGML_TYPE_I16: |
1106 | 0 | { |
1107 | 0 | ((int16_t *)(data))[0] = value; |
1108 | 0 | } break; |
1109 | 0 | case GGML_TYPE_I32: |
1110 | 0 | { |
1111 | 0 | ((int32_t *)(data))[0] = value; |
1112 | 0 | } break; |
1113 | 0 | case GGML_TYPE_F16: |
1114 | 0 | { |
1115 | 0 | ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value); |
1116 | 0 | } break; |
1117 | 0 | case GGML_TYPE_BF16: |
1118 | 0 | { |
1119 | 0 | ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value); |
1120 | 0 | } break; |
1121 | 0 | case GGML_TYPE_F32: |
1122 | 0 | { |
1123 | 0 | ((float *)(data))[0] = value; |
1124 | 0 | } break; |
1125 | 0 | default: |
1126 | 0 | { |
1127 | 0 | GGML_ABORT("fatal error"); |
1128 | 0 | } |
1129 | 0 | } |
1130 | 0 | } |
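| | // [editor's usage sketch] the *_nd accessors address an element through the byte |
| | // strides nb[0..3], so they also work on permuted/non-contiguous tensors; the *_1d |
| | // forms above fall back to ggml_unravel_index() for exactly that case: |
| | // |
| | //     float v = ggml_get_f32_nd(t, i0, i1, i2, i3); |
| | //     ggml_set_f32_nd(t, i0, i1, i2, i3, 2.0f * v); |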
1131 | | |
1132 | | //////////////////////////////////////////////////////////////////////////////// |
1133 | | |
1134 | | // ggml_compute_forward_mul_mat |
1135 | | |
1136 | | static void ggml_compute_forward_mul_mat_one_chunk( |
1137 | | const struct ggml_compute_params * params, |
1138 | | struct ggml_tensor * dst, |
1139 | | const enum ggml_type type, |
1140 | | const int64_t num_rows_per_vec_dot, |
1141 | | const int64_t ir0_start, |
1142 | | const int64_t ir0_end, |
1143 | | const int64_t ir1_start, |
1144 | 0 | const int64_t ir1_end) { |
1145 | |
1146 | 0 | const struct ggml_tensor * src0 = dst->src[0]; |
1147 | 0 | const struct ggml_tensor * src1 = dst->src[1]; |
1148 | |
1149 | 0 | GGML_TENSOR_BINARY_OP_LOCALS |
1150 | |
1151 | 0 | const bool src1_cont = ggml_is_contiguous(src1); |
1152 | |
1153 | 0 | ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot; |
1154 | 0 | enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; |
1155 | | |
1156 | | // broadcast factors |
1157 | 0 | const int64_t r2 = ne12 / ne02; |
1158 | 0 | const int64_t r3 = ne13 / ne03; |
1159 | | |
1160 | | //printf("ir0_start = %6lld, ir0_end = %6lld, ir1_start = %6lld, ir1_end = %6lld\n", ir0_start, ir0_end, ir1_start, ir1_end); |
1161 | | |
1162 | | // threads with no work simply yield (not sure if it helps) |
1163 | 0 | if (ir0_start >= ir0_end || ir1_start >= ir1_end) { |
1164 | 0 | return; |
1165 | 0 | } |
1166 | | |
1167 | 0 | const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; |
1168 | 0 | const size_t row_size = ggml_row_size(vec_dot_type, ne10); |
1169 | |
1170 | 0 | assert(ne12 % ne02 == 0); |
1171 | 0 | assert(ne13 % ne03 == 0); |
1172 | | |
1173 | | // block-tiling attempt |
1174 | 0 | const int64_t blck_0 = 16; |
1175 | 0 | const int64_t blck_1 = 16; |
1176 | |
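| | // [editor's note] the 16x16 blocking below tiles the (ir0, ir1) space so a block of |
| | // src0 rows stays cache-hot while it is combined with a block of src1 columns; tmp[] |
| | // buffers up to 16 (x2 for mmla kernels) partial dot products before the strided |
| | // memcpy at the bottom of the loop nest scatters them into dst. |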
1177 | 0 | const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11; |
1178 | | |
1179 | | // attempt to reduce false-sharing (does not seem to make a difference) |
1180 | | // 16 * 2, accounting for mmla kernels |
1181 | 0 | float tmp[32]; |
1182 | |
1183 | 0 | for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { |
1184 | 0 | for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { |
1185 | 0 | for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1 += num_rows_per_vec_dot) { |
1186 | 0 | const int64_t i13 = (ir1 / (ne12 * ne1)); |
1187 | 0 | const int64_t i12 = (ir1 - i13 * ne12 * ne1) / ne1; |
1188 | 0 | const int64_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1); |
1189 | | |
1190 | | // broadcast src0 into src1 |
1191 | 0 | const int64_t i03 = i13 / r3; |
1192 | 0 | const int64_t i02 = i12 / r2; |
1193 | |
1194 | 0 | const int64_t i1 = i11; |
1195 | 0 | const int64_t i2 = i12; |
1196 | 0 | const int64_t i3 = i13; |
1197 | |
1198 | 0 | const char * src0_row = (const char*)src0->data + (0 + i02 * nb02 + i03 * nb03); |
1199 | | |
1200 | | // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides |
1201 | | // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using |
1202 | | // the original src1 data pointer, so we should index using the indices directly |
1203 | | // TODO: this is a bit of a hack, we should probably have a better way to handle this |
1204 | 0 | const char * src1_col = (const char*)wdata + |
1205 | 0 | (src1_cont || src1->type != vec_dot_type |
1206 | 0 | ? (i11 + i12 * ne11 + i13 * ne12 * ne11) * row_size |
1207 | 0 | : (i11 * nb11 + i12 * nb12 + i13 * nb13)); |
1208 | 0 | float * dst_col = (float*)((char*)dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); |
1209 | | |
1210 | | //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) { |
1211 | | // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col); |
1212 | | //} |
1213 | |
1214 | 0 | for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0 += num_rows_per_vec_dot) { |
1215 | 0 | vec_dot(ne00, &tmp[ir0 - iir0], (num_rows_per_vec_dot > 1 ? 16 : 0), src0_row + ir0 * nb01, (num_rows_per_vec_dot > 1 ? nb01 : 0), src1_col, (num_rows_per_vec_dot > 1 ? src1_col_stride : 0), num_rows_per_vec_dot); |
1216 | 0 | } |
1217 | |
1218 | 0 | for (int cn = 0; cn < num_rows_per_vec_dot; ++cn) { |
1219 | 0 | memcpy(&dst_col[iir0 + cn * nb1 / nb0], tmp + (cn * 16), (MIN(iir0 + blck_0, ir0_end) - iir0) * sizeof(float)); |
1220 | 0 | } |
1221 | 0 | } |
1222 | 0 | } |
1223 | 0 | } |
1224 | 0 | } |
1225 | | |
1226 | | void ggml_compute_forward_mul_mat( |
1227 | | const struct ggml_compute_params * params, |
1228 | 0 | struct ggml_tensor * dst) { |
1229 | |
1230 | 0 | const struct ggml_tensor * src0 = dst->src[0]; |
1231 | 0 | const struct ggml_tensor * src1 = dst->src[1]; |
1232 | |
1233 | 0 | GGML_TENSOR_BINARY_OP_LOCALS |
1234 | |
1235 | 0 | const int ith = params->ith; |
1236 | 0 | const int nth = params->nth; |
1237 | |
1238 | 0 | enum ggml_type const vec_dot_type = type_traits_cpu[src0->type].vec_dot_type; |
1239 | 0 | ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float; |
1240 | 0 | int64_t const vec_dot_num_rows = type_traits_cpu[src0->type].nrows; |
1241 | |
1242 | 0 | GGML_ASSERT(ne0 == ne01); |
1243 | 0 | GGML_ASSERT(ne1 == ne11); |
1244 | 0 | GGML_ASSERT(ne2 == ne12); |
1245 | 0 | GGML_ASSERT(ne3 == ne13); |
1246 | | |
1247 | | // we don't support permuted src0 or src1 |
1248 | 0 | GGML_ASSERT(nb00 == ggml_type_size(src0->type)); |
1249 | 0 | GGML_ASSERT(nb10 == ggml_type_size(src1->type)); |
1250 | | |
1251 | | // dst cannot be transposed or permuted |
1252 | 0 | GGML_ASSERT(nb0 == sizeof(float)); |
1253 | 0 | GGML_ASSERT(nb0 <= nb1); |
1254 | 0 | GGML_ASSERT(nb1 <= nb2); |
1255 | 0 | GGML_ASSERT(nb2 <= nb3); |
1256 | | |
1257 | | // nb01 >= nb00 - src0 is not transposed |
1258 | | // compute by src0 rows |
1259 | | |
1260 | | // TODO: extract to "extra_op" |
1261 | 0 | #if GGML_USE_LLAMAFILE |
1262 | | // broadcast factors |
1263 | 0 | const int64_t r2 = ne12 / ne02; |
1264 | 0 | const int64_t r3 = ne13 / ne03; |
1265 | |
1266 | 0 | const bool src1_cont = ggml_is_contiguous(src1); |
1267 | |
1268 | 0 | if (src1_cont) { |
1269 | 0 | for (int64_t i13 = 0; i13 < ne13; i13++) |
1270 | 0 | for (int64_t i12 = 0; i12 < ne12; i12++) |
1271 | 0 | if (!llamafile_sgemm(params, |
1272 | 0 | ne01, ne11, ne00/ggml_blck_size(src0->type), |
1273 | 0 | (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, |
1274 | 0 | nb01/ggml_type_size(src0->type), |
1275 | 0 | (const char *)src1->data + i12*nb12 + i13*nb13, |
1276 | 0 | nb11/ggml_type_size(src1->type), |
1277 | 0 | (char *)dst->data + i12*nb2 + i13*nb3, |
1278 | 0 | nb1/ggml_type_size(dst->type), |
1279 | 0 | src0->type, |
1280 | 0 | src1->type, |
1281 | 0 | dst->type)) |
1282 | 0 | goto UseGgmlGemm1; |
1283 | 0 | return; |
1284 | 0 | } |
1285 | 0 | UseGgmlGemm1:; |
1286 | 0 | #endif |
1287 | |
1288 | 0 | if (src1->type != vec_dot_type) { |
1289 | 0 | char * wdata = params->wdata; |
1290 | |
1291 | 0 | const size_t nbw0 = ggml_type_size(vec_dot_type); |
1292 | 0 | const size_t nbw1 = ggml_row_size(vec_dot_type, ne10); |
1293 | 0 | const size_t nbw2 = nbw1*ne11; |
1294 | 0 | const size_t nbw3 = nbw2*ne12; |
1295 | |
1296 | 0 | assert(params->wsize >= ne13*nbw3); |
1297 | 0 | GGML_ASSERT(src1->type == GGML_TYPE_F32); |
1298 | |
1299 | | #if 0 |
1300 | | for (int64_t i13 = 0; i13 < ne13; ++i13) { |
1301 | | for (int64_t i12 = 0; i12 < ne12; ++i12) { |
1302 | | for (int64_t i11 = ith; i11 < ne11; i11 += nth) { |
1303 | | from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), |
1304 | | (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1), |
1305 | | ne10); |
1306 | | } |
1307 | | } |
1308 | | } |
1309 | | #else |
1310 | 0 | for (int64_t i13 = 0; i13 < ne13; ++i13) { |
1311 | 0 | for (int64_t i12 = 0; i12 < ne12; ++i12) { |
1312 | 0 | for (int64_t i11 = 0; i11 < ne11; ++i11) { |
1313 | 0 | size_t bs = ggml_blck_size(vec_dot_type); |
1314 | 0 | int64_t ne10_block_start = (ith * ne10/bs) / nth; |
1315 | 0 | int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth; |
1316 | 0 | from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10), |
1317 | 0 | (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0), |
1318 | 0 | (ne10_block_end - ne10_block_start) * bs); |
1319 | 0 | } |
1320 | 0 | } |
1321 | 0 | } |
1322 | 0 | #endif |
1323 | 0 | } |
1324 | |
1325 | 0 | if (ith == 0) { |
1326 | | // Every thread starts at ith, so the first unprocessed chunk is nth. This saves a bit of coordination right at the start. |
1327 | 0 | atomic_store_explicit(¶ms->threadpool->current_chunk, nth, memory_order_relaxed); |
1328 | 0 | } |
1329 | |
1330 | 0 | ggml_barrier(params->threadpool); |
1331 | |
|
1332 | 0 | #if GGML_USE_LLAMAFILE |
1333 | 0 | if (src1->type != vec_dot_type) { |
1334 | 0 | const void* wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; |
1335 | 0 | const size_t row_size = ggml_row_size(vec_dot_type, ne10); |
1336 | |
1337 | 0 | for (int64_t i13 = 0; i13 < ne13; i13++) |
1338 | 0 | for (int64_t i12 = 0; i12 < ne12; i12++) |
1339 | 0 | if (!llamafile_sgemm(params, |
1340 | 0 | ne01, ne11, ne00/ggml_blck_size(src0->type), |
1341 | 0 | (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03, |
1342 | 0 | nb01/ggml_type_size(src0->type), |
1343 | 0 | (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size, |
1344 | 0 | row_size/ggml_type_size(vec_dot_type), |
1345 | 0 | (char *)dst->data + i12*nb2 + i13*nb3, |
1346 | 0 | nb1/ggml_type_size(dst->type), |
1347 | 0 | src0->type, |
1348 | 0 | vec_dot_type, |
1349 | 0 | dst->type)) |
1350 | 0 | goto UseGgmlGemm2; |
1351 | 0 | return; |
1352 | 0 | } |
1353 | 0 | UseGgmlGemm2:; |
1354 | 0 | #endif |
1355 | | |
1356 | | // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers) |
1357 | 0 | const int64_t nr0 = ne0; |
1358 | | |
1359 | | // This is the size of the rest of the dimensions of the result |
1360 | 0 | const int64_t nr1 = ne1 * ne2 * ne3; |
1361 | | |
1362 | | // Now select a reasonable chunk size. |
1363 | 0 | int chunk_size = 16; |
1364 | | |
1365 | | // We need to step up the size if it's small |
1366 | 0 | if (nr0 == 1 || nr1 == 1) { |
1367 | 0 | chunk_size = 64; |
1368 | 0 | } |
1369 | | |
1370 | | // distribute the work across the inner or outer loop based on which one is larger |
1371 | | // The number of chunks in the 0/1 dim. |
1372 | | // CEIL(nr0/chunk_size) |
1373 | 0 | int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size; |
1374 | 0 | int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size; |
1375 | | |
1376 | | // If the chunking is poor for the number of threads on this setup, scrap the whole plan. Re-chunk it by thread. |
1377 | | // Also, chunking by thread was measured to perform better on NUMA systems. See https://github.com/ggml-org/llama.cpp/pull/6915 |
1378 | | // In theory, chunking should be just as useful on NUMA and non-NUMA systems, but testing disagreed with that. |
1379 | 0 | if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) { |
1380 | | // distribute the thread work across the inner or outer loop based on which one is larger |
1381 | 0 | nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows |
1382 | 0 | nchunk1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows |
1383 | 0 | } |
1384 | | |
1385 | | // The number of elements in each chunk |
1386 | 0 | const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; |
1387 | 0 | const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; |
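| | // [editor's worked example] nr0 = 100 rows, chunk_size = 16: |
| | //     nchunk0 = (100 + 16 - 1) / 16 = 7 chunks |
| | //     dr0     = (100 +  7 - 1) /  7 = 15 rows per chunk |
| | // so the last chunk (index 6) covers rows 90..99 once MIN(ir0_start + dr0, nr0) |
| | // clamps the end below. |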
1388 | | |
1389 | | // The first chunk comes from our thread_id, the rest will get auto-assigned. |
1390 | 0 | int current_chunk = ith; |
1391 | |
1392 | 0 | while (current_chunk < nchunk0 * nchunk1) { |
1393 | 0 | const int64_t ith0 = current_chunk % nchunk0; |
1394 | 0 | const int64_t ith1 = current_chunk / nchunk0; |
1395 | |
1396 | 0 | const int64_t ir0_start = dr0 * ith0; |
1397 | 0 | const int64_t ir0_end = MIN(ir0_start + dr0, nr0); |
1398 | |
1399 | 0 | const int64_t ir1_start = dr1 * ith1; |
1400 | 0 | const int64_t ir1_end = MIN(ir1_start + dr1, nr1); |
1401 | | |
1402 | | // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols |
1403 | 0 | int64_t num_rows_per_vec_dot = vec_dot_num_rows; |
1404 | | |
1405 | | // these checks are needed to avoid crossing dim1 boundaries |
1406 | | // can be optimized, but the logic would become more complicated, so keeping it like this for simplicity |
1407 | 0 | if ((nr0 % 2 != 0) || (ne11 % 2 != 0) || ((ir0_end - ir0_start) % 2 != 0) || ((ir1_end - ir1_start) % 2 != 0)) { |
1408 | 0 | num_rows_per_vec_dot = 1; |
1409 | 0 | } |
1410 | 0 | ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end); |
1411 | |
1412 | 0 | if (nth >= nchunk0 * nchunk1) { |
1413 | 0 | break; |
1414 | 0 | } |
1415 | | |
1416 | 0 | current_chunk = atomic_fetch_add_explicit(¶ms->threadpool->current_chunk, 1, memory_order_relaxed); |
1417 | 0 | } |
1418 | 0 | } |
1419 | | |
1420 | | // ggml_compute_forward_mul_mat_id |
1421 | | |
1422 | 0 | #define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ids->ne[0]*ids->ne[1] + (i1)] |
1423 | | |
1424 | | struct mmid_row_mapping { |
1425 | | int32_t i1; |
1426 | | int32_t i2; |
1427 | | }; |
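| | // [editor's sketch, inferred from the use below and hedged accordingly] for |
| | // mul_mat_id (mixture of experts), src1 rows are first bucketed by the expert that |
| | // ids assigns them; MMID_MATRIX_ROW(expert, k) then recovers where the k-th row |
| | // routed to that expert came from (i1 = slot within ids, i2 = src1 row), so each |
| | // expert's rows can be processed as one contiguous batch. |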
1428 | | |
1429 | | static void ggml_compute_forward_mul_mat_id_one_chunk( |
1430 | | struct ggml_tensor * dst, |
1431 | | const struct ggml_tensor * src0, |
1432 | | const struct ggml_tensor * src1, |
1433 | | const struct ggml_tensor * ids, |
1434 | | const int64_t cur_a, |
1435 | | const int64_t ir0_start, |
1436 | | const int64_t ir0_end, |
1437 | | const int64_t ir1_start, |
1438 | | const int64_t ir1_end, |
1439 | | const char * src0_cur, |
1440 | | const struct mmid_row_mapping * matrix_rows, |
1441 | | const size_t row_size, |
1442 | | const bool src1_cont, |
1443 | 0 | const void * wdata) { |
1444 | |
1445 | 0 | GGML_TENSOR_BINARY_OP_LOCALS |
1446 | |
1447 | 0 | const enum ggml_type type = src0->type; |
1448 | |
1449 | 0 | ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot; |
1450 | 0 | enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; |
1451 | |
1452 | 0 | const int64_t blck_0 = 16; |
1453 | 0 | const int64_t blck_1 = 16; |
1454 | |
1455 | 0 | float tmp[16]; |
1456 | |
1457 | 0 | for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { |
1458 | 0 | for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { |
1459 | 0 | for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ++ir1) { |
1460 | 0 | const int64_t _i12 = ir1; // logical row index for this expert |
1461 | |
1462 | 0 | struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12); |
1463 | 0 | const int id = row_mapping.i1; // selected expert index |
1464 | |
1465 | 0 | const int64_t i11 = id % ne11; |
1466 | 0 | const int64_t i12 = row_mapping.i2; // row index in src1 |
1467 | |
1468 | 0 | const int64_t i1 = id; // selected expert index |
1469 | 0 | const int64_t i2 = i12; // row |
1470 | | |
1471 | | // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides |
1472 | | // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using |
1473 | | // the original src1 data pointer, so we should index using the indices directly |
1474 | | // TODO: this is a bit of a hack, we should probably have a better way to handle this |
1475 | 0 | const char * src1_col = (const char *) wdata + |
1476 | 0 | (src1_cont || src1->type != vec_dot_type |
1477 | 0 | ? (i11 + i12*ne11)*row_size |
1478 | 0 | : (i11*nb11 + i12*nb12)); |
1479 | |
1480 | 0 | float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2)); |
1481 | |
1482 | 0 | for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) { |
1483 | 0 | vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1); |
1484 | 0 | } |
1485 | |
1486 | 0 | memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir0_end) - iir0)*sizeof(float)); |
1487 | 0 | } |
1488 | 0 | } |
1489 | 0 | } |
1490 | 0 | } |
1491 | | |
1492 | 0 | static void * incr_ptr_aligned(void ** p, size_t size, size_t align) { |
1493 | |
1494 | 0 | void * ptr = *p; |
1495 | 0 | ptr = (void *) GGML_PAD((uintptr_t) ptr, align); |
1496 | 0 | *p = (void *) ((char *) ptr + size); |
1497 | 0 | return ptr; |
1498 | 0 | } |
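| | // [editor's worked example] a tiny bump allocator over params->wdata: with |
| | // *p == 0x1003, size == 8, align == 8, GGML_PAD rounds the cursor up to 0x1008 (the |
| | // next multiple of 8), returns that pointer, and advances *p to 0x1010 for the next |
| | // carve-out. |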
1499 | | |
1500 | | static void ggml_compute_forward_mul_mat_id( |
1501 | | const struct ggml_compute_params * params, |
1502 | 0 | struct ggml_tensor * dst) { |
1503 | |
1504 | 0 | const struct ggml_tensor * src0 = dst->src[0]; |
1505 | 0 | const struct ggml_tensor * src1 = dst->src[1]; |
1506 | 0 | const struct ggml_tensor * ids = dst->src[2]; |
1507 | |
1508 | 0 | GGML_TENSOR_BINARY_OP_LOCALS |
1509 | |
1510 | 0 | const int ith = params->ith; |
1511 | 0 | const int nth = params->nth; |
1512 | |
1513 | 0 | const enum ggml_type type = src0->type; |
1514 | |
1515 | 0 | const bool src1_cont = ggml_is_contiguous(src1); |
1516 | |
1517 | 0 | enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type; |
1518 | 0 | ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float; |
1519 | | |
1520 | | // we don't support permuted src0 or src1 |
1521 | 0 | GGML_ASSERT(nb00 == ggml_type_size(type)); |
1522 | 0 | GGML_ASSERT(nb10 == ggml_type_size(src1->type)); |
1523 | | |
1524 | | // dst cannot be transposed or permuted |
1525 | 0 | GGML_ASSERT(nb0 == sizeof(float)); |
1526 | 0 | GGML_ASSERT(nb0 <= nb1); |
1527 | 0 | GGML_ASSERT(nb1 <= nb2); |
1528 | 0 | GGML_ASSERT(nb2 <= nb3); |
1529 | | |
1530 | | // row groups |
1531 | 0 | const int n_ids = ids->ne[0]; // n_expert_used |
1532 | 0 | const int n_as = ne02; // n_expert |
1533 | |
1534 | 0 | void * wdata_cur = params->wdata; |
1535 | |
1536 | 0 | if (src1->type != vec_dot_type) { |
1537 | 0 | incr_ptr_aligned(&wdata_cur, ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t)); |
1538 | 0 | } |
1539 | |
1540 | 0 | int64_t * matrix_row_counts = // [n_as] |
1541 | 0 | incr_ptr_aligned(&wdata_cur, n_as*sizeof(int64_t), sizeof(int64_t)); |
1542 | |
1543 | 0 | struct mmid_row_mapping * matrix_rows = // [n_as][ids->ne[0]*ids->ne[1]] |
1544 | 0 | incr_ptr_aligned(&wdata_cur, n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping), sizeof(int64_t)); |
1545 | |
1546 | 0 | char (*atomic_current_chunk)[CACHE_LINE_SIZE] = // [n_as] |
1547 | 0 | incr_ptr_aligned(&wdata_cur, CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE); |
1548 | |
1549 | 0 | GGML_ASSERT(params->wsize >= (size_t)((char *) wdata_cur - (char *) params->wdata)); |
1550 | |
1551 | 0 | if (src1->type != vec_dot_type) { |
1552 | 0 | char * wdata = params->wdata; |
1553 | |
1554 | 0 | const size_t nbw0 = ggml_type_size(vec_dot_type); |
1555 | 0 | const size_t nbw1 = ggml_row_size(vec_dot_type, ne10); |
1556 | 0 | const size_t nbw2 = nbw1*ne11; |
1557 | 0 | const size_t nbw3 = nbw2*ne12; |
1558 | |
1559 | 0 | assert(params->wsize >= ne13*nbw3); |
1560 | 0 | GGML_ASSERT(src1->type == GGML_TYPE_F32); |
1561 | |
1562 | | #if 0 |
1563 | | for (int64_t i13 = 0; i13 < ne13; ++i13) { |
1564 | | for (int64_t i12 = ith; i12 < ne12; i12 += nth) { |
1565 | | for (int64_t i11 = 0; i11 < ne11; ++i11) { |
1566 | | from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), |
1567 | | (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1), |
1568 | | ne10); |
1569 | | } |
1570 | | } |
1571 | | } |
1572 | | #else |
1573 | 0 | for (int64_t i13 = 0; i13 < ne13; ++i13) { |
1574 | 0 | for (int64_t i12 = 0; i12 < ne12; ++i12) { |
1575 | 0 | for (int64_t i11 = 0; i11 < ne11; ++i11) { |
1576 | 0 | size_t bs = ggml_blck_size(vec_dot_type); |
1577 | 0 | int64_t ne10_block_start = (ith * ne10/bs) / nth; |
1578 | 0 | int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth; |
1579 | 0 | from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10), |
1580 | 0 | (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0), |
1581 | 0 | (ne10_block_end - ne10_block_start) * bs); |
1582 | 0 | } |
1583 | 0 | } |
1584 | 0 | } |
1585 | 0 | #endif |
1586 | 0 | } |
1587 | |
1588 | 0 | if (ith == 0) { |
1589 | | // initialize matrix_row_counts |
1590 | 0 | memset(matrix_row_counts, 0, n_as*sizeof(int64_t)); |
1591 | | |
1592 | | // group rows by src0 matrix |
1593 | 0 | for (int64_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) { |
1594 | 0 | for (int id = 0; id < n_ids; ++id) { |
1595 | 0 | const int32_t i02 = *(const int32_t *) ((const char *) ids->data + iid1*ids->nb[1] + id*ids->nb[0]); |
1596 | |
1597 | 0 | assert(i02 >= 0 && i02 < n_as); |
1598 | |
1599 | 0 | MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) {id, iid1}; |
1600 | 0 | matrix_row_counts[i02] += 1; |
1601 | 0 | } |
1602 | 0 | } |
1603 | 0 | } |
1604 | | |
1605 | | // reset current_chunk |
1606 | 0 | for (int cur_a = ith; cur_a < n_as; cur_a += nth) { |
1607 | 0 | atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a); |
1608 | 0 | *current_chunk_ctr = nth; |
1609 | 0 | } |
1610 | |
1611 | 0 | ggml_barrier(params->threadpool); |
1612 | |
1613 | 0 | for (int cur_a = 0; cur_a < n_as; ++cur_a) { |
1614 | 0 | const int64_t cne1 = matrix_row_counts[cur_a]; |
1615 | |
1616 | 0 | if (cne1 == 0) { |
1617 | 0 | continue; |
1618 | 0 | } |
1619 | | |
1620 | 0 | const char * src0_cur = (const char *) src0->data + cur_a * nb02; |
1621 | 0 | const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; |
1622 | 0 | const size_t row_size = ggml_row_size(vec_dot_type, ne10); |
1623 | |
1624 | 0 | const int64_t nr0 = ne01; |
1625 | 0 | const int64_t nr1 = cne1; |
1626 | |
1627 | 0 | int chunk_size = 16; |
1628 | 0 | if (nr0 == 1 || nr1 == 1) { |
1629 | 0 | chunk_size = 64; |
1630 | 0 | } |
1631 | | |
1632 | | // disable for NUMA |
1633 | 0 | const bool disable_chunking = ggml_is_numa(); |
1634 | |
1635 | 0 | int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size; |
1636 | 0 | int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size; |
1637 | |
1638 | 0 | if (nchunk0 * nchunk1 < nth * 4 || disable_chunking) { |
1639 | 0 | nchunk0 = nr0 > nr1 ? nth : 1; |
1640 | 0 | nchunk1 = nr0 > nr1 ? 1 : nth; |
1641 | 0 | } |
1642 | |
1643 | 0 | const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; |
1644 | 0 | const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; |
1645 | |
1646 | 0 | int current_chunk = ith; |
1647 | |
1648 | 0 | atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a); |
1649 | |
1650 | 0 | while (current_chunk < nchunk0 * nchunk1) { |
1651 | 0 | const int64_t ith0 = current_chunk % nchunk0; |
1652 | 0 | const int64_t ith1 = current_chunk / nchunk0; |
1653 | |
1654 | 0 | const int64_t ir0_start = dr0 * ith0; |
1655 | 0 | const int64_t ir0_end = MIN(ir0_start + dr0, nr0); |
1656 | |
1657 | 0 | const int64_t ir1_start = dr1 * ith1; |
1658 | 0 | const int64_t ir1_end = MIN(ir1_start + dr1, nr1); |
1659 | |
1660 | 0 | ggml_compute_forward_mul_mat_id_one_chunk( |
1661 | 0 | dst, src0, src1, ids, cur_a, |
1662 | 0 | ir0_start, ir0_end, ir1_start, ir1_end, |
1663 | 0 | src0_cur, matrix_rows, row_size, src1_cont, wdata |
1664 | 0 | ); |
1665 | |
1666 | 0 | if (nth >= nchunk0 * nchunk1) { |
1667 | 0 | break; |
1668 | 0 | } |
1669 | | |
1670 | 0 | current_chunk = atomic_fetch_add_explicit(current_chunk_ctr, 1, memory_order_relaxed); |
1671 | 0 | } |
1672 | 0 | } |
1673 | 0 | } |
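// A standalone sketch of the 2D chunk decomposition used above: a flat chunk id
// (normally pulled from the shared atomic counter by each worker) splits into a
// column index ith0 and a row index ith1, each selecting a half-open row range.
// The sizes nr0/nr1 below are hypothetical.
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
    const int64_t nr0 = 100;       // rows of the current expert's src0 (made up)
    const int64_t nr1 = 7;         // src1 rows routed to this expert, i.e. cne1 (made up)
    const int64_t chunk_size = 16;

    const int64_t nchunk0 = (nr0 + chunk_size - 1)/chunk_size; // 7
    const int64_t nchunk1 = (nr1 + chunk_size - 1)/chunk_size; // 1

    const int64_t dr0 = (nr0 + nchunk0 - 1)/nchunk0;
    const int64_t dr1 = (nr1 + nchunk1 - 1)/nchunk1;

    for (int64_t chunk = 0; chunk < nchunk0*nchunk1; ++chunk) { // serial stand-in for the workers
        const int64_t ith0 = chunk % nchunk0;
        const int64_t ith1 = chunk / nchunk0;
        printf("chunk %2lld -> src0 rows [%3lld, %3lld) x src1 rows [%lld, %lld)\n",
               (long long) chunk,
               (long long) (dr0*ith0), (long long) MIN(dr0*ith0 + dr0, nr0),
               (long long) (dr1*ith1), (long long) MIN(dr1*ith1 + dr1, nr1));
    }
    return 0;
}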
1674 | | |
1675 | | ///////////////////////////////// |
1676 | | |
1677 | 0 | static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { |
1678 | 0 | GGML_ASSERT(params); |
1679 | |
1680 | 0 | if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) { |
1681 | 0 | return; |
1682 | 0 | } |
1683 | | |
1684 | | // extra_buffer op? |
1685 | 0 | if (ggml_cpu_extra_compute_forward(params, tensor)) { |
1686 | 0 | return; |
1687 | 0 | } |
1688 | | |
1689 | 0 | switch (tensor->op) { |
1690 | 0 | case GGML_OP_DUP: |
1691 | 0 | { |
1692 | 0 | ggml_compute_forward_dup(params, tensor); |
1693 | 0 | } break; |
1694 | 0 | case GGML_OP_ADD: |
1695 | 0 | { |
1696 | 0 | ggml_compute_forward_add(params, tensor); |
1697 | 0 | } break; |
1698 | 0 | case GGML_OP_ADD_ID: |
1699 | 0 | { |
1700 | 0 | ggml_compute_forward_add_id(params, tensor); |
1701 | 0 | } break; |
1702 | 0 | case GGML_OP_ADD1: |
1703 | 0 | { |
1704 | 0 | ggml_compute_forward_add1(params, tensor); |
1705 | 0 | } break; |
1706 | 0 | case GGML_OP_ACC: |
1707 | 0 | { |
1708 | 0 | ggml_compute_forward_acc(params, tensor); |
1709 | 0 | } break; |
1710 | 0 | case GGML_OP_SUB: |
1711 | 0 | { |
1712 | 0 | ggml_compute_forward_sub(params, tensor); |
1713 | 0 | } break; |
1714 | 0 | case GGML_OP_MUL: |
1715 | 0 | { |
1716 | 0 | ggml_compute_forward_mul(params, tensor); |
1717 | 0 | } break; |
1718 | 0 | case GGML_OP_DIV: |
1719 | 0 | { |
1720 | 0 | ggml_compute_forward_div(params, tensor); |
1721 | 0 | } break; |
1722 | 0 | case GGML_OP_SQR: |
1723 | 0 | { |
1724 | 0 | ggml_compute_forward_sqr(params, tensor); |
1725 | 0 | } break; |
1726 | 0 | case GGML_OP_SQRT: |
1727 | 0 | { |
1728 | 0 | ggml_compute_forward_sqrt(params, tensor); |
1729 | 0 | } break; |
1730 | 0 | case GGML_OP_LOG: |
1731 | 0 | { |
1732 | 0 | ggml_compute_forward_log(params, tensor); |
1733 | 0 | } break; |
1734 | 0 | case GGML_OP_SIN: |
1735 | 0 | { |
1736 | 0 | ggml_compute_forward_sin(params, tensor); |
1737 | 0 | } break; |
1738 | 0 | case GGML_OP_COS: |
1739 | 0 | { |
1740 | 0 | ggml_compute_forward_cos(params, tensor); |
1741 | 0 | } break; |
1742 | 0 | case GGML_OP_SUM: |
1743 | 0 | { |
1744 | 0 | ggml_compute_forward_sum(params, tensor); |
1745 | 0 | } break; |
1746 | 0 | case GGML_OP_SUM_ROWS: |
1747 | 0 | { |
1748 | 0 | ggml_compute_forward_sum_rows(params, tensor); |
1749 | 0 | } break; |
1750 | 0 | case GGML_OP_CUMSUM: |
1751 | 0 | { |
1752 | 0 | ggml_compute_forward_cumsum(params, tensor); |
1753 | 0 | } break; |
1754 | 0 | case GGML_OP_MEAN: |
1755 | 0 | { |
1756 | 0 | ggml_compute_forward_mean(params, tensor); |
1757 | 0 | } break; |
1758 | 0 | case GGML_OP_ARGMAX: |
1759 | 0 | { |
1760 | 0 | ggml_compute_forward_argmax(params, tensor); |
1761 | 0 | } break; |
1762 | 0 | case GGML_OP_COUNT_EQUAL: |
1763 | 0 | { |
1764 | 0 | ggml_compute_forward_count_equal(params, tensor); |
1765 | 0 | } break; |
1766 | 0 | case GGML_OP_REPEAT: |
1767 | 0 | { |
1768 | 0 | ggml_compute_forward_repeat(params, tensor); |
1769 | 0 | } break; |
1770 | 0 | case GGML_OP_REPEAT_BACK: |
1771 | 0 | { |
1772 | 0 | ggml_compute_forward_repeat_back(params, tensor); |
1773 | 0 | } break; |
1774 | 0 | case GGML_OP_CONCAT: |
1775 | 0 | { |
1776 | 0 | ggml_compute_forward_concat(params, tensor); |
1777 | 0 | } break; |
1778 | 0 | case GGML_OP_SILU_BACK: |
1779 | 0 | { |
1780 | 0 | ggml_compute_forward_silu_back(params, tensor); |
1781 | 0 | } break; |
1782 | 0 | case GGML_OP_NORM: |
1783 | 0 | { |
1784 | 0 | ggml_compute_forward_norm(params, tensor); |
1785 | 0 | } break; |
1786 | 0 | case GGML_OP_RMS_NORM: |
1787 | 0 | { |
1788 | 0 | ggml_compute_forward_rms_norm(params, tensor); |
1789 | 0 | } break; |
1790 | 0 | case GGML_OP_RMS_NORM_BACK: |
1791 | 0 | { |
1792 | 0 | ggml_compute_forward_rms_norm_back(params, tensor); |
1793 | 0 | } break; |
1794 | 0 | case GGML_OP_GROUP_NORM: |
1795 | 0 | { |
1796 | 0 | ggml_compute_forward_group_norm(params, tensor); |
1797 | 0 | } break; |
1798 | 0 | case GGML_OP_L2_NORM: |
1799 | 0 | { |
1800 | 0 | ggml_compute_forward_l2_norm(params, tensor); |
1801 | 0 | } break; |
1802 | 0 | case GGML_OP_MUL_MAT: |
1803 | 0 | { |
1804 | 0 | ggml_compute_forward_mul_mat(params, tensor); |
1805 | 0 | } break; |
1806 | 0 | case GGML_OP_MUL_MAT_ID: |
1807 | 0 | { |
1808 | 0 | ggml_compute_forward_mul_mat_id(params, tensor); |
1809 | 0 | } break; |
1810 | 0 | case GGML_OP_OUT_PROD: |
1811 | 0 | { |
1812 | 0 | ggml_compute_forward_out_prod(params, tensor); |
1813 | 0 | } break; |
1814 | 0 | case GGML_OP_SCALE: |
1815 | 0 | { |
1816 | 0 | ggml_compute_forward_scale(params, tensor); |
1817 | 0 | } break; |
1818 | 0 | case GGML_OP_SET: |
1819 | 0 | { |
1820 | 0 | ggml_compute_forward_set(params, tensor); |
1821 | 0 | } break; |
1822 | 0 | case GGML_OP_CPY: |
1823 | 0 | { |
1824 | 0 | ggml_compute_forward_cpy(params, tensor); |
1825 | 0 | } break; |
1826 | 0 | case GGML_OP_CONT: |
1827 | 0 | { |
1828 | 0 | ggml_compute_forward_cont(params, tensor); |
1829 | 0 | } break; |
1830 | 0 | case GGML_OP_GET_ROWS: |
1831 | 0 | { |
1832 | 0 | ggml_compute_forward_get_rows(params, tensor); |
1833 | 0 | } break; |
1834 | 0 | case GGML_OP_GET_ROWS_BACK: |
1835 | 0 | { |
1836 | 0 | ggml_compute_forward_get_rows_back(params, tensor); |
1837 | 0 | } break; |
1838 | 0 | case GGML_OP_SET_ROWS: |
1839 | 0 | { |
1840 | 0 | ggml_compute_forward_set_rows(params, tensor); |
1841 | 0 | } break; |
1842 | 0 | case GGML_OP_DIAG: |
1843 | 0 | { |
1844 | 0 | ggml_compute_forward_diag(params, tensor); |
1845 | 0 | } break; |
1846 | 0 | case GGML_OP_DIAG_MASK_INF: |
1847 | 0 | { |
1848 | 0 | ggml_compute_forward_diag_mask_inf(params, tensor); |
1849 | 0 | } break; |
1850 | 0 | case GGML_OP_DIAG_MASK_ZERO: |
1851 | 0 | { |
1852 | 0 | ggml_compute_forward_diag_mask_zero(params, tensor); |
1853 | 0 | } break; |
1854 | 0 | case GGML_OP_SOFT_MAX: |
1855 | 0 | { |
1856 | 0 | ggml_compute_forward_soft_max(params, tensor); |
1857 | 0 | } break; |
1858 | 0 | case GGML_OP_SOFT_MAX_BACK: |
1859 | 0 | { |
1860 | 0 | ggml_compute_forward_soft_max_ext_back(params, tensor); |
1861 | 0 | } break; |
1862 | 0 | case GGML_OP_ROPE: |
1863 | 0 | { |
1864 | 0 | ggml_compute_forward_rope(params, tensor); |
1865 | 0 | } break; |
1866 | 0 | case GGML_OP_ROPE_BACK: |
1867 | 0 | { |
1868 | 0 | ggml_compute_forward_rope_back(params, tensor); |
1869 | 0 | } break; |
1870 | 0 | case GGML_OP_CLAMP: |
1871 | 0 | { |
1872 | 0 | ggml_compute_forward_clamp(params, tensor); |
1873 | 0 | } break; |
1874 | 0 | case GGML_OP_CONV_TRANSPOSE_1D: |
1875 | 0 | { |
1876 | 0 | ggml_compute_forward_conv_transpose_1d(params, tensor); |
1877 | 0 | } break; |
1878 | 0 | case GGML_OP_IM2COL: |
1879 | 0 | { |
1880 | 0 | ggml_compute_forward_im2col(params, tensor); |
1881 | 0 | } break; |
1882 | 0 | case GGML_OP_IM2COL_BACK: |
1883 | 0 | { |
1884 | 0 | ggml_compute_forward_im2col_back_f32(params, tensor); |
1885 | 0 | } break; |
1886 | 0 | case GGML_OP_IM2COL_3D: |
1887 | 0 | { |
1888 | 0 | ggml_compute_forward_im2col_3d(params, tensor); |
1889 | 0 | } break; |
1890 | 0 | case GGML_OP_CONV_2D: |
1891 | 0 | { |
1892 | 0 | ggml_compute_forward_conv_2d(params, tensor); |
1893 | 0 | } break; |
1894 | 0 | case GGML_OP_CONV_3D: |
1895 | 0 | { |
1896 | 0 | ggml_compute_forward_conv_3d(params, tensor); |
1897 | 0 | } break; |
1898 | 0 | case GGML_OP_CONV_2D_DW: |
1899 | 0 | { |
1900 | 0 | ggml_compute_forward_conv_2d_dw(params, tensor); |
1901 | 0 | } break; |
1902 | 0 | case GGML_OP_CONV_TRANSPOSE_2D: |
1903 | 0 | { |
1904 | 0 | ggml_compute_forward_conv_transpose_2d(params, tensor); |
1905 | 0 | } break; |
1906 | 0 | case GGML_OP_POOL_1D: |
1907 | 0 | { |
1908 | 0 | ggml_compute_forward_pool_1d(params, tensor); |
1909 | 0 | } break; |
1910 | 0 | case GGML_OP_POOL_2D: |
1911 | 0 | { |
1912 | 0 | ggml_compute_forward_pool_2d(params, tensor); |
1913 | 0 | } break; |
1914 | 0 | case GGML_OP_POOL_2D_BACK: |
1915 | 0 | { |
1916 | 0 | ggml_compute_forward_pool_2d_back(params, tensor); |
1917 | 0 | } break; |
1918 | 0 | case GGML_OP_UPSCALE: |
1919 | 0 | { |
1920 | 0 | ggml_compute_forward_upscale(params, tensor); |
1921 | 0 | } break; |
1922 | 0 | case GGML_OP_PAD: |
1923 | 0 | { |
1924 | 0 | ggml_compute_forward_pad(params, tensor); |
1925 | 0 | } break; |
1926 | 0 | case GGML_OP_PAD_REFLECT_1D: |
1927 | 0 | { |
1928 | 0 | ggml_compute_forward_pad_reflect_1d(params, tensor); |
1929 | 0 | } break; |
1930 | 0 | case GGML_OP_ROLL: |
1931 | 0 | { |
1932 | 0 | ggml_compute_forward_roll(params, tensor); |
1933 | 0 | } break; |
1934 | 0 | case GGML_OP_ARANGE: |
1935 | 0 | { |
1936 | 0 | ggml_compute_forward_arange(params, tensor); |
1937 | 0 | } break; |
1938 | 0 | case GGML_OP_TIMESTEP_EMBEDDING: |
1939 | 0 | { |
1940 | 0 | ggml_compute_forward_timestep_embedding(params, tensor); |
1941 | 0 | } break; |
1942 | 0 | case GGML_OP_ARGSORT: |
1943 | 0 | { |
1944 | 0 | ggml_compute_forward_argsort(params, tensor); |
1945 | 0 | } break; |
1946 | 0 | case GGML_OP_TOP_K: |
1947 | 0 | { |
1948 | 0 | ggml_compute_forward_top_k(params, tensor); |
1949 | 0 | } break; |
1950 | 0 | case GGML_OP_LEAKY_RELU: |
1951 | 0 | { |
1952 | 0 | ggml_compute_forward_leaky_relu(params, tensor); |
1953 | 0 | } break; |
1954 | 0 | case GGML_OP_TRI: |
1955 | 0 | { |
1956 | 0 | ggml_compute_forward_tri(params, tensor); |
1957 | 0 | } break; |
1958 | 0 | case GGML_OP_FILL: |
1959 | 0 | { |
1960 | 0 | ggml_compute_forward_fill(params, tensor); |
1961 | 0 | } break; |
1962 | 0 | case GGML_OP_FLASH_ATTN_EXT: |
1963 | 0 | { |
1964 | 0 | ggml_compute_forward_flash_attn_ext(params, tensor); |
1965 | 0 | } break; |
1966 | 0 | case GGML_OP_FLASH_ATTN_BACK: |
1967 | 0 | { |
1968 | 0 | int32_t t = ggml_get_op_params_i32(tensor, 0); |
1969 | 0 | GGML_ASSERT(t == 0 || t == 1); |
1970 | 0 | bool masked = t != 0; |
1971 | 0 | ggml_compute_forward_flash_attn_back(params, masked, tensor); |
1972 | 0 | } break; |
1973 | 0 | case GGML_OP_SSM_CONV: |
1974 | 0 | { |
1975 | 0 | ggml_compute_forward_ssm_conv(params, tensor); |
1976 | 0 | } break; |
1977 | 0 | case GGML_OP_SSM_SCAN: |
1978 | 0 | { |
1979 | 0 | ggml_compute_forward_ssm_scan(params, tensor); |
1980 | 0 | } break; |
1981 | 0 | case GGML_OP_WIN_PART: |
1982 | 0 | { |
1983 | 0 | ggml_compute_forward_win_part(params, tensor); |
1984 | 0 | } break; |
1985 | 0 | case GGML_OP_WIN_UNPART: |
1986 | 0 | { |
1987 | 0 | ggml_compute_forward_win_unpart(params, tensor); |
1988 | 0 | } break; |
1989 | 0 | case GGML_OP_UNARY: |
1990 | 0 | { |
1991 | 0 | ggml_compute_forward_unary(params, tensor); |
1992 | 0 | } break; |
1993 | 0 | case GGML_OP_GLU: |
1994 | 0 | { |
1995 | 0 | ggml_compute_forward_glu(params, tensor); |
1996 | 0 | } break; |
1997 | 0 | case GGML_OP_GET_REL_POS: |
1998 | 0 | { |
1999 | 0 | ggml_compute_forward_get_rel_pos(params, tensor); |
2000 | 0 | } break; |
2001 | 0 | case GGML_OP_ADD_REL_POS: |
2002 | 0 | { |
2003 | 0 | ggml_compute_forward_add_rel_pos(params, tensor); |
2004 | 0 | } break; |
2005 | 0 | case GGML_OP_RWKV_WKV6: |
2006 | 0 | { |
2007 | 0 | ggml_compute_forward_rwkv_wkv6(params, tensor); |
2008 | 0 | } break; |
2009 | 0 | case GGML_OP_GATED_LINEAR_ATTN: |
2010 | 0 | { |
2011 | 0 | ggml_compute_forward_gla(params, tensor); |
2012 | 0 | } break; |
2013 | 0 | case GGML_OP_RWKV_WKV7: |
2014 | 0 | { |
2015 | 0 | ggml_compute_forward_rwkv_wkv7(params, tensor); |
2016 | 0 | } break; |
2017 | 0 | case GGML_OP_SOLVE_TRI: |
2018 | 0 | { |
2019 | 0 | ggml_compute_forward_solve_tri(params, tensor); |
2020 | 0 | } break; |
2021 | 0 | case GGML_OP_MAP_CUSTOM1: |
2022 | 0 | { |
2023 | 0 | ggml_compute_forward_map_custom1(params, tensor); |
2024 | 0 | } |
2025 | 0 | break; |
2026 | 0 | case GGML_OP_MAP_CUSTOM2: |
2027 | 0 | { |
2028 | 0 | ggml_compute_forward_map_custom2(params, tensor); |
2029 | 0 | } |
2030 | 0 | break; |
2031 | 0 | case GGML_OP_MAP_CUSTOM3: |
2032 | 0 | { |
2033 | 0 | ggml_compute_forward_map_custom3(params, tensor); |
2034 | 0 | } |
2035 | 0 | break; |
2036 | 0 | case GGML_OP_CUSTOM: |
2037 | 0 | { |
2038 | 0 | ggml_compute_forward_custom(params, tensor); |
2039 | 0 | } |
2040 | 0 | break; |
2041 | 0 | case GGML_OP_CROSS_ENTROPY_LOSS: |
2042 | 0 | { |
2043 | 0 | ggml_compute_forward_cross_entropy_loss(params, tensor); |
2044 | 0 | } |
2045 | 0 | break; |
2046 | 0 | case GGML_OP_CROSS_ENTROPY_LOSS_BACK: |
2047 | 0 | { |
2048 | 0 | ggml_compute_forward_cross_entropy_loss_back(params, tensor); |
2049 | 0 | } |
2050 | 0 | break; |
2051 | 0 | case GGML_OP_OPT_STEP_ADAMW: |
2052 | 0 | { |
2053 | 0 | ggml_compute_forward_opt_step_adamw(params, tensor); |
2054 | 0 | } |
2055 | 0 | break; |
2056 | 0 | case GGML_OP_OPT_STEP_SGD: |
2057 | 0 | { |
2058 | 0 | ggml_compute_forward_opt_step_sgd(params, tensor); |
2059 | 0 | } |
2060 | 0 | break; |
2061 | 0 | case GGML_OP_NONE: |
2062 | 0 | { |
2063 | | // nop |
2064 | 0 | } break; |
2065 | 0 | case GGML_OP_RESHAPE: |
2066 | 0 | { |
2067 | | // nop |
2068 | 0 | } break; |
2069 | 0 | case GGML_OP_PERMUTE: |
2070 | 0 | { |
2071 | | // nop |
2072 | 0 | } break; |
2073 | 0 | case GGML_OP_VIEW: |
2074 | 0 | { |
2075 | | // nop |
2076 | 0 | } break; |
2077 | 0 | case GGML_OP_TRANSPOSE: |
2078 | 0 | { |
2079 | | // nop |
2080 | 0 | } break; |
2081 | 0 | case GGML_OP_COUNT: |
2082 | 0 | { |
2083 | 0 | GGML_ABORT("fatal error"); |
2084 | 0 | } |
2085 | 0 | } |
2086 | 0 | } |
2087 | | |
2088 | | // Android's libc implementation "bionic" does not support setting thread affinity, hence __gnu_linux__ (glibc) rather than __linux__
2089 | | #if defined(__gnu_linux__) |
2090 | 0 | static void set_numa_thread_affinity(int thread_n) { |
2091 | 0 | if (!ggml_is_numa()) { |
2092 | 0 | return; |
2093 | 0 | } |
2094 | | |
2095 | 0 | int node_num; |
2096 | 0 | int rv; |
2097 | 0 | size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); |
2098 | |
2099 | 0 | switch(g_state.numa.numa_strategy) { |
2100 | 0 | case GGML_NUMA_STRATEGY_DISTRIBUTE: |
2101 | | // distribute threads across nodes: run this thread on node (thread_n % n_nodes)
2102 | 0 | node_num = thread_n % g_state.numa.n_nodes; |
2103 | 0 | break; |
2104 | 0 | case GGML_NUMA_STRATEGY_ISOLATE: |
2105 | | // run thread on current_node |
2106 | 0 | node_num = g_state.numa.current_node; |
2107 | 0 | break; |
2108 | 0 | case GGML_NUMA_STRATEGY_NUMACTL: |
2109 | | // use the cpuset that numactl gave us |
2110 | 0 | rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset); |
2111 | 0 | if (rv) { |
2112 | 0 | fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv)); |
2113 | 0 | } |
2114 | 0 | return; |
2115 | 0 | default: |
2116 | 0 | return; |
2117 | 0 | } |
2118 | | |
2119 | 0 | struct ggml_numa_node * node = &g_state.numa.nodes[node_num]; |
2120 | |
2121 | 0 | cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); |
2122 | 0 | CPU_ZERO_S(setsize, cpus); |
2123 | 0 | for (size_t i = 0; i < node->n_cpus; ++i) { |
2124 | 0 | CPU_SET_S(node->cpus[i], setsize, cpus); |
2125 | 0 | } |
2126 | |
2127 | 0 | rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); |
2128 | 0 | if (rv) { |
2129 | 0 | fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv)); |
2130 | 0 | } |
2131 | |
2132 | 0 | CPU_FREE(cpus); |
2133 | 0 | } |
2134 | | |
2135 | 0 | static void clear_numa_thread_affinity(void) { |
2136 | 0 | if (!ggml_is_numa()) { |
2137 | 0 | return; |
2138 | 0 | } |
2139 | | |
2140 | 0 | size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus); |
2141 | |
2142 | 0 | cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus); |
2143 | 0 | CPU_ZERO_S(setsize, cpus); |
2144 | 0 | for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) { |
2145 | 0 | CPU_SET_S(i, setsize, cpus); |
2146 | 0 | } |
2147 | |
2148 | 0 | int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus); |
2149 | 0 | if (rv) { |
2150 | 0 | fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv)); |
2151 | 0 | } |
2152 | |
2153 | 0 | CPU_FREE(cpus); |
2154 | 0 | } |
2155 | | #else |
2156 | | // TODO: Windows etc. |
2157 | | // (the linux implementation may also work on BSD, someone should test) |
2158 | | static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); } |
2159 | | static void clear_numa_thread_affinity(void) {} |
2160 | | #endif |
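// A standalone sketch of the dynamic cpu_set_t API used in the functions above
// (glibc-specific, needs _GNU_SOURCE and -lpthread): allocate a set sized for
// total_cpus, fill it, and apply it to the calling thread. The CPU count and
// the even-CPU choice below are arbitrary, for illustration only.
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    const int total_cpus = 128; // would come from the detected NUMA topology
    const size_t setsize = CPU_ALLOC_SIZE(total_cpus);

    cpu_set_t * cpus = CPU_ALLOC(total_cpus);
    CPU_ZERO_S(setsize, cpus);
    for (int i = 0; i < total_cpus; i += 2) {
        CPU_SET_S(i, setsize, cpus); // pin to the even-numbered CPUs
    }

    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
    if (rv) {
        fprintf(stderr, "pthread_setaffinity_np() failed: %s\n", strerror(rv));
    }

    CPU_FREE(cpus);
    return rv;
}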
2161 | | |
2162 | 0 | static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { |
2163 | 0 | int n_tasks = 0; |
2164 | |
2165 | 0 | if (ggml_is_empty(node)) { |
2166 | | // no need to multi-thread a no-op |
2167 | 0 | n_tasks = 1; |
2168 | 0 | return n_tasks; |
2169 | 0 | } |
2170 | | |
2171 | 0 | switch (node->op) { |
2172 | 0 | case GGML_OP_CPY: |
2173 | 0 | case GGML_OP_DUP: |
2174 | 0 | case GGML_OP_CONT: |
2175 | 0 | case GGML_OP_ADD: |
2176 | 0 | case GGML_OP_ADD_ID: |
2177 | 0 | case GGML_OP_ADD1: |
2178 | 0 | case GGML_OP_ACC: |
2179 | 0 | case GGML_OP_CUMSUM: |
2180 | 0 | case GGML_OP_TRI: |
2181 | 0 | case GGML_OP_FILL: |
2182 | 0 | { |
2183 | 0 | n_tasks = n_threads; |
2184 | 0 | } break; |
2185 | 0 | case GGML_OP_SUB: |
2186 | 0 | case GGML_OP_SQR: |
2187 | 0 | case GGML_OP_SQRT: |
2188 | 0 | case GGML_OP_LOG: |
2189 | 0 | case GGML_OP_SIN: |
2190 | 0 | case GGML_OP_COS: |
2191 | 0 | case GGML_OP_SUM: |
2192 | 0 | case GGML_OP_SUM_ROWS: |
2193 | 0 | case GGML_OP_MEAN: |
2194 | 0 | case GGML_OP_ARGMAX: |
2195 | 0 | { |
2196 | 0 | n_tasks = 1; |
2197 | 0 | } break; |
2198 | 0 | case GGML_OP_COUNT_EQUAL: |
2199 | 0 | case GGML_OP_SOLVE_TRI: |
2200 | 0 | { |
2201 | 0 | n_tasks = n_threads; |
2202 | 0 | } break; |
2203 | 0 | case GGML_OP_REPEAT: |
2204 | 0 | case GGML_OP_REPEAT_BACK: |
2205 | 0 | case GGML_OP_LEAKY_RELU: |
2206 | 0 | { |
2207 | 0 | n_tasks = 1; |
2208 | 0 | } break; |
2209 | 0 | case GGML_OP_UNARY: |
2210 | 0 | switch (ggml_get_unary_op(node)) { |
2211 | 0 | case GGML_UNARY_OP_ABS: |
2212 | 0 | case GGML_UNARY_OP_SGN: |
2213 | 0 | case GGML_UNARY_OP_NEG: |
2214 | 0 | case GGML_UNARY_OP_STEP: |
2215 | 0 | case GGML_UNARY_OP_TANH: |
2216 | 0 | case GGML_UNARY_OP_ELU: |
2217 | 0 | case GGML_UNARY_OP_RELU: |
2218 | 0 | case GGML_UNARY_OP_SIGMOID: |
2219 | 0 | case GGML_UNARY_OP_HARDSWISH: |
2220 | 0 | case GGML_UNARY_OP_HARDSIGMOID: |
2221 | 0 | case GGML_UNARY_OP_EXP: |
2222 | 0 | case GGML_UNARY_OP_SOFTPLUS: |
2223 | 0 | case GGML_UNARY_OP_EXPM1: |
2224 | 0 | case GGML_UNARY_OP_FLOOR: |
2225 | 0 | case GGML_UNARY_OP_CEIL: |
2226 | 0 | case GGML_UNARY_OP_ROUND: |
2227 | 0 | case GGML_UNARY_OP_TRUNC: |
2228 | 0 | { |
2229 | 0 | n_tasks = 1; |
2230 | 0 | } break; |
2231 | | |
2232 | 0 | case GGML_UNARY_OP_GELU: |
2233 | 0 | case GGML_UNARY_OP_GELU_ERF: |
2234 | 0 | case GGML_UNARY_OP_GELU_QUICK: |
2235 | 0 | case GGML_UNARY_OP_SILU: |
2236 | 0 | case GGML_UNARY_OP_XIELU: |
2237 | 0 | { |
2238 | 0 | n_tasks = n_threads; |
2239 | 0 | } break; |
2240 | 0 | default: |
2241 | 0 | GGML_ABORT("fatal error"); |
2242 | 0 | } |
2243 | 0 | break; |
2244 | 0 | case GGML_OP_GLU: |
2245 | 0 | switch (ggml_get_glu_op(node)) { |
2246 | 0 | case GGML_GLU_OP_REGLU: |
2247 | 0 | case GGML_GLU_OP_GEGLU: |
2248 | 0 | case GGML_GLU_OP_SWIGLU: |
2249 | 0 | case GGML_GLU_OP_SWIGLU_OAI: |
2250 | 0 | case GGML_GLU_OP_GEGLU_ERF: |
2251 | 0 | case GGML_GLU_OP_GEGLU_QUICK: |
2252 | 0 | { |
2253 | 0 | n_tasks = n_threads; |
2254 | 0 | } break; |
2255 | 0 | default: |
2256 | 0 | GGML_ABORT("fatal error"); |
2257 | 0 | } |
2258 | 0 | break; |
2259 | 0 | case GGML_OP_SILU_BACK: |
2260 | 0 | case GGML_OP_MUL: |
2261 | 0 | case GGML_OP_DIV: |
2262 | 0 | case GGML_OP_NORM: |
2263 | 0 | case GGML_OP_RMS_NORM: |
2264 | 0 | case GGML_OP_RMS_NORM_BACK: |
2265 | 0 | case GGML_OP_L2_NORM: |
2266 | 0 | case GGML_OP_GROUP_NORM: |
2267 | 0 | case GGML_OP_CONCAT: |
2268 | 0 | case GGML_OP_MUL_MAT: |
2269 | 0 | case GGML_OP_MUL_MAT_ID: |
2270 | 0 | case GGML_OP_OUT_PROD: |
2271 | 0 | { |
2272 | 0 | n_tasks = n_threads; |
2273 | 0 | } break; |
2274 | 0 | case GGML_OP_GET_ROWS: |
2275 | 0 | case GGML_OP_SET_ROWS: |
2276 | 0 | { |
2277 | | // FIXME: get_rows can use additional threads, but the cost of launching additional threads |
2278 | | // decreases performance with GPU offloading |
2279 | | //n_tasks = n_threads; |
2280 | 0 | n_tasks = 1; |
2281 | 0 | } break; |
2282 | 0 | case GGML_OP_SCALE: |
2283 | 0 | case GGML_OP_SET: |
2284 | 0 | case GGML_OP_RESHAPE: |
2285 | 0 | case GGML_OP_VIEW: |
2286 | 0 | case GGML_OP_PERMUTE: |
2287 | 0 | case GGML_OP_TRANSPOSE: |
2288 | 0 | case GGML_OP_GET_ROWS_BACK: |
2289 | 0 | case GGML_OP_DIAG: |
2290 | 0 | { |
2291 | 0 | n_tasks = 1; |
2292 | 0 | } break; |
2293 | 0 | case GGML_OP_DIAG_MASK_ZERO: |
2294 | 0 | case GGML_OP_DIAG_MASK_INF: |
2295 | 0 | case GGML_OP_SOFT_MAX_BACK: |
2296 | 0 | case GGML_OP_ROPE: |
2297 | 0 | case GGML_OP_ROPE_BACK: |
2298 | 0 | case GGML_OP_ADD_REL_POS: |
2299 | 0 | { |
2300 | 0 | n_tasks = n_threads; |
2301 | 0 | } break; |
2302 | 0 | case GGML_OP_CLAMP: |
2303 | 0 | { |
2304 | 0 | n_tasks = 1; //TODO |
2305 | 0 | } break; |
2306 | 0 | case GGML_OP_SOFT_MAX: |
2307 | 0 | { |
2308 | 0 | n_tasks = MIN(n_threads, ggml_nrows(node->src[0])); |
2309 | 0 | } break; |
2310 | 0 | case GGML_OP_IM2COL: |
2311 | 0 | case GGML_OP_IM2COL_BACK: |
2312 | 0 | case GGML_OP_IM2COL_3D: |
2313 | 0 | case GGML_OP_CONV_2D: |
2314 | 0 | case GGML_OP_CONV_3D: |
2315 | 0 | case GGML_OP_CONV_2D_DW: |
2316 | 0 | case GGML_OP_CONV_TRANSPOSE_1D: |
2317 | 0 | case GGML_OP_CONV_TRANSPOSE_2D: |
2318 | 0 | { |
2319 | 0 | n_tasks = n_threads; |
2320 | 0 | } break; |
2321 | 0 | case GGML_OP_POOL_1D: |
2322 | 0 | case GGML_OP_POOL_2D: |
2323 | 0 | case GGML_OP_POOL_2D_BACK: |
2324 | 0 | { |
2325 | 0 | n_tasks = 1; |
2326 | 0 | } break; |
2327 | 0 | case GGML_OP_UPSCALE: |
2328 | 0 | case GGML_OP_PAD: |
2329 | 0 | case GGML_OP_PAD_REFLECT_1D: |
2330 | 0 | case GGML_OP_ROLL: |
2331 | 0 | case GGML_OP_ARANGE: |
2332 | 0 | case GGML_OP_TIMESTEP_EMBEDDING: |
2333 | 0 | case GGML_OP_ARGSORT: |
2334 | 0 | case GGML_OP_TOP_K: |
2335 | 0 | case GGML_OP_FLASH_ATTN_EXT: |
2336 | 0 | case GGML_OP_FLASH_ATTN_BACK: |
2337 | 0 | case GGML_OP_SSM_CONV: |
2338 | 0 | case GGML_OP_SSM_SCAN: |
2339 | 0 | case GGML_OP_RWKV_WKV6: |
2340 | 0 | case GGML_OP_GATED_LINEAR_ATTN: |
2341 | 0 | case GGML_OP_RWKV_WKV7: |
2342 | 0 | { |
2343 | 0 | n_tasks = n_threads; |
2344 | 0 | } break; |
2345 | 0 | case GGML_OP_WIN_PART: |
2346 | 0 | case GGML_OP_WIN_UNPART: |
2347 | 0 | case GGML_OP_GET_REL_POS: |
2348 | 0 | { |
2349 | 0 | n_tasks = 1; |
2350 | 0 | } break; |
2351 | 0 | case GGML_OP_MAP_CUSTOM1: |
2352 | 0 | { |
2353 | 0 | struct ggml_map_custom1_op_params p; |
2354 | 0 | memcpy(&p, node->op_params, sizeof(p)); |
2355 | 0 | if (p.n_tasks == GGML_N_TASKS_MAX) { |
2356 | 0 | n_tasks = n_threads; |
2357 | 0 | } else { |
2358 | 0 | n_tasks = MIN(p.n_tasks, n_threads); |
2359 | 0 | } |
2360 | 0 | } break; |
2361 | 0 | case GGML_OP_MAP_CUSTOM2: |
2362 | 0 | { |
2363 | 0 | struct ggml_map_custom2_op_params p; |
2364 | 0 | memcpy(&p, node->op_params, sizeof(p)); |
2365 | 0 | if (p.n_tasks == GGML_N_TASKS_MAX) { |
2366 | 0 | n_tasks = n_threads; |
2367 | 0 | } else { |
2368 | 0 | n_tasks = MIN(p.n_tasks, n_threads); |
2369 | 0 | } |
2370 | 0 | } break; |
2371 | 0 | case GGML_OP_MAP_CUSTOM3: |
2372 | 0 | { |
2373 | 0 | struct ggml_map_custom3_op_params p; |
2374 | 0 | memcpy(&p, node->op_params, sizeof(p)); |
2375 | 0 | if (p.n_tasks == GGML_N_TASKS_MAX) { |
2376 | 0 | n_tasks = n_threads; |
2377 | 0 | } else { |
2378 | 0 | n_tasks = MIN(p.n_tasks, n_threads); |
2379 | 0 | } |
2380 | 0 | } break; |
2381 | 0 | case GGML_OP_CUSTOM: |
2382 | 0 | { |
2383 | 0 | struct ggml_custom_op_params p; |
2384 | 0 | memcpy(&p, node->op_params, sizeof(p)); |
2385 | 0 | if (p.n_tasks == GGML_N_TASKS_MAX) { |
2386 | 0 | n_tasks = n_threads; |
2387 | 0 | } else { |
2388 | 0 | n_tasks = MIN(p.n_tasks, n_threads); |
2389 | 0 | } |
2390 | 0 | } break; |
2391 | 0 | case GGML_OP_CROSS_ENTROPY_LOSS: |
2392 | 0 | case GGML_OP_CROSS_ENTROPY_LOSS_BACK: |
2393 | 0 | case GGML_OP_OPT_STEP_ADAMW: |
2394 | 0 | case GGML_OP_OPT_STEP_SGD: |
2395 | 0 | { |
2396 | 0 | n_tasks = n_threads; |
2397 | 0 | } break; |
2398 | 0 | case GGML_OP_NONE: |
2399 | 0 | { |
2400 | 0 | n_tasks = 1; |
2401 | 0 | } break; |
2402 | 0 | case GGML_OP_COUNT: |
2403 | 0 | { |
2404 | 0 | GGML_ABORT("fatal error"); |
2405 | 0 | } |
2406 | 0 | default: |
2407 | 0 | { |
2408 | 0 | fprintf(stderr, "%s: op not implemented: ", __func__); |
2409 | 0 | if (node->op < GGML_OP_COUNT) { |
2410 | 0 | fprintf(stderr, "%s\n", ggml_op_name(node->op)); |
2411 | 0 | } else { |
2412 | 0 | fprintf(stderr, "%d\n", node->op); |
2413 | 0 | } |
2414 | 0 | GGML_ABORT("fatal error"); |
2415 | 0 | } |
2416 | 0 | } |
2417 | | |
2418 | 0 | assert(n_tasks > 0); |
2419 | |
2420 | 0 | return n_tasks; |
2421 | 0 | } |
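// Worked example for the mapping above (illustrative numbers): with
// n_threads = 8, a GGML_OP_SOFT_MAX node whose src0 has 3 rows gets
// n_tasks = MIN(8, ggml_nrows(src0)) = 3, so only 3 threads receive work for
// that node, while a GGML_OP_MUL_MAT node in the same graph gets all 8.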
2422 | | |
2423 | | static thread_ret_t ggml_graph_compute_secondary_thread(void* data); |
2424 | | |
2425 | | #if defined(_WIN32) |
2426 | | #include "windows.h" |
2427 | | |
2428 | | // TODO: support > 64 CPUs |
2429 | | static bool ggml_thread_apply_affinity(bool * mask) { |
2430 | | HANDLE h = GetCurrentThread(); |
2431 | | uint64_t bitmask = 0ULL; |
2432 | | |
2433 | | assert(GGML_MAX_N_THREADS >= 64); |
2434 | | |
2435 | | for (int32_t i = 0; i < 8; i++) { |
2436 | | int32_t idx = i * 8; |
2437 | | uint8_t val = 0; |
2438 | | val |= mask[idx + 0] << 0; |
2439 | | val |= mask[idx + 1] << 1; |
2440 | | val |= mask[idx + 2] << 2; |
2441 | | val |= mask[idx + 3] << 3; |
2442 | | val |= mask[idx + 4] << 4; |
2443 | | val |= mask[idx + 5] << 5; |
2444 | | val |= mask[idx + 6] << 6; |
2445 | | val |= mask[idx + 7] << 7; |
2446 | | bitmask |= (uint64_t)val << idx; |
2447 | | } |
2448 | | |
2449 | | for (int32_t i = 64; i < GGML_MAX_N_THREADS; i++) { |
2450 | | if (mask[i]) { |
2451 | | fprintf(stderr, "warn: setting thread-affinity for > 64 CPUs isn't supported on windows!\n"); |
2452 | | break; |
2453 | | } |
2454 | | } |
2455 | | |
2456 | | DWORD_PTR m = (DWORD_PTR)bitmask; |
2457 | | |
2458 | | m = SetThreadAffinityMask(h, m); |
2459 | | |
2460 | | return m != 0; |
2461 | | } |
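// A portable sketch of the mask packing above: 64 bools fold into one 64-bit
// affinity word (here bit-by-bit rather than byte-by-byte, with the same
// result); the set CPUs below are arbitrary.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    bool mask[64] = { false };
    mask[0] = mask[3] = mask[17] = true;

    uint64_t bitmask = 0;
    for (int i = 0; i < 64; i++) {
        bitmask |= (uint64_t) (mask[i] ? 1 : 0) << i;
    }
    printf("affinity bitmask = 0x%016llx\n", (unsigned long long) bitmask); // 0x0000000000020009
    return 0;
}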
2462 | | |
2463 | | static bool ggml_thread_apply_priority(int32_t prio) { |
2464 | | // Note that on Windows the Process Priority Class must be updated in order to set Thread priority. |
2465 | | // This is left up to the application.
2466 | | DWORD p = THREAD_PRIORITY_NORMAL; |
2467 | | switch (prio) { |
2468 | | case GGML_SCHED_PRIO_LOW: p = THREAD_PRIORITY_BELOW_NORMAL; break; |
2469 | | case GGML_SCHED_PRIO_NORMAL: p = THREAD_PRIORITY_NORMAL; break; |
2470 | | case GGML_SCHED_PRIO_MEDIUM: p = THREAD_PRIORITY_ABOVE_NORMAL; break; |
2471 | | case GGML_SCHED_PRIO_HIGH: p = THREAD_PRIORITY_HIGHEST; break; |
2472 | | case GGML_SCHED_PRIO_REALTIME: p = THREAD_PRIORITY_TIME_CRITICAL; break; |
2473 | | } |
2474 | | |
2475 | | if (prio != GGML_SCHED_PRIO_LOW) { |
2476 | | // Tell Windows that this thread should not be throttled (needs its own CPU core). |
2477 | | // Newer Windows 11 versions aggressively park (offline) CPU cores and often place
2478 | | // all our threads onto the first 4 cores, which results in terrible performance with
2479 | | // n_threads > 4.
2480 | | #if _WIN32_WINNT >= 0x0602 |
2481 | | THREAD_POWER_THROTTLING_STATE t; |
2482 | | ZeroMemory(&t, sizeof(t)); |
2483 | | t.Version = THREAD_POWER_THROTTLING_CURRENT_VERSION; |
2484 | | t.ControlMask = THREAD_POWER_THROTTLING_EXECUTION_SPEED; |
2485 | | t.StateMask = 0; |
2486 | | |
2487 | | if (!SetThreadInformation(GetCurrentThread(), ThreadPowerThrottling, &t, sizeof(t))) { |
2488 | | GGML_LOG_DEBUG("failed to disable thread power throttling %d : (%d)\n", prio, (int) GetLastError()); |
2489 | | return false; |
2490 | | } |
2491 | | #endif |
2492 | | } |
2493 | | |
2494 | | if (prio == GGML_SCHED_PRIO_NORMAL) { |
2495 | | // Keep inherited policy/priority |
2496 | | return true; |
2497 | | } |
2498 | | |
2499 | | if (!SetThreadPriority(GetCurrentThread(), p)) { |
2500 | | fprintf(stderr, "warn: failed to set thread priority %d : (%d)\n", prio, (int) GetLastError()); |
2501 | | return false; |
2502 | | } |
2503 | | |
2504 | | return true; |
2505 | | } |
2506 | | |
2507 | | #elif defined(__APPLE__) |
2508 | | #include <sys/types.h> |
2509 | | #include <sys/resource.h> |
2510 | | |
2511 | | static bool ggml_thread_apply_affinity(const bool * mask) { |
2512 | | // Not supported on Apple platforms |
2513 | | UNUSED(mask); |
2514 | | return true; |
2515 | | } |
2516 | | |
2517 | | static bool ggml_thread_apply_priority(int32_t prio) { |
2518 | | struct sched_param p; |
2519 | | int32_t policy = SCHED_OTHER; |
2520 | | switch (prio) { |
2521 | | // TODO: there seems to be no way to set lower prio on Apple platforms |
2522 | | case GGML_SCHED_PRIO_LOW: policy = SCHED_OTHER; p.sched_priority = 0; break; |
2523 | | case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break; |
2524 | | case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break; |
2525 | | case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break; |
2526 | | case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break; |
2527 | | } |
2528 | | |
2529 | | if (prio == GGML_SCHED_PRIO_NORMAL) { |
2530 | | // Keep inherited policy/priority |
2531 | | return true; |
2532 | | } |
2533 | | |
2534 | | int32_t err = pthread_setschedparam(pthread_self(), policy, &p); |
2535 | | if (err != 0) { |
2536 | | fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err); |
2537 | | return false; |
2538 | | } |
2539 | | |
2540 | | return true; |
2541 | | } |
2542 | | |
2543 | | #elif defined(__gnu_linux__) |
2544 | | // TODO: this may not work on BSD, to be verified |
2545 | | |
2546 | 0 | static bool ggml_thread_apply_affinity(const bool * mask) { |
2547 | 0 | cpu_set_t cpuset; |
2548 | 0 | int err; |
2549 | |
2550 | 0 | CPU_ZERO(&cpuset); |
2551 | |
2552 | 0 | for (uint32_t i = 0; i < GGML_MAX_N_THREADS; i++) { |
2553 | 0 | if (mask[i]) { |
2554 | 0 | GGML_PRINT_DEBUG("Thread %lx: adding %d to cpuset\n", pthread_self(), i); |
2555 | 0 | CPU_SET(i, &cpuset); |
2556 | 0 | } |
2557 | 0 | } |
2558 | |
2559 | | #ifdef __ANDROID__ |
2560 | | err = sched_setaffinity(0, sizeof(cpuset), &cpuset); |
2561 | | if (err < 0) { |
2562 | | err = errno; |
2563 | | } |
2564 | | #else |
2565 | 0 | err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); |
2566 | 0 | #endif |
2567 | 0 | if (err != 0) { |
2568 | 0 | fprintf(stderr, "warn: failed to set affinity mask %p : %s (%d)\n", (const void *) mask, strerror(err), err);
2569 | 0 | return false; |
2570 | 0 | } |
2571 | | |
2572 | 0 | return true; |
2573 | 0 | } |
2574 | | |
2575 | 0 | static bool ggml_thread_apply_priority(int32_t prio) { |
2576 | 0 | struct sched_param p; |
2577 | 0 | int32_t policy = SCHED_OTHER; |
2578 | 0 | switch (prio) { |
2579 | 0 | case GGML_SCHED_PRIO_LOW: policy = SCHED_BATCH; p.sched_priority = 0; break; |
2580 | 0 | case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break; |
2581 | 0 | case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break; |
2582 | 0 | case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break; |
2583 | 0 | case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break; |
2584 | 0 | } |
2585 | | |
2586 | 0 | if (prio == GGML_SCHED_PRIO_NORMAL) { |
2587 | | // Keep inherited policy/priority |
2588 | 0 | return true; |
2589 | 0 | } |
2590 | | |
2591 | 0 | int32_t err = pthread_setschedparam(pthread_self(), policy, &p); |
2592 | 0 | if (err != 0) { |
2593 | 0 | fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err); |
2594 | 0 | return false; |
2595 | 0 | } |
2596 | | |
2597 | 0 | return true; |
2598 | 0 | } |
2599 | | |
2600 | | #else // unsupported platforms |
2601 | | |
2602 | | static bool ggml_thread_apply_affinity(const bool * mask) { |
2603 | | UNUSED(mask); |
2604 | | return true; |
2605 | | } |
2606 | | |
2607 | | static bool ggml_thread_apply_priority(int32_t prio) { |
2608 | | UNUSED(prio); |
2609 | | return true; |
2610 | | } |
2611 | | |
2612 | | #endif |
2613 | | |
2614 | 0 | static bool ggml_thread_cpumask_is_valid(const bool * mask) { |
2615 | 0 | for (int i = 0; i < GGML_MAX_N_THREADS; i++) { |
2616 | 0 | if (mask[i]) { return true; } |
2617 | 0 | } |
2618 | 0 | return false; |
2619 | 0 | } |
2620 | | |
2621 | 0 | static void ggml_thread_cpumask_next(const bool * global_mask, bool * local_mask, bool strict, int32_t* iter) { |
2622 | 0 | if (!strict) { |
2623 | 0 | memcpy(local_mask, global_mask, GGML_MAX_N_THREADS); |
2624 | 0 | return; |
2625 | 0 | } else { |
2626 | 0 | memset(local_mask, 0, GGML_MAX_N_THREADS); |
2627 | 0 | int32_t base_idx = *iter; |
2628 | 0 | for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) { |
2629 | 0 | int32_t idx = base_idx + i; |
2630 | 0 | if (idx >= GGML_MAX_N_THREADS) { |
2631 | | // Just a cheaper modulo |
2632 | 0 | idx -= GGML_MAX_N_THREADS; |
2633 | 0 | } |
2634 | 0 | if (global_mask[idx]) { |
2635 | 0 | local_mask[idx] = 1; |
2636 | 0 | *iter = idx + 1; |
2637 | 0 | return; |
2638 | 0 | } |
2639 | 0 | } |
2640 | 0 | } |
2641 | 0 | } |
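// A standalone sketch of the strict path above: each call hands out the next
// set CPU in round-robin order, so workers get distinct cores until the mask
// wraps around. N is a small stand-in for GGML_MAX_N_THREADS.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define N 8

static void cpumask_next_strict(const bool * global, bool * local, int32_t * iter) {
    memset(local, 0, N);
    for (int32_t i = 0; i < N; i++) {
        int32_t idx = (*iter + i) % N; // the real code uses a compare instead of %
        if (global[idx]) {
            local[idx] = true;
            *iter = idx + 1;
            return;
        }
    }
}

int main(void) {
    const bool global[N] = { false, true, false, true, false, true, false, false }; // CPUs 1, 3, 5
    int32_t iter = 0;
    for (int t = 0; t < 4; t++) {
        bool local[N];
        cpumask_next_strict(global, local, &iter);
        for (int i = 0; i < N; i++) {
            if (local[i]) printf("worker %d -> cpu %d\n", t, i); // 1, 3, 5, then 1 again
        }
    }
    return 0;
}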
2642 | | |
2643 | 0 | void ggml_threadpool_free(struct ggml_threadpool* threadpool) { |
2644 | 0 | if (!threadpool) return; |
2645 | | |
2646 | 0 | const int n_threads = threadpool->n_threads; |
2647 | |
2648 | 0 | #ifndef GGML_USE_OPENMP |
2649 | 0 | struct ggml_compute_state* workers = threadpool->workers; |
2650 | |
2651 | 0 | ggml_mutex_lock(&threadpool->mutex); |
2652 | |
2653 | 0 | threadpool->stop = true; |
2654 | 0 | threadpool->pause = false; |
2655 | |
2656 | 0 | ggml_cond_broadcast(&threadpool->cond); |
2657 | 0 | ggml_mutex_unlock(&threadpool->mutex); |
2658 | |
2659 | 0 | for (int j = 1; j < n_threads; j++) { |
2660 | 0 | int32_t rc = ggml_thread_join(workers[j].thrd, NULL); |
2661 | 0 | GGML_ASSERT(rc == GGML_EXIT_SUCCESS || rc == GGML_EXIT_ABORTED); |
2662 | 0 | UNUSED(rc); |
2663 | 0 | } |
2664 | |
2665 | 0 | ggml_mutex_destroy(&threadpool->mutex); |
2666 | 0 | ggml_cond_destroy(&threadpool->cond); |
2667 | 0 | #endif // GGML_USE_OPENMP |
2668 | |
2669 | 0 | const size_t workers_size = sizeof(struct ggml_compute_state) * n_threads; |
2670 | 0 | ggml_aligned_free(threadpool->workers, workers_size); |
2671 | 0 | ggml_aligned_free(threadpool, sizeof(struct ggml_threadpool)); |
2672 | 0 | } |
2673 | | |
2674 | | #ifndef GGML_USE_OPENMP |
2675 | | // pause/resume must be called under mutex |
2676 | 0 | static void ggml_threadpool_pause_locked(struct ggml_threadpool * threadpool) { |
2677 | 0 | GGML_PRINT_DEBUG("Pausing threadpool\n"); |
2678 | 0 | threadpool->pause = true; |
2679 | 0 | ggml_cond_broadcast(&threadpool->cond); |
2680 | 0 | } |
2681 | | |
2682 | 0 | static void ggml_threadpool_resume_locked(struct ggml_threadpool * threadpool) { |
2683 | 0 | GGML_PRINT_DEBUG("Resuming threadpool\n"); |
2684 | 0 | threadpool->pause = false; |
2685 | 0 | ggml_cond_broadcast(&threadpool->cond); |
2686 | 0 | } |
2687 | | #endif |
2688 | | |
2689 | 0 | void ggml_threadpool_pause(struct ggml_threadpool * threadpool) { |
2690 | 0 | #ifndef GGML_USE_OPENMP |
2691 | 0 | ggml_mutex_lock(&threadpool->mutex); |
2692 | 0 | if (!threadpool->pause) { |
2693 | 0 | ggml_threadpool_pause_locked(threadpool); |
2694 | 0 | } |
2695 | 0 | ggml_mutex_unlock(&threadpool->mutex); |
2696 | | #else |
2697 | | UNUSED(threadpool); |
2698 | | #endif |
2699 | 0 | } |
2700 | | |
2701 | 0 | void ggml_threadpool_resume(struct ggml_threadpool * threadpool) { |
2702 | 0 | #ifndef GGML_USE_OPENMP |
2703 | 0 | ggml_mutex_lock(&threadpool->mutex); |
2704 | 0 | if (threadpool->pause) { |
2705 | 0 | ggml_threadpool_resume_locked(threadpool); |
2706 | 0 | } |
2707 | 0 | ggml_mutex_unlock(&threadpool->mutex); |
2708 | | #else |
2709 | | UNUSED(threadpool); |
2710 | | #endif |
2711 | 0 | } |
2712 | | |
2713 | | struct ggml_cplan ggml_graph_plan( |
2714 | | const struct ggml_cgraph * cgraph, |
2715 | | int n_threads, |
2716 | 0 | struct ggml_threadpool * threadpool) { |
2717 | |
|
2718 | 0 | if (threadpool == NULL) { |
2719 | | //GGML_PRINT_DEBUG("Threadpool is not specified. Will create a disposable threadpool : n_threads %d\n", n_threads); |
2720 | 0 | } |
2721 | 0 | if (n_threads <= 0) { |
2722 | 0 | n_threads = threadpool ? threadpool->n_threads : GGML_DEFAULT_N_THREADS; |
2723 | 0 | } |
2724 | |
2725 | | #if defined(__EMSCRIPTEN__) && !defined(__EMSCRIPTEN_PTHREADS__) |
2726 | | // Emscripten without pthreads support can only use a single thread |
2727 | | n_threads = 1; |
2728 | | #endif |
2729 | |
2730 | 0 | size_t work_size = 0; |
2731 | |
2732 | 0 | struct ggml_cplan cplan; |
2733 | 0 | memset(&cplan, 0, sizeof(struct ggml_cplan)); |
2734 | |
2735 | 0 | int max_tasks = 1; |
2736 | | |
2737 | | // thread scheduling for the different operations + work buffer size estimation |
2738 | 0 | for (int i = 0; i < cgraph->n_nodes; i++) { |
2739 | 0 | struct ggml_tensor * node = cgraph->nodes[i]; |
2740 | |
2741 | 0 | const int n_tasks = ggml_get_n_tasks(node, n_threads); |
2742 | |
2743 | 0 | max_tasks = MAX(max_tasks, n_tasks); |
2744 | |
2745 | 0 | size_t cur = 0; |
2746 | |
2747 | 0 | if (!ggml_cpu_extra_work_size(n_threads, node, &cur)) { |
2748 | 0 | switch (node->op) { |
2749 | 0 | case GGML_OP_CPY: |
2750 | 0 | case GGML_OP_DUP: |
2751 | 0 | { |
2752 | 0 | if (ggml_is_quantized(node->type) || |
2753 | | // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32 |
2754 | 0 | (node->src[0]->type == GGML_TYPE_F16 && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) || |
2755 | 0 | (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16) || |
2756 | | // conversion between F32 and I32 |
2757 | 0 | (node->src[0]->type == GGML_TYPE_F32 && node->src[1] && node->src[1]->type == GGML_TYPE_I32) || |
2758 | 0 | (node->src[0]->type == GGML_TYPE_I32 && node->src[1] && node->src[1]->type == GGML_TYPE_F32)) { |
2759 | 0 | cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; |
2760 | 0 | } |
2761 | 0 | } break; |
2762 | 0 | case GGML_OP_ADD: |
2763 | 0 | case GGML_OP_ADD_ID: |
2764 | 0 | case GGML_OP_ADD1: |
2765 | 0 | { |
2766 | 0 | if (ggml_is_quantized(node->src[0]->type)) { |
2767 | 0 | cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; |
2768 | 0 | } |
2769 | 0 | } break; |
2770 | 0 | case GGML_OP_ACC: |
2771 | 0 | { |
2772 | 0 | if (ggml_is_quantized(node->src[0]->type)) { |
2773 | 0 | cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks; |
2774 | 0 | } |
2775 | 0 | } break; |
2776 | 0 | case GGML_OP_COUNT_EQUAL: |
2777 | 0 | { |
2778 | 0 | cur = ggml_type_size(node->type)*n_tasks; |
2779 | 0 | } break; |
2780 | 0 | case GGML_OP_MUL_MAT: |
2781 | 0 | { |
2782 | 0 | const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type; |
2783 | |
2784 | 0 | if (node->src[1]->type != vec_dot_type) { |
2785 | 0 | cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1])); |
2786 | 0 | } |
2787 | 0 | } break; |
2788 | 0 | case GGML_OP_MUL_MAT_ID: |
2789 | 0 | { |
2790 | 0 | cur = 0; |
2791 | 0 | const struct ggml_tensor * src0 = node->src[0]; |
2792 | 0 | const struct ggml_tensor * src1 = node->src[1]; |
2793 | 0 | const struct ggml_tensor * ids = node->src[2]; |
2794 | 0 | const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type; |
2795 | 0 | const int n_as = src0->ne[2]; |
2796 | | // src1 |
2797 | 0 | if (src1->type != vec_dot_type) { |
2798 | 0 | cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)) + sizeof(int64_t); |
2799 | 0 | } |
2800 | | // matrix_row_counts |
2801 | 0 | cur += n_as * sizeof(int64_t) + sizeof(int64_t); |
2802 | | // matrix_rows |
2803 | 0 | cur += n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping) + sizeof(int64_t); |
2804 | | // atomic_current_chunk |
2805 | 0 | cur += CACHE_LINE_SIZE*n_as + CACHE_LINE_SIZE; |
2806 | 0 | } break; |
2807 | 0 | case GGML_OP_OUT_PROD: |
2808 | 0 | { |
2809 | 0 | if (ggml_is_quantized(node->src[0]->type)) { |
2810 | 0 | cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks; |
2811 | 0 | } |
2812 | 0 | } break; |
2813 | 0 | case GGML_OP_SOFT_MAX: |
2814 | 0 | case GGML_OP_ROPE: |
2815 | 0 | case GGML_OP_ROPE_BACK: |
2816 | 0 | { |
2817 | 0 | cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks; |
2818 | 0 | } break; |
2819 | 0 | case GGML_OP_CONV_TRANSPOSE_1D: |
2820 | 0 | { |
2821 | 0 | GGML_ASSERT(node->src[0]->ne[3] == 1); |
2822 | 0 | GGML_ASSERT(node->src[1]->ne[2] == 1); |
2823 | 0 | GGML_ASSERT(node->src[1]->ne[3] == 1); |
2824 | |
2825 | 0 | const int64_t ne00 = node->src[0]->ne[0]; // K |
2826 | 0 | const int64_t ne01 = node->src[0]->ne[1]; // Cout |
2827 | 0 | const int64_t ne02 = node->src[0]->ne[2]; // Cin |
2828 | 0 | const int64_t ne10 = node->src[1]->ne[0]; // L |
2829 | 0 | const int64_t ne11 = node->src[1]->ne[1]; // Cin |
2830 | |
2831 | 0 | if ((node->src[0]->type == GGML_TYPE_F16 || |
2832 | 0 | node->src[0]->type == GGML_TYPE_BF16) && |
2833 | 0 | node->src[1]->type == GGML_TYPE_F32) { |
2834 | 0 | cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02; |
2835 | 0 | cur += sizeof(ggml_fp16_t)*ne10*ne11; |
2836 | 0 | } else if (node->src[0]->type == GGML_TYPE_F32 && |
2837 | 0 | node->src[1]->type == GGML_TYPE_F32) { |
2838 | 0 | cur += sizeof(float)*ne00*ne01*ne02; |
2839 | 0 | cur += sizeof(float)*ne10*ne11; |
2840 | 0 | } else { |
2841 | 0 | GGML_ABORT("fatal error"); |
2842 | 0 | } |
2843 | 0 | } break; |
2844 | 0 | case GGML_OP_CONV_2D: |
2845 | 0 | case GGML_OP_CONV_3D: |
2846 | 0 | { |
2847 | 0 | cur = GGML_IM2COL_WORK_SIZE; |
2848 | 0 | } break; |
2849 | 0 | case GGML_OP_CONV_TRANSPOSE_2D: |
2850 | 0 | { |
2851 | 0 | const int64_t ne00 = node->src[0]->ne[0]; // W |
2852 | 0 | const int64_t ne01 = node->src[0]->ne[1]; // H |
2853 | 0 | const int64_t ne02 = node->src[0]->ne[2]; // Channels Out |
2854 | 0 | const int64_t ne03 = node->src[0]->ne[3]; // Channels In |
2855 | |
2856 | 0 | const int64_t ne10 = node->src[1]->ne[0]; // W |
2857 | 0 | const int64_t ne11 = node->src[1]->ne[1]; // H |
2858 | 0 | const int64_t ne12 = node->src[1]->ne[2]; // Channels In |
2859 | |
2860 | 0 | cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03; |
2861 | 0 | cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12; |
2862 | 0 | } break; |
2863 | 0 | case GGML_OP_TOP_K: |
2864 | 0 | { |
2865 | 0 | cur += sizeof(int32_t)*node->src[0]->ne[0]*n_tasks; |
2866 | 0 | } break; |
2867 | 0 | case GGML_OP_FLASH_ATTN_EXT: |
2868 | 0 | { |
2869 | 0 | const int64_t ne10 = node->src[1]->ne[0]; // DK |
2870 | 0 | const int64_t ne20 = node->src[2]->ne[0]; // DV |
2871 | |
2872 | 0 | cur = sizeof(float)*(1*ne10 + 2*ne20)*n_tasks; // 1x head size K + 2x head size V (per thread) |
2873 | 0 | } break; |
2874 | 0 | case GGML_OP_FLASH_ATTN_BACK: |
2875 | 0 | { |
2876 | 0 | const int64_t D = node->src[0]->ne[0]; |
2877 | 0 | const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL); |
2878 | 0 | const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back |
2879 | 0 | if (node->src[1]->type == GGML_TYPE_F32) { |
2880 | 0 | cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) |
2881 | 0 | cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 |
2882 | 0 | } else if (node->src[1]->type == GGML_TYPE_F16) { |
2883 | 0 | cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) |
2884 | 0 | cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 |
2885 | 0 | } else if (node->src[1]->type == GGML_TYPE_BF16) { |
2886 | 0 | cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1) |
2887 | 0 | cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2 |
2888 | 0 | } |
2889 | 0 | } break; |
2890 | | |
2891 | 0 | case GGML_OP_CROSS_ENTROPY_LOSS: |
2892 | 0 | { |
2893 | 0 | cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks); |
2894 | 0 | } break; |
2895 | 0 | case GGML_OP_COUNT: |
2896 | 0 | { |
2897 | 0 | GGML_ABORT("fatal error"); |
2898 | 0 | } |
2899 | 0 | default: |
2900 | 0 | break; |
2901 | 0 | } |
2902 | 0 | } |
2903 | | |
2904 | 0 | work_size = MAX(work_size, cur); |
2905 | 0 | } |
2906 | | |
2907 | 0 | if (work_size > 0) { |
2908 | 0 | work_size += CACHE_LINE_SIZE*(n_threads); |
2909 | 0 | } |
2910 | |
2911 | 0 | cplan.threadpool = threadpool; |
2912 | 0 | cplan.n_threads = MIN(max_tasks, n_threads); |
2913 | 0 | cplan.work_size = work_size; |
2914 | 0 | cplan.work_data = NULL; |
2915 | |
2916 | 0 | return cplan; |
2917 | 0 | } |
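// A sketch of the typical call sequence for the planner above, assuming the
// public ggml_graph_compute() entry point declared in ggml-cpu.h; `graph` is
// assumed to be a fully constructed cgraph (construction omitted).
#include <stdint.h>
#include <stdlib.h>
#include "ggml-cpu.h"

enum ggml_status compute_with_plan(struct ggml_cgraph * graph, int n_threads) {
    struct ggml_cplan cplan = ggml_graph_plan(graph, n_threads, /*threadpool=*/ NULL);

    uint8_t * work = NULL;
    if (cplan.work_size > 0) {
        work = malloc(cplan.work_size); // already includes the CACHE_LINE_SIZE*n_threads padding
        if (!work) {
            return GGML_STATUS_ALLOC_FAILED;
        }
        cplan.work_data = work;
    }

    enum ggml_status status = ggml_graph_compute(graph, &cplan);

    free(work);
    return status;
}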
2918 | | |
2919 | 0 | static thread_ret_t ggml_graph_compute_thread(void * data) { |
2920 | 0 | struct ggml_compute_state * state = (struct ggml_compute_state *) data; |
2921 | 0 | struct ggml_threadpool * tp = state->threadpool; |
2922 | |
2923 | 0 | const struct ggml_cgraph * cgraph = tp->cgraph; |
2924 | 0 | const struct ggml_cplan * cplan = tp->cplan; |
2925 | |
2926 | 0 | set_numa_thread_affinity(state->ith); |
2927 | |
2928 | 0 | struct ggml_compute_params params = { |
2929 | 0 | /*.ith =*/ state->ith, |
2930 | 0 | /*.nth =*/ atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK, |
2931 | 0 | /*.wsize =*/ cplan->work_size, |
2932 | 0 | /*.wdata =*/ cplan->work_data, |
2933 | 0 | /*.threadpool=*/ tp, |
2934 | 0 | }; |
2935 | |
2936 | 0 | GGML_PRINT_DEBUG("thread #%d compute-start cplan %p last-graph %d \n", state->ith, cplan, state->last_graph); |
2937 | |
2938 | 0 | for (int node_n = 0; node_n < cgraph->n_nodes && atomic_load_explicit(&tp->abort, memory_order_relaxed) != node_n; node_n++) { |
2939 | 0 | struct ggml_tensor * node = cgraph->nodes[node_n]; |
2940 | |
2941 | 0 | if (ggml_op_is_empty(node->op)) { |
2942 | | // skip NOPs |
2943 | 0 | continue; |
2944 | 0 | } |
2945 | | |
2946 | 0 | ggml_compute_forward(&params, node);
2947 | |
2948 | 0 | if (state->ith == 0 && cplan->abort_callback && |
2949 | 0 | cplan->abort_callback(cplan->abort_callback_data)) { |
2950 | 0 | atomic_store_explicit(&tp->abort, node_n + 1, memory_order_relaxed); |
2951 | 0 | tp->ec = GGML_STATUS_ABORTED; |
2952 | 0 | } |
2953 | |
2954 | 0 | if (node_n + 1 < cgraph->n_nodes) { |
2955 | 0 | ggml_barrier(state->threadpool); |
2956 | 0 | } |
2957 | 0 | } |
2958 | |
2959 | 0 | GGML_PRINT_DEBUG("thread #%d compute-done cplan %p last-graph %d \n", state->ith, cplan, state->last_graph); |
2960 | |
2961 | 0 | ggml_barrier(state->threadpool); |
2962 | |
2963 | 0 | return 0; |
2964 | 0 | } |
2965 | | |
2966 | | #ifndef GGML_USE_OPENMP |
2967 | | |
2968 | | // check if thread is ready to proceed (exit from polling or sleeping) |
2969 | | // returns true if loops should exit, sets state->pending to indicate new work |
2970 | 0 | static inline bool ggml_graph_compute_thread_ready(struct ggml_compute_state * state) { |
2971 | 0 | struct ggml_threadpool * threadpool = state->threadpool; |
2972 | |
2973 | 0 | if (state->pending || threadpool->stop || threadpool->pause) { return true; } |
2974 | | |
2975 | | // check for new graph/work |
2976 | 0 | int n_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed); |
2977 | 0 | int n_threads = n_graph & GGML_THREADPOOL_N_THREADS_MASK; |
2978 | 0 | if (n_graph != state->last_graph) { |
2979 | 0 | state->pending = (state->ith < n_threads); |
2980 | 0 | state->last_graph = n_graph; |
2981 | 0 | return true; |
2982 | 0 | } |
2983 | | |
2984 | 0 | return false; |
2985 | 0 | } |
2986 | | |
2987 | | // sync thread state after polling |
2988 | 0 | static inline void ggml_graph_compute_thread_sync(struct ggml_compute_state * state) { |
2989 | | // TSAN doesn't support standalone fence yet, we use a dummy read-modify-write instead |
2990 | | #ifdef GGML_TSAN_ENABLED |
2991 | | atomic_fetch_add_explicit(&state->threadpool->n_graph, 0, memory_order_seq_cst); |
2992 | | #else |
2993 | 0 | atomic_thread_fence(memory_order_seq_cst); |
2994 | 0 | #endif |
2995 | 0 | UNUSED(state); |
2996 | 0 | } |
2997 | | |
2998 | 0 | static inline bool ggml_graph_compute_poll_for_work(struct ggml_compute_state * state) { |
2999 | 0 | struct ggml_threadpool * threadpool = state->threadpool; |
3000 | | |
3001 | | // This makes 0 ... 100 a decent range for the polling level across modern processors.
3002 | | // Perhaps it could be adjusted dynamically based on load.
3003 | 0 | const uint64_t n_rounds = 1024UL * 128 * threadpool->poll; |
3004 | |
3005 | 0 | for (uint64_t i=0; !ggml_graph_compute_thread_ready(state) && i < n_rounds; i++) { |
3006 | | // No new work. Keep polling. |
3007 | 0 | ggml_thread_cpu_relax(); |
3008 | 0 | } |
3009 | |
3010 | 0 | return state->pending; |
3011 | 0 | } |
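// Worked example of the budget above: with threadpool->poll = 50 (mid-range),
// n_rounds = 1024 * 128 * 50 = 6,553,600 relax iterations before the caller
// falls back to the mutex/condvar path; with poll = 0 the loop body never runs
// and the thread goes straight to sleeping on the condition variable.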
3012 | | |
3013 | 0 | static inline bool ggml_graph_compute_check_for_work(struct ggml_compute_state * state) { |
3014 | 0 | struct ggml_threadpool * threadpool = state->threadpool; |
3015 | |
3016 | 0 | if (ggml_graph_compute_poll_for_work(state)) { |
3017 | 0 | ggml_graph_compute_thread_sync(state); |
3018 | 0 | return state->pending; |
3019 | 0 | } |
3020 | | |
3021 | 0 | ggml_mutex_lock_shared(&threadpool->mutex); |
3022 | 0 | while (!ggml_graph_compute_thread_ready(state)) { |
3023 | | // No new work. Wait for the signal. |
3024 | 0 | GGML_PRINT_DEBUG("thread #%d waiting for work (sleeping)\n", state->ith); |
3025 | 0 | ggml_cond_wait(&threadpool->cond, &threadpool->mutex); |
3026 | 0 | } |
3027 | 0 | ggml_mutex_unlock_shared(&threadpool->mutex); |
3028 | |
3029 | 0 | return state->pending; |
3030 | 0 | } |
3031 | | |
3032 | 0 | static thread_ret_t ggml_graph_compute_secondary_thread(void* data) { |
3033 | 0 | struct ggml_compute_state * state = (struct ggml_compute_state *) data; |
3034 | 0 | struct ggml_threadpool * threadpool = state->threadpool; |
3035 | |
3036 | 0 | ggml_thread_apply_priority(threadpool->prio); |
3037 | 0 | if (ggml_thread_cpumask_is_valid(state->cpumask)) { |
3038 | 0 | ggml_thread_apply_affinity(state->cpumask); |
3039 | 0 | } |
3040 | |
3041 | 0 | while (true) { |
3042 | | // Check if we need to sleep |
3043 | 0 | while (threadpool->pause) { |
3044 | 0 | GGML_PRINT_DEBUG("thread #%d inside pause loop\n", state->ith); |
3045 | 0 | ggml_mutex_lock_shared(&threadpool->mutex); |
3046 | 0 | if (threadpool->pause) { |
3047 | 0 | ggml_cond_wait(&threadpool->cond, &threadpool->mutex); |
3048 | 0 | } |
3049 | 0 | GGML_PRINT_DEBUG("thread #%d resuming after wait\n", state->ith); |
3050 | 0 | ggml_mutex_unlock_shared(&threadpool->mutex); |
3051 | 0 | } |
3052 | | |
3053 | | // This needs to be re-checked after the cond_wait
3054 | 0 | if (threadpool->stop) break; |
3055 | | |
3056 | | // Check if there is new work |
3057 | | // The main thread is the only one that can dispatch new work |
3058 | | |
3059 | 0 | ggml_graph_compute_check_for_work(state); |
3060 | 0 | if (state->pending) { |
3061 | 0 | state->pending = false; |
3062 | 0 | ggml_graph_compute_thread(state); |
3063 | 0 | } |
3064 | 0 | } |
3065 | |
3066 | 0 | return (thread_ret_t) 0; |
3067 | 0 | } |
3068 | | |
3069 | | // Start processing a new graph
3070 | | static void ggml_graph_compute_kickoff(struct ggml_threadpool * threadpool, int n_threads) |
3071 | 0 | { |
3072 | | // Always take the mutex here because the worker threads are doing hybrid poll/wait |
3073 | |
3074 | 0 | ggml_mutex_lock(&threadpool->mutex); |
3075 | | |
3076 | | // Update the number of active threads and the graph count |
3077 | 0 | int n_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed) >> GGML_THREADPOOL_N_THREADS_BITS; |
3078 | 0 | n_graph = ((n_graph + 1) << GGML_THREADPOOL_N_THREADS_BITS) | (n_threads & GGML_THREADPOOL_N_THREADS_MASK); |
3079 | |
3080 | 0 | GGML_PRINT_DEBUG("compute-kickoff: n_threads %d n_graph %d\n", n_threads, n_graph); |
3081 | | |
3082 | | // Indicate the graph is ready to be processed |
3083 | | // We need the full seq-cst fence here because of the polling threads (used in thread_sync) |
3084 | 0 | atomic_store_explicit(&threadpool->n_graph, n_graph, memory_order_seq_cst); |
3085 | |
3086 | 0 | if (threadpool->pause) { |
3087 | | // Update main thread prio and affinity to match the threadpool settings |
3088 | 0 | ggml_thread_apply_priority(threadpool->prio); |
3089 | 0 | if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) { |
3090 | 0 | ggml_thread_apply_affinity(threadpool->workers[0].cpumask); |
3091 | 0 | } |
3092 | | |
3093 | | // resume does cond broadcast |
3094 | 0 | ggml_threadpool_resume_locked(threadpool); |
3095 | 0 | } else { |
3096 | 0 | ggml_cond_broadcast(&threadpool->cond); |
3097 | 0 | } |
3098 | |
3099 | 0 | ggml_mutex_unlock(&threadpool->mutex); |
3100 | 0 | } |
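The seq-cst store is the publication point of the whole protocol: workers read n_graph with a relaxed load while polling, and the seq-cst fence in ggml_graph_compute_thread_sync (or the dummy read-modify-write under TSAN) upgrades that observation so that everything written before the store, the cgraph/cplan fields in particular, is visible afterwards. A minimal sketch of the store/fence pairing, with graph_data standing in for the real fields:

    #include <stdatomic.h>

    static atomic_int n_graph = 0;
    static int graph_data = 0;  // stands in for threadpool->cgraph / cplan

    void kickoff(void) {        // main thread
        graph_data = 42;        // prepare the work first...
        atomic_store_explicit(&n_graph, 1, memory_order_seq_cst);  // ...then publish
    }

    int poll_once(void) {       // worker thread
        if (atomic_load_explicit(&n_graph, memory_order_relaxed) != 0) {
            // the fence synchronizes with the store above, so the write
            // to graph_data is guaranteed to be visible after this point
            atomic_thread_fence(memory_order_seq_cst);
            return graph_data;
        }
        return -1;              // nothing published yet
    }

    int main(void) {
        kickoff();
        return poll_once() == 42 ? 0 : 1;  // single-threaded demo of the handshake
    }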
3101 | | |
3102 | | #endif // GGML_USE_OPENMP |
3103 | | |
3104 | | static struct ggml_threadpool * ggml_threadpool_new_impl( |
3105 | | struct ggml_threadpool_params * tpp, |
3106 | | struct ggml_cgraph * cgraph, |
3107 | 0 | struct ggml_cplan * cplan) { |
3108 | |
3109 | 0 | struct ggml_threadpool * threadpool = |
3110 | 0 | ggml_aligned_malloc(sizeof(struct ggml_threadpool)); |
3111 | 0 | { |
3112 | 0 | threadpool->cgraph = cgraph; |
3113 | 0 | threadpool->cplan = cplan; |
3114 | 0 | threadpool->n_graph = 0; |
3115 | 0 | threadpool->n_barrier = 0; |
3116 | 0 | threadpool->n_barrier_passed = 0; |
3117 | 0 | threadpool->current_chunk = 0; |
3118 | 0 | threadpool->stop = false; |
3119 | 0 | threadpool->pause = tpp->paused; |
3120 | 0 | threadpool->abort = -1; |
3121 | 0 | threadpool->workers = NULL; |
3122 | 0 | threadpool->n_threads = tpp->n_threads; |
3123 | 0 | threadpool->poll = tpp->poll; |
3124 | 0 | threadpool->prio = tpp->prio; |
3125 | 0 | threadpool->ec = GGML_STATUS_SUCCESS; |
3126 | 0 | } |
3127 | | |
3128 | | // Allocate and init the worker state
3129 | 0 | const size_t workers_size = sizeof(struct ggml_compute_state) * tpp->n_threads; |
3130 | 0 | struct ggml_compute_state * workers = ggml_aligned_malloc(workers_size); |
3131 | |
3132 | 0 | memset(workers, 0, workers_size); |
3133 | 0 | for (int j = 0; j < tpp->n_threads; j++) { |
3134 | 0 | workers[j].threadpool = threadpool; |
3135 | 0 | workers[j].ith = j; |
3136 | 0 | } |
3137 | |
3138 | 0 | threadpool->workers = workers; |
3139 | |
3140 | | #ifdef GGML_USE_OPENMP |
3141 | | int32_t cpumask_iter = 0; |
3142 | | |
3143 | | // Compute CPU masks for each thread |
3144 | | for (int j = 0; j < tpp->n_threads; j++) { |
3145 | | ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter); |
3146 | | } |
3147 | | #else // GGML_USE_OPENMP |
3148 | 0 | ggml_mutex_init(&threadpool->mutex); |
3149 | 0 | ggml_cond_init(&threadpool->cond); |
3150 | | |
3151 | | // Spin up the threads for all workers, and update CPU placements.
3152 | | // Place the main thread last (towards the higher-numbered CPU cores).
3153 | |
3154 | 0 | int32_t cpumask_iter = 0; |
3155 | |
3156 | 0 | for (int j = 1; j < tpp->n_threads; j++) { |
3157 | 0 | ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter); |
3158 | |
3159 | 0 | int32_t rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_secondary_thread, &workers[j]); |
3160 | 0 | GGML_ASSERT(rc == 0); |
3161 | 0 | } |
3162 | |
3163 | 0 | ggml_thread_cpumask_next(tpp->cpumask, workers[0].cpumask, tpp->strict_cpu, &cpumask_iter); |
3164 | |
3165 | 0 | if (!threadpool->pause) { |
3166 | | // Update main thread prio and affinity at the start; otherwise we'll do it in resume
3167 | 0 | ggml_thread_apply_priority(threadpool->prio); |
3168 | 0 | if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) { |
3169 | 0 | ggml_thread_apply_affinity(threadpool->workers[0].cpumask); |
3170 | 0 | } |
3171 | 0 | } |
3172 | 0 | #endif // GGML_USE_OPENMP |
3173 | |
3174 | 0 | return threadpool; |
3175 | 0 | } |
3176 | | |
3177 | 0 | struct ggml_threadpool * ggml_threadpool_new(struct ggml_threadpool_params * tpp) { |
3178 | 0 | return ggml_threadpool_new_impl(tpp, NULL, NULL); |
3179 | 0 | } |
3180 | | |
3181 | 0 | enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) { |
3182 | 0 | ggml_cpu_init(); |
3183 | |
3184 | 0 | GGML_ASSERT(cplan); |
3185 | 0 | GGML_ASSERT(cplan->n_threads > 0); |
3186 | 0 | GGML_ASSERT(cplan->work_size == 0 || cplan->work_data != NULL); |
3187 | |
3188 | 0 | int n_threads = cplan->n_threads; |
3189 | 0 | struct ggml_threadpool * threadpool = cplan->threadpool; |
3190 | |
3191 | 0 | bool disposable_threadpool = false; |
3192 | |
3193 | 0 | if (threadpool == NULL) { |
3194 | | //GGML_PRINT_DEBUG("Threadpool is not specified. Will create a disposable threadpool: n_threads %d\n", n_threads);
3195 | 0 | disposable_threadpool = true; |
3196 | |
3197 | 0 | struct ggml_threadpool_params ttp = ggml_threadpool_params_default(n_threads); |
3198 | 0 | threadpool = ggml_threadpool_new_impl(&ttp, cgraph, cplan); |
3199 | 0 | } else { |
3200 | | // Reset the parameters that need refreshing for the new graph
3201 | | // No worker threads should be accessing the parameters below at this stage
3202 | 0 | threadpool->cgraph = cgraph; |
3203 | 0 | threadpool->cplan = cplan; |
3204 | 0 | threadpool->current_chunk = 0; |
3205 | 0 | threadpool->abort = -1; |
3206 | 0 | threadpool->ec = GGML_STATUS_SUCCESS; |
3207 | 0 | } |
3208 | |
3209 | | #ifdef GGML_USE_OPENMP |
3210 | | if (n_threads > 1) { |
3211 | | #pragma omp parallel num_threads(n_threads) |
3212 | | { |
3213 | | #pragma omp single |
3214 | | { |
3215 | | // update n_threads to the actual number of threads that OpenMP provided
3216 | | n_threads = omp_get_num_threads(); |
3217 | | atomic_store_explicit(&threadpool->n_graph, n_threads, memory_order_relaxed); |
3218 | | } |
3219 | | |
3220 | | // Apply thread CPU mask and priority |
3221 | | int ith = omp_get_thread_num(); |
3222 | | |
3223 | | ggml_thread_apply_priority(threadpool->prio); |
3224 | | if (ggml_thread_cpumask_is_valid(threadpool->workers[ith].cpumask)) { |
3225 | | ggml_thread_apply_affinity(threadpool->workers[ith].cpumask); |
3226 | | } |
3227 | | ggml_graph_compute_thread(&threadpool->workers[ith]); |
3228 | | } |
3229 | | } else { |
3230 | | atomic_store_explicit(&threadpool->n_graph, 1, memory_order_relaxed); |
3231 | | ggml_graph_compute_thread(&threadpool->workers[0]); |
3232 | | } |
3233 | | #else |
3234 | 0 | if (n_threads > threadpool->n_threads) { |
3235 | 0 | GGML_LOG_WARN("cplan requested more threads (%d) than available (%d)\n", n_threads, threadpool->n_threads); |
3236 | 0 | n_threads = threadpool->n_threads; |
3237 | 0 | } |
3238 | | |
3239 | | // Kick all threads to start the new graph |
3240 | 0 | ggml_graph_compute_kickoff(threadpool, n_threads); |
3241 | | |
3242 | | // The main thread acts as a worker thread too
3243 | 0 | ggml_graph_compute_thread(&threadpool->workers[0]); |
3244 | 0 | #endif |
3245 | | |
3246 | | // don't leave affinity set on the main thread |
3247 | 0 | clear_numa_thread_affinity(); |
3248 | |
3249 | 0 | enum ggml_status ret = threadpool->ec; |
3250 | |
3251 | 0 | if (disposable_threadpool) { |
3252 | 0 | ggml_threadpool_free(threadpool); |
3253 | 0 | } |
3254 | |
3255 | 0 | return ret; |
3256 | 0 | } |
3257 | | |
3258 | 0 | enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) { |
3259 | 0 | struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads, NULL); |
3260 | |
3261 | 0 | cplan.work_data = (uint8_t *)ggml_new_buffer(ctx, cplan.work_size); |
3262 | |
3263 | 0 | return ggml_graph_compute(cgraph, &cplan); |
3264 | 0 | } |
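Both entry points fall back to a disposable threadpool when none is supplied, which is convenient but pays the thread-creation cost on every call. For repeated evaluation the pool is normally created once and reused; a hedged usage sketch (not from this file; error handling omitted):

    #include <stdlib.h>
    #include "ggml.h"
    #include "ggml-cpu.h"

    enum ggml_status run_graph(struct ggml_cgraph * gf) {
        // create a persistent 4-thread pool instead of a disposable one
        struct ggml_threadpool_params tpp = ggml_threadpool_params_default(4);
        struct ggml_threadpool * tp = ggml_threadpool_new(&tpp);

        // the plan records the threadpool and the scratch size the graph needs
        struct ggml_cplan cplan = ggml_graph_plan(gf, 4, tp);
        cplan.work_data = malloc(cplan.work_size);  // caller owns the work buffer

        enum ggml_status st = ggml_graph_compute(gf, &cplan);

        free(cplan.work_data);
        ggml_threadpool_free(tp);
        return st;
    }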
3265 | | |
3266 | 0 | void ggml_cpu_fp32_to_fp32(const float * x, float * y, int64_t n) { |
3267 | 0 | memcpy(y, x, n * sizeof(float)); |
3268 | 0 | } |
3269 | | |
3270 | 0 | void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) { |
3271 | 0 | int64_t i = 0; |
3272 | 0 | #if defined(__F16C__) |
3273 | | #if defined(__AVX512F__) |
3274 | | for (; i + 15 < n; i += 16) { |
3275 | | __m512 x_vec = _mm512_loadu_ps(x + i); |
3276 | | __m256i y_vec = _mm512_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); |
3277 | | _mm256_storeu_si256((__m256i *)(y + i), y_vec); |
3278 | | } |
3279 | | #endif |
3280 | 0 | for (; i + 7 < n; i += 8) { |
3281 | 0 | __m256 x_vec = _mm256_loadu_ps(x + i); |
3282 | 0 | __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); |
3283 | 0 | _mm_storeu_si128((__m128i *)(y + i), y_vec); |
3284 | 0 | } |
3285 | 0 | for (; i + 3 < n; i += 4) { |
3286 | 0 | __m128 x_vec = _mm_loadu_ps(x + i); |
3287 | 0 | __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT); |
3288 | 0 | _mm_storel_epi64((__m128i *)(y + i), y_vec); |
3289 | 0 | } |
3290 | | #elif defined(__riscv_zvfh) |
3291 | | for (int vl; i < n; i += vl) { |
3292 | | vl = __riscv_vsetvl_e32m2(n - i); |
3293 | | vfloat32m2_t vx = __riscv_vle32_v_f32m2(&x[i], vl); |
3294 | | vfloat16m1_t vy = __riscv_vfncvt_f_f_w_f16m1(vx, vl); |
3295 | | __riscv_vse16_v_f16m1((_Float16 *)&y[i], vy, vl); |
3296 | | } |
3297 | | #endif |
3298 | 0 | for (; i < n; ++i) { |
3299 | 0 | y[i] = GGML_CPU_FP32_TO_FP16(x[i]); |
3300 | 0 | } |
3301 | 0 | } |
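The pattern above is a width cascade: convert with the widest SIMD chunk available (16 lanes under AVX-512, then 8, then 4 under F16C), and let a scalar loop mop up whatever remainder is left in i. The same loop structure with the intrinsics replaced by trivial scalar bodies, as an illustrative sketch:

    #include <stddef.h>

    // illustrative only: same shape as ggml_cpu_fp32_to_fp16, minus the SIMD
    void scale2(const float * x, float * y, size_t n) {
        size_t i = 0;
        for (; i + 7 < n; i += 8) {   // stands in for an 8-wide SIMD body
            for (size_t k = 0; k < 8; k++) {
                y[i + k] = 2.0f * x[i + k];
            }
        }
        for (; i < n; ++i) {          // scalar tail handles the n % 8 leftovers
            y[i] = 2.0f * x[i];
        }
    }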
3302 | | |
3303 | 0 | void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) { |
3304 | 0 | int64_t i = 0; |
3305 | 0 | #if defined(__F16C__) |
3306 | | #if defined(__AVX512F__) |
3307 | | for (; i + 15 < n; i += 16) { |
3308 | | __m256i x_vec = _mm256_loadu_si256((const __m256i *)(x + i)); |
3309 | | __m512 y_vec = _mm512_cvtph_ps(x_vec); |
3310 | | _mm512_storeu_ps(y + i, y_vec); |
3311 | | } |
3312 | | #endif |
3313 | 0 | for (; i + 7 < n; i += 8) { |
3314 | 0 | __m128i x_vec = _mm_loadu_si128((const __m128i *)(x + i)); |
3315 | 0 | __m256 y_vec = _mm256_cvtph_ps(x_vec); |
3316 | 0 | _mm256_storeu_ps(y + i, y_vec); |
3317 | 0 | } |
3318 | 0 | for (; i + 3 < n; i += 4) { |
3319 | 0 | __m128i x_vec = _mm_loadl_epi64((const __m128i *)(x + i)); |
3320 | 0 | __m128 y_vec = _mm_cvtph_ps(x_vec); |
3321 | 0 | _mm_storeu_ps(y + i, y_vec); |
3322 | 0 | } |
3323 | |
3324 | | #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfhmin) |
3325 | | // calculate step size |
3326 | | const int epr = __riscv_vsetvlmax_e16m2(); |
3327 | | const int step = epr * 2; |
3328 | | const int np = (n & ~(step - 1)); |
3329 | | |
3330 | | // unroll by 2 |
3331 | | for (; i < np; i += step) { |
3332 | | vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16*)x + i, epr); |
3333 | | vfloat32m4_t ay0 = __riscv_vfwcvt_f_f_v_f32m4(ax0, epr); |
3334 | | __riscv_vse32_v_f32m4(y + i, ay0, epr); |
3335 | | |
3336 | | vfloat16m2_t ax1 = __riscv_vle16_v_f16m2((const _Float16*)x + i + epr, epr); |
3337 | | vfloat32m4_t ay1 = __riscv_vfwcvt_f_f_v_f32m4(ax1, epr); |
3338 | | __riscv_vse32_v_f32m4(y + i + epr, ay1, epr); |
3339 | | } |
3340 | | |
3341 | | // leftovers |
3342 | | int vl; |
3343 | | for (i = np; i < n; i += vl) { |
3344 | | vl = __riscv_vsetvl_e16m2(n - i); |
3345 | | vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16*)x + i, vl); |
3346 | | vfloat32m4_t ay0 = __riscv_vfwcvt_f_f_v_f32m4(ax0, vl); |
3347 | | __riscv_vse32_v_f32m4(y + i, ay0, vl); |
3348 | | } |
3349 | | |
3350 | | #endif |
3351 | |
3352 | 0 | for (; i < n; ++i) { |
3353 | 0 | y[i] = GGML_CPU_FP16_TO_FP32(x[i]); |
3354 | 0 | } |
3355 | 0 | } |
3356 | | |
3357 | 0 | void ggml_cpu_fp32_to_bf16(const float * x, ggml_bf16_t * y, int64_t n) { |
3358 | 0 | int64_t i = 0; |
3359 | 0 | for (; i < n; ++i) { |
3360 | 0 | y[i] = GGML_FP32_TO_BF16(x[i]); |
3361 | 0 | } |
3362 | 0 | } |
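This conversion has no SIMD path because bf16 is deliberately cheap: it keeps the top 16 bits of the IEEE-754 single, preserving the sign, the full 8-bit exponent, and the 7 highest mantissa bits. GGML_FP32_TO_BF16 additionally rounds to nearest-even and special-cases NaN; the truncating variant below is a sketch of the core idea only, not the exact macro:

    #include <stdint.h>
    #include <string.h>

    static uint16_t fp32_to_bf16_trunc(float f) {
        uint32_t u;
        memcpy(&u, &f, sizeof(u));  // type-pun without UB
        return (uint16_t)(u >> 16); // keep sign + exponent + top 7 mantissa bits
    }

    int main(void) {
        return fp32_to_bf16_trunc(1.0f) == 0x3F80 ? 0 : 1;  // 1.0f -> 0x3F80
    }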
3363 | | |
3364 | 0 | void ggml_cpu_fp32_to_i32(const float * x, int32_t * y, int64_t n) { |
3365 | 0 | int64_t i = 0; |
3366 | 0 | for (; i < n; ++i) { |
3367 | 0 | y[i] = x[i]; |
3368 | 0 | } |
3369 | 0 | } |
3370 | | |
3371 | 0 | void ggml_cpu_bf16_to_fp32(const ggml_bf16_t * x, float * y, int64_t n) { |
3372 | 0 | int64_t i = 0; |
3373 | 0 | #if defined(__AVX2__) |
3374 | | #if defined(__AVX512F__) |
3375 | | for (; i + 15 < n; i += 16) { |
3376 | | _mm512_storeu_ps(y + i, |
3377 | | _mm512_castsi512_ps( |
3378 | | _mm512_slli_epi32( |
3379 | | _mm512_cvtepu16_epi32( |
3380 | | _mm256_loadu_si256( |
3381 | | (const __m256i *)(x + i))), |
3382 | | 16))); |
3383 | | } |
3384 | | #endif |
3385 | 0 | for (; i + 7 < n; i += 8) { |
3386 | 0 | _mm256_storeu_ps(y + i, |
3387 | 0 | _mm256_castsi256_ps( |
3388 | 0 | _mm256_slli_epi32( |
3389 | 0 | _mm256_cvtepu16_epi32( |
3390 | 0 | _mm_loadu_si128( |
3391 | 0 | (const __m128i *)(x + i))), |
3392 | 0 | 16))); |
3393 | 0 | } |
3394 | | #elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfbfmin) |
3395 | | // calculate step size |
3396 | | const int epr = __riscv_vsetvlmax_e16m2(); |
3397 | | const int step = epr * 2; |
3398 | | const int np = (n & ~(step - 1)); |
3399 | | |
3400 | | // unroll by 2 |
3401 | | for (; i < np; i += step) { |
3402 | | vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16*)x + i, epr); |
3403 | | vfloat32m4_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax0, epr); |
3404 | | __riscv_vse32_v_f32m4(y + i, ay0, epr); |
3405 | | |
3406 | | vbfloat16m2_t ax1 = __riscv_vle16_v_bf16m2((const __bf16*)x + i + epr, epr); |
3407 | | vfloat32m4_t ay1 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax1, epr); |
3408 | | __riscv_vse32_v_f32m4(y + i + epr, ay1, epr); |
3409 | | } |
3410 | | |
3411 | | // leftovers |
3412 | | int vl; |
3413 | | for (i = np; i < n; i += vl) { |
3414 | | vl = __riscv_vsetvl_e16m2(n - i); |
3415 | | vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16*)x + i, vl); |
3416 | | vfloat32m4_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax0, vl); |
3417 | | __riscv_vse32_v_f32m4(y + i, ay0, vl); |
3418 | | } |
3419 | | #endif |
3420 | 0 | for (; i < n; i++) { |
3421 | 0 | y[i] = GGML_BF16_TO_FP32(x[i]); |
3422 | 0 | } |
3423 | 0 | } |
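The AVX2 body above shows the inverse trick: widening bf16 to fp32 is a zero-extend followed by a 16-bit left shift, done 8 or 16 lanes at a time. A scalar mirror of the same operation, as a sketch:

    #include <stdint.h>
    #include <string.h>

    static float bf16_to_fp32_scalar(uint16_t h) {
        uint32_t u = (uint32_t) h << 16;  // the per-lane slli_epi32(..., 16)
        float f;
        memcpy(&f, &u, sizeof(f));
        return f;
    }

    int main(void) {
        return bf16_to_fp32_scalar(0x3F80) == 1.0f ? 0 : 1;  // 0x3F80 widens to 1.0f
    }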
3424 | | |
3425 | 0 | int ggml_cpu_has_avx(void) { |
3426 | 0 | #if defined(__AVX__) |
3427 | 0 | return 1; |
3428 | | #else |
3429 | | return 0; |
3430 | | #endif |
3431 | 0 | } |
3432 | | |
3433 | 0 | int ggml_cpu_has_avx_vnni(void) { |
3434 | | #if defined(__AVXVNNI__) |
3435 | | return 1; |
3436 | | #else |
3437 | 0 | return 0; |
3438 | 0 | #endif |
3439 | 0 | } |
3440 | | |
3441 | 0 | int ggml_cpu_has_avx2(void) { |
3442 | 0 | #if defined(__AVX2__) |
3443 | 0 | return 1; |
3444 | | #else |
3445 | | return 0; |
3446 | | #endif |
3447 | 0 | } |
3448 | | |
3449 | 0 | int ggml_cpu_has_avx512(void) { |
3450 | | #if defined(__AVX512F__) |
3451 | | return 1; |
3452 | | #else |
3453 | 0 | return 0; |
3454 | 0 | #endif |
3455 | 0 | } |
3456 | | |
3457 | 0 | int ggml_cpu_has_avx512_vbmi(void) { |
3458 | | #if defined(__AVX512VBMI__) |
3459 | | return 1; |
3460 | | #else |
3461 | 0 | return 0; |
3462 | 0 | #endif |
3463 | 0 | } |
3464 | | |
3465 | 0 | int ggml_cpu_has_avx512_vnni(void) { |
3466 | | #if defined(__AVX512VNNI__) |
3467 | | return 1; |
3468 | | #else |
3469 | 0 | return 0; |
3470 | 0 | #endif |
3471 | 0 | } |
3472 | | |
3473 | 0 | int ggml_cpu_has_avx512_bf16(void) { |
3474 | | #if defined(__AVX512BF16__) |
3475 | | return 1; |
3476 | | #else |
3477 | 0 | return 0; |
3478 | 0 | #endif |
3479 | 0 | } |
3480 | | |
3481 | 0 | int ggml_cpu_has_amx_int8(void) { |
3482 | | #if defined(__AMX_INT8__) |
3483 | | return 1; |
3484 | | #else |
3485 | 0 | return 0; |
3486 | 0 | #endif |
3487 | 0 | } |
3488 | | |
3489 | 0 | int ggml_cpu_has_bmi2(void) { |
3490 | 0 | #if defined(__BMI2__) |
3491 | 0 | return 1; |
3492 | | #else |
3493 | | return 0; |
3494 | | #endif |
3495 | 0 | } |
3496 | | |
3497 | 0 | int ggml_cpu_has_fma(void) { |
3498 | 0 | #if defined(__FMA__) |
3499 | 0 | return 1; |
3500 | | #else |
3501 | | return 0; |
3502 | | #endif |
3503 | 0 | } |
3504 | | |
3505 | 0 | int ggml_cpu_has_arm_fma(void) { |
3506 | | #if defined(__ARM_FEATURE_FMA) |
3507 | | return 1; |
3508 | | #else |
3509 | 0 | return 0; |
3510 | 0 | #endif |
3511 | 0 | } |
3512 | | |
3513 | 0 | int ggml_cpu_has_riscv_v(void) { |
3514 | | #if defined(__riscv_v_intrinsic) |
3515 | | return 1; |
3516 | | #else |
3517 | 0 | return 0; |
3518 | 0 | #endif |
3519 | 0 | } |
3520 | | |
3521 | 0 | int ggml_cpu_get_rvv_vlen(void) { |
3522 | | #if defined(__riscv) && defined(__riscv_v_intrinsic) |
3523 | | return ggml_riscv_arch_features.rvv_vlen; |
3524 | | #else |
3525 | 0 | return 0; |
3526 | 0 | #endif |
3527 | 0 | } |
3528 | | |
3529 | 0 | int ggml_cpu_has_f16c(void) { |
3530 | 0 | #if defined(__F16C__) |
3531 | 0 | return 1; |
3532 | | #else |
3533 | | return 0; |
3534 | | #endif |
3535 | 0 | } |
3536 | | |
3537 | 0 | int ggml_cpu_has_fp16_va(void) { |
3538 | | #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) |
3539 | | return 1; |
3540 | | #else |
3541 | 0 | return 0; |
3542 | 0 | #endif |
3543 | 0 | } |
3544 | | |
3545 | 0 | int ggml_cpu_has_wasm_simd(void) { |
3546 | | #if defined(__wasm_simd128__) |
3547 | | return 1; |
3548 | | #else |
3549 | 0 | return 0; |
3550 | 0 | #endif |
3551 | 0 | } |
3552 | | |
3553 | 0 | int ggml_cpu_has_llamafile(void) { |
3554 | 0 | #if defined(GGML_USE_LLAMAFILE) |
3555 | 0 | return 1; |
3556 | | #else |
3557 | | return 0; |
3558 | | #endif |
3559 | 0 | } |
3560 | | |
3561 | 0 | int ggml_cpu_has_sse3(void) { |
3562 | 0 | #if defined(__SSE3__) |
3563 | 0 | return 1; |
3564 | | #else |
3565 | | return 0; |
3566 | | #endif |
3567 | 0 | } |
3568 | | |
3569 | 0 | int ggml_cpu_has_ssse3(void) { |
3570 | 0 | #if defined(__SSSE3__) |
3571 | 0 | return 1; |
3572 | | #else |
3573 | | return 0; |
3574 | | #endif |
3575 | 0 | } |
3576 | | |
3577 | 0 | int ggml_cpu_has_vsx(void) { |
3578 | | #if defined(__POWER9_VECTOR__) |
3579 | | return 1; |
3580 | | #else |
3581 | 0 | return 0; |
3582 | 0 | #endif |
3583 | 0 | } |
3584 | | |
3585 | 0 | int ggml_cpu_has_vxe(void) { |
3586 | | #if defined(__VXE__) || defined(__VXE2__) |
3587 | | return 1; |
3588 | | #else |
3589 | 0 | return 0; |
3590 | 0 | #endif |
3591 | 0 | } |
3592 | | |
3593 | 0 | int ggml_cpu_has_neon(void) { |
3594 | | #if defined(__ARM_ARCH) && defined(__ARM_NEON) |
3595 | | return 1; |
3596 | | #else |
3597 | 0 | return 0; |
3598 | 0 | #endif |
3599 | 0 | } |
3600 | | |
3601 | 0 | int ggml_cpu_has_dotprod(void) { |
3602 | | #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD) |
3603 | | return 1; |
3604 | | #else |
3605 | 0 | return 0; |
3606 | 0 | #endif |
3607 | 0 | } |
3608 | | |
3609 | 0 | int ggml_cpu_has_sve(void) { |
3610 | | #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) |
3611 | | return 1; |
3612 | | #else |
3613 | 0 | return 0; |
3614 | 0 | #endif |
3615 | 0 | } |
3616 | | |
3617 | 0 | int ggml_cpu_has_matmul_int8(void) { |
3618 | | #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8) |
3619 | | return 1; |
3620 | | #else |
3621 | 0 | return 0; |
3622 | 0 | #endif |
3623 | 0 | } |
3624 | | |
3625 | 0 | int ggml_cpu_get_sve_cnt(void) { |
3626 | | #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) |
3627 | | return ggml_arm_arch_features.sve_cnt; |
3628 | | #else |
3629 | 0 | return 0; |
3630 | 0 | #endif |
3631 | 0 | } |
3632 | | |
3633 | 0 | int ggml_cpu_has_sme(void) { |
3634 | | #if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SME) |
3635 | | return 1; |
3636 | | #else |
3637 | 0 | return 0; |
3638 | 0 | #endif |
3639 | 0 | } |
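The has_* detectors are resolved at compile time from predefined macros, so they report what this ggml build was compiled with rather than what the machine running it supports; the sve_cnt / rvv_vlen getters return values probed at init time when the corresponding feature was compiled in. They are typically used for logging and diagnostics, as in this hedged usage sketch:

    #include <stdio.h>
    #include "ggml-cpu.h"

    void print_cpu_features(void) {
        printf("AVX2 = %d\n", ggml_cpu_has_avx2());
        printf("F16C = %d\n", ggml_cpu_has_f16c());
        printf("NEON = %d\n", ggml_cpu_has_neon());
        printf("SVE  = %d (cnt %d)\n", ggml_cpu_has_sve(), ggml_cpu_get_sve_cnt());
    }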
3640 | | |
3641 | 1 | void ggml_cpu_init(void) { |
3642 | | // needed to initialize ggml_time |
3643 | 1 | { |
3644 | 1 | struct ggml_init_params params = { 0, NULL, false }; |
3645 | 1 | struct ggml_context * ctx = ggml_init(params); |
3646 | 1 | ggml_free(ctx); |
3647 | 1 | } |
3648 | | |
3649 | 1 | ggml_critical_section_start(); |
3650 | | |
3651 | 1 | static bool is_first_call = true; |
3652 | | |
3653 | 1 | if (is_first_call) { |
3654 | | // initialize the F16 => F32 lookup table and the GELU / Quick GELU F16 tables
3655 | 1 | { |
3656 | 1 | const uint64_t t_start = ggml_time_us(); UNUSED(t_start); |
3657 | | |
3658 | 65.5k | for (int i = 0; i < (1 << 16); ++i) { |
3659 | 65.5k | union { |
3660 | 65.5k | uint16_t u16; |
3661 | 65.5k | ggml_fp16_t fp16; |
3662 | 65.5k | } u = {i}; |
3663 | 65.5k | float f = GGML_COMPUTE_FP16_TO_FP32(u.fp16); |
3664 | 65.5k | ggml_table_f32_f16[i] = f; |
3665 | 65.5k | ggml_table_gelu_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_f32(f)); |
3666 | 65.5k | ggml_table_gelu_quick_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_quick_f32(f)); |
3667 | 65.5k | } |
3668 | | |
3669 | 1 | const uint64_t t_end = ggml_time_us(); UNUSED(t_end); |
3670 | | |
3671 | 1 | GGML_PRINT_DEBUG("%s: FP16 lookup, GELU and Quick GELU tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0);
3672 | | |
3673 | | #ifdef GGML_USE_OPENMP |
3674 | | //if (!getenv("OMP_WAIT_POLICY")) { |
3675 | | // // set the wait policy to active, so that OpenMP threads don't sleep |
3676 | | // setenv("OMP_WAIT_POLICY", "active", 0) |
3677 | | //} |
3678 | | |
3679 | | if (!getenv("KMP_BLOCKTIME")) { |
3680 | | // set the time to wait before sleeping a thread |
3681 | | // this is less aggressive than setting the wait policy to active, but should achieve similar results in most cases |
3682 | | #ifdef _WIN32 |
3683 | | _putenv_s("KMP_BLOCKTIME", "200"); // 200ms |
3684 | | #else |
3685 | | setenv("KMP_BLOCKTIME", "200", 0); // 200ms |
3686 | | #endif |
3687 | | } |
3688 | | #endif |
3689 | 1 | } |
3690 | | |
3691 | | #if defined(__ARM_ARCH) |
3692 | | ggml_init_arm_arch_features(); |
3693 | | #endif |
3694 | | |
3695 | | #if defined(__riscv) |
3696 | | ggml_init_riscv_arch_features(); |
3697 | | #endif |
3698 | | |
3699 | 1 | is_first_call = false; |
3700 | 1 | } |
3701 | | |
3702 | 1 | ggml_critical_section_end(); |
3703 | 1 | } |
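The first-call block above fills ggml_table_f32_f16 with a float for every possible 16-bit half-precision pattern (65536 entries x 4 bytes = 256 KB), so a later fp16 -> fp32 conversion can be a single indexed load. A sketch of how such a lookup is consumed, linking against the real table:

    #include <stdint.h>

    extern float ggml_table_f32_f16[1 << 16];  // filled by ggml_cpu_init()

    static inline float f16_to_f32_lut(uint16_t bits) {
        return ggml_table_f32_f16[bits];  // O(1): index by the raw bit pattern
    }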