/src/llama.cpp/src/llama-impl.h
Line | Count | Source |
1 | | #pragma once |
2 | | |
3 | | #include "ggml.h" // for ggml_log_level |
4 | | |
5 | | #include <string> |
6 | | #include <vector> |
7 | | |
8 | | #ifdef __GNUC__ |
9 | | # if defined(__MINGW32__) && !defined(__clang__) |
10 | | # define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) |
11 | | # else |
12 | | # define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) |
13 | | # endif |
14 | | #else |
15 | | # define LLAMA_ATTRIBUTE_FORMAT(...) |
16 | | #endif |
17 | | |
18 | | // |
19 | | // logging |
20 | | // |
21 | | |
22 | | LLAMA_ATTRIBUTE_FORMAT(2, 3) |
23 | | void llama_log_internal (ggml_log_level level, const char * format, ...); |
24 | | void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data); |
25 | | |
26 | | #define LLAMA_LOG(...) llama_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__) |
27 | 0 | #define LLAMA_LOG_INFO(...) llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) Unexecuted instantiation: llama-model.cpp:llama_model::print_info() const::$_0::operator()(unsigned int) const Unexecuted instantiation: llama-model.cpp:llama_model::print_info() const::$_1::operator()(unsigned int) const Unexecuted instantiation: llama-model.cpp:llama_model::print_info() const::$_2::operator()(unsigned int) const Unexecuted instantiation: llama-model.cpp:llama_model::print_info() const::$_3::operator()(unsigned int) const Unexecuted instantiation: llama-model.cpp:llama_model::print_info() const::$_4::operator()(unsigned int) const Unexecuted instantiation: llama-model.cpp:llama_model::print_info() const::$_5::operator()(unsigned int) const |
28 | 0 | #define LLAMA_LOG_WARN(...) llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__) |
29 | 2 | #define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__) |
30 | 0 | #define LLAMA_LOG_DEBUG(...) llama_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__) |
31 | 0 | #define LLAMA_LOG_CONT(...) llama_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__) |
32 | | |
33 | | // |
34 | | // helpers |
35 | | // |
36 | | |
// Wrapper that deliberately skips value-initialization of `value`:
// the defaulted constructor is trivial, so `value` stays indeterminate
// until assigned. Useful to avoid paying for zero-initialization of
// large scratch buffers (e.g. std::vector<no_init<T>>) that are about
// to be overwritten anyway.
template <typename T>
struct no_init {
    T value; // intentionally left uninitialized — assign before reading

    no_init() = default; // trivial ctor: performs no initialization of `value`
};
42 | | |
// RAII time measurement: records a start timestamp on construction and
// accumulates elapsed time into the referenced counter on destruction.
// NOTE(review): the constructor and destructor are defined out of line
// (not visible in this header), so the exact semantics below are inferred
// from the member names — confirm against the .cpp definition.
struct time_meas {
    // t_acc:    caller-owned accumulator that receives the elapsed time
    //           (microseconds, presumably, given t_start_us — TODO confirm).
    // disable:  presumably skips the measurement when true — behavior lives
    //           in the out-of-line constructor; verify there.
    time_meas(int64_t & t_acc, bool disable = false);
    ~time_meas();

    const int64_t t_start_us; // start timestamp in microseconds (per the _us suffix)

    int64_t & t_acc; // reference to the accumulator updated by the destructor
};
51 | | |
// Non-owning view over a contiguous buffer of `size` elements of T.
// The pointed-to storage is owned and kept alive by the caller.
//
// Fix: `data` previously had no default member initializer while `size`
// did, so a default-constructed view held an indeterminate pointer and
// calling has_data() on it read uninitialized memory (UB). Initializing
// `data` to nullptr makes default construction a well-defined empty view
// and is backward-compatible for all existing callers.
template <typename T>
struct buffer_view {
    T * data = nullptr; // borrowed, non-owning pointer (may be null)
    size_t size = 0;    // element count, not bytes

    // True only when the view points at at least one element.
    bool has_data() const {
        return data && size > 0;
    }
};
61 | | |
62 | | void replace_all(std::string & s, const std::string & search, const std::string & replace); |
63 | | |
64 | | // TODO: rename to llama_format ? |
65 | | LLAMA_ATTRIBUTE_FORMAT(1, 2) |
66 | | std::string format(const char * fmt, ...); |
67 | | |
68 | | std::string llama_format_tensor_shape(const std::vector<int64_t> & ne); |
69 | | std::string llama_format_tensor_shape(const struct ggml_tensor * t); |
70 | | |
71 | | std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i); |
72 | | |
73 | 0 | #define LLAMA_TENSOR_NAME_FATTN "__fattn__" |
74 | 0 | #define LLAMA_TENSOR_NAME_FGDN_AR "__fgdn_ar__" |
75 | 0 | #define LLAMA_TENSOR_NAME_FGDN_CH "__fgdn_ch__" |