/src/llama.cpp/src/models/codeshell.cpp
Line | Count | Source |
1 | | #include "models.h" |
2 | | |
3 | 0 | llm_build_codeshell::llm_build_codeshell(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) { |
4 | 0 | const int64_t n_embd_head = hparams.n_embd_head_v; |
5 | 0 | const int64_t n_embd_gqa = hparams.n_embd_v_gqa(); |
6 | |
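| | // codeshell uses full-width RoPE: the rotation dim must equal the (shared) K/V head size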
7 | 0 | GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); |
8 | 0 | GGML_ASSERT(n_embd_head == hparams.n_rot); |
9 | |
10 | 0 | ggml_tensor * cur; |
11 | 0 | ggml_tensor * inpL; |
12 | |
13 | 0 | inpL = build_inp_embd(model.tok_embd); |
14 | | |
15 | | // inp_pos - contains the positions |
16 | 0 | ggml_tensor * inp_pos = build_inp_pos(); |
17 | |
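| | // attention input backed by the KV cache (cache views + KQ mask), shared by all layers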
18 | 0 | auto * inp_attn = build_attn_inp_kv(); |
19 | |
20 | 0 | ggml_tensor * inp_out_ids = build_inp_out_ids(); |
21 | |
22 | 0 | for (int il = 0; il < n_layer; ++il) { |
23 | 0 | cur = build_norm(inpL, |
24 | 0 | model.layers[il].attn_norm, |
25 | 0 | model.layers[il].attn_norm_b, |
26 | 0 | LLM_NORM, il); |
27 | 0 | cb(cur, "attn_norm", il); |
28 | | |
29 | | // self-attention |
30 | 0 | { |
31 | 0 | cur = build_lora_mm(model.layers[il].wqkv, cur); |
32 | 0 | cb(cur, "wqkv", il); |
33 | |
34 | 0 | cur = ggml_add(ctx0, cur, model.layers[il].bqkv); |
35 | 0 | cb(cur, "bqkv", il); |
36 | |
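| | // split the fused QKV tensor: per token it stores Q (n_embd floats),
| | // then K (n_embd_gqa), then V (n_embd_gqa); the byte offsets below select each part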
37 | 0 | ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd)); |
38 | 0 | ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd)); |
39 | 0 | ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)); |
40 | |
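| | // rotary position embeddings are applied to Q and K only; V stays as-is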
41 | 0 | Qcur = ggml_rope_ext( |
42 | 0 | ctx0, Qcur, inp_pos, nullptr, |
43 | 0 | n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, |
44 | 0 | ext_factor, attn_factor, beta_fast, beta_slow |
45 | 0 | ); |
46 | |
47 | 0 | Kcur = ggml_rope_ext( |
48 | 0 | ctx0, Kcur, inp_pos, nullptr, |
49 | 0 | n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, |
50 | 0 | ext_factor, attn_factor, beta_fast, beta_slow |
51 | 0 | ); |
52 | |
53 | 0 | cb(Qcur, "Qcur", il); |
54 | 0 | cb(Kcur, "Kcur", il); |
55 | 0 | cb(Vcur, "Vcur", il); |
56 | |
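| | // scaled dot-product attention with 1/sqrt(n_embd_head), then output projection wo (+ bo)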
57 | 0 | cur = build_attn(inp_attn, |
58 | 0 | model.layers[il].wo, model.layers[il].bo, |
59 | 0 | Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il); |
60 | 0 | } |
61 | |
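| | // on the last layer, keep only the rows for which outputs were requested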
62 | 0 | if (il == n_layer - 1 && inp_out_ids) { |
63 | 0 | cur = ggml_get_rows(ctx0, cur, inp_out_ids); |
64 | 0 | inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); |
65 | 0 | } |
66 | | |
67 | | // add the input |
68 | 0 | ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL); |
69 | 0 | cb(ffn_inp, "ffn_inp", il); |
70 | | |
71 | | // FF |
72 | 0 | { |
73 | 0 | cur = build_norm(ffn_inp, |
74 | 0 | model.layers[il].ffn_norm, |
75 | 0 | model.layers[il].ffn_norm_b, |
76 | 0 | LLM_NORM, il); |
77 | 0 | cb(cur, "ffn_norm", il); |
78 | |
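| | // plain (non-gated) MLP: up projection -> GELU -> down projection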
79 | 0 | cur = build_ffn(cur, |
80 | 0 | model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL, |
81 | 0 | NULL, NULL, NULL, |
82 | 0 | model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL, |
83 | 0 | NULL, |
84 | 0 | LLM_FFN_GELU, LLM_FFN_SEQ, il); |
85 | 0 | cb(cur, "ffn_out", il); |
86 | 0 | } |
87 | |
88 | 0 | cur = ggml_add(ctx0, cur, ffn_inp); |
89 | |
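| | // apply the per-layer control vector, if one is loaded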
90 | 0 | cur = build_cvec(cur, il); |
91 | 0 | cb(cur, "l_out", il); |
92 | | |
93 | | // input for next layer |
94 | 0 | inpL = cur; |
95 | 0 | } |
96 | |
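| | // final output norm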
97 | 0 | cur = build_norm(inpL, |
98 | 0 | model.output_norm, |
99 | 0 | model.output_norm_b, |
100 | 0 | LLM_NORM, -1); |
101 | |
102 | 0 | cb(cur, "result_norm", -1); |
103 | 0 | res->t_embd = cur; |
104 | |
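| | // LM head: project to vocabulary logits (with LoRA, if any)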
105 | 0 | cur = build_lora_mm(model.output, cur); |
106 | |
107 | 0 | cb(cur, "result_output", -1); |
108 | 0 | res->t_logits = cur; |
109 | |
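| | // expand the graph from the logits tensor so all of its dependencies are built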
110 | 0 | ggml_build_forward_expand(gf, cur); |
111 | 0 | } |