Coverage Report

Created: 2025-11-24 06:10

/src/llama.cpp/fuzzers/fuzz_load_model.cpp
Line  Count  Source
   1         /* Copyright 2024 Google LLC
   2         Licensed under the Apache License, Version 2.0 (the "License");
   3         you may not use this file except in compliance with the License.
   4         You may obtain a copy of the License at
   5               http://www.apache.org/licenses/LICENSE-2.0
   6         Unless required by applicable law or agreed to in writing, software
   7         distributed under the License is distributed on an "AS IS" BASIS,
   8         WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   9         See the License for the specific language governing permissions and
  10         limitations under the License.
  11         */
  12
  13         #include "llama.h"
  14         #include <iostream>
  15         #include <setjmp.h>
  16         #include <unistd.h>
  17
  18         const char *model_arch = "llama";
  19         const char *gen_arch = "general.architecture";
  20         jmp_buf fuzzing_jmp_buf;
  21
  22         struct llama_model_kv_override fuzz_kv_overrides[2];
  23
  24     55  extern "C" void __wrap_abort(void) { longjmp(fuzzing_jmp_buf, 1); }
  25
  26      8  extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  27      8    llama_backend_init();
  28
  29      8    char filename[256];
  30      8    sprintf(filename, "/tmp/libfuzzer.%d", getpid());
  31
  32      8    FILE *fp = fopen(filename, "wb");
  33      8    if (!fp) {
  34      0      return 0;
  35      0    }
  36      8    fwrite(data, size, 1, fp);
  37      8    fclose(fp);
  38
  39      8    auto params = llama_model_params{};
  40      8    memset(&params, 0x0, sizeof(struct llama_model_params));
  41      8    params.use_mmap = false;
  42      8    params.progress_callback = [](float progress, void *ctx) {
  43      0      (void)ctx;
  44      0      return progress > 0.50;
  45      0    };
  46
  47      8    fuzz_kv_overrides[0].tag = LLAMA_KV_OVERRIDE_TYPE_STR;
  48      8    strcpy(fuzz_kv_overrides[0].val_str, model_arch);
  49      8    std::strcpy(fuzz_kv_overrides[0].key, gen_arch);
  50
  51      8    params.kv_overrides =
  52      8        (const struct llama_model_kv_override *)fuzz_kv_overrides;
  53
  54      8    if (setjmp(fuzzing_jmp_buf) == 0) {
  55      8      auto *model = llama_load_model_from_file(filename, params);
  56      8      if (model != nullptr) {
  57      0        llama_free_model(model);
  58      0      }
  59      8    }
  60      8    llama_backend_free();
  61
  62      8    unlink(filename);
  63      8    return 0;
  64      8  }
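
Note: the harness survives hard failures inside llama.cpp by intercepting abort(). The abort symbol is wrapped at link time, so library assertions land in __wrap_abort() instead of terminating the process, and longjmp() returns control to the setjmp() guard in LLVMFuzzerTestOneInput. The sketch below shows that pattern in isolation; it is not the project's actual build setup, the names recovery_point and parse_value are made up for the example, and it assumes a GNU-compatible linker, building with something like: g++ demo.cpp -o demo -Wl,--wrap=abort.

    // Minimal sketch of the abort-recovery pattern (assumption: the binary is
    // linked with -Wl,--wrap=abort, so references to abort() resolve to
    // __wrap_abort() instead of the real abort()).
    #include <setjmp.h>
    #include <stdio.h>
    #include <stdlib.h>

    static jmp_buf recovery_point;  // hypothetical name, mirrors fuzzing_jmp_buf

    // With --wrap=abort, calls to abort() from the linked objects arrive here,
    // and longjmp() unwinds back to the last setjmp() instead of exiting.
    extern "C" void __wrap_abort(void) { longjmp(recovery_point, 1); }

    // Stand-in for library code that aborts on malformed input.
    static void parse_value(int value) {
      if (value < 0) {
        abort();
      }
      printf("parsed %d\n", value);
    }

    int main() {
      const int inputs[] = {3, -1, 7};
      for (int v : inputs) {
        if (setjmp(recovery_point) == 0) {
          parse_value(v);                                    // normal path
        } else {
          printf("recovered from abort() on %d\n", v);       // longjmp path
        }
      }
      return 0;
    }

The same guard explains the counts above: __wrap_abort() executed 55 times across 8 runs of LLVMFuzzerTestOneInput, so every aborted load jumped back to the setjmp() site and the harness continued with the next input.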