/src/llama.cpp/src/llama-io.h
#pragma once

#include <cstddef>
#include <cstdint>
#include <string>

struct ggml_tensor;

class llama_io_write_i {
public:
    llama_io_write_i() = default;
    virtual ~llama_io_write_i() = default;

    // write `size` bytes from `src` to the destination
    virtual void write(const void * src, size_t size) = 0;

    // write `size` bytes of tensor data, starting at byte `offset` into the tensor
    virtual void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) = 0;

    // bytes written so far
    virtual size_t n_bytes() = 0;

    // helper built on write(): writes the string size followed by its bytes
    void write_string(const std::string & str);
};

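// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the upstream header): a minimal
// in-memory writer implementing llama_io_write_i, to show how the interface is
// meant to be used. The class name llama_io_write_vector_example is
// hypothetical, and it assumes <vector>, <cstring> and the ggml-backend API
// (ggml_backend_tensor_get) are available.

#include <cstring>
#include <vector>

#include "ggml-backend.h"

class llama_io_write_vector_example : public llama_io_write_i {
public:
    void write(const void * src, size_t size) override {
        const uint8_t * p = static_cast<const uint8_t *>(src);
        buf.insert(buf.end(), p, p + size);
    }

    void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override {
        // copy the requested slice of tensor data into a temporary, then append it
        std::vector<uint8_t> tmp(size);
        ggml_backend_tensor_get(tensor, tmp.data(), offset, size);
        write(tmp.data(), size);
    }

    size_t n_bytes() override {
        return buf.size();
    }

    const std::vector<uint8_t> & data() const { return buf; }

private:
    std::vector<uint8_t> buf;
};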

class llama_io_read_i {
public:
    llama_io_read_i() = default;
    virtual ~llama_io_read_i() = default;

    // return a pointer to the next `size` bytes and advance the read position
    virtual const uint8_t * read(size_t size) = 0;

    // copy the next `size` bytes into `dst` and advance the read position
    virtual void read_to(void * dst, size_t size) = 0;

    // bytes read so far
    virtual size_t n_bytes() = 0;

    // helper built on read()/read_to(): reads a string written by write_string()
    void read_string(std::string & str);
};
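
// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of the upstream header): a matching
// in-memory reader over a caller-owned buffer. The class name
// llama_io_read_vector_example is hypothetical.

class llama_io_read_vector_example : public llama_io_read_i {
public:
    llama_io_read_vector_example(const uint8_t * src, size_t size) : src(src), left(size) {}

    const uint8_t * read(size_t size) override {
        // NOTE: error handling is omitted for brevity; a real implementation
        // should verify `size <= left` before advancing
        const uint8_t * p = src;
        src  += size;
        left -= size;
        pos  += size;
        return p;
    }

    void read_to(void * dst, size_t size) override {
        std::memcpy(dst, read(size), size);
    }

    size_t n_bytes() override {
        return pos;
    }

private:
    const uint8_t * src;
    size_t left;
    size_t pos = 0;
};

// Round-trip usage of the two sketches above: write a string through the
// write_string() helper, then read it back with read_string().
//
//     llama_io_write_vector_example wr;
//     wr.write_string("hello");
//
//     llama_io_read_vector_example rd(wr.data().data(), wr.n_bytes());
//     std::string s;
//     rd.read_string(s); // s == "hello"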