Coverage Report

Created: 2026-03-12 06:35

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/CMake/Utilities/cmliblzma/liblzma/common/outqueue.c
Line
Count
Source
1
// SPDX-License-Identifier: 0BSD
2
3
///////////////////////////////////////////////////////////////////////////////
4
//
5
/// \file       outqueue.c
6
/// \brief      Output queue handling in multithreaded coding
7
//
8
//  Author:     Lasse Collin
9
//
10
///////////////////////////////////////////////////////////////////////////////
11
12
#include "outqueue.h"
13
14
15
/// Get the maximum number of buffers that may be allocated based
16
/// on the number of threads. For now this is twice the number of threads.
17
/// It's a compromise between RAM usage and keeping the worker threads busy
18
/// when buffers finish out of order.
19
0
#define GET_BUFS_LIMIT(threads) (2 * (threads))
20
21
22
extern uint64_t
23
lzma_outq_memusage(uint64_t buf_size_max, uint32_t threads)
24
0
{
25
  // This is to ease integer overflow checking: We may allocate up to
26
  // GET_BUFS_LIMIT(LZMA_THREADS_MAX) buffers and we need some extra
27
  // memory for other data structures too (that's the /2).
28
  //
29
  // lzma_outq_prealloc_buf() will still accept bigger buffers than this.
30
0
  const uint64_t limit
31
0
      = UINT64_MAX / GET_BUFS_LIMIT(LZMA_THREADS_MAX) / 2;
32
33
0
  if (threads > LZMA_THREADS_MAX || buf_size_max > limit)
34
0
    return UINT64_MAX;
35
36
0
  return GET_BUFS_LIMIT(threads)
37
0
      * lzma_outq_outbuf_memusage(buf_size_max);
38
0
}
39
40
41
static void
42
move_head_to_cache(lzma_outq *outq, const lzma_allocator *allocator)
43
0
{
44
0
  assert(outq->head != NULL);
45
0
  assert(outq->tail != NULL);
46
0
  assert(outq->bufs_in_use > 0);
47
48
0
  lzma_outbuf *buf = outq->head;
49
0
  outq->head = buf->next;
50
0
  if (outq->head == NULL)
51
0
    outq->tail = NULL;
52
53
0
  if (outq->cache != NULL && outq->cache->allocated != buf->allocated)
54
0
    lzma_outq_clear_cache(outq, allocator);
55
56
0
  buf->next = outq->cache;
57
0
  outq->cache = buf;
58
59
0
  --outq->bufs_in_use;
60
0
  outq->mem_in_use -= lzma_outq_outbuf_memusage(buf->allocated);
61
62
0
  return;
63
0
}
64
65
66
static void
67
free_one_cached_buffer(lzma_outq *outq, const lzma_allocator *allocator)
68
0
{
69
0
  assert(outq->cache != NULL);
70
71
0
  lzma_outbuf *buf = outq->cache;
72
0
  outq->cache = buf->next;
73
74
0
  --outq->bufs_allocated;
75
0
  outq->mem_allocated -= lzma_outq_outbuf_memusage(buf->allocated);
76
77
0
  lzma_free(buf, allocator);
78
0
  return;
79
0
}
80
81
82
extern void
83
lzma_outq_clear_cache(lzma_outq *outq, const lzma_allocator *allocator)
84
0
{
85
0
  while (outq->cache != NULL)
86
0
    free_one_cached_buffer(outq, allocator);
87
88
0
  return;
89
0
}
90
91
92
extern void
93
lzma_outq_clear_cache2(lzma_outq *outq, const lzma_allocator *allocator,
94
    size_t keep_size)
95
0
{
96
0
  if (outq->cache == NULL)
97
0
    return;
98
99
  // Free all but one.
100
0
  while (outq->cache->next != NULL)
101
0
    free_one_cached_buffer(outq, allocator);
102
103
  // Free the last one only if its size doesn't equal keep_size.
104
0
  if (outq->cache->allocated != keep_size)
105
0
    free_one_cached_buffer(outq, allocator);
106
107
0
  return;
108
0
}
109
110
111
extern lzma_ret
112
lzma_outq_init(lzma_outq *outq, const lzma_allocator *allocator,
113
    uint32_t threads)
114
0
{
115
0
  if (threads > LZMA_THREADS_MAX)
116
0
    return LZMA_OPTIONS_ERROR;
117
118
0
  const uint32_t bufs_limit = GET_BUFS_LIMIT(threads);
119
120
  // Clear head/tail.
121
0
  while (outq->head != NULL)
122
0
    move_head_to_cache(outq, allocator);
123
124
  // If new buf_limit is lower than the old one, we may need to free
125
  // a few cached buffers.
126
0
  while (bufs_limit < outq->bufs_allocated)
127
0
    free_one_cached_buffer(outq, allocator);
128
129
0
  outq->bufs_limit = bufs_limit;
130
0
  outq->read_pos = 0;
131
132
0
  return LZMA_OK;
133
0
}
134
135
136
extern void
137
lzma_outq_end(lzma_outq *outq, const lzma_allocator *allocator)
138
0
{
139
0
  while (outq->head != NULL)
140
0
    move_head_to_cache(outq, allocator);
141
142
0
  lzma_outq_clear_cache(outq, allocator);
143
0
  return;
144
0
}
145
146
147
extern lzma_ret
148
lzma_outq_prealloc_buf(lzma_outq *outq, const lzma_allocator *allocator,
149
    size_t size)
150
0
{
151
  // Caller must have checked it with lzma_outq_has_buf().
152
0
  assert(outq->bufs_in_use < outq->bufs_limit);
153
154
  // If there already is appropriately-sized buffer in the cache,
155
  // we need to do nothing.
156
0
  if (outq->cache != NULL && outq->cache->allocated == size)
157
0
    return LZMA_OK;
158
159
0
  if (size > SIZE_MAX - sizeof(lzma_outbuf))
160
0
    return LZMA_MEM_ERROR;
161
162
0
  const size_t alloc_size = lzma_outq_outbuf_memusage(size);
163
164
  // The cache may have buffers but their size is wrong.
165
0
  lzma_outq_clear_cache(outq, allocator);
166
167
0
  outq->cache = lzma_alloc(alloc_size, allocator);
168
0
  if (outq->cache == NULL)
169
0
    return LZMA_MEM_ERROR;
170
171
0
  outq->cache->next = NULL;
172
0
  outq->cache->allocated = size;
173
174
0
  ++outq->bufs_allocated;
175
0
  outq->mem_allocated += alloc_size;
176
177
0
  return LZMA_OK;
178
0
}
179
180
181
extern lzma_outbuf *
182
lzma_outq_get_buf(lzma_outq *outq, void *worker)
183
0
{
184
  // Caller must have used lzma_outq_prealloc_buf() to ensure these.
185
0
  assert(outq->bufs_in_use < outq->bufs_limit);
186
0
  assert(outq->bufs_in_use < outq->bufs_allocated);
187
0
  assert(outq->cache != NULL);
188
189
0
  lzma_outbuf *buf = outq->cache;
190
0
  outq->cache = buf->next;
191
0
  buf->next = NULL;
192
193
0
  if (outq->tail != NULL) {
194
0
    assert(outq->head != NULL);
195
0
    outq->tail->next = buf;
196
0
  } else {
197
0
    assert(outq->head == NULL);
198
0
    outq->head = buf;
199
0
  }
200
201
0
  outq->tail = buf;
202
203
0
  buf->worker = worker;
204
0
  buf->finished = false;
205
0
  buf->finish_ret = LZMA_STREAM_END;
206
0
  buf->pos = 0;
207
0
  buf->decoder_in_pos = 0;
208
209
0
  buf->unpadded_size = 0;
210
0
  buf->uncompressed_size = 0;
211
212
0
  ++outq->bufs_in_use;
213
0
  outq->mem_in_use += lzma_outq_outbuf_memusage(buf->allocated);
214
215
0
  return buf;
216
0
}
217
218
219
extern bool
220
lzma_outq_is_readable(const lzma_outq *outq)
221
0
{
222
0
  if (outq->head == NULL)
223
0
    return false;
224
225
0
  return outq->read_pos < outq->head->pos || outq->head->finished;
226
0
}
227
228
229
extern lzma_ret
230
lzma_outq_read(lzma_outq *restrict outq,
231
    const lzma_allocator *restrict allocator,
232
    uint8_t *restrict out, size_t *restrict out_pos,
233
    size_t out_size,
234
    lzma_vli *restrict unpadded_size,
235
    lzma_vli *restrict uncompressed_size)
236
0
{
237
  // There must be at least one buffer from which to read.
238
0
  if (outq->bufs_in_use == 0)
239
0
    return LZMA_OK;
240
241
  // Get the buffer.
242
0
  lzma_outbuf *buf = outq->head;
243
244
  // Copy from the buffer to output.
245
  //
246
  // FIXME? In threaded decoder it may be bad to do this copy while
247
  // the mutex is being held.
248
0
  lzma_bufcpy(buf->buf, &outq->read_pos, buf->pos,
249
0
      out, out_pos, out_size);
250
251
  // Return if we didn't get all the data from the buffer.
252
0
  if (!buf->finished || outq->read_pos < buf->pos)
253
0
    return LZMA_OK;
254
255
  // The buffer was finished. Tell the caller its size information.
256
0
  if (unpadded_size != NULL)
257
0
    *unpadded_size = buf->unpadded_size;
258
259
0
  if (uncompressed_size != NULL)
260
0
    *uncompressed_size = buf->uncompressed_size;
261
262
  // Remember the return value.
263
0
  const lzma_ret finish_ret = buf->finish_ret;
264
265
  // Free this buffer for further use.
266
0
  move_head_to_cache(outq, allocator);
267
0
  outq->read_pos = 0;
268
269
0
  return finish_ret;
270
0
}
271
272
273
extern void
274
lzma_outq_enable_partial_output(lzma_outq *outq,
275
    void (*enable_partial_output)(void *worker))
276
0
{
277
0
  if (outq->head != NULL && !outq->head->finished
278
0
      && outq->head->worker != NULL) {
279
0
    enable_partial_output(outq->head->worker);
280
281
    // Set it to NULL since calling it twice is pointless.
282
0
    outq->head->worker = NULL;
283
0
  }
284
285
0
  return;
286
0
}