Coverage Report

Created: 2025-11-09 06:16

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/xz/src/liblzma/common/stream_decoder_mt.c
Line
Count
Source
1
// SPDX-License-Identifier: 0BSD
2
3
///////////////////////////////////////////////////////////////////////////////
4
//
5
/// \file       stream_decoder_mt.c
6
/// \brief      Multithreaded .xz Stream decoder
7
//
8
//  Authors:    Sebastian Andrzej Siewior
9
//              Lasse Collin
10
//
11
///////////////////////////////////////////////////////////////////////////////
12
13
#include "common.h"
14
#include "block_decoder.h"
15
#include "stream_decoder.h"
16
#include "index.h"
17
#include "outqueue.h"
18
19
20
/// State of a single worker thread.
/// The state is protected by the worker thread's own mutex
/// (worker_thread.mutex).
typedef enum {
  /// Waiting for work.
  /// Main thread may change this to THR_RUN or THR_EXIT.
  THR_IDLE,

  /// Decoding is in progress.
  /// Main thread may change this to THR_IDLE or THR_EXIT.
  /// The worker thread may change this to THR_IDLE.
  THR_RUN,

  /// The main thread wants the thread to exit.
  THR_EXIT,

} worker_state;
34
35
36
/// Controls whether a worker thread publishes its in-progress output
/// position (outbuf->pos) to the main thread. See
/// worker_thread.partial_update and worker_enable_partial_update().
typedef enum {
  /// Partial updates (storing of worker thread progress
  /// to lzma_outbuf) are disabled.
  PARTIAL_DISABLED,

  /// Main thread requests partial updates to be enabled but
  /// no partial update has been done by the worker thread yet.
  ///
  /// Changing from PARTIAL_DISABLED to PARTIAL_START requires
  /// use of the worker-thread mutex. Other transitions don't
  /// need a mutex.
  PARTIAL_START,

  /// Partial updates are enabled and the worker thread has done
  /// at least one partial update.
  PARTIAL_ENABLED,

} partial_update_mode;
54
55
56
/// Per-thread state of one worker thread. An array of these is
/// allocated lazily in initialize_new_thread(); idle structures are
/// kept on the lzma_stream_coder.threads_free stack.
struct worker_thread {
  /// Worker state is protected with our mutex.
  worker_state state;

  /// Input buffer that will contain the whole Block except Block Header.
  uint8_t *in;

  /// Amount of memory allocated for "in"
  size_t in_size;

  /// Number of bytes written to "in" by the main thread
  size_t in_filled;

  /// Number of bytes consumed from "in" by the worker thread.
  size_t in_pos;

  /// Amount of uncompressed data that has been decoded. This local
  /// copy is needed because updating outbuf->pos requires locking
  /// the main mutex (coder->mutex).
  size_t out_pos;

  /// Pointer to the main structure is needed to (1) lock the main
  /// mutex (coder->mutex) when updating outbuf->pos and (2) when
  /// putting this thread back to the stack of free threads.
  struct lzma_stream_coder *coder;

  /// The allocator is set by the main thread. Since a copy of the
  /// pointer is kept here, the application must not change the
  /// allocator before calling lzma_end().
  const lzma_allocator *allocator;

  /// Output queue buffer to which the uncompressed data is written.
  lzma_outbuf *outbuf;

  /// Amount of compressed data that has already been decompressed.
  /// This is updated from in_pos when our mutex is locked.
  /// This is size_t, not uint64_t, because per-thread progress
  /// is limited to sizes of allocated buffers.
  size_t progress_in;

  /// Like progress_in but for uncompressed data.
  size_t progress_out;

  /// Updating outbuf->pos requires locking the main mutex
  /// (coder->mutex). Since the main thread will only read output
  /// from the oldest outbuf in the queue, only the worker thread
  /// that is associated with the oldest outbuf needs to update its
  /// outbuf->pos. This avoids useless mutex contention that would
  /// happen if all worker threads were frequently locking the main
  /// mutex to update their outbuf->pos.
  ///
  /// Only when partial_update is something else than PARTIAL_DISABLED,
  /// this worker thread will update outbuf->pos after each call to
  /// the Block decoder.
  partial_update_mode partial_update;

  /// Block decoder
  lzma_next_coder block_decoder;

  /// Thread-specific Block options are needed because the Block
  /// decoder modifies the struct given to it at initialization.
  lzma_block block_options;

  /// Filter chain memory usage
  uint64_t mem_filters;

  /// Next structure in the stack of free worker threads.
  struct worker_thread *next;

  /// Mutex protecting "state" (see the comment on "state" above).
  mythread_mutex mutex;

  /// Signaled when "state" changes or when new input has been made
  /// available to this thread (see worker_decoder()).
  mythread_cond cond;

  /// The ID of this thread is used to join the thread
  /// when it's not needed anymore.
  mythread thread_id;
};
132
133
134
/// Main coder structure of the multithreaded .xz Stream decoder.
/// Shared between the main thread and the worker threads; the fields
/// marked with "\note Use mutex" are protected by "mutex" below.
struct lzma_stream_coder {
  /// Current step of the decoder's state machine.
  enum {
    SEQ_STREAM_HEADER,
    SEQ_BLOCK_HEADER,
    SEQ_BLOCK_INIT,
    SEQ_BLOCK_THR_INIT,
    SEQ_BLOCK_THR_RUN,
    SEQ_BLOCK_DIRECT_INIT,
    SEQ_BLOCK_DIRECT_RUN,
    SEQ_INDEX_WAIT_OUTPUT,
    SEQ_INDEX_DECODE,
    SEQ_STREAM_FOOTER,
    SEQ_STREAM_PADDING,
    SEQ_ERROR,
  } sequence;

  /// Block decoder
  lzma_next_coder block_decoder;

  /// Every Block Header will be decoded into this structure.
  /// This is also used to initialize a Block decoder when in
  /// direct mode. In threaded mode, a thread-specific copy will
  /// be made for decoder initialization because the Block decoder
  /// will modify the structure given to it.
  lzma_block block_options;

  /// Buffer to hold a filter chain for Block Header decoding and
  /// initialization. These are freed after successful Block decoder
  /// initialization or at stream_decoder_mt_end(). The thread-specific
  /// copy of block_options won't hold a pointer to filters[] after
  /// initialization.
  lzma_filter filters[LZMA_FILTERS_MAX + 1];

  /// Stream Flags from Stream Header
  lzma_stream_flags stream_flags;

  /// Index is hashed so that it can be compared to the sizes of Blocks
  /// with O(1) memory usage.
  lzma_index_hash *index_hash;

  /// Maximum wait time if cannot use all the input and cannot
  /// fill the output buffer. This is in milliseconds.
  uint32_t timeout;

  /// Error code from a worker thread.
  ///
  /// \note       Use mutex.
  lzma_ret thread_error;

  /// Error code to return after pending output has been copied out. If
  /// set in read_output_and_wait(), this is a mirror of thread_error.
  /// If set in stream_decode_mt() then it's, for example, error that
  /// occurred when decoding Block Header.
  lzma_ret pending_error;

  /// Number of threads that will be created at maximum.
  uint32_t threads_max;

  /// Number of thread structures that have been initialized from
  /// "threads", and thus the number of worker threads actually
  /// created so far.
  uint32_t threads_initialized;

  /// Array of allocated thread-specific structures. When no threads
  /// are in use (direct mode) this is NULL. In threaded mode this
  /// points to an array of threads_max number of worker_thread structs.
  struct worker_thread *threads;

  /// Stack of free threads. When a thread finishes, it puts itself
  /// back into this stack. This starts as empty because threads
  /// are created only when actually needed.
  ///
  /// \note       Use mutex.
  struct worker_thread *threads_free;

  /// The most recent worker thread to which the main thread writes
  /// the new input from the application.
  struct worker_thread *thr;

  /// Output buffer queue for decompressed data from the worker threads
  ///
  /// \note       Use mutex with operations that need it.
  lzma_outq outq;

  /// Mutex protecting the fields marked with "Use mutex" above and
  /// below.
  mythread_mutex mutex;

  /// Signaled by the worker threads when they make progress or
  /// finish a Block (see worker_decoder()); the main thread waits
  /// on it in read_output_and_wait().
  mythread_cond cond;

  /// Memory usage that will not be exceeded in multi-threaded mode.
  /// Single-threaded mode can exceed this even by a large amount.
  uint64_t memlimit_threading;

  /// Memory usage limit that should never be exceeded.
  /// LZMA_MEMLIMIT_ERROR will be returned if decoding isn't possible
  /// even in single-threaded mode without exceeding this limit.
  uint64_t memlimit_stop;

  /// Amount of memory in use by the direct mode decoder
  /// (coder->block_decoder). In threaded mode this is 0.
  uint64_t mem_direct_mode;

  /// Amount of memory needed by the running worker threads.
  /// This doesn't include the memory needed by the output buffer.
  ///
  /// \note       Use mutex.
  uint64_t mem_in_use;

  /// Amount of memory used by the idle (cached) threads.
  ///
  /// \note       Use mutex.
  uint64_t mem_cached;

  /// Amount of memory needed for the filter chain of the next Block.
  uint64_t mem_next_filters;

  /// Amount of memory needed for the thread-specific input buffer
  /// for the next Block.
  uint64_t mem_next_in;

  /// Amount of memory actually needed to decode the next Block
  /// in threaded mode. This is
  /// mem_next_filters + mem_next_in + memory needed for lzma_outbuf.
  uint64_t mem_next_block;

  /// Amount of compressed data in Stream Header + Blocks that have
  /// already been finished.
  ///
  /// \note       Use mutex.
  uint64_t progress_in;

  /// Amount of uncompressed data in Blocks that have already
  /// been finished.
  ///
  /// \note       Use mutex.
  uint64_t progress_out;

  /// If true, LZMA_NO_CHECK is returned if the Stream has
  /// no integrity check.
  bool tell_no_check;

  /// If true, LZMA_UNSUPPORTED_CHECK is returned if the Stream has
  /// an integrity check that isn't supported by this liblzma build.
  bool tell_unsupported_check;

  /// If true, LZMA_GET_CHECK is returned after decoding Stream Header.
  bool tell_any_check;

  /// If true, we will tell the Block decoder to skip calculating
  /// and verifying the integrity check.
  bool ignore_check;

  /// If true, we will decode concatenated Streams that possibly have
  /// Stream Padding between or after them. LZMA_STREAM_END is returned
  /// once the application isn't giving us any new input (LZMA_FINISH),
  /// and we aren't in the middle of a Stream, and possible
  /// Stream Padding is a multiple of four bytes.
  bool concatenated;

  /// If true, we will return any errors immediately instead of first
  /// producing all output before the location of the error.
  bool fail_fast;

  /// When decoding concatenated Streams, this is true as long as we
  /// are decoding the first Stream. This is needed to avoid misleading
  /// LZMA_FORMAT_ERROR in case the later Streams don't have valid magic
  /// bytes.
  bool first_stream;

  /// This is used to track if the previous call to stream_decode_mt()
  /// had output space (*out_pos < out_size) and managed to fill the
  /// output buffer (*out_pos == out_size). This may be set to true
  /// in read_output_and_wait(). This is read and then reset to false
  /// at the beginning of stream_decode_mt().
  ///
  /// This is needed to support applications that call lzma_code() in
  /// such a way that more input is provided only when lzma_code()
  /// didn't fill the output buffer completely. Basically, this makes
  /// it easier to convert such applications from single-threaded
  /// decoder to multi-threaded decoder.
  bool out_was_filled;

  /// Write position in buffer[] and position in Stream Padding
  size_t pos;

  /// Buffer to hold Stream Header, Block Header, and Stream Footer.
  /// Block Header has biggest maximum size.
  uint8_t buffer[LZMA_BLOCK_HEADER_SIZE_MAX];
};
328
329
330
/// Enables updating of outbuf->pos. This is a callback function that is
331
/// used with lzma_outq_enable_partial_output().
332
static void
333
worker_enable_partial_update(void *thr_ptr)
334
91.2k
{
335
91.2k
  struct worker_thread *thr = thr_ptr;
336
337
91.2k
  mythread_sync(thr->mutex) {
338
91.2k
    thr->partial_update = PARTIAL_START;
339
91.2k
    mythread_cond_signal(&thr->cond);
340
91.2k
  }
341
91.2k
}
342
343
344
/// Thread function of a worker thread: decodes the Blocks that the
/// main thread assigns to it.
///
/// The loop is driven by worker_state (protected by thr->mutex):
///   - THR_IDLE: wait on thr->cond for work.
///   - THR_RUN:  feed thr->in to the Block decoder in small chunks.
///   - THR_EXIT: free this thread's resources and terminate.
static MYTHREAD_RET_TYPE
worker_decoder(void *thr_ptr)
{
  struct worker_thread *thr = thr_ptr;
  size_t in_filled;
  partial_update_mode partial_update;
  lzma_ret ret;

// Entered with thr->mutex unlocked.
next_loop_lock:

  mythread_mutex_lock(&thr->mutex);
// Entered with thr->mutex locked.
next_loop_unlocked:

  if (thr->state == THR_IDLE) {
    mythread_cond_wait(&thr->cond, &thr->mutex);
    goto next_loop_unlocked;
  }

  if (thr->state == THR_EXIT) {
    mythread_mutex_unlock(&thr->mutex);

    lzma_free(thr->in, thr->allocator);
    lzma_next_end(&thr->block_decoder, thr->allocator);

    mythread_mutex_destroy(&thr->mutex);
    mythread_cond_destroy(&thr->cond);

    return MYTHREAD_RET_VALUE;
  }

  assert(thr->state == THR_RUN);

  // Update progress info for get_progress().
  thr->progress_in = thr->in_pos;
  thr->progress_out = thr->out_pos;

  // If we don't have any new input, wait for a signal from the main
  // thread except if partial output has just been enabled. In that
  // case we will do one normal run so that the partial output info
  // gets passed to the main thread. The call to block_decoder.code()
  // is useless but harmless as it can occur only once per Block.
  in_filled = thr->in_filled;
  partial_update = thr->partial_update;

  if (in_filled == thr->in_pos && partial_update != PARTIAL_START) {
    mythread_cond_wait(&thr->cond, &thr->mutex);
    goto next_loop_unlocked;
  }

  mythread_mutex_unlock(&thr->mutex);

  // Pass the input in small chunks to the Block decoder.
  // This way we react reasonably fast if we are told to stop/exit,
  // and (when partial update is enabled) we tell about our progress
  // to the main thread frequently enough.
  const size_t chunk_size = 16384;
  if ((in_filled - thr->in_pos) > chunk_size)
    in_filled = thr->in_pos + chunk_size;

  ret = thr->block_decoder.code(
      thr->block_decoder.coder, thr->allocator,
      thr->in, &thr->in_pos, in_filled,
      thr->outbuf->buf, &thr->out_pos,
      thr->outbuf->allocated, LZMA_RUN);

  if (ret == LZMA_OK) {
    if (partial_update != PARTIAL_DISABLED) {
      // The main thread uses thr->mutex to change from
      // PARTIAL_DISABLED to PARTIAL_START. The main thread
      // doesn't care about this variable after that so we
      // can safely change it here to PARTIAL_ENABLED
      // without a mutex.
      thr->partial_update = PARTIAL_ENABLED;

      // The main thread is reading decompressed data
      // from thr->outbuf. Tell the main thread about
      // our progress.
      //
      // NOTE: It's possible that we consumed input without
      // producing any new output so it's possible that
      // only in_pos has changed. In case of PARTIAL_START
      // it is possible that neither in_pos nor out_pos has
      // changed.
      mythread_sync(thr->coder->mutex) {
        thr->outbuf->pos = thr->out_pos;
        thr->outbuf->decoder_in_pos = thr->in_pos;
        mythread_cond_signal(&thr->coder->cond);
      }
    }

    goto next_loop_lock;
  }

  // Either we finished successfully (LZMA_STREAM_END) or an error
  // occurred.
  //
  // The sizes are in the Block Header and the Block decoder
  // checks that they match, thus we know these:
  assert(ret != LZMA_STREAM_END || thr->in_pos == thr->in_size);
  assert(ret != LZMA_STREAM_END
    || thr->out_pos == thr->block_options.uncompressed_size);

  mythread_sync(thr->mutex) {
    // Block decoder ensures this, but do a sanity check anyway
    // because thr->in_filled < thr->in_size means that the main
    // thread is still writing to thr->in.
    if (ret == LZMA_STREAM_END && thr->in_filled != thr->in_size) {
      assert(0);
      ret = LZMA_PROG_ERROR;
    }

    if (thr->state != THR_EXIT)
      thr->state = THR_IDLE;
  }

  // Free the input buffer. Don't update in_size as we need
  // it later to update thr->coder->mem_in_use.
  //
  // This step is skipped if an error occurred because the main thread
  // might still be writing to thr->in. The memory will be freed after
  // threads_end() sets thr->state = THR_EXIT.
  if (ret == LZMA_STREAM_END) {
    lzma_free(thr->in, thr->allocator);
    thr->in = NULL;
  }

  mythread_sync(thr->coder->mutex) {
    // Move our progress info to the main thread.
    thr->coder->progress_in += thr->in_pos;
    thr->coder->progress_out += thr->out_pos;
    thr->progress_in = 0;
    thr->progress_out = 0;

    // Mark the outbuf as finished.
    thr->outbuf->pos = thr->out_pos;
    thr->outbuf->decoder_in_pos = thr->in_pos;
    thr->outbuf->finished = true;
    thr->outbuf->finish_ret = ret;
    thr->outbuf = NULL;

    // If an error occurred, tell it to the main thread.
    if (ret != LZMA_STREAM_END
        && thr->coder->thread_error == LZMA_OK)
      thr->coder->thread_error = ret;

    // Return the worker thread to the stack of available
    // threads only if no errors occurred.
    if (ret == LZMA_STREAM_END) {
      // Update memory usage counters.
      thr->coder->mem_in_use -= thr->in_size;
      thr->coder->mem_in_use -= thr->mem_filters;
      thr->coder->mem_cached += thr->mem_filters;

      // Put this thread to the stack of free threads.
      thr->next = thr->coder->threads_free;
      thr->coder->threads_free = thr;
    }

    mythread_cond_signal(&thr->coder->cond);
  }

  goto next_loop_lock;
}
507
508
509
/// Tells the worker threads to exit and waits for them to terminate.
510
static void
511
threads_end(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
512
207k
{
513
244k
  for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
514
37.7k
    mythread_sync(coder->threads[i].mutex) {
515
37.7k
      coder->threads[i].state = THR_EXIT;
516
37.7k
      mythread_cond_signal(&coder->threads[i].cond);
517
37.7k
    }
518
37.7k
  }
519
520
244k
  for (uint32_t i = 0; i < coder->threads_initialized; ++i)
521
37.7k
    mythread_join(coder->threads[i].thread_id);
522
523
207k
  lzma_free(coder->threads, allocator);
524
207k
  coder->threads_initialized = 0;
525
207k
  coder->threads = NULL;
526
207k
  coder->threads_free = NULL;
527
528
  // The threads don't update these when they exit. Do it here.
529
207k
  coder->mem_in_use = 0;
530
207k
  coder->mem_cached = 0;
531
532
207k
  return;
533
207k
}
534
535
536
/// Tell worker threads to stop without doing any cleaning up.
537
/// The clean up will be done when threads_exit() is called;
538
/// it's not possible to reuse the threads after threads_stop().
539
///
540
/// This is called before returning an unrecoverable error code
541
/// to the application. It would be waste of processor time
542
/// to keep the threads running in such a situation.
543
static void
544
threads_stop(struct lzma_stream_coder *coder)
545
1.23k
{
546
2.75k
  for (uint32_t i = 0; i < coder->threads_initialized; ++i) {
547
    // The threads that are in the THR_RUN state will stop
548
    // when they check the state the next time. There's no
549
    // need to signal coder->threads[i].cond.
550
1.52k
    mythread_sync(coder->threads[i].mutex) {
551
1.52k
      coder->threads[i].state = THR_IDLE;
552
1.52k
    }
553
1.52k
  }
554
555
1.23k
  return;
556
1.23k
}
557
558
559
/// Initialize a new worker_thread structure and create a new thread.
560
static lzma_ret
561
initialize_new_thread(struct lzma_stream_coder *coder,
562
    const lzma_allocator *allocator)
563
37.7k
{
564
  // Allocate the coder->threads array if needed. It's done here instead
565
  // of when initializing the decoder because we don't need this if we
566
  // use the direct mode (we may even free coder->threads in the middle
567
  // of the file if we switch from threaded to direct mode).
568
37.7k
  if (coder->threads == NULL) {
569
19.8k
    coder->threads = lzma_alloc(
570
19.8k
      coder->threads_max * sizeof(struct worker_thread),
571
19.8k
      allocator);
572
573
19.8k
    if (coder->threads == NULL)
574
0
      return LZMA_MEM_ERROR;
575
19.8k
  }
576
577
  // Pick a free structure.
578
37.7k
  assert(coder->threads_initialized < coder->threads_max);
579
37.7k
  struct worker_thread *thr
580
37.7k
      = &coder->threads[coder->threads_initialized];
581
582
37.7k
  if (mythread_mutex_init(&thr->mutex))
583
0
    goto error_mutex;
584
585
37.7k
  if (mythread_cond_init(&thr->cond))
586
0
    goto error_cond;
587
588
37.7k
  thr->state = THR_IDLE;
589
37.7k
  thr->in = NULL;
590
37.7k
  thr->in_size = 0;
591
37.7k
  thr->allocator = allocator;
592
37.7k
  thr->coder = coder;
593
37.7k
  thr->outbuf = NULL;
594
37.7k
  thr->block_decoder = LZMA_NEXT_CODER_INIT;
595
37.7k
  thr->mem_filters = 0;
596
597
37.7k
  if (mythread_create(&thr->thread_id, worker_decoder, thr))
598
0
    goto error_thread;
599
600
37.7k
  ++coder->threads_initialized;
601
37.7k
  coder->thr = thr;
602
603
37.7k
  return LZMA_OK;
604
605
0
error_thread:
606
0
  mythread_cond_destroy(&thr->cond);
607
608
0
error_cond:
609
0
  mythread_mutex_destroy(&thr->mutex);
610
611
0
error_mutex:
612
0
  return LZMA_MEM_ERROR;
613
0
}
614
615
616
/// Pick a worker thread for the next Block: reuse one from the stack
/// of free threads if available, otherwise initialize a new one. On
/// success, coder->thr points to the chosen thread and its per-Block
/// counters have been reset.
///
/// NOTE(review): This relies on coder->thr being NULL on entry when no
/// cached thread is available — confirm at the call sites.
static lzma_ret
get_thread(struct lzma_stream_coder *coder, const lzma_allocator *allocator)
{
  // If there is a free structure on the stack, use it.
  mythread_sync(coder->mutex) {
    if (coder->threads_free != NULL) {
      coder->thr = coder->threads_free;
      coder->threads_free = coder->threads_free->next;

      // The thread is no longer in the cache so subtract
      // it from the cached memory usage. Don't add it
      // to mem_in_use though; the caller will handle it
      // since it knows how much memory it will actually
      // use (the filter chain might change).
      coder->mem_cached -= coder->thr->mem_filters;
    }
  }

  if (coder->thr == NULL) {
    assert(coder->threads_initialized < coder->threads_max);

    // Initialize a new thread.
    return_if_error(initialize_new_thread(coder, allocator));
  }

  // Reset the per-Block state of the thread for the next Block.
  coder->thr->in_filled = 0;
  coder->thr->in_pos = 0;
  coder->thr->out_pos = 0;

  coder->thr->progress_in = 0;
  coder->thr->progress_out = 0;

  coder->thr->partial_update = PARTIAL_DISABLED;

  return LZMA_OK;
}
652
653
654
/// Copies decoded data from the output queue to the application's
/// output buffer and, depending on the arguments, waits for more
/// output to appear or for starting a new Block to become possible.
///
/// \param out, out_pos, out_size  Application's output buffer.
/// \param input_is_possible  If not NULL, *input_is_possible is set to
///             true when a new Block can be started (enough memory
///             within memlimit_threading, a free outbuf slot, and a
///             free or creatable worker thread). The caller is
///             expected to have initialized it to false — this
///             function only ever sets it to true.
/// \param waiting_allowed  If false, never block on coder->cond.
/// \param wait_abs, has_blocked  Implement coder->timeout: the
///             absolute wakeup time is computed once per lzma_code()
///             call, on the first blocking wait.
///
/// \return     LZMA_OK, LZMA_TIMED_OUT, or an error code. On any
///             error other than LZMA_TIMED_OUT the worker threads
///             are stopped before returning.
static lzma_ret
read_output_and_wait(struct lzma_stream_coder *coder,
    const lzma_allocator *allocator,
    uint8_t *restrict out, size_t *restrict out_pos,
    size_t out_size,
    bool *input_is_possible,
    bool waiting_allowed,
    mythread_condtime *wait_abs, bool *has_blocked)
{
  lzma_ret ret = LZMA_OK;

  mythread_sync(coder->mutex) {
    do {
      // Get as much output from the queue as is possible
      // without blocking.
      const size_t out_start = *out_pos;
      do {
        ret = lzma_outq_read(&coder->outq, allocator,
            out, out_pos, out_size,
            NULL, NULL);

        // If a Block was finished, tell the worker
        // thread of the next Block (if it is still
        // running) to start telling the main thread
        // when new output is available.
        if (ret == LZMA_STREAM_END)
          lzma_outq_enable_partial_output(
            &coder->outq,
            &worker_enable_partial_update);

        // Loop until a Block wasn't finished.
        // It's important to loop around even if
        // *out_pos == out_size because there could
        // be an empty Block that will return
        // LZMA_STREAM_END without needing any
        // output space.
      } while (ret == LZMA_STREAM_END);

      // Check if lzma_outq_read reported an error from
      // the Block decoder.
      if (ret != LZMA_OK)
        break;

      // If the output buffer is now full but it wasn't full
      // when this function was called, set out_was_filled.
      // This way the next call to stream_decode_mt() knows
      // that some output was produced and no output space
      // remained in the previous call to stream_decode_mt().
      if (*out_pos == out_size && *out_pos != out_start)
        coder->out_was_filled = true;

      // Check if any thread has indicated an error.
      if (coder->thread_error != LZMA_OK) {
        // If LZMA_FAIL_FAST was used, report errors
        // from worker threads immediately.
        if (coder->fail_fast) {
          ret = coder->thread_error;
          break;
        }

        // Otherwise set pending_error. The value we
        // set here will not actually get used other
        // than working as a flag that an error has
        // occurred. This is because in SEQ_ERROR
        // all output before the error will be read
        // first by calling this function, and once we
        // reach the location of the (first) error the
        // error code from the above lzma_outq_read()
        // will be returned to the application.
        //
        // Use LZMA_PROG_ERROR since the value should
        // never leak to the application. It's
        // possible that pending_error has already
        // been set but that doesn't matter: if we get
        // here, pending_error only works as a flag.
        coder->pending_error = LZMA_PROG_ERROR;
      }

      // Check if decoding of the next Block can be started.
      // The memusage of the active threads must be low
      // enough, there must be a free buffer slot in the
      // output queue, and there must be a free thread
      // (that can be either created or an existing one
      // reused).
      //
      // NOTE: This is checked after reading the output
      // above because reading the output can free a slot in
      // the output queue and also reduce active memusage.
      //
      // NOTE: If output queue is empty, then input will
      // always be possible.
      if (input_is_possible != NULL
          && coder->memlimit_threading
            - coder->mem_in_use
            - coder->outq.mem_in_use
            >= coder->mem_next_block
          && lzma_outq_has_buf(&coder->outq)
          && (coder->threads_initialized
              < coder->threads_max
            || coder->threads_free
              != NULL)) {
        *input_is_possible = true;
        break;
      }

      // If the caller doesn't want us to block, return now.
      if (!waiting_allowed)
        break;

      // This check is needed only when input_is_possible
      // is NULL. We must return if we aren't waiting for
      // input to become possible and there is no more
      // output coming from the queue.
      if (lzma_outq_is_empty(&coder->outq)) {
        assert(input_is_possible == NULL);
        break;
      }

      // If there is more data available from the queue,
      // our out buffer must be full and we need to return
      // so that the application can provide more output
      // space.
      //
      // NOTE: In general lzma_outq_is_readable() can return
      // true also when there are no more bytes available.
      // This can happen when a Block has finished without
      // providing any new output. We know that this is not
      // the case because in the beginning of this loop we
      // tried to read as much as possible even when we had
      // no output space left and the mutex has been locked
      // all the time (so worker threads cannot have changed
      // anything). Thus there must be actual pending output
      // in the queue.
      if (lzma_outq_is_readable(&coder->outq)) {
        assert(*out_pos == out_size);
        break;
      }

      // If the application stops providing more input
      // in the middle of a Block, there will eventually
      // be one worker thread left that is stuck waiting for
      // more input (that might never arrive) and a matching
      // outbuf which the worker thread cannot finish due
      // to lack of input. We must detect this situation,
      // otherwise we would end up waiting indefinitely
      // (if no timeout is in use) or keep returning
      // LZMA_TIMED_OUT while making no progress. Thus, the
      // application would never get LZMA_BUF_ERROR from
      // lzma_code() which would tell the application that
      // no more progress is possible. No LZMA_BUF_ERROR
      // means that, for example, truncated .xz files could
      // cause an infinite loop.
      //
      // A worker thread doing partial updates will
      // store not only the output position in outbuf->pos
      // but also the matching input position in
      // outbuf->decoder_in_pos. Here we check if that
      // input position matches the amount of input that
      // the worker thread has been given (in_filled).
      // If so, we must return and not wait as no more
      // output will be coming without first getting more
      // input to the worker thread. If the application
      // keeps calling lzma_code() without providing more
      // input, it will eventually get LZMA_BUF_ERROR.
      //
      // NOTE: We can read partial_update and in_filled
      // without thr->mutex as only the main thread
      // modifies these variables. decoder_in_pos requires
      // coder->mutex which we are already holding.
      if (coder->thr != NULL && coder->thr->partial_update
          != PARTIAL_DISABLED) {
        // There is exactly one outbuf in the queue.
        assert(coder->thr->outbuf == coder->outq.head);
        assert(coder->thr->outbuf == coder->outq.tail);

        if (coder->thr->outbuf->decoder_in_pos
            == coder->thr->in_filled)
          break;
      }

      // Wait for input or output to become possible.
      if (coder->timeout != 0) {
        // See the comment in stream_encoder_mt.c
        // about why mythread_condtime_set() is used
        // like this.
        //
        // FIXME?
        // In contrast to the encoder, this calls
        // _condtime_set while the mutex is locked.
        if (!*has_blocked) {
          *has_blocked = true;
          mythread_condtime_set(wait_abs,
              &coder->cond,
              coder->timeout);
        }

        if (mythread_cond_timedwait(&coder->cond,
            &coder->mutex,
            wait_abs) != 0) {
          ret = LZMA_TIMED_OUT;
          break;
        }
      } else {
        mythread_cond_wait(&coder->cond,
            &coder->mutex);
      }
    } while (ret == LZMA_OK);
  }

  // If we are returning an error, then the application cannot get
  // more output from us and thus keeping the threads running is
  // useless and waste of CPU time.
  if (ret != LZMA_OK && ret != LZMA_TIMED_OUT)
    threads_stop(coder);

  return ret;
}
871
872
873
/// Decode the Block Header field into coder->block_options.
///
/// The whole header might not be available in a single call, so the
/// bytes are accumulated into coder->buffer with coder->pos tracking
/// how much has been copied so far. Callers must not modify coder->pos
/// between calls while the header is still incomplete.
///
/// \return     - LZMA_OK: More input is needed to finish the
///               Block Header.
///             - LZMA_INDEX_DETECTED: The byte at in[*in_pos] is the
///               Index indicator, so the Index field begins here.
///             - LZMA_STREAM_END: The Block Header was fully decoded
///               and coder->block_options is ready.
///             - Other values: errors from lzma_block_header_decode()
///               passed through by return_if_error().
static lzma_ret
decode_block_header(struct lzma_stream_coder *coder,
		const lzma_allocator *allocator, const uint8_t *restrict in,
		size_t *restrict in_pos, size_t in_size)
{
	// Nothing can be done without at least one input byte.
	if (*in_pos >= in_size)
		return LZMA_OK;

	// coder->pos == 0 means this is the first byte of a new field.
	if (coder->pos == 0) {
		// Detect if it's Index.
		if (in[*in_pos] == INDEX_INDICATOR)
			return LZMA_INDEX_DETECTED;

		// Calculate the size of the Block Header. Note that
		// Block Header decoder wants to see this byte too
		// so don't advance *in_pos.
		coder->block_options.header_size
				= lzma_block_header_size_decode(
					in[*in_pos]);
	}

	// Copy the Block Header to the internal buffer.
	lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
			coder->block_options.header_size);

	// Return if we didn't get the whole Block Header yet.
	if (coder->pos < coder->block_options.header_size)
		return LZMA_OK;

	// The full header is buffered; reset pos for the next field.
	coder->pos = 0;

	// Version 1 is needed to support the .ignore_check option.
	coder->block_options.version = 1;

	// Block Header decoder will initialize all members of this array
	// so we don't need to do it here.
	coder->block_options.filters = coder->filters;

	// Decode the Block Header.
	return_if_error(lzma_block_header_decode(&coder->block_options,
			allocator, coder->buffer));

	// If LZMA_IGNORE_CHECK was used, this flag needs to be set.
	// It has to be set after lzma_block_header_decode() because
	// it always resets this to false.
	coder->block_options.ignore_check = coder->ignore_check;

	// coder->block_options is ready now.
	return LZMA_STREAM_END;
}
923
924
925
/// Get the size of the Compressed Data + Block Padding + Check.
926
static size_t
927
comp_blk_size(const struct lzma_stream_coder *coder)
928
141k
{
929
141k
  return vli_ceil4(coder->block_options.compressed_size)
930
141k
      + lzma_check_size(coder->stream_flags.check);
931
141k
}
932
933
934
/// Returns true if the size (compressed or uncompressed) is such that
935
/// threaded decompression cannot be used. Sizes that are too big compared
936
/// to SIZE_MAX must be rejected to avoid integer overflows and truncations
937
/// when lzma_vli is assigned to a size_t.
938
static bool
939
is_direct_mode_needed(lzma_vli size)
940
459k
{
941
459k
  return size == LZMA_VLI_UNKNOWN || size > SIZE_MAX / 3;
942
459k
}
943
944
945
static lzma_ret
946
stream_decoder_reset(struct lzma_stream_coder *coder,
947
    const lzma_allocator *allocator)
948
67.3k
{
949
  // Initialize the Index hash used to verify the Index.
950
67.3k
  coder->index_hash = lzma_index_hash_init(coder->index_hash, allocator);
951
67.3k
  if (coder->index_hash == NULL)
952
0
    return LZMA_MEM_ERROR;
953
954
  // Reset the rest of the variables.
955
67.3k
  coder->sequence = SEQ_STREAM_HEADER;
956
67.3k
  coder->pos = 0;
957
958
67.3k
  return LZMA_OK;
959
67.3k
}
960
961
962
static lzma_ret
963
stream_decode_mt(void *coder_ptr, const lzma_allocator *allocator,
964
     const uint8_t *restrict in, size_t *restrict in_pos,
965
     size_t in_size,
966
     uint8_t *restrict out, size_t *restrict out_pos,
967
     size_t out_size, lzma_action action)
968
79.2k
{
969
79.2k
  struct lzma_stream_coder *coder = coder_ptr;
970
971
79.2k
  mythread_condtime wait_abs;
972
79.2k
  bool has_blocked = false;
973
974
  // Determine if in SEQ_BLOCK_HEADER and SEQ_BLOCK_THR_RUN we should
975
  // tell read_output_and_wait() to wait until it can fill the output
976
  // buffer (or a timeout occurs). Two conditions must be met:
977
  //
978
  // (1) If the caller provided no new input. The reason for this
979
  //     can be, for example, the end of the file or that there is
980
  //     a pause in the input stream and more input is available
981
  //     a little later. In this situation we should wait for output
982
  //     because otherwise we would end up in a busy-waiting loop where
983
  //     we make no progress and the application just calls us again
984
  //     without providing any new input. This would then result in
985
  //     LZMA_BUF_ERROR even though more output would be available
986
  //     once the worker threads decode more data.
987
  //
988
  // (2) Even if (1) is true, we will not wait if the previous call to
989
  //     this function managed to produce some output and the output
990
  //     buffer became full. This is for compatibility with applications
991
  //     that call lzma_code() in such a way that new input is provided
992
  //     only when the output buffer didn't become full. Without this
993
  //     trick such applications would have bad performance (bad
994
  //     parallelization due to decoder not getting input fast enough).
995
  //
996
  //     NOTE: Such loops might require that timeout is disabled (0)
997
  //     if they assume that output-not-full implies that all input has
998
  //     been consumed. If and only if timeout is enabled, we may return
999
  //     when output isn't full *and* not all input has been consumed.
1000
  //
1001
  // However, if LZMA_FINISH is used, the above is ignored and we always
1002
  // wait (timeout can still cause us to return) because we know that
1003
  // we won't get any more input. This matters if the input file is
1004
  // truncated and we are doing single-shot decoding, that is,
1005
  // timeout = 0 and LZMA_FINISH is used on the first call to
1006
  // lzma_code() and the output buffer is known to be big enough
1007
  // to hold all uncompressed data:
1008
  //
1009
  //   - If LZMA_FINISH wasn't handled specially, we could return
1010
  //     LZMA_OK before providing all output that is possible with the
1011
  //     truncated input. The rest would be available if lzma_code() was
1012
  //     called again but then it's not single-shot decoding anymore.
1013
  //
1014
  //   - By handling LZMA_FINISH specially here, the first call will
1015
  //     produce all the output, matching the behavior of the
1016
  //     single-threaded decoder.
1017
  //
1018
  // So it's a very specific corner case but also easy to avoid. Note
1019
  // that this special handling of LZMA_FINISH has no effect for
1020
  // single-shot decoding when the input file is valid (not truncated);
1021
  // premature LZMA_OK wouldn't be possible as long as timeout = 0.
1022
79.2k
  const bool waiting_allowed = action == LZMA_FINISH
1023
37.7k
      || (*in_pos == in_size && !coder->out_was_filled);
1024
79.2k
  coder->out_was_filled = false;
1025
1026
79.2k
  while (true)
1027
714k
  switch (coder->sequence) {
1028
67.6k
  case SEQ_STREAM_HEADER: {
1029
    // Copy the Stream Header to the internal buffer.
1030
67.6k
    const size_t in_old = *in_pos;
1031
67.6k
    lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
1032
67.6k
        LZMA_STREAM_HEADER_SIZE);
1033
67.6k
    coder->progress_in += *in_pos - in_old;
1034
1035
    // Return if we didn't get the whole Stream Header yet.
1036
67.6k
    if (coder->pos < LZMA_STREAM_HEADER_SIZE)
1037
305
      return LZMA_OK;
1038
1039
67.3k
    coder->pos = 0;
1040
1041
    // Decode the Stream Header.
1042
67.3k
    const lzma_ret ret = lzma_stream_header_decode(
1043
67.3k
        &coder->stream_flags, coder->buffer);
1044
67.3k
    if (ret != LZMA_OK)
1045
85
      return ret == LZMA_FORMAT_ERROR && !coder->first_stream
1046
85
          ? LZMA_DATA_ERROR : ret;
1047
1048
    // If we are decoding concatenated Streams, and the later
1049
    // Streams have invalid Header Magic Bytes, we give
1050
    // LZMA_DATA_ERROR instead of LZMA_FORMAT_ERROR.
1051
67.2k
    coder->first_stream = false;
1052
1053
    // Copy the type of the Check so that Block Header and Block
1054
    // decoders see it.
1055
67.2k
    coder->block_options.check = coder->stream_flags.check;
1056
1057
    // Even if we return LZMA_*_CHECK below, we want
1058
    // to continue from Block Header decoding.
1059
67.2k
    coder->sequence = SEQ_BLOCK_HEADER;
1060
1061
    // Detect if there's no integrity check or if it is
1062
    // unsupported if those were requested by the application.
1063
67.2k
    if (coder->tell_no_check && coder->stream_flags.check
1064
0
        == LZMA_CHECK_NONE)
1065
0
      return LZMA_NO_CHECK;
1066
1067
67.2k
    if (coder->tell_unsupported_check
1068
0
        && !lzma_check_is_supported(
1069
0
          coder->stream_flags.check))
1070
0
      return LZMA_UNSUPPORTED_CHECK;
1071
1072
67.2k
    if (coder->tell_any_check)
1073
0
      return LZMA_GET_CHECK;
1074
1075
67.2k
    FALLTHROUGH;
1076
67.2k
  }
1077
1078
383k
  case SEQ_BLOCK_HEADER: {
1079
383k
    const size_t in_old = *in_pos;
1080
383k
    const lzma_ret ret = decode_block_header(coder, allocator,
1081
383k
        in, in_pos, in_size);
1082
383k
    coder->progress_in += *in_pos - in_old;
1083
1084
383k
    if (ret == LZMA_OK) {
1085
      // We didn't decode the whole Block Header yet.
1086
      //
1087
      // Read output from the queue before returning. This
1088
      // is important because it is possible that the
1089
      // application doesn't have any new input available
1090
      // immediately. If we didn't try to copy output from
1091
      // the output queue here, lzma_code() could end up
1092
      // returning LZMA_BUF_ERROR even though queued output
1093
      // is available.
1094
      //
1095
      // If the lzma_code() call provided at least one input
1096
      // byte, only copy as much data from the output queue
1097
      // as is available immediately. This way the
1098
      // application will be able to provide more input
1099
      // without a delay.
1100
      //
1101
      // On the other hand, if lzma_code() was called with
1102
      // an empty input buffer(*), treat it specially: try
1103
      // to fill the output buffer even if it requires
1104
      // waiting for the worker threads to provide output
1105
      // (timeout, if specified, can still cause us to
1106
      // return).
1107
      //
1108
      //   - This way the application will be able to get all
1109
      //     data that can be decoded from the input provided
1110
      //     so far.
1111
      //
1112
      //   - We avoid both premature LZMA_BUF_ERROR and
1113
      //     busy-waiting where the application repeatedly
1114
      //     calls lzma_code() which immediately returns
1115
      //     LZMA_OK without providing new data.
1116
      //
1117
      //   - If the queue becomes empty, we won't wait
1118
      //     anything and will return LZMA_OK immediately
1119
      //     (coder->timeout is completely ignored).
1120
      //
1121
      // (*) See the comment at the beginning of this
1122
      //     function how waiting_allowed is determined
1123
      //     and why there is an exception to the rule
1124
      //     of "called with an empty input buffer".
1125
2.73k
      assert(*in_pos == in_size);
1126
1127
      // If LZMA_FINISH was used we know that we won't get
1128
      // more input, so the file must be truncated if we
1129
      // get here. If worker threads don't detect any
1130
      // errors, eventually there will be no more output
1131
      // while we keep returning LZMA_OK which gets
1132
      // converted to LZMA_BUF_ERROR in lzma_code().
1133
      //
1134
      // If fail-fast is enabled then we will return
1135
      // immediately using LZMA_DATA_ERROR instead of
1136
      // LZMA_OK or LZMA_BUF_ERROR. Rationale for the
1137
      // error code:
1138
      //
1139
      //   - Worker threads may have a large amount of
1140
      //     not-yet-decoded input data and we don't
1141
      //     know for sure if all data is valid. Bad
1142
      //     data there would result in LZMA_DATA_ERROR
1143
      //     when fail-fast isn't used.
1144
      //
1145
      //   - Immediate LZMA_BUF_ERROR would be a bit weird
1146
      //     considering the older liblzma code. lzma_code()
1147
      //     even has an assertion to prevent coders from
1148
      //     returning LZMA_BUF_ERROR directly.
1149
      //
1150
      // The downside of this is that with fail-fast apps
1151
      // cannot always distinguish between corrupt and
1152
      // truncated files.
1153
2.73k
      if (action == LZMA_FINISH && coder->fail_fast) {
1154
        // We won't produce any more output. Stop
1155
        // the unfinished worker threads so they
1156
        // won't waste CPU time.
1157
0
        threads_stop(coder);
1158
0
        return LZMA_DATA_ERROR;
1159
0
      }
1160
1161
      // read_output_and_wait() will call threads_stop()
1162
      // if needed so with that we can use return_if_error.
1163
2.73k
      return_if_error(read_output_and_wait(coder, allocator,
1164
2.73k
        out, out_pos, out_size,
1165
2.73k
        NULL, waiting_allowed,
1166
2.73k
        &wait_abs, &has_blocked));
1167
1168
2.63k
      if (coder->pending_error != LZMA_OK) {
1169
29
        coder->sequence = SEQ_ERROR;
1170
29
        break;
1171
29
      }
1172
1173
2.60k
      return LZMA_OK;
1174
2.63k
    }
1175
1176
380k
    if (ret == LZMA_INDEX_DETECTED) {
1177
62.4k
      coder->sequence = SEQ_INDEX_WAIT_OUTPUT;
1178
62.4k
      break;
1179
62.4k
    }
1180
1181
    // See if an error occurred.
1182
318k
    if (ret != LZMA_STREAM_END) {
1183
      // NOTE: Here and in all other places where
1184
      // pending_error is set, it may overwrite the value
1185
      // (LZMA_PROG_ERROR) set by read_output_and_wait().
1186
      // That function might overwrite value set here too.
1187
      // These are fine because when read_output_and_wait()
1188
      // sets pending_error, it actually works as a flag
1189
      // variable only ("some error has occurred") and the
1190
      // actual value of pending_error is not used in
1191
      // SEQ_ERROR. In such cases SEQ_ERROR will eventually
1192
      // get the correct error code from the return value of
1193
      // a later read_output_and_wait() call.
1194
655
      coder->pending_error = ret;
1195
655
      coder->sequence = SEQ_ERROR;
1196
655
      break;
1197
655
    }
1198
1199
    // Calculate the memory usage of the filters / Block decoder.
1200
317k
    coder->mem_next_filters = lzma_raw_decoder_memusage(
1201
317k
        coder->filters);
1202
1203
317k
    if (coder->mem_next_filters == UINT64_MAX) {
1204
      // One or more unknown Filter IDs.
1205
11
      coder->pending_error = LZMA_OPTIONS_ERROR;
1206
11
      coder->sequence = SEQ_ERROR;
1207
11
      break;
1208
11
    }
1209
1210
317k
    coder->sequence = SEQ_BLOCK_INIT;
1211
317k
    FALLTHROUGH;
1212
317k
  }
1213
1214
317k
  case SEQ_BLOCK_INIT: {
1215
    // Check if decoding is possible at all with the current
1216
    // memlimit_stop which we must never exceed.
1217
    //
1218
    // This needs to be the first thing in SEQ_BLOCK_INIT
1219
    // to make it possible to restart decoding after increasing
1220
    // memlimit_stop with lzma_memlimit_set().
1221
317k
    if (coder->mem_next_filters > coder->memlimit_stop) {
1222
      // Flush pending output before returning
1223
      // LZMA_MEMLIMIT_ERROR. If the application doesn't
1224
      // want to increase the limit, at least it will get
1225
      // all the output possible so far.
1226
371
      return_if_error(read_output_and_wait(coder, allocator,
1227
371
          out, out_pos, out_size,
1228
371
          NULL, true, &wait_abs, &has_blocked));
1229
1230
361
      if (!lzma_outq_is_empty(&coder->outq))
1231
351
        return LZMA_OK;
1232
1233
10
      return LZMA_MEMLIMIT_ERROR;
1234
361
    }
1235
1236
    // Check if the size information is available in Block Header.
1237
    // If it is, check if the sizes are small enough that we don't
1238
    // need to worry *too* much about integer overflows later in
1239
    // the code. If these conditions are not met, we must use the
1240
    // single-threaded direct mode.
1241
317k
    if (is_direct_mode_needed(coder->block_options.compressed_size)
1242
141k
        || is_direct_mode_needed(
1243
176k
        coder->block_options.uncompressed_size)) {
1244
176k
      coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1245
176k
      break;
1246
176k
    }
1247
1248
    // Calculate the amount of memory needed for the input and
1249
    // output buffers in threaded mode.
1250
    //
1251
    // These cannot overflow because we already checked that
1252
    // the sizes are small enough using is_direct_mode_needed().
1253
141k
    coder->mem_next_in = comp_blk_size(coder);
1254
141k
    const uint64_t mem_buffers = coder->mem_next_in
1255
141k
        + lzma_outq_outbuf_memusage(
1256
141k
        coder->block_options.uncompressed_size);
1257
1258
    // Add the amount needed by the filters.
1259
    // Avoid integer overflows.
1260
141k
    if (UINT64_MAX - mem_buffers < coder->mem_next_filters) {
1261
      // Use direct mode if the memusage would overflow.
1262
      // This is a theoretical case that shouldn't happen
1263
      // in practice unless the input file is weird (broken
1264
      // or malicious).
1265
0
      coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1266
0
      break;
1267
0
    }
1268
1269
    // Amount of memory needed to decode this Block in
1270
    // threaded mode:
1271
141k
    coder->mem_next_block = coder->mem_next_filters + mem_buffers;
1272
1273
    // If this alone would exceed memlimit_threading, then we must
1274
    // use the single-threaded direct mode.
1275
141k
    if (coder->mem_next_block > coder->memlimit_threading) {
1276
19.9k
      coder->sequence = SEQ_BLOCK_DIRECT_INIT;
1277
19.9k
      break;
1278
19.9k
    }
1279
1280
    // Use the threaded mode. Free the direct mode decoder in
1281
    // case it has been initialized.
1282
121k
    lzma_next_end(&coder->block_decoder, allocator);
1283
121k
    coder->mem_direct_mode = 0;
1284
1285
    // Since we already know what the sizes are supposed to be,
1286
    // we can already add them to the Index hash. The Block
1287
    // decoder will verify the values while decoding.
1288
121k
    const lzma_ret ret = lzma_index_hash_append(coder->index_hash,
1289
121k
        lzma_block_unpadded_size(
1290
121k
          &coder->block_options),
1291
121k
        coder->block_options.uncompressed_size);
1292
121k
    if (ret != LZMA_OK) {
1293
0
      coder->pending_error = ret;
1294
0
      coder->sequence = SEQ_ERROR;
1295
0
      break;
1296
0
    }
1297
1298
121k
    coder->sequence = SEQ_BLOCK_THR_INIT;
1299
121k
    FALLTHROUGH;
1300
121k
  }
1301
1302
121k
  case SEQ_BLOCK_THR_INIT: {
1303
    // We need to wait for a multiple conditions to become true
1304
    // until we can initialize the Block decoder and let a worker
1305
    // thread decode it:
1306
    //
1307
    //   - Wait for the memory usage of the active threads to drop
1308
    //     so that starting the decoding of this Block won't make
1309
    //     us go over memlimit_threading.
1310
    //
1311
    //   - Wait for at least one free output queue slot.
1312
    //
1313
    //   - Wait for a free worker thread.
1314
    //
1315
    // While we wait, we must copy decompressed data to the out
1316
    // buffer and catch possible decoder errors.
1317
    //
1318
    // read_output_and_wait() does all the above.
1319
121k
    bool block_can_start = false;
1320
1321
121k
    return_if_error(read_output_and_wait(coder, allocator,
1322
121k
        out, out_pos, out_size,
1323
121k
        &block_can_start, true,
1324
121k
        &wait_abs, &has_blocked));
1325
1326
121k
    if (coder->pending_error != LZMA_OK) {
1327
8
      coder->sequence = SEQ_ERROR;
1328
8
      break;
1329
8
    }
1330
1331
121k
    if (!block_can_start) {
1332
      // It's not a timeout because return_if_error handles
1333
      // it already. Output queue cannot be empty either
1334
      // because in that case block_can_start would have
1335
      // been true. Thus the output buffer must be full and
1336
      // the queue isn't empty.
1337
199
      assert(*out_pos == out_size);
1338
199
      assert(!lzma_outq_is_empty(&coder->outq));
1339
199
      return LZMA_OK;
1340
199
    }
1341
1342
    // We know that we can start decoding this Block without
1343
    // exceeding memlimit_threading. However, to stay below
1344
    // memlimit_threading may require freeing some of the
1345
    // cached memory.
1346
    //
1347
    // Get a local copy of variables that require locking the
1348
    // mutex. It is fine if the worker threads modify the real
1349
    // values after we read these as those changes can only be
1350
    // towards more favorable conditions (less memory in use,
1351
    // more in cache).
1352
    //
1353
    // These are initialized to silence warnings.
1354
121k
    uint64_t mem_in_use = 0;
1355
121k
    uint64_t mem_cached = 0;
1356
121k
    struct worker_thread *thr = NULL;
1357
1358
121k
    mythread_sync(coder->mutex) {
1359
121k
      mem_in_use = coder->mem_in_use;
1360
121k
      mem_cached = coder->mem_cached;
1361
121k
      thr = coder->threads_free;
1362
121k
    }
1363
1364
    // The maximum amount of memory that can be held by other
1365
    // threads and cached buffers while allowing us to start
1366
    // decoding the next Block.
1367
121k
    const uint64_t mem_max = coder->memlimit_threading
1368
121k
        - coder->mem_next_block;
1369
1370
    // If the existing allocations are so large that starting
1371
    // to decode this Block might exceed memlimit_threads,
1372
    // try to free memory from the output queue cache first.
1373
    //
1374
    // NOTE: This math assumes the worst case. It's possible
1375
    // that the limit wouldn't be exceeded if the existing cached
1376
    // allocations are reused.
1377
121k
    if (mem_in_use + mem_cached + coder->outq.mem_allocated
1378
121k
        > mem_max) {
1379
      // Clear the outq cache except leave one buffer in
1380
      // the cache if its size is correct. That way we
1381
      // don't free and almost immediately reallocate
1382
      // an identical buffer.
1383
8.14k
      lzma_outq_clear_cache2(&coder->outq, allocator,
1384
8.14k
        coder->block_options.uncompressed_size);
1385
8.14k
    }
1386
1387
    // If there is at least one worker_thread in the cache and
1388
    // the existing allocations are so large that starting to
1389
    // decode this Block might exceed memlimit_threads, free
1390
    // memory by freeing cached Block decoders.
1391
    //
1392
    // NOTE: The comparison is different here than above.
1393
    // Here we don't care about cached buffers in outq anymore
1394
    // and only look at memory actually in use. This is because
1395
    // if there is something in outq cache, it's a single buffer
1396
    // that can be used as is. We ensured this in the above
1397
    // if-block.
1398
121k
    uint64_t mem_freed = 0;
1399
121k
    if (thr != NULL && mem_in_use + mem_cached
1400
83.5k
        + coder->outq.mem_in_use > mem_max) {
1401
      // Don't free the first Block decoder if its memory
1402
      // usage isn't greater than what this Block will need.
1403
      // Typically the same filter chain is used for all
1404
      // Blocks so this way the allocations can be reused
1405
      // when get_thread() picks the first worker_thread
1406
      // from the cache.
1407
8.13k
      if (thr->mem_filters <= coder->mem_next_filters)
1408
5.60k
        thr = thr->next;
1409
1410
15.2k
      while (thr != NULL) {
1411
7.11k
        lzma_next_end(&thr->block_decoder, allocator);
1412
7.11k
        mem_freed += thr->mem_filters;
1413
7.11k
        thr->mem_filters = 0;
1414
7.11k
        thr = thr->next;
1415
7.11k
      }
1416
8.13k
    }
1417
1418
    // Update the memory usage counters. Note that coder->mem_*
1419
    // may have changed since we read them so we must subtract
1420
    // or add the changes.
1421
121k
    mythread_sync(coder->mutex) {
1422
121k
      coder->mem_cached -= mem_freed;
1423
1424
      // Memory needed for the filters and the input buffer.
1425
      // The output queue takes care of its own counter so
1426
      // we don't touch it here.
1427
      //
1428
      // NOTE: After this, coder->mem_in_use +
1429
      // coder->mem_cached might count the same thing twice.
1430
      // If so, this will get corrected in get_thread() when
1431
      // a worker_thread is picked from coder->free_threads
1432
      // and its memory usage is subtracted from mem_cached.
1433
121k
      coder->mem_in_use += coder->mem_next_in
1434
121k
          + coder->mem_next_filters;
1435
121k
    }
1436
1437
    // Allocate memory for the output buffer in the output queue.
1438
121k
    lzma_ret ret = lzma_outq_prealloc_buf(
1439
121k
        &coder->outq, allocator,
1440
121k
        coder->block_options.uncompressed_size);
1441
121k
    if (ret != LZMA_OK) {
1442
0
      threads_stop(coder);
1443
0
      return ret;
1444
0
    }
1445
1446
    // Set up coder->thr.
1447
121k
    ret = get_thread(coder, allocator);
1448
121k
    if (ret != LZMA_OK) {
1449
0
      threads_stop(coder);
1450
0
      return ret;
1451
0
    }
1452
1453
    // The new Block decoder memory usage is already counted in
1454
    // coder->mem_in_use. Store it in the thread too.
1455
121k
    coder->thr->mem_filters = coder->mem_next_filters;
1456
1457
    // Initialize the Block decoder.
1458
121k
    coder->thr->block_options = coder->block_options;
1459
121k
    ret = lzma_block_decoder_init(
1460
121k
          &coder->thr->block_decoder, allocator,
1461
121k
          &coder->thr->block_options);
1462
1463
    // Free the allocated filter options since they are needed
1464
    // only to initialize the Block decoder.
1465
121k
    lzma_filters_free(coder->filters, allocator);
1466
121k
    coder->thr->block_options.filters = NULL;
1467
1468
    // Check if memory usage calculation and Block encoder
1469
    // initialization succeeded.
1470
121k
    if (ret != LZMA_OK) {
1471
6
      coder->pending_error = ret;
1472
6
      coder->sequence = SEQ_ERROR;
1473
6
      break;
1474
6
    }
1475
1476
    // Allocate the input buffer.
1477
121k
    coder->thr->in_size = coder->mem_next_in;
1478
121k
    coder->thr->in = lzma_alloc(coder->thr->in_size, allocator);
1479
121k
    if (coder->thr->in == NULL) {
1480
0
      threads_stop(coder);
1481
0
      return LZMA_MEM_ERROR;
1482
0
    }
1483
1484
    // Get the preallocated output buffer.
1485
121k
    coder->thr->outbuf = lzma_outq_get_buf(
1486
121k
        &coder->outq, coder->thr);
1487
1488
    // Start the decoder.
1489
121k
    mythread_sync(coder->thr->mutex) {
1490
121k
      assert(coder->thr->state == THR_IDLE);
1491
121k
      coder->thr->state = THR_RUN;
1492
121k
      mythread_cond_signal(&coder->thr->cond);
1493
121k
    }
1494
1495
    // Enable output from the thread that holds the oldest output
1496
    // buffer in the output queue (if such a thread exists).
1497
121k
    mythread_sync(coder->mutex) {
1498
121k
      lzma_outq_enable_partial_output(&coder->outq,
1499
121k
          &worker_enable_partial_update);
1500
121k
    }
1501
1502
121k
    coder->sequence = SEQ_BLOCK_THR_RUN;
1503
121k
    FALLTHROUGH;
1504
121k
  }
1505
1506
136k
  case SEQ_BLOCK_THR_RUN: {
1507
136k
    if (action == LZMA_FINISH && coder->fail_fast) {
1508
      // We know that we won't get more input and that
1509
      // the caller wants fail-fast behavior. If we see
1510
      // that we don't have enough input to finish this
1511
      // Block, return LZMA_DATA_ERROR immediately.
1512
      // See SEQ_BLOCK_HEADER for the error code rationale.
1513
0
      const size_t in_avail = in_size - *in_pos;
1514
0
      const size_t in_needed = coder->thr->in_size
1515
0
          - coder->thr->in_filled;
1516
0
      if (in_avail < in_needed) {
1517
0
        threads_stop(coder);
1518
0
        return LZMA_DATA_ERROR;
1519
0
      }
1520
0
    }
1521
1522
    // Copy input to the worker thread.
1523
136k
    size_t cur_in_filled = coder->thr->in_filled;
1524
136k
    lzma_bufcpy(in, in_pos, in_size, coder->thr->in,
1525
136k
        &cur_in_filled, coder->thr->in_size);
1526
1527
    // Tell the thread how much we copied.
1528
136k
    mythread_sync(coder->thr->mutex) {
1529
136k
      coder->thr->in_filled = cur_in_filled;
1530
1531
      // NOTE: Most of the time we are copying input faster
1532
      // than the thread can decode so most of the time
1533
      // calling mythread_cond_signal() is useless but
1534
      // we cannot make it conditional because thr->in_pos
1535
      // is updated without a mutex. And the overhead should
1536
      // be very much negligible anyway.
1537
136k
      mythread_cond_signal(&coder->thr->cond);
1538
136k
    }
1539
1540
    // Read output from the output queue. Just like in
1541
    // SEQ_BLOCK_HEADER, we wait to fill the output buffer
1542
    // only if waiting_allowed was set to true in the beginning
1543
    // of this function (see the comment there) and there is
1544
    // no input available. In SEQ_BLOCK_HEADER, there is never
1545
    // input available when read_output_and_wait() is called,
1546
    // but here there can be when LZMA_FINISH is used, thus we
1547
    // need to check if *in_pos == in_size. Otherwise we would
1548
    // wait here instead of using the available input to start
1549
    // a new thread.
1550
136k
    return_if_error(read_output_and_wait(coder, allocator,
1551
136k
        out, out_pos, out_size,
1552
136k
        NULL,
1553
136k
        waiting_allowed && *in_pos == in_size,
1554
136k
        &wait_abs, &has_blocked));
1555
1556
136k
    if (coder->pending_error != LZMA_OK) {
1557
111
      coder->sequence = SEQ_ERROR;
1558
111
      break;
1559
111
    }
1560
1561
    // Return if the input didn't contain the whole Block.
1562
    //
1563
    // NOTE: When we updated coder->thr->in_filled a few lines
1564
    // above, the worker thread might by now have finished its
1565
    // work and returned itself back to the stack of free threads.
1566
135k
    if (coder->thr->in_filled < coder->thr->in_size) {
1567
15.3k
      assert(*in_pos == in_size);
1568
15.3k
      return LZMA_OK;
1569
15.3k
    }
1570
1571
    // The whole Block has been copied to the thread-specific
1572
    // buffer. Continue from the next Block Header or Index.
1573
120k
    coder->thr = NULL;
1574
120k
    coder->sequence = SEQ_BLOCK_HEADER;
1575
120k
    break;
1576
135k
  }
1577
1578
196k
  case SEQ_BLOCK_DIRECT_INIT: {
1579
    // Wait for the threads to finish and that all decoded data
1580
    // has been copied to the output. That is, wait until the
1581
    // output queue becomes empty.
1582
    //
1583
    // NOTE: No need to check for coder->pending_error as
1584
    // we aren't consuming any input until the queue is empty
1585
    // and if there is a pending error, read_output_and_wait()
1586
    // will eventually return it before the queue is empty.
1587
196k
    return_if_error(read_output_and_wait(coder, allocator,
1588
196k
        out, out_pos, out_size,
1589
196k
        NULL, true, &wait_abs, &has_blocked));
1590
196k
    if (!lzma_outq_is_empty(&coder->outq))
1591
305
      return LZMA_OK;
1592
1593
    // Free the cached output buffers.
1594
195k
    lzma_outq_clear_cache(&coder->outq, allocator);
1595
1596
    // Get rid of the worker threads, including the coder->threads
1597
    // array.
1598
195k
    threads_end(coder, allocator);
1599
1600
    // Initialize the Block decoder.
1601
195k
    const lzma_ret ret = lzma_block_decoder_init(
1602
195k
        &coder->block_decoder, allocator,
1603
195k
        &coder->block_options);
1604
1605
    // Free the allocated filter options since they are needed
1606
    // only to initialize the Block decoder.
1607
195k
    lzma_filters_free(coder->filters, allocator);
1608
195k
    coder->block_options.filters = NULL;
1609
1610
    // Check if Block decoder initialization succeeded.
1611
195k
    if (ret != LZMA_OK)
1612
6
      return ret;
1613
1614
    // Make the memory usage visible to _memconfig().
1615
195k
    coder->mem_direct_mode = coder->mem_next_filters;
1616
1617
195k
    coder->sequence = SEQ_BLOCK_DIRECT_RUN;
1618
195k
    FALLTHROUGH;
1619
195k
  }
1620
1621
236k
  case SEQ_BLOCK_DIRECT_RUN: {
1622
236k
    const size_t in_old = *in_pos;
1623
236k
    const size_t out_old = *out_pos;
1624
236k
    const lzma_ret ret = coder->block_decoder.code(
1625
236k
        coder->block_decoder.coder, allocator,
1626
236k
        in, in_pos, in_size, out, out_pos, out_size,
1627
236k
        action);
1628
236k
    coder->progress_in += *in_pos - in_old;
1629
236k
    coder->progress_out += *out_pos - out_old;
1630
1631
236k
    if (ret != LZMA_STREAM_END)
1632
43.9k
      return ret;
1633
1634
    // Block decoded successfully. Add the new size pair to
1635
    // the Index hash.
1636
193k
    return_if_error(lzma_index_hash_append(coder->index_hash,
1637
193k
        lzma_block_unpadded_size(
1638
193k
          &coder->block_options),
1639
193k
        coder->block_options.uncompressed_size));
1640
1641
193k
    coder->sequence = SEQ_BLOCK_HEADER;
1642
193k
    break;
1643
193k
  }
1644
1645
64.1k
  case SEQ_INDEX_WAIT_OUTPUT:
1646
    // Flush the output from all worker threads so that we can
1647
    // decode the Index without thinking about threading.
1648
64.1k
    return_if_error(read_output_and_wait(coder, allocator,
1649
64.1k
        out, out_pos, out_size,
1650
64.1k
        NULL, true, &wait_abs, &has_blocked));
1651
1652
64.1k
    if (!lzma_outq_is_empty(&coder->outq))
1653
1.74k
      return LZMA_OK;
1654
1655
62.3k
    coder->sequence = SEQ_INDEX_DECODE;
1656
62.3k
    FALLTHROUGH;
1657
1658
62.7k
  case SEQ_INDEX_DECODE: {
1659
    // If we don't have any input, don't call
1660
    // lzma_index_hash_decode() since it would return
1661
    // LZMA_BUF_ERROR, which we must not do here.
1662
62.7k
    if (*in_pos >= in_size)
1663
28
      return LZMA_OK;
1664
1665
    // Decode the Index and compare it to the hash calculated
1666
    // from the sizes of the Blocks (if any).
1667
62.7k
    const size_t in_old = *in_pos;
1668
62.7k
    const lzma_ret ret = lzma_index_hash_decode(coder->index_hash,
1669
62.7k
        in, in_pos, in_size);
1670
62.7k
    coder->progress_in += *in_pos - in_old;
1671
62.7k
    if (ret != LZMA_STREAM_END)
1672
888
      return ret;
1673
1674
61.8k
    coder->sequence = SEQ_STREAM_FOOTER;
1675
61.8k
    FALLTHROUGH;
1676
61.8k
  }
1677
1678
62.1k
  case SEQ_STREAM_FOOTER: {
1679
    // Copy the Stream Footer to the internal buffer.
1680
62.1k
    const size_t in_old = *in_pos;
1681
62.1k
    lzma_bufcpy(in, in_pos, in_size, coder->buffer, &coder->pos,
1682
62.1k
        LZMA_STREAM_HEADER_SIZE);
1683
62.1k
    coder->progress_in += *in_pos - in_old;
1684
1685
    // Return if we didn't get the whole Stream Footer yet.
1686
62.1k
    if (coder->pos < LZMA_STREAM_HEADER_SIZE)
1687
293
      return LZMA_OK;
1688
1689
61.8k
    coder->pos = 0;
1690
1691
    // Decode the Stream Footer. The decoder gives
1692
    // LZMA_FORMAT_ERROR if the magic bytes don't match,
1693
    // so convert that return code to LZMA_DATA_ERROR.
1694
61.8k
    lzma_stream_flags footer_flags;
1695
61.8k
    const lzma_ret ret = lzma_stream_footer_decode(
1696
61.8k
        &footer_flags, coder->buffer);
1697
61.8k
    if (ret != LZMA_OK)
1698
74
      return ret == LZMA_FORMAT_ERROR
1699
74
          ? LZMA_DATA_ERROR : ret;
1700
1701
    // Check that Index Size stored in the Stream Footer matches
1702
    // the real size of the Index field.
1703
61.7k
    if (lzma_index_hash_size(coder->index_hash)
1704
61.7k
        != footer_flags.backward_size)
1705
35
      return LZMA_DATA_ERROR;
1706
1707
    // Compare that the Stream Flags fields are identical in
1708
    // both Stream Header and Stream Footer.
1709
61.7k
    return_if_error(lzma_stream_flags_compare(
1710
61.7k
        &coder->stream_flags, &footer_flags));
1711
1712
61.7k
    if (!coder->concatenated)
1713
0
      return LZMA_STREAM_END;
1714
1715
61.7k
    coder->sequence = SEQ_STREAM_PADDING;
1716
61.7k
    FALLTHROUGH;
1717
61.7k
  }
1718
1719
65.4k
  case SEQ_STREAM_PADDING:
1720
65.4k
    assert(coder->concatenated);
1721
1722
    // Skip over possible Stream Padding.
1723
13.9M
    while (true) {
1724
13.9M
      if (*in_pos >= in_size) {
1725
        // Unless LZMA_FINISH was used, we cannot
1726
        // know if there's more input coming later.
1727
3.69k
        if (action != LZMA_FINISH)
1728
3.67k
          return LZMA_OK;
1729
1730
        // Stream Padding must be a multiple of
1731
        // four bytes.
1732
17
        return coder->pos == 0
1733
17
            ? LZMA_STREAM_END
1734
17
            : LZMA_DATA_ERROR;
1735
3.69k
      }
1736
1737
      // If the byte is not zero, it probably indicates
1738
      // beginning of a new Stream (or the file is corrupt).
1739
13.9M
      if (in[*in_pos] != 0x00)
1740
61.7k
        break;
1741
1742
13.8M
      ++*in_pos;
1743
13.8M
      ++coder->progress_in;
1744
13.8M
      coder->pos = (coder->pos + 1) & 3;
1745
13.8M
    }
1746
1747
    // Stream Padding must be a multiple of four bytes (empty
1748
    // Stream Padding is OK).
1749
61.7k
    if (coder->pos != 0) {
1750
47
      ++*in_pos;
1751
47
      ++coder->progress_in;
1752
47
      return LZMA_DATA_ERROR;
1753
47
    }
1754
1755
    // Prepare to decode the next Stream.
1756
61.6k
    return_if_error(stream_decoder_reset(coder, allocator));
1757
61.6k
    break;
1758
1759
61.6k
  case SEQ_ERROR:
1760
8.41k
    if (!coder->fail_fast) {
1761
      // Let the application get all data before the point
1762
      // where the error was detected. This matches the
1763
      // behavior of single-threaded use.
1764
      //
1765
      // FIXME? Some errors (LZMA_MEM_ERROR) don't get here,
1766
      // they are returned immediately. Thus in rare cases
1767
      // the output will be less than in the single-threaded
1768
      // mode. Maybe this doesn't matter much in practice.
1769
8.41k
      return_if_error(read_output_and_wait(coder, allocator,
1770
8.41k
          out, out_pos, out_size,
1771
8.41k
          NULL, true, &wait_abs, &has_blocked));
1772
1773
      // We get here only if the error happened in the main
1774
      // thread, for example, unsupported Block Header.
1775
8.06k
      if (!lzma_outq_is_empty(&coder->outq))
1776
7.59k
        return LZMA_OK;
1777
8.06k
    }
1778
1779
    // We only get here if no errors were detected by the worker
1780
    // threads. Errors from worker threads would have already been
1781
    // returned by the call to read_output_and_wait() above.
1782
473
    return coder->pending_error;
1783
1784
0
  default:
1785
0
    assert(0);
1786
0
    return LZMA_PROG_ERROR;
1787
714k
  }
1788
1789
  // Never reached
1790
79.2k
}
1791
1792
1793
static void
1794
stream_decoder_mt_end(void *coder_ptr, const lzma_allocator *allocator)
1795
5.63k
{
1796
5.63k
  struct lzma_stream_coder *coder = coder_ptr;
1797
1798
5.63k
  threads_end(coder, allocator);
1799
5.63k
  lzma_outq_end(&coder->outq, allocator);
1800
1801
5.63k
  lzma_next_end(&coder->block_decoder, allocator);
1802
5.63k
  lzma_filters_free(coder->filters, allocator);
1803
5.63k
  lzma_index_hash_end(coder->index_hash, allocator);
1804
1805
5.63k
  lzma_free(coder, allocator);
1806
5.63k
  return;
1807
5.63k
}
1808
1809
1810
static lzma_check
1811
stream_decoder_mt_get_check(const void *coder_ptr)
1812
0
{
1813
0
  const struct lzma_stream_coder *coder = coder_ptr;
1814
0
  return coder->stream_flags.check;
1815
0
}
1816
1817
1818
/// \brief      Get memory usage and get/set the stop memory limit
///
/// Stores the current memory usage (including cached memory) in *memusage
/// and the current memlimit_stop in *old_memlimit. If new_memlimit is
/// non-zero, it becomes the new memlimit_stop unless it is below the
/// current usage, in which case LZMA_MEMLIMIT_ERROR is returned.
static lzma_ret
stream_decoder_mt_memconfig(void *coder_ptr, uint64_t *memusage,
		uint64_t *old_memlimit, uint64_t new_memlimit)
{
	// NOTE: This function gets/sets memlimit_stop. For now,
	// memlimit_threading cannot be modified after initialization.
	//
	// *memusage will include cached memory too. Excluding cached memory
	// would be misleading and it wouldn't help the applications to
	// know how much memory is actually needed to decompress the file
	// because the higher the number of threads and the memlimits are
	// the more memory the decoder may use.
	//
	// Setting a new limit includes the cached memory too and too low
	// limits will be rejected. Alternative could be to free the cached
	// memory immediately if that helps to bring the limit down but
	// the current way is the simplest. It's unlikely that limit needs
	// to be lowered in the middle of a file anyway; the typical reason
	// to want a new limit is to increase after LZMA_MEMLIMIT_ERROR
	// and even such use isn't common.
	struct lzma_stream_coder *coder = coder_ptr;

	// Sum the usage components under the mutex because worker threads
	// update mem_in_use and mem_cached concurrently.
	mythread_sync(coder->mutex) {
		*memusage = coder->mem_direct_mode
				+ coder->mem_in_use
				+ coder->mem_cached
				+ coder->outq.mem_allocated;
	}

	// If no filter chains are allocated, *memusage may be zero.
	// Always return at least LZMA_MEMUSAGE_BASE.
	if (*memusage < LZMA_MEMUSAGE_BASE)
		*memusage = LZMA_MEMUSAGE_BASE;

	*old_memlimit = coder->memlimit_stop;

	// new_memlimit == 0 means "only query, don't change the limit".
	if (new_memlimit != 0) {
		if (new_memlimit < *memusage)
			return LZMA_MEMLIMIT_ERROR;

		coder->memlimit_stop = new_memlimit;
	}

	return LZMA_OK;
}
1863
1864
1865
/// \brief      Get the total decompression progress (lzma_get_progress())
///
/// Sums the progress counters already moved to lzma_stream_coder with the
/// per-thread counters of all initialized worker threads.
static void
stream_decoder_mt_get_progress(void *coder_ptr,
		uint64_t *progress_in, uint64_t *progress_out)
{
	struct lzma_stream_coder *coder = coder_ptr;

	// Lock coder->mutex to prevent finishing threads from moving their
	// progress info from the worker_thread structure to lzma_stream_coder.
	mythread_sync(coder->mutex) {
		*progress_in = coder->progress_in;
		*progress_out = coder->progress_out;

		// Each worker thread's counters are protected by that
		// thread's own mutex.
		for (size_t i = 0; i < coder->threads_initialized; ++i) {
			mythread_sync(coder->threads[i].mutex) {
				*progress_in += coder->threads[i].progress_in;
				*progress_out += coder->threads[i]
						.progress_out;
			}
		}
	}

	return;
}
1888
1889
1890
/// \brief      Initialize the multithreaded .xz Stream decoder
///
/// Validates the options, allocates the coder structure and its mutex and
/// condition variable on first use, resets all per-file state, and sets up
/// the output queue. Returns LZMA_OPTIONS_ERROR for invalid thread counts
/// or unsupported flags, LZMA_MEM_ERROR on allocation failure, or the
/// result of stream_decoder_reset().
static lzma_ret
stream_decoder_mt_init(lzma_next_coder *next, const lzma_allocator *allocator,
		       const lzma_mt *options)
{
	struct lzma_stream_coder *coder;

	if (options->threads == 0 || options->threads > LZMA_THREADS_MAX)
		return LZMA_OPTIONS_ERROR;

	if (options->flags & ~LZMA_SUPPORTED_FLAGS)
		return LZMA_OPTIONS_ERROR;

	lzma_next_coder_init(&stream_decoder_mt_init, next, allocator);

	coder = next->coder;
	if (!coder) {
		// First-time initialization: allocate the coder and its
		// synchronization primitives, and fill in the vtable.
		coder = lzma_alloc(sizeof(struct lzma_stream_coder), allocator);
		if (coder == NULL)
			return LZMA_MEM_ERROR;

		next->coder = coder;

		if (mythread_mutex_init(&coder->mutex)) {
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		if (mythread_cond_init(&coder->cond)) {
			// Undo the mutex init before freeing the coder.
			mythread_mutex_destroy(&coder->mutex);
			lzma_free(coder, allocator);
			return LZMA_MEM_ERROR;
		}

		next->code = &stream_decode_mt;
		next->end = &stream_decoder_mt_end;
		next->get_check = &stream_decoder_mt_get_check;
		next->memconfig = &stream_decoder_mt_memconfig;
		next->get_progress = &stream_decoder_mt_get_progress;

		// Mark the filter array as empty and the output queue and
		// Block decoder as uninitialized so that cleanup on error
		// paths is safe.
		coder->filters[0].id = LZMA_VLI_UNKNOWN;
		memzero(&coder->outq, sizeof(coder->outq));

		coder->block_decoder = LZMA_NEXT_CODER_INIT;
		coder->mem_direct_mode = 0;

		coder->index_hash = NULL;
		coder->threads = NULL;
		coder->threads_free = NULL;
		coder->threads_initialized = 0;
	}

	// Cleanup old filter chain if one remains after unfinished decoding
	// of a previous Stream.
	lzma_filters_free(coder->filters, allocator);

	// By allocating threads from scratch we can start memory-usage
	// accounting from scratch, too. Changes in filter and block sizes may
	// affect number of threads.
	//
	// Reusing threads doesn't seem worth it. Unlike the single-threaded
	// decoder, with some types of input file combinations reusing
	// could leave quite a lot of memory allocated but unused (first
	// file could allocate a lot, the next files could use fewer
	// threads and some of the allocations from the first file would not
	// get freed unless memlimit_threading forces us to clear caches).
	//
	// NOTE: The direct mode decoder isn't freed here if one exists.
	// It will be reused or freed as needed in the main loop.
	threads_end(coder, allocator);

	// All memusage counters start at 0 (including mem_direct_mode).
	// The little extra that is needed for the structs in this file
	// get accounted well enough by the filter chain memory usage
	// which adds LZMA_MEMUSAGE_BASE for each chain. However,
	// stream_decoder_mt_memconfig() has to handle this specially so that
	// it will never return less than LZMA_MEMUSAGE_BASE as memory usage.
	coder->mem_in_use = 0;
	coder->mem_cached = 0;
	coder->mem_next_block = 0;

	coder->progress_in = 0;
	coder->progress_out = 0;

	coder->sequence = SEQ_STREAM_HEADER;
	coder->thread_error = LZMA_OK;
	coder->pending_error = LZMA_OK;
	coder->thr = NULL;

	coder->timeout = options->timeout;

	// Memory limits of zero are bumped to 1; the threading limit is
	// clamped so it never exceeds the stop limit.
	coder->memlimit_threading = my_max(1, options->memlimit_threading);
	coder->memlimit_stop = my_max(1, options->memlimit_stop);
	if (coder->memlimit_threading > coder->memlimit_stop)
		coder->memlimit_threading = coder->memlimit_stop;

	// Cache the decoder behavior flags as booleans.
	coder->tell_no_check = (options->flags & LZMA_TELL_NO_CHECK) != 0;
	coder->tell_unsupported_check
			= (options->flags & LZMA_TELL_UNSUPPORTED_CHECK) != 0;
	coder->tell_any_check = (options->flags & LZMA_TELL_ANY_CHECK) != 0;
	coder->ignore_check = (options->flags & LZMA_IGNORE_CHECK) != 0;
	coder->concatenated = (options->flags & LZMA_CONCATENATED) != 0;
	coder->fail_fast = (options->flags & LZMA_FAIL_FAST) != 0;

	coder->first_stream = true;
	coder->out_was_filled = false;
	coder->pos = 0;

	coder->threads_max = options->threads;

	return_if_error(lzma_outq_init(&coder->outq, allocator,
			coder->threads_max));

	return stream_decoder_reset(coder, allocator);
}
2004
2005
2006
extern LZMA_API(lzma_ret)
2007
lzma_stream_decoder_mt(lzma_stream *strm, const lzma_mt *options)
2008
5.63k
{
2009
5.63k
  lzma_next_strm_init(stream_decoder_mt_init, strm, options);
2010
2011
5.63k
  strm->internal->supported_actions[LZMA_RUN] = true;
2012
5.63k
  strm->internal->supported_actions[LZMA_FINISH] = true;
2013
2014
5.63k
  return LZMA_OK;
2015
5.63k
}