/src/libjxl/third_party/brotli/c/enc/ringbuffer.h
/* Copyright 2013 Google Inc. All Rights Reserved.

   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Sliding window over the input data. */

#ifndef BROTLI_ENC_RINGBUFFER_H_
#define BROTLI_ENC_RINGBUFFER_H_

#include <string.h>  /* memcpy */

#include <brotli/types.h>

#include "../common/platform.h"
#include "memory.h"
#include "quality.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* A RingBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to
   `position() % (1 << window_bits)'.
   For convenience, the RingBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
     buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
     buffer_[-1] == buffer_[(1 << window_bits) - 1] and
     buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
typedef struct RingBuffer {
  /* Size of the ring-buffer is (1 << window_bits) + tail_size_. */
  const uint32_t size_;
  const uint32_t mask_;
  const uint32_t tail_size_;
  const uint32_t total_size_;

  uint32_t cur_size_;
  /* Position to write in the ring buffer. */
  uint32_t pos_;
  /* The actual ring buffer, containing the copy of the last two bytes,
     the data, and the copy of the beginning as a tail. */
  uint8_t* data_;
  /* The start of the ring-buffer. */
  uint8_t* buffer_;
} RingBuffer;
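
/* A hedged sketch of the invariant documented above: once the buffer has
   wrapped at least once, a reader may index up to `tail_size_' bytes past
   `size_' (and two bytes before position 0) without masking. The guard
   macro and helper below are our illustration, not part of upstream
   brotli. */
#if defined(BROTLI_RINGBUFFER_EXAMPLES)
#include <assert.h>
static void RingBufferMirrorSketch(const RingBuffer* rb, uint32_t i) {
  if (i < rb->tail_size_) {
    assert(rb->buffer_[i] == rb->buffer_[i + rb->size_]);  /* tail copy */
  }
  assert(rb->buffer_[-1] == rb->buffer_[rb->size_ - 1]);   /* guard bytes */
  assert(rb->buffer_[-2] == rb->buffer_[rb->size_ - 2]);
}
#endif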

static BROTLI_INLINE void RingBufferInit(RingBuffer* rb) {
  rb->cur_size_ = 0;
  rb->pos_ = 0;
  rb->data_ = 0;
  rb->buffer_ = 0;
}

static BROTLI_INLINE void RingBufferSetup(
    const BrotliEncoderParams* params, RingBuffer* rb) {
  int window_bits = ComputeRbBits(params);
  int tail_bits = params->lgblock;
  /* Cast away const to initialize the logically read-only fields once. */
  *(uint32_t*)&rb->size_ = 1u << window_bits;
  *(uint32_t*)&rb->mask_ = (1u << window_bits) - 1;
  *(uint32_t*)&rb->tail_size_ = 1u << tail_bits;
  *(uint32_t*)&rb->total_size_ = rb->size_ + rb->tail_size_;
}
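
/* Setup sketch (ours; the guard macro is hypothetical): after
   RingBufferSetup the derived fields relate as asserted below. We assume
   here only that ComputeRbBits (quality.h) yields the window bit-width. */
#if defined(BROTLI_RINGBUFFER_EXAMPLES)
#include <assert.h>
static void RingBufferSetupSketch(const BrotliEncoderParams* params) {
  RingBuffer rb;
  RingBufferInit(&rb);
  RingBufferSetup(params, &rb);
  assert((rb.size_ & (rb.size_ - 1)) == 0);        /* power of two */
  assert(rb.mask_ == rb.size_ - 1);                /* position mask */
  assert(rb.tail_size_ == 1u << params->lgblock);  /* tail == block size */
  assert(rb.total_size_ == rb.size_ + rb.tail_size_);
}
#endif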

static BROTLI_INLINE void RingBufferFree(MemoryManager* m, RingBuffer* rb) {
  BROTLI_FREE(m, rb->data_);
}

/* Allocates or re-allocates data_ to the given length plus some slack
   region before and after. Fills the slack regions with zeros. */
static BROTLI_INLINE void RingBufferInitBuffer(
    MemoryManager* m, const uint32_t buflen, RingBuffer* rb) {
  static const size_t kSlackForEightByteHashingEverywhere = 7;
  uint8_t* new_data = BROTLI_ALLOC(
      m, uint8_t, 2 + buflen + kSlackForEightByteHashingEverywhere);
  size_t i;
  if (BROTLI_IS_OOM(m) || BROTLI_IS_NULL(new_data)) return;
  if (rb->data_) {
    memcpy(new_data, rb->data_,
           2 + rb->cur_size_ + kSlackForEightByteHashingEverywhere);
    BROTLI_FREE(m, rb->data_);
  }
  rb->data_ = new_data;
  rb->cur_size_ = buflen;
  rb->buffer_ = rb->data_ + 2;
  rb->buffer_[-2] = rb->buffer_[-1] = 0;
  for (i = 0; i < kSlackForEightByteHashingEverywhere; ++i) {
    rb->buffer_[rb->cur_size_ + i] = 0;
  }
}
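
/* Resulting layout of data_, inferred from the function above:

     data_:  [2 guard bytes][buflen data bytes][7 zeroed slack bytes]
              buffer_[-2..-1] buffer_[0..buflen)

   The slack lets hashers issue unconditional 8-byte loads at any position
   in the buffer without reading past the allocation. */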

static BROTLI_INLINE void RingBufferWriteTail(
    const uint8_t* bytes, size_t n, RingBuffer* rb) {
  const size_t masked_pos = rb->pos_ & rb->mask_;
  if (BROTLI_PREDICT_FALSE(masked_pos < rb->tail_size_)) {
    /* Just fill the tail buffer with the beginning data. */
    const size_t p = rb->size_ + masked_pos;
    memcpy(&rb->buffer_[p], bytes,
           BROTLI_MIN(size_t, n, rb->tail_size_ - masked_pos));
  }
}
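
/* Why the tail copy pays off (a sketch; the guard macro and helper are
   ours): match-comparison loops may run up to `tail_size_' bytes past the
   end of the ring without masking every index, because RingBufferWriteTail
   mirrors the first bytes of the ring at [size_, size_ + tail_size_). */
#if defined(BROTLI_RINGBUFFER_EXAMPLES)
static size_t RingBufferMatchSketch(const RingBuffer* rb, size_t pos,
                                    const uint8_t* input, size_t max_len) {
  /* Caller guarantees pos < rb->size_ and max_len <= rb->tail_size_. */
  size_t i = 0;
  while (i < max_len && rb->buffer_[pos + i] == input[i]) ++i;
  return i;
}
#endif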

/* Push bytes into the ring buffer. */
static BROTLI_INLINE void RingBufferWrite(
    MemoryManager* m, const uint8_t* bytes, size_t n, RingBuffer* rb) {
  if (rb->pos_ == 0 && n < rb->tail_size_) {
    /* Special case for the first write: to process the first block, we don't
       need to allocate the whole ring-buffer and we don't need the tail
       either. However, we do this memory usage optimization only if the
       first write is less than the tail size, which is also the input block
       size; otherwise it is likely that other blocks will follow and we
       will need to reallocate to the full size anyway. */
    rb->pos_ = (uint32_t)n;
    RingBufferInitBuffer(m, rb->pos_, rb);
    if (BROTLI_IS_OOM(m)) return;
    memcpy(rb->buffer_, bytes, n);
    return;
  }
  if (rb->cur_size_ < rb->total_size_) {
    /* Lazily allocate the full buffer. */
    RingBufferInitBuffer(m, rb->total_size_, rb);
    if (BROTLI_IS_OOM(m)) return;
    /* Initialize the last two bytes to zero, so that we don't have to worry
       later when we copy the last two bytes to the first two positions. */
    rb->buffer_[rb->size_ - 2] = 0;
    rb->buffer_[rb->size_ - 1] = 0;
    /* Initialize tail; might be touched by "best_len++" optimization when
       ring buffer is "full". */
    rb->buffer_[rb->size_] = 241;
  }
  {
    const size_t masked_pos = rb->pos_ & rb->mask_;
    /* The length of the writes is limited so that we do not need to worry
       about a write wrapping around the ring more than once. */
    RingBufferWriteTail(bytes, n, rb);
    if (BROTLI_PREDICT_TRUE(masked_pos + n <= rb->size_)) {
      /* A single write fits. */
      memcpy(&rb->buffer_[masked_pos], bytes, n);
    } else {
      /* Split into two writes.
         Copy into the end of the buffer, including the tail buffer. */
      memcpy(&rb->buffer_[masked_pos], bytes,
             BROTLI_MIN(size_t, n, rb->total_size_ - masked_pos));
      /* Copy into the beginning of the buffer. */
      memcpy(&rb->buffer_[0], bytes + (rb->size_ - masked_pos),
             n - (rb->size_ - masked_pos));
    }
  }
  {
    BROTLI_BOOL not_first_lap = (rb->pos_ & (1u << 31)) != 0;
    uint32_t rb_pos_mask = (1u << 31) - 1;
    rb->buffer_[-2] = rb->buffer_[rb->size_ - 2];
    rb->buffer_[-1] = rb->buffer_[rb->size_ - 1];
    rb->pos_ = (rb->pos_ & rb_pos_mask) + (uint32_t)(n & rb_pos_mask);
    if (not_first_lap) {
      /* Wrap, but preserve not-a-first-lap feature. */
      rb->pos_ |= 1u << 31;
    }
  }
}
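
/* End-to-end usage sketch (ours, not an upstream test). Assumptions: the
   guard macro is hypothetical; BrotliInitMemoryManager (memory.h) accepts
   null callbacks and falls back to malloc/free; only params->lgwin and
   params->lgblock matter for RingBufferSetup. */
#if defined(BROTLI_RINGBUFFER_EXAMPLES)
#include <assert.h>
static void RingBufferUsageSketch(void) {
  MemoryManager m;
  BrotliEncoderParams params;
  RingBuffer rb;
  size_t i;
  BrotliInitMemoryManager(&m, 0, 0, 0);
  memset(&params, 0, sizeof(params));
  params.lgwin = 10;  /* small window, for the sketch */
  params.lgblock = 8;
  RingBufferInit(&rb);
  RingBufferSetup(&params, &rb);
  /* Write a little more than one full lap so the ring wraps once. */
  for (i = 0; i < (size_t)rb.size_ + 16; ++i) {
    uint8_t b = (uint8_t)i;
    RingBufferWrite(&m, &b, 1, &rb);
    if (BROTLI_IS_OOM(&m)) return;
  }
  /* The guard bytes now mirror the last two bytes of the ring. */
  assert(rb.buffer_[-1] == rb.buffer_[rb.size_ - 1]);
  assert(rb.buffer_[-2] == rb.buffer_[rb.size_ - 2]);
  RingBufferFree(&m, &rb);
}
#endif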

#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif

#endif  /* BROTLI_ENC_RINGBUFFER_H_ */