/src/libwebp/src/enc/backward_references_enc.c
Line | Count | Source |
1 | | // Copyright 2012 Google Inc. All Rights Reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style license |
4 | | // that can be found in the COPYING file in the root of the source |
5 | | // tree. An additional intellectual property rights grant can be found |
6 | | // in the file PATENTS. All contributing project authors may |
7 | | // be found in the AUTHORS file in the root of the source tree. |
8 | | // ----------------------------------------------------------------------------- |
9 | | // |
10 | | // Author: Jyrki Alakuijala (jyrki@google.com) |
11 | | // |
12 | | |
13 | | #include "src/enc/backward_references_enc.h" |
14 | | |
15 | | #include <assert.h> |
16 | | #include <string.h> |
17 | | |
18 | | #include "src/dsp/cpu.h" |
19 | | #include "src/dsp/lossless.h" |
20 | | #include "src/dsp/lossless_common.h" |
21 | | #include "src/enc/histogram_enc.h" |
22 | | #include "src/enc/vp8i_enc.h" |
23 | | #include "src/utils/color_cache_utils.h" |
24 | | #include "src/utils/utils.h" |
25 | | #include "src/webp/encode.h" |
26 | | #include "src/webp/format_constants.h" |
27 | | #include "src/webp/types.h" |
28 | | |
29 | 0 | #define MIN_BLOCK_SIZE 256 // minimum block size for backward references |
30 | | |
31 | | // 1M window (4M bytes) minus 120 special codes for short distances. |
32 | 0 | #define WINDOW_SIZE ((1 << WINDOW_SIZE_BITS) - 120) |
33 | | |
34 | | // Minimum number of pixels for which it is cheaper to encode a |
35 | | // distance + length instead of each pixel as a literal. |
36 | 0 | #define MIN_LENGTH 4 |
37 | | |
38 | | // ----------------------------------------------------------------------------- |
39 | | |
40 | | static const uint8_t plane_to_code_lut[128] = { |
41 | | 96, 73, 55, 39, 23, 13, 5, 1, 255, 255, 255, 255, 255, 255, 255, 255, |
42 | | 101, 78, 58, 42, 26, 16, 8, 2, 0, 3, 9, 17, 27, 43, 59, 79, |
43 | | 102, 86, 62, 46, 32, 20, 10, 6, 4, 7, 11, 21, 33, 47, 63, 87, |
44 | | 105, 90, 70, 52, 37, 28, 18, 14, 12, 15, 19, 29, 38, 53, 71, 91, |
45 | | 110, 99, 82, 66, 48, 35, 30, 24, 22, 25, 31, 36, 49, 67, 83, 100, |
46 | | 115, 108, 94, 76, 64, 50, 44, 40, 34, 41, 45, 51, 65, 77, 95, 109, |
47 | | 118, 113, 103, 92, 80, 68, 60, 56, 54, 57, 61, 69, 81, 93, 104, 114, |
48 | | 119, 116, 111, 106, 97, 88, 84, 74, 72, 75, 85, 89, 98, 107, 112, 117}; |
49 | | |
50 | | extern int VP8LDistanceToPlaneCode(int xsize, int dist); |
51 | 0 | int VP8LDistanceToPlaneCode(int xsize, int dist) { |
52 | 0 | const int yoffset = dist / xsize; |
53 | 0 | const int xoffset = dist - yoffset * xsize; |
54 | 0 | if (xoffset <= 8 && yoffset < 8) { |
55 | 0 | return plane_to_code_lut[yoffset * 16 + 8 - xoffset] + 1; |
56 | 0 | } else if (xoffset > xsize - 8 && yoffset < 7) { |
57 | 0 | return plane_to_code_lut[(yoffset + 1) * 16 + 8 + (xsize - xoffset)] + 1; |
58 | 0 | } |
59 | 0 | return dist + 120; |
60 | 0 | } |
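VP8LDistanceToPlaneCode() is the 2D-locality transform: offsets within a small neighbourhood of the current pixel are remapped through plane_to_code_lut to small codes, and everything else is passed through with a +120 bias so it never collides with the 120 short-distance codes. A small illustration whose values follow from the table above (the wrapper function is hypothetical; only the VP8LDistanceToPlaneCode() calls are real):

// Sketch: plane codes for a few distances in a width-100 image.
static void PlaneCodeExample(void) {
  const int xsize = 100;
  const int above = VP8LDistanceToPlaneCode(xsize, xsize);     // pixel directly above -> 1
  const int left = VP8LDistanceToPlaneCode(xsize, 1);          // previous pixel -> 2
  const int far = VP8LDistanceToPlaneCode(xsize, 10 * xsize);  // outside the LUT -> 10 * xsize + 120
  (void)above; (void)left; (void)far;
}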
61 | | |
62 | | // Returns the exact index where array1 and array2 differ. If that index is |
63 | | // less than or equal to best_len_match, the return value only has to be |
64 | | // strictly less than best_len_match: the current behavior is to return 0 if |
65 | | // the arrays differ at index best_len_match, and the exact index otherwise. |
66 | | // If the arrays match over the whole range, max_limit is returned. |
67 | | static WEBP_INLINE int FindMatchLength(const uint32_t* const array1, |
68 | | const uint32_t* const array2, |
69 | 0 | int best_len_match, int max_limit) { |
70 | | // Before 'expensive' linear match, check if the two arrays match at the |
71 | | // current best length index. |
72 | 0 | if (array1[best_len_match] != array2[best_len_match]) return 0; |
73 | | |
74 | 0 | return VP8LVectorMismatch(array1, array2, max_limit); |
75 | 0 | } |
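FindMatchLength() delegates the linear scan to the platform-dispatched VP8LVectorMismatch(). A plain-C sketch of what that call computes, for reference (the dispatched versions are SIMD-accelerated but equivalent; this helper is not part of the library):

// Scalar sketch of VP8LVectorMismatch(): returns the first index at which the
// two arrays differ, or 'length' if they match over the whole range.
static int VectorMismatch_Sketch(const uint32_t* const array1,
                                 const uint32_t* const array2, int length) {
  int match_len = 0;
  while (match_len < length && array1[match_len] == array2[match_len]) {
    ++match_len;
  }
  return match_len;
}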
76 | | |
77 | | // ----------------------------------------------------------------------------- |
78 | | // VP8LBackwardRefs |
79 | | |
80 | | struct PixOrCopyBlock { |
81 | | PixOrCopyBlock* next; // next block (or NULL) |
82 | | PixOrCopy* start; // data start |
83 | | int size; // currently used size |
84 | | }; |
85 | | |
86 | | extern void VP8LClearBackwardRefs(VP8LBackwardRefs* const refs); |
87 | 0 | void VP8LClearBackwardRefs(VP8LBackwardRefs* const refs) { |
88 | 0 | assert(refs != NULL); |
89 | 0 | if (refs->tail != NULL) { |
90 | 0 | *refs->tail = refs->free_blocks; // recycle all blocks at once |
91 | 0 | } |
92 | 0 | refs->free_blocks = refs->refs; |
93 | 0 | refs->tail = &refs->refs; |
94 | 0 | refs->last_block = NULL; |
95 | 0 | refs->refs = NULL; |
96 | 0 | } |
97 | | |
98 | 0 | void VP8LBackwardRefsClear(VP8LBackwardRefs* const refs) { |
99 | 0 | assert(refs != NULL); |
100 | 0 | VP8LClearBackwardRefs(refs); |
101 | 0 | while (refs->free_blocks != NULL) { |
102 | 0 | PixOrCopyBlock* const next = refs->free_blocks->next; |
103 | 0 | WebPSafeFree(refs->free_blocks); |
104 | 0 | refs->free_blocks = next; |
105 | 0 | } |
106 | 0 | } |
107 | | |
108 | | // Swaps the content of two VP8LBackwardRefs. |
109 | | static void BackwardRefsSwap(VP8LBackwardRefs* const refs1, |
110 | 0 | VP8LBackwardRefs* const refs2) { |
111 | 0 | const int point_to_refs1 = |
112 | 0 | (refs1->tail != NULL && refs1->tail == &refs1->refs); |
113 | 0 | const int point_to_refs2 = |
114 | 0 | (refs2->tail != NULL && refs2->tail == &refs2->refs); |
115 | 0 | const VP8LBackwardRefs tmp = *refs1; |
116 | 0 | *refs1 = *refs2; |
117 | 0 | *refs2 = tmp; |
118 | 0 | if (point_to_refs2) refs1->tail = &refs1->refs; |
119 | 0 | if (point_to_refs1) refs2->tail = &refs2->refs; |
120 | 0 | } |
121 | | |
122 | 0 | void VP8LBackwardRefsInit(VP8LBackwardRefs* const refs, int block_size) { |
123 | 0 | assert(refs != NULL); |
124 | 0 | memset(refs, 0, sizeof(*refs)); |
125 | 0 | refs->tail = &refs->refs; |
126 | 0 | refs->block_size = |
127 | 0 | (block_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : block_size; |
128 | 0 | } |
129 | | |
130 | 0 | VP8LRefsCursor VP8LRefsCursorInit(const VP8LBackwardRefs* const refs) { |
131 | 0 | VP8LRefsCursor c; |
132 | 0 | c.cur_block = refs->refs; |
133 | 0 | if (refs->refs != NULL) { |
134 | 0 | c.cur_pos = c.cur_block->start; |
135 | 0 | c.last_pos = c.cur_pos + c.cur_block->size; |
136 | 0 | } else { |
137 | 0 | c.cur_pos = NULL; |
138 | 0 | c.last_pos = NULL; |
139 | 0 | } |
140 | 0 | return c; |
141 | 0 | } |
142 | | |
143 | 0 | void VP8LRefsCursorNextBlock(VP8LRefsCursor* const c) { |
144 | 0 | PixOrCopyBlock* const b = c->cur_block->next; |
145 | 0 | c->cur_pos = (b == NULL) ? NULL : b->start; |
146 | 0 | c->last_pos = (b == NULL) ? NULL : b->start + b->size; |
147 | 0 | c->cur_block = b; |
148 | 0 | } |
149 | | |
150 | | // Create a new block, either recycled from the free list or freshly allocated. |
151 | 0 | static PixOrCopyBlock* BackwardRefsNewBlock(VP8LBackwardRefs* const refs) { |
152 | 0 | PixOrCopyBlock* b = refs->free_blocks; |
153 | 0 | if (b == NULL) { // allocate new memory chunk |
154 | 0 | const size_t total_size = sizeof(*b) + refs->block_size * sizeof(*b->start); |
155 | 0 | b = (PixOrCopyBlock*)WebPSafeMalloc(1ULL, total_size); |
156 | 0 | if (b == NULL) { |
157 | 0 | refs->error |= 1; |
158 | 0 | return NULL; |
159 | 0 | } |
160 | 0 | b->start = (PixOrCopy*)((uint8_t*)b + sizeof(*b)); // not always aligned |
161 | 0 | } else { // recycle from free-list |
162 | 0 | refs->free_blocks = b->next; |
163 | 0 | } |
164 | 0 | *refs->tail = b; |
165 | 0 | refs->tail = &b->next; |
166 | 0 | refs->last_block = b; |
167 | 0 | b->next = NULL; |
168 | 0 | b->size = 0; |
169 | 0 | return b; |
170 | 0 | } |
171 | | |
172 | | // Return 1 on success, 0 on error. |
173 | | static int BackwardRefsClone(const VP8LBackwardRefs* const from, |
174 | 0 | VP8LBackwardRefs* const to) { |
175 | 0 | const PixOrCopyBlock* block_from = from->refs; |
176 | 0 | VP8LClearBackwardRefs(to); |
177 | 0 | while (block_from != NULL) { |
178 | 0 | PixOrCopyBlock* const block_to = BackwardRefsNewBlock(to); |
179 | 0 | if (block_to == NULL) return 0; |
180 | 0 | memcpy(block_to->start, block_from->start, |
181 | 0 | block_from->size * sizeof(PixOrCopy)); |
182 | 0 | block_to->size = block_from->size; |
183 | 0 | block_from = block_from->next; |
184 | 0 | } |
185 | 0 | return 1; |
186 | 0 | } |
187 | | |
188 | | extern void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs, |
189 | | const PixOrCopy v); |
190 | | void VP8LBackwardRefsCursorAdd(VP8LBackwardRefs* const refs, |
191 | 0 | const PixOrCopy v) { |
192 | 0 | PixOrCopyBlock* b = refs->last_block; |
193 | 0 | if (b == NULL || b->size == refs->block_size) { |
194 | 0 | b = BackwardRefsNewBlock(refs); |
195 | 0 | if (b == NULL) return; // refs->error is set |
196 | 0 | } |
197 | 0 | b->start[b->size++] = v; |
198 | 0 | } |
199 | | |
200 | | // ----------------------------------------------------------------------------- |
201 | | // Hash chains |
202 | | |
203 | 0 | int VP8LHashChainInit(VP8LHashChain* const p, int size) { |
204 | 0 | assert(p->size == 0); |
205 | 0 | assert(p->offset_length == NULL); |
206 | 0 | assert(size > 0); |
207 | 0 | p->offset_length = (uint32_t*)WebPSafeMalloc(size, sizeof(*p->offset_length)); |
208 | 0 | if (p->offset_length == NULL) return 0; |
209 | 0 | p->size = size; |
210 | |
211 | 0 | return 1; |
212 | 0 | } |
213 | | |
214 | 0 | void VP8LHashChainClear(VP8LHashChain* const p) { |
215 | 0 | assert(p != NULL); |
216 | 0 | WebPSafeFree(p->offset_length); |
217 | |
218 | 0 | p->size = 0; |
219 | 0 | p->offset_length = NULL; |
220 | 0 | } |
221 | | |
222 | | // ----------------------------------------------------------------------------- |
223 | | |
224 | | static const uint32_t kHashMultiplierHi = 0xc6a4a793u; |
225 | | static const uint32_t kHashMultiplierLo = 0x5bd1e996u; |
226 | | |
227 | | static WEBP_UBSAN_IGNORE_UNSIGNED_OVERFLOW WEBP_INLINE uint32_t |
228 | 0 | GetPixPairHash64(const uint32_t* const argb) { |
229 | 0 | uint32_t key; |
230 | 0 | key = argb[1] * kHashMultiplierHi; |
231 | 0 | key += argb[0] * kHashMultiplierLo; |
232 | 0 | key = key >> (32 - HASH_BITS); |
233 | 0 | return key; |
234 | 0 | } |
235 | | |
236 | | // Returns the maximum number of hash chain lookups to do for a |
237 | | // given compression quality. Return value in range [8, 86]. |
238 | 0 | static int GetMaxItersForQuality(int quality) { |
239 | 0 | return 8 + (quality * quality) / 128; |
240 | 0 | } |
241 | | |
242 | 0 | static int GetWindowSizeForHashChain(int quality, int xsize) { |
243 | 0 | const int max_window_size = (quality > 75) ? WINDOW_SIZE |
244 | 0 | : (quality > 50) ? (xsize << 8) |
245 | 0 | : (quality > 25) ? (xsize << 6) |
246 | 0 | : (xsize << 4); |
247 | 0 | assert(xsize > 0); |
248 | 0 | return (max_window_size > WINDOW_SIZE) ? WINDOW_SIZE : max_window_size; |
249 | 0 | } |
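Both knobs scale the search effort with quality. A few worked values, spelled out as assertions (a sketch, not part of the library):

// Worked examples of the quality -> effort mapping above.
static void EffortExamples(void) {
  assert(GetMaxItersForQuality(0) == 8);                      // 8 + 0 * 0 / 128
  assert(GetMaxItersForQuality(50) == 27);                    // 8 + 2500 / 128
  assert(GetMaxItersForQuality(100) == 86);                   // 8 + 10000 / 128
  assert(GetWindowSizeForHashChain(20, 100) == (100 << 4));   // low quality: 16 * xsize
  assert(GetWindowSizeForHashChain(90, 100) == WINDOW_SIZE);  // high quality: full window
}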
250 | | |
251 | 0 | static WEBP_INLINE int MaxFindCopyLength(int len) { |
252 | 0 | return (len < MAX_LENGTH) ? len : MAX_LENGTH; |
253 | 0 | } |
254 | | |
255 | | int VP8LHashChainFill(VP8LHashChain* const p, int quality, |
256 | | const uint32_t* const argb, int xsize, int ysize, |
257 | | int low_effort, const WebPPicture* const pic, |
258 | 0 | int percent_range, int* const percent) { |
259 | 0 | const int size = xsize * ysize; |
260 | 0 | const int iter_max = GetMaxItersForQuality(quality); |
261 | 0 | const uint32_t window_size = GetWindowSizeForHashChain(quality, xsize); |
262 | 0 | int remaining_percent = percent_range; |
263 | 0 | int percent_start = *percent; |
264 | 0 | int pos; |
265 | 0 | int argb_comp; |
266 | 0 | uint32_t base_position; |
267 | 0 | int32_t* hash_to_first_index; |
268 | | // Temporarily use the p->offset_length array as a hash chain. |
269 | 0 | int32_t* chain = (int32_t*)p->offset_length; |
270 | 0 | assert(size > 0); |
271 | 0 | assert(p->size != 0); |
272 | 0 | assert(p->offset_length != NULL); |
273 | |
274 | 0 | if (size <= 2) { |
275 | 0 | p->offset_length[0] = p->offset_length[size - 1] = 0; |
276 | 0 | return 1; |
277 | 0 | } |
278 | | |
279 | 0 | hash_to_first_index = |
280 | 0 | (int32_t*)WebPSafeMalloc(HASH_SIZE, sizeof(*hash_to_first_index)); |
281 | 0 | if (hash_to_first_index == NULL) { |
282 | 0 | return WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY); |
283 | 0 | } |
284 | | |
285 | 0 | percent_range = remaining_percent / 2; |
286 | 0 | remaining_percent -= percent_range; |
287 | | |
288 | | // Set the int32_t array to -1. |
289 | 0 | memset(hash_to_first_index, 0xff, HASH_SIZE * sizeof(*hash_to_first_index)); |
290 | | // Fill the chain linking pixels with the same hash. |
291 | 0 | argb_comp = (argb[0] == argb[1]); |
292 | 0 | for (pos = 0; pos < size - 2;) { |
293 | 0 | uint32_t hash_code; |
294 | 0 | const int argb_comp_next = (argb[pos + 1] == argb[pos + 2]); |
295 | 0 | if (argb_comp && argb_comp_next) { |
296 | | // Consecutive pixels with the same color will share the same hash. |
297 | | // We therefore use a different hash: the color and its repetition |
298 | | // length. |
299 | 0 | uint32_t tmp[2]; |
300 | 0 | uint32_t len = 1; |
301 | 0 | tmp[0] = argb[pos]; |
302 | | // Figure out how long the run of identical pixels is. |
303 | | // The last pixel of the run has a different 64-bit hash, as its next pixel |
304 | | // does not have the same color, so we just need to get to the last pixel |
305 | | // that is equal to its follower. |
306 | 0 | while (pos + (int)len + 2 < size && argb[pos + len + 2] == argb[pos]) { |
307 | 0 | ++len; |
308 | 0 | } |
309 | 0 | if (len > MAX_LENGTH) { |
310 | | // Skip the pixels that match for distance=1 and length>MAX_LENGTH |
311 | | // because they are linked to their predecessor and we automatically |
312 | | // check that in the main for loop below. Skipping means setting no |
313 | | // predecessor in the chain, hence -1. |
314 | 0 | memset(chain + pos, 0xff, (len - MAX_LENGTH) * sizeof(*chain)); |
315 | 0 | pos += len - MAX_LENGTH; |
316 | 0 | len = MAX_LENGTH; |
317 | 0 | } |
318 | | // Process the rest of the hash chain. |
319 | 0 | while (len) { |
320 | 0 | tmp[1] = len--; |
321 | 0 | hash_code = GetPixPairHash64(tmp); |
322 | 0 | chain[pos] = hash_to_first_index[hash_code]; |
323 | 0 | hash_to_first_index[hash_code] = pos++; |
324 | 0 | } |
325 | 0 | argb_comp = 0; |
326 | 0 | } else { |
327 | | // Just move one pixel forward. |
328 | 0 | hash_code = GetPixPairHash64(argb + pos); |
329 | 0 | chain[pos] = hash_to_first_index[hash_code]; |
330 | 0 | hash_to_first_index[hash_code] = pos++; |
331 | 0 | argb_comp = argb_comp_next; |
332 | 0 | } |
333 | |
334 | 0 | if (!WebPReportProgress( |
335 | 0 | pic, percent_start + percent_range * pos / (size - 2), percent)) { |
336 | 0 | WebPSafeFree(hash_to_first_index); |
337 | 0 | return 0; |
338 | 0 | } |
339 | 0 | } |
340 | | // Process the penultimate pixel. |
341 | 0 | chain[pos] = hash_to_first_index[GetPixPairHash64(argb + pos)]; |
342 | |
343 | 0 | WebPSafeFree(hash_to_first_index); |
344 | |
345 | 0 | percent_start += percent_range; |
346 | 0 | if (!WebPReportProgress(pic, percent_start, percent)) return 0; |
347 | 0 | percent_range = remaining_percent; |
348 | | |
349 | | // Find the best match interval at each pixel, defined by an offset to the |
350 | | // pixel and a length. The right-most pixel cannot match anything to the right |
351 | | // (hence a best length of 0) and the left-most pixel nothing to the left |
352 | | // (hence an offset of 0). |
353 | 0 | assert(size > 2); |
354 | 0 | p->offset_length[0] = p->offset_length[size - 1] = 0; |
355 | 0 | for (base_position = size - 2; base_position > 0;) { |
356 | 0 | const int max_len = MaxFindCopyLength(size - 1 - base_position); |
357 | 0 | const uint32_t* const argb_start = argb + base_position; |
358 | 0 | int iter = iter_max; |
359 | 0 | int best_length = 0; |
360 | 0 | uint32_t best_distance = 0; |
361 | 0 | uint32_t best_argb; |
362 | 0 | const int min_pos = |
363 | 0 | (base_position > window_size) ? base_position - window_size : 0; |
364 | 0 | const int length_max = (max_len < 256) ? max_len : 256; |
365 | 0 | uint32_t max_base_position; |
366 | |
367 | 0 | pos = chain[base_position]; |
368 | 0 | if (!low_effort) { |
369 | 0 | int curr_length; |
370 | | // Heuristic: use the comparison with the above line as an initialization. |
371 | 0 | if (base_position >= (uint32_t)xsize) { |
372 | 0 | curr_length = FindMatchLength(argb_start - xsize, argb_start, |
373 | 0 | best_length, max_len); |
374 | 0 | if (curr_length > best_length) { |
375 | 0 | best_length = curr_length; |
376 | 0 | best_distance = xsize; |
377 | 0 | } |
378 | 0 | --iter; |
379 | 0 | } |
380 | | // Heuristic: compare to the previous pixel. |
381 | 0 | curr_length = |
382 | 0 | FindMatchLength(argb_start - 1, argb_start, best_length, max_len); |
383 | 0 | if (curr_length > best_length) { |
384 | 0 | best_length = curr_length; |
385 | 0 | best_distance = 1; |
386 | 0 | } |
387 | 0 | --iter; |
388 | | // Skip the for loop if we already have the maximum. |
389 | 0 | if (best_length == MAX_LENGTH) pos = min_pos - 1; |
390 | 0 | } |
391 | 0 | best_argb = argb_start[best_length]; |
392 | |
393 | 0 | for (; pos >= min_pos && --iter; pos = chain[pos]) { |
394 | 0 | int curr_length; |
395 | 0 | assert(base_position > (uint32_t)pos); |
396 | |
397 | 0 | if (argb[pos + best_length] != best_argb) continue; |
398 | | |
399 | 0 | curr_length = VP8LVectorMismatch(argb + pos, argb_start, max_len); |
400 | 0 | if (best_length < curr_length) { |
401 | 0 | best_length = curr_length; |
402 | 0 | best_distance = base_position - pos; |
403 | 0 | best_argb = argb_start[best_length]; |
404 | | // Stop if we have reached a good enough length. |
405 | 0 | if (best_length >= length_max) break; |
406 | 0 | } |
407 | 0 | } |
408 | | // We have the best match, but if the two intervals keep matching to the |
409 | | // left, we also get the best matches for the left-extended pixels. |
410 | 0 | max_base_position = base_position; |
411 | 0 | while (1) { |
412 | 0 | assert(best_length <= MAX_LENGTH); |
413 | 0 | assert(best_distance <= WINDOW_SIZE); |
414 | 0 | p->offset_length[base_position] = |
415 | 0 | (best_distance << MAX_LENGTH_BITS) | (uint32_t)best_length; |
416 | 0 | --base_position; |
417 | | // Stop if we don't have a match or if we are out of bounds. |
418 | 0 | if (best_distance == 0 || base_position == 0) break; |
419 | | // Stop if we cannot extend the matching intervals to the left. |
420 | 0 | if (base_position < best_distance || |
421 | 0 | argb[base_position - best_distance] != argb[base_position]) { |
422 | 0 | break; |
423 | 0 | } |
424 | | // Stop if we are matching at its limit because there could be a closer |
425 | | // matching interval with the same maximum length. Then again, if the |
426 | | // matching interval is as close as possible (best_distance == 1), we will |
427 | | // never find anything better so let's continue. |
428 | 0 | if (best_length == MAX_LENGTH && best_distance != 1 && |
429 | 0 | base_position + MAX_LENGTH < max_base_position) { |
430 | 0 | break; |
431 | 0 | } |
432 | 0 | if (best_length < MAX_LENGTH) { |
433 | 0 | ++best_length; |
434 | 0 | max_base_position = base_position; |
435 | 0 | } |
436 | 0 | } |
437 | | |
438 | 0 | if (!WebPReportProgress(pic, |
439 | 0 | percent_start + percent_range * |
440 | 0 | (size - 2 - base_position) / |
441 | 0 | (size - 2), |
442 | 0 | percent)) { |
443 | 0 | return 0; |
444 | 0 | } |
445 | 0 | } |
446 | | |
447 | 0 | return WebPReportProgress(pic, percent_start + percent_range, percent); |
448 | 0 | } |
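Each offset_length[] entry produced above packs the best match for its pixel as '(distance << MAX_LENGTH_BITS) | length'. The read side (VP8LHashChainFindOffset()/VP8LHashChainFindLength(), used further down and defined in backward_references_enc.h) presumably unpacks it along these lines:

// Sketch of the accessors that undo the packing above.
static WEBP_INLINE int HashChainFindOffset_Sketch(const VP8LHashChain* const p,
                                                  int base_position) {
  return p->offset_length[base_position] >> MAX_LENGTH_BITS;
}
static WEBP_INLINE int HashChainFindLength_Sketch(const VP8LHashChain* const p,
                                                  int base_position) {
  return p->offset_length[base_position] & ((1u << MAX_LENGTH_BITS) - 1);
}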
449 | | |
450 | | static WEBP_INLINE void AddSingleLiteral(uint32_t pixel, int use_color_cache, |
451 | | VP8LColorCache* const hashers, |
452 | 0 | VP8LBackwardRefs* const refs) { |
453 | 0 | PixOrCopy v; |
454 | 0 | if (use_color_cache) { |
455 | 0 | const uint32_t key = VP8LColorCacheGetIndex(hashers, pixel); |
456 | 0 | if (VP8LColorCacheLookup(hashers, key) == pixel) { |
457 | 0 | v = PixOrCopyCreateCacheIdx(key); |
458 | 0 | } else { |
459 | 0 | v = PixOrCopyCreateLiteral(pixel); |
460 | 0 | VP8LColorCacheSet(hashers, key, pixel); |
461 | 0 | } |
462 | 0 | } else { |
463 | 0 | v = PixOrCopyCreateLiteral(pixel); |
464 | 0 | } |
465 | 0 | VP8LBackwardRefsCursorAdd(refs, v); |
466 | 0 | } |
467 | | |
468 | | static int BackwardReferencesRle(int xsize, int ysize, |
469 | | const uint32_t* const argb, int cache_bits, |
470 | 0 | VP8LBackwardRefs* const refs) { |
471 | 0 | const int pix_count = xsize * ysize; |
472 | 0 | int i, k; |
473 | 0 | const int use_color_cache = (cache_bits > 0); |
474 | 0 | VP8LColorCache hashers; |
475 | |
476 | 0 | if (use_color_cache && !VP8LColorCacheInit(&hashers, cache_bits)) { |
477 | 0 | return 0; |
478 | 0 | } |
479 | 0 | VP8LClearBackwardRefs(refs); |
480 | | // Add first pixel as literal. |
481 | 0 | AddSingleLiteral(argb[0], use_color_cache, &hashers, refs); |
482 | 0 | i = 1; |
483 | 0 | while (i < pix_count) { |
484 | 0 | const int max_len = MaxFindCopyLength(pix_count - i); |
485 | 0 | const int rle_len = FindMatchLength(argb + i, argb + i - 1, 0, max_len); |
486 | 0 | const int prev_row_len = |
487 | 0 | (i < xsize) ? 0 |
488 | 0 | : FindMatchLength(argb + i, argb + i - xsize, 0, max_len); |
489 | 0 | if (rle_len >= prev_row_len && rle_len >= MIN_LENGTH) { |
490 | 0 | VP8LBackwardRefsCursorAdd(refs, PixOrCopyCreateCopy(1, rle_len)); |
491 | | // We don't need to update the color cache here since it is always the |
492 | | // same pixel being copied, and that does not change the color cache |
493 | | // state. |
494 | 0 | i += rle_len; |
495 | 0 | } else if (prev_row_len >= MIN_LENGTH) { |
496 | 0 | VP8LBackwardRefsCursorAdd(refs, PixOrCopyCreateCopy(xsize, prev_row_len)); |
497 | 0 | if (use_color_cache) { |
498 | 0 | for (k = 0; k < prev_row_len; ++k) { |
499 | 0 | VP8LColorCacheInsert(&hashers, argb[i + k]); |
500 | 0 | } |
501 | 0 | } |
502 | 0 | i += prev_row_len; |
503 | 0 | } else { |
504 | 0 | AddSingleLiteral(argb[i], use_color_cache, &hashers, refs); |
505 | 0 | i++; |
506 | 0 | } |
507 | 0 | } |
508 | 0 | if (use_color_cache) VP8LColorCacheClear(&hashers); |
509 | 0 | return !refs->error; |
510 | 0 | } |
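The RLE pass only ever emits distance-1 copies (a run of the previous pixel) and distance-xsize copies (a repeat of the previous row), falling back to literals otherwise. A minimal usage sketch on a hypothetical 4x2 input whose second row repeats the first:

// Usage sketch: for this input the pass emits four literals for row 0,
// then a single copy(dist = xsize, len = 4) for row 1.
static int RleExample(void) {
  const uint32_t argb[8] = {0xff000000u, 0xff0000ffu, 0xff00ff00u, 0xffff0000u,
                            0xff000000u, 0xff0000ffu, 0xff00ff00u, 0xffff0000u};
  VP8LBackwardRefs refs;
  int ok;
  VP8LBackwardRefsInit(&refs, /*block_size=*/0);  // clamped to MIN_BLOCK_SIZE
  ok = BackwardReferencesRle(/*xsize=*/4, /*ysize=*/2, argb, /*cache_bits=*/0,
                             &refs);
  VP8LBackwardRefsClear(&refs);
  return ok;
}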
511 | | |
512 | | static int BackwardReferencesLz77(int xsize, int ysize, |
513 | | const uint32_t* const argb, int cache_bits, |
514 | | const VP8LHashChain* const hash_chain, |
515 | 0 | VP8LBackwardRefs* const refs) { |
516 | 0 | int i; |
517 | 0 | int i_last_check = -1; |
518 | 0 | int ok = 0; |
519 | 0 | int cc_init = 0; |
520 | 0 | const int use_color_cache = (cache_bits > 0); |
521 | 0 | const int pix_count = xsize * ysize; |
522 | 0 | VP8LColorCache hashers; |
523 | |
524 | 0 | if (use_color_cache) { |
525 | 0 | cc_init = VP8LColorCacheInit(&hashers, cache_bits); |
526 | 0 | if (!cc_init) goto Error; |
527 | 0 | } |
528 | 0 | VP8LClearBackwardRefs(refs); |
529 | 0 | for (i = 0; i < pix_count;) { |
530 | | // Alternative#1: Code the pixels starting at 'i' using backward reference. |
531 | 0 | int offset = 0; |
532 | 0 | int len = 0; |
533 | 0 | int j; |
534 | 0 | VP8LHashChainFindCopy(hash_chain, i, &offset, &len); |
535 | 0 | if (len >= MIN_LENGTH) { |
536 | 0 | const int len_ini = len; |
537 | 0 | int max_reach = 0; |
538 | 0 | const int j_max = |
539 | 0 | (i + len_ini >= pix_count) ? pix_count - 1 : i + len_ini; |
540 | | // Only start from what we have not checked already. |
541 | 0 | i_last_check = (i > i_last_check) ? i : i_last_check; |
542 | | // We know the best match for the current pixel but we try to find the |
543 | | // best matches for the current pixel AND the next one combined. |
544 | | // The naive method would use the intervals: |
545 | | // [i,i+len) + [i+len, length of best match at i+len) |
546 | | // while we check if we can use: |
547 | | // [i,j) (where j<=i+len) + [j, length of best match at j) |
548 | 0 | for (j = i_last_check + 1; j <= j_max; ++j) { |
549 | 0 | const int len_j = VP8LHashChainFindLength(hash_chain, j); |
550 | 0 | const int reach = |
551 | 0 | j + (len_j >= MIN_LENGTH ? len_j : 1); // 1 for single literal. |
552 | 0 | if (reach > max_reach) { |
553 | 0 | len = j - i; |
554 | 0 | max_reach = reach; |
555 | 0 | if (max_reach >= pix_count) break; |
556 | 0 | } |
557 | 0 | } |
558 | 0 | } else { |
559 | 0 | len = 1; |
560 | 0 | } |
561 | | // Go with literal or backward reference. |
562 | 0 | assert(len > 0); |
563 | 0 | if (len == 1) { |
564 | 0 | AddSingleLiteral(argb[i], use_color_cache, &hashers, refs); |
565 | 0 | } else { |
566 | 0 | VP8LBackwardRefsCursorAdd(refs, PixOrCopyCreateCopy(offset, len)); |
567 | 0 | if (use_color_cache) { |
568 | 0 | for (j = i; j < i + len; ++j) VP8LColorCacheInsert(&hashers, argb[j]); |
569 | 0 | } |
570 | 0 | } |
571 | 0 | i += len; |
572 | 0 | } |
573 | |
574 | 0 | ok = !refs->error; |
575 | 0 | Error: |
576 | 0 | if (cc_init) VP8LColorCacheClear(&hashers); |
577 | 0 | return ok; |
578 | 0 | } |
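The inner j loop above is a cheap form of lazy matching: instead of always taking the full match at i, it truncates it at whichever split point maximizes the combined reach of this copy plus the best match starting at the split. A worked example with hypothetical lengths:

// Hypothetical illustration of the 'reach' heuristic:
//   best match at i      : len_ini = 6  -> reach i + 6 if taken whole
//   best match at i + 2  : len_j = 12   -> reach (i + 2) + 12 = i + 14
// Assuming no other split point reaches further, the loop truncates the first
// copy to len = 2 (j - i); the next iteration then starts at i + 2 and can use
// the longer match, covering up to i + 14 overall.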
579 | | |
580 | | // Compute an LZ77 by forcing matches to happen within a given distance cost. |
581 | | // We therefore limit the algorithm to the lowest 32 values in the PlaneCode |
582 | | // definition. |
583 | 0 | #define WINDOW_OFFSETS_SIZE_MAX 32 |
584 | | static int BackwardReferencesLz77Box(int xsize, int ysize, |
585 | | const uint32_t* const argb, int cache_bits, |
586 | | const VP8LHashChain* const hash_chain_best, |
587 | | VP8LHashChain* hash_chain, |
588 | 0 | VP8LBackwardRefs* const refs) { |
589 | 0 | int i; |
590 | 0 | const int pix_count = xsize * ysize; |
591 | 0 | uint16_t* counts; |
592 | 0 | int window_offsets[WINDOW_OFFSETS_SIZE_MAX] = {0}; |
593 | 0 | int window_offsets_new[WINDOW_OFFSETS_SIZE_MAX] = {0}; |
594 | 0 | int window_offsets_size = 0; |
595 | 0 | int window_offsets_new_size = 0; |
596 | 0 | uint16_t* const counts_ini = |
597 | 0 | (uint16_t*)WebPSafeMalloc(xsize * ysize, sizeof(*counts_ini)); |
598 | 0 | int best_offset_prev = -1, best_length_prev = -1; |
599 | 0 | if (counts_ini == NULL) return 0; |
600 | | |
601 | | // counts[i] counts how many times a pixel is repeated starting at position i. |
602 | 0 | i = pix_count - 2; |
603 | 0 | counts = counts_ini + i; |
604 | 0 | counts[1] = 1; |
605 | 0 | for (; i >= 0; --i, --counts) { |
606 | 0 | if (argb[i] == argb[i + 1]) { |
607 | | // Max out the counts to MAX_LENGTH. |
608 | 0 | counts[0] = counts[1] + (counts[1] != MAX_LENGTH); |
609 | 0 | } else { |
610 | 0 | counts[0] = 1; |
611 | 0 | } |
612 | 0 | } |
613 | | |
614 | | // Figure out the window offsets around a pixel. They are stored in a |
615 | | // spiraling order around the pixel as defined by VP8LDistanceToPlaneCode. |
616 | 0 | { |
617 | 0 | int x, y; |
618 | 0 | for (y = 0; y <= 6; ++y) { |
619 | 0 | for (x = -6; x <= 6; ++x) { |
620 | 0 | const int offset = y * xsize + x; |
621 | 0 | int plane_code; |
622 | | // Ignore offsets that reference the current pixel itself or pixels after it. |
623 | 0 | if (offset <= 0) continue; |
624 | 0 | plane_code = VP8LDistanceToPlaneCode(xsize, offset) - 1; |
625 | 0 | if (plane_code >= WINDOW_OFFSETS_SIZE_MAX) continue; |
626 | 0 | window_offsets[plane_code] = offset; |
627 | 0 | } |
628 | 0 | } |
629 | | // For narrow images, not all plane codes are reached, so remove those. |
630 | 0 | for (i = 0; i < WINDOW_OFFSETS_SIZE_MAX; ++i) { |
631 | 0 | if (window_offsets[i] == 0) continue; |
632 | 0 | window_offsets[window_offsets_size++] = window_offsets[i]; |
633 | 0 | } |
634 | | // Given a pixel P, find the offsets that reach pixels unreachable from P-1 |
635 | | // with any of the offsets in window_offsets[]. |
636 | 0 | for (i = 0; i < window_offsets_size; ++i) { |
637 | 0 | int j; |
638 | 0 | int is_reachable = 0; |
639 | 0 | for (j = 0; j < window_offsets_size && !is_reachable; ++j) { |
640 | 0 | is_reachable |= (window_offsets[i] == window_offsets[j] + 1); |
641 | 0 | } |
642 | 0 | if (!is_reachable) { |
643 | 0 | window_offsets_new[window_offsets_new_size] = window_offsets[i]; |
644 | 0 | ++window_offsets_new_size; |
645 | 0 | } |
646 | 0 | } |
647 | 0 | } |
648 | |
649 | 0 | hash_chain->offset_length[0] = 0; |
650 | 0 | for (i = 1; i < pix_count; ++i) { |
651 | 0 | int ind; |
652 | 0 | int best_length = VP8LHashChainFindLength(hash_chain_best, i); |
653 | 0 | int best_offset; |
654 | 0 | int do_compute = 1; |
655 | |
656 | 0 | if (best_length >= MAX_LENGTH) { |
657 | | // Do not recompute the best match if we already have a maximal one in the |
658 | | // window. |
659 | 0 | best_offset = VP8LHashChainFindOffset(hash_chain_best, i); |
660 | 0 | for (ind = 0; ind < window_offsets_size; ++ind) { |
661 | 0 | if (best_offset == window_offsets[ind]) { |
662 | 0 | do_compute = 0; |
663 | 0 | break; |
664 | 0 | } |
665 | 0 | } |
666 | 0 | } |
667 | 0 | if (do_compute) { |
668 | | // Figure out if we should use the offset/length from the previous pixel |
669 | | // as an initial guess and therefore only inspect the offsets in |
670 | | // window_offsets_new[]. |
671 | 0 | const int use_prev = |
672 | 0 | (best_length_prev > 1) && (best_length_prev < MAX_LENGTH); |
673 | 0 | const int num_ind = |
674 | 0 | use_prev ? window_offsets_new_size : window_offsets_size; |
675 | 0 | best_length = use_prev ? best_length_prev - 1 : 0; |
676 | 0 | best_offset = use_prev ? best_offset_prev : 0; |
677 | | // Find the longest match in a window around the pixel. |
678 | 0 | for (ind = 0; ind < num_ind; ++ind) { |
679 | 0 | int curr_length = 0; |
680 | 0 | int j = i; |
681 | 0 | int j_offset = |
682 | 0 | use_prev ? i - window_offsets_new[ind] : i - window_offsets[ind]; |
683 | 0 | if (j_offset < 0 || argb[j_offset] != argb[i]) continue; |
684 | | // The longest match is the sum of how many times each pixel is |
685 | | // repeated. |
686 | 0 | do { |
687 | 0 | const int counts_j_offset = counts_ini[j_offset]; |
688 | 0 | const int counts_j = counts_ini[j]; |
689 | 0 | if (counts_j_offset != counts_j) { |
690 | 0 | curr_length += |
691 | 0 | (counts_j_offset < counts_j) ? counts_j_offset : counts_j; |
692 | 0 | break; |
693 | 0 | } |
694 | | // The same color is repeated counts_j_offset times at both j_offset and j. |
695 | 0 | curr_length += counts_j_offset; |
696 | 0 | j_offset += counts_j_offset; |
697 | 0 | j += counts_j_offset; |
698 | 0 | } while (curr_length <= MAX_LENGTH && j < pix_count && |
699 | 0 | argb[j_offset] == argb[j]); |
700 | 0 | if (best_length < curr_length) { |
701 | 0 | best_offset = |
702 | 0 | use_prev ? window_offsets_new[ind] : window_offsets[ind]; |
703 | 0 | if (curr_length >= MAX_LENGTH) { |
704 | 0 | best_length = MAX_LENGTH; |
705 | 0 | break; |
706 | 0 | } else { |
707 | 0 | best_length = curr_length; |
708 | 0 | } |
709 | 0 | } |
710 | 0 | } |
711 | 0 | } |
712 | |
713 | 0 | assert(i + best_length <= pix_count); |
714 | 0 | assert(best_length <= MAX_LENGTH); |
715 | 0 | if (best_length <= MIN_LENGTH) { |
716 | 0 | hash_chain->offset_length[i] = 0; |
717 | 0 | best_offset_prev = 0; |
718 | 0 | best_length_prev = 0; |
719 | 0 | } else { |
720 | 0 | hash_chain->offset_length[i] = |
721 | 0 | (best_offset << MAX_LENGTH_BITS) | (uint32_t)best_length; |
722 | 0 | best_offset_prev = best_offset; |
723 | 0 | best_length_prev = best_length; |
724 | 0 | } |
725 | 0 | } |
726 | 0 | hash_chain->offset_length[0] = 0; |
727 | 0 | WebPSafeFree(counts_ini); |
728 | |
729 | 0 | return BackwardReferencesLz77(xsize, ysize, argb, cache_bits, hash_chain, |
730 | 0 | refs); |
731 | 0 | } |
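The counts_ini[] run-length array built at the top of this function is what keeps the box search cheap: match lengths are accumulated run by run instead of pixel by pixel. A small illustration of its contents:

// For a 4-pixel image with colors A A A B, the helper array is
//   counts_ini[] = {3, 2, 1, 1}
// i.e. counts_ini[i] is the number of consecutive pixels, starting at i, that
// share the color of pixel i (capped at MAX_LENGTH in the loop above).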
732 | | |
733 | | // ----------------------------------------------------------------------------- |
734 | | |
735 | | static void BackwardReferences2DLocality(int xsize, |
736 | 0 | const VP8LBackwardRefs* const refs) { |
737 | 0 | VP8LRefsCursor c = VP8LRefsCursorInit(refs); |
738 | 0 | while (VP8LRefsCursorOk(&c)) { |
739 | 0 | if (PixOrCopyIsCopy(c.cur_pos)) { |
740 | 0 | const int dist = c.cur_pos->argb_or_distance; |
741 | 0 | const int transformed_dist = VP8LDistanceToPlaneCode(xsize, dist); |
742 | 0 | c.cur_pos->argb_or_distance = transformed_dist; |
743 | 0 | } |
744 | 0 | VP8LRefsCursorNext(&c); |
745 | 0 | } |
746 | 0 | } |
747 | | |
748 | | // Evaluate optimal cache bits for the local color cache. |
749 | | // The input *best_cache_bits sets the maximum cache bits to use (passing 0 |
750 | | // implies disabling the local color cache). The local color cache is also |
751 | | // disabled for low quality (quality <= 25). |
752 | | // Returns 0 in case of memory error. |
753 | | static int CalculateBestCacheSize(const uint32_t* argb, int quality, |
754 | | const VP8LBackwardRefs* const refs, |
755 | 0 | int* const best_cache_bits) { |
756 | 0 | int i; |
757 | 0 | const int cache_bits_max = (quality <= 25) ? 0 : *best_cache_bits; |
758 | 0 | uint64_t entropy_min = WEBP_UINT64_MAX; |
759 | 0 | int cc_init[MAX_COLOR_CACHE_BITS + 1] = {0}; |
760 | 0 | VP8LColorCache hashers[MAX_COLOR_CACHE_BITS + 1]; |
761 | 0 | VP8LRefsCursor c = VP8LRefsCursorInit(refs); |
762 | 0 | VP8LHistogram* histos[MAX_COLOR_CACHE_BITS + 1] = {NULL}; |
763 | 0 | int ok = 0; |
764 | |
765 | 0 | assert(cache_bits_max >= 0 && cache_bits_max <= MAX_COLOR_CACHE_BITS); |
766 | |
767 | 0 | if (cache_bits_max == 0) { |
768 | 0 | *best_cache_bits = 0; |
769 | | // Local color cache is disabled. |
770 | 0 | return 1; |
771 | 0 | } |
772 | | |
773 | | // Allocate data. |
774 | 0 | for (i = 0; i <= cache_bits_max; ++i) { |
775 | 0 | histos[i] = VP8LAllocateHistogram(i); |
776 | 0 | if (histos[i] == NULL) goto Error; |
777 | 0 | VP8LHistogramInit(histos[i], i, /*init_arrays=*/1); |
778 | 0 | if (i == 0) continue; |
779 | 0 | cc_init[i] = VP8LColorCacheInit(&hashers[i], i); |
780 | 0 | if (!cc_init[i]) goto Error; |
781 | 0 | } |
782 | | |
783 | | // Find the cache_bits giving the lowest entropy. The search is done in a |
784 | | // brute-force way as the function (entropy w.r.t cache_bits) can be |
785 | | // anything in practice. |
786 | 0 | while (VP8LRefsCursorOk(&c)) { |
787 | 0 | const PixOrCopy* const v = c.cur_pos; |
788 | 0 | if (PixOrCopyIsLiteral(v)) { |
789 | 0 | const uint32_t pix = *argb++; |
790 | 0 | const uint32_t a = (pix >> 24) & 0xff; |
791 | 0 | const uint32_t r = (pix >> 16) & 0xff; |
792 | 0 | const uint32_t g = (pix >> 8) & 0xff; |
793 | 0 | const uint32_t b = (pix >> 0) & 0xff; |
794 | | // The keys of the caches can be derived from the longest one. |
795 | 0 | int key = VP8LHashPix(pix, 32 - cache_bits_max); |
796 | | // Do not use the color cache for cache_bits = 0. |
797 | 0 | ++histos[0]->blue[b]; |
798 | 0 | ++histos[0]->literal[g]; |
799 | 0 | ++histos[0]->red[r]; |
800 | 0 | ++histos[0]->alpha[a]; |
801 | | // Deal with cache_bits > 0. |
802 | 0 | for (i = cache_bits_max; i >= 1; --i, key >>= 1) { |
803 | 0 | if (VP8LColorCacheLookup(&hashers[i], key) == pix) { |
804 | 0 | ++histos[i]->literal[NUM_LITERAL_CODES + NUM_LENGTH_CODES + key]; |
805 | 0 | } else { |
806 | 0 | VP8LColorCacheSet(&hashers[i], key, pix); |
807 | 0 | ++histos[i]->blue[b]; |
808 | 0 | ++histos[i]->literal[g]; |
809 | 0 | ++histos[i]->red[r]; |
810 | 0 | ++histos[i]->alpha[a]; |
811 | 0 | } |
812 | 0 | } |
813 | 0 | } else { |
814 | 0 | int code, extra_bits, extra_bits_value; |
815 | | // We should compute the contribution of the (distance,length) |
816 | | // histograms, but those are the same regardless of the cache size. |
817 | | // As those constant contributions are in the end added to the other |
818 | | // histogram contributions, we can ignore them, except for the length |
819 | | // prefix that is part of the 'literal' histogram. |
820 | 0 | int len = PixOrCopyLength(v); |
821 | 0 | uint32_t argb_prev = *argb ^ 0xffffffffu; |
822 | 0 | VP8LPrefixEncode(len, &code, &extra_bits, &extra_bits_value); |
823 | 0 | for (i = 0; i <= cache_bits_max; ++i) { |
824 | 0 | ++histos[i]->literal[NUM_LITERAL_CODES + code]; |
825 | 0 | } |
826 | | // Update the color caches. |
827 | 0 | do { |
828 | 0 | if (*argb != argb_prev) { |
829 | | // Efficiency: insert only if the color changes. |
830 | 0 | int key = VP8LHashPix(*argb, 32 - cache_bits_max); |
831 | 0 | for (i = cache_bits_max; i >= 1; --i, key >>= 1) { |
832 | 0 | hashers[i].colors[key] = *argb; |
833 | 0 | } |
834 | 0 | argb_prev = *argb; |
835 | 0 | } |
836 | 0 | argb++; |
837 | 0 | } while (--len != 0); |
838 | 0 | } |
839 | 0 | VP8LRefsCursorNext(&c); |
840 | 0 | } |
841 | |
842 | 0 | for (i = 0; i <= cache_bits_max; ++i) { |
843 | 0 | const uint64_t entropy = VP8LHistogramEstimateBits(histos[i]); |
844 | 0 | if (i == 0 || entropy < entropy_min) { |
845 | 0 | entropy_min = entropy; |
846 | 0 | *best_cache_bits = i; |
847 | 0 | } |
848 | 0 | } |
849 | 0 | ok = 1; |
850 | 0 | Error: |
851 | 0 | for (i = 0; i <= cache_bits_max; ++i) { |
852 | 0 | if (cc_init[i]) VP8LColorCacheClear(&hashers[i]); |
853 | 0 | VP8LFreeHistogram(histos[i]); |
854 | 0 | } |
855 | 0 | return ok; |
856 | 0 | } |
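The single 'key >>= 1' walk over all cache sizes works because the keys nest (assuming VP8LHashPix() keeps the top '32 - shift' bits of a multiplicative hash, as its use above suggests): the key for an i-bit cache is the (i+1)-bit key shifted right by one. For example:

// Key nesting exploited in the loops above (sketch).
//   const int key10 = VP8LHashPix(pix, 32 - 10);
//   const int key9 = VP8LHashPix(pix, 32 - 9);  // equals key10 >> 1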
857 | | |
858 | | // Update (in-place) backward references for specified cache_bits. |
859 | | static int BackwardRefsWithLocalCache(const uint32_t* const argb, |
860 | | int cache_bits, |
861 | 0 | VP8LBackwardRefs* const refs) { |
862 | 0 | int pixel_index = 0; |
863 | 0 | VP8LColorCache hashers; |
864 | 0 | VP8LRefsCursor c = VP8LRefsCursorInit(refs); |
865 | 0 | if (!VP8LColorCacheInit(&hashers, cache_bits)) return 0; |
866 | | |
867 | 0 | while (VP8LRefsCursorOk(&c)) { |
868 | 0 | PixOrCopy* const v = c.cur_pos; |
869 | 0 | if (PixOrCopyIsLiteral(v)) { |
870 | 0 | const uint32_t argb_literal = v->argb_or_distance; |
871 | 0 | const int ix = VP8LColorCacheContains(&hashers, argb_literal); |
872 | 0 | if (ix >= 0) { |
873 | | // hashers contains argb_literal |
874 | 0 | *v = PixOrCopyCreateCacheIdx(ix); |
875 | 0 | } else { |
876 | 0 | VP8LColorCacheInsert(&hashers, argb_literal); |
877 | 0 | } |
878 | 0 | ++pixel_index; |
879 | 0 | } else { |
880 | | // refs was created without a local cache, so it cannot have cache indexes. |
881 | 0 | int k; |
882 | 0 | assert(PixOrCopyIsCopy(v)); |
883 | 0 | for (k = 0; k < v->len; ++k) { |
884 | 0 | VP8LColorCacheInsert(&hashers, argb[pixel_index++]); |
885 | 0 | } |
886 | 0 | } |
887 | 0 | VP8LRefsCursorNext(&c); |
888 | 0 | } |
889 | 0 | VP8LColorCacheClear(&hashers); |
890 | 0 | return 1; |
891 | 0 | } |
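BackwardRefsWithLocalCache() retrofits cache indices onto references that were computed without a cache: literals that hit the cache become cache-index tokens, while copies only feed the cache. A minimal usage sketch (hypothetical 2-bit cache), mirroring how GetBackwardReferences() below chains the two steps:

// Usage sketch: build references cache-free, then rewrite them in place.
static int LocalCacheExample(const uint32_t* const argb, int xsize, int ysize,
                             VP8LBackwardRefs* const refs) {
  if (!BackwardReferencesRle(xsize, ysize, argb, /*cache_bits=*/0, refs)) {
    return 0;
  }
  return BackwardRefsWithLocalCache(argb, /*cache_bits=*/2, refs);
}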
892 | | |
893 | | static VP8LBackwardRefs* GetBackwardReferencesLowEffort( |
894 | | int width, int height, const uint32_t* const argb, int* const cache_bits, |
895 | 0 | const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs_lz77) { |
896 | 0 | *cache_bits = 0; |
897 | 0 | if (!BackwardReferencesLz77(width, height, argb, 0, hash_chain, refs_lz77)) { |
898 | 0 | return NULL; |
899 | 0 | } |
900 | 0 | BackwardReferences2DLocality(width, refs_lz77); |
901 | 0 | return refs_lz77; |
902 | 0 | } |
903 | | |
904 | | extern int VP8LBackwardReferencesTraceBackwards( |
905 | | int xsize, int ysize, const uint32_t* const argb, int cache_bits, |
906 | | const VP8LHashChain* const hash_chain, |
907 | | const VP8LBackwardRefs* const refs_src, VP8LBackwardRefs* const refs_dst); |
908 | | static int GetBackwardReferences(int width, int height, |
909 | | const uint32_t* const argb, int quality, |
910 | | int lz77_types_to_try, int cache_bits_max, |
911 | | int do_no_cache, |
912 | | const VP8LHashChain* const hash_chain, |
913 | | VP8LBackwardRefs* const refs, |
914 | 0 | int* const cache_bits_best) { |
915 | 0 | VP8LHistogram* histo = NULL; |
916 | 0 | int i, lz77_type; |
917 | | // Index 0 is for a color cache, index 1 for no cache (if needed). |
918 | 0 | int lz77_types_best[2] = {0, 0}; |
919 | 0 | uint64_t bit_costs_best[2] = {WEBP_UINT64_MAX, WEBP_UINT64_MAX}; |
920 | 0 | VP8LHashChain hash_chain_box; |
921 | 0 | VP8LBackwardRefs* const refs_tmp = &refs[do_no_cache ? 2 : 1]; |
922 | 0 | int status = 0; |
923 | 0 | memset(&hash_chain_box, 0, sizeof(hash_chain_box)); |
924 | |
925 | 0 | histo = VP8LAllocateHistogram(MAX_COLOR_CACHE_BITS); |
926 | 0 | if (histo == NULL) goto Error; |
927 | | |
928 | 0 | for (lz77_type = 1; lz77_types_to_try; |
929 | 0 | lz77_types_to_try &= ~lz77_type, lz77_type <<= 1) { |
930 | 0 | int res = 0; |
931 | 0 | uint64_t bit_cost = 0u; |
932 | 0 | if ((lz77_types_to_try & lz77_type) == 0) continue; |
933 | 0 | switch (lz77_type) { |
934 | 0 | case kLZ77RLE: |
935 | 0 | res = BackwardReferencesRle(width, height, argb, 0, refs_tmp); |
936 | 0 | break; |
937 | 0 | case kLZ77Standard: |
938 | | // Compute LZ77 with no cache (0 bits), as the ideal LZ77 with a color |
939 | | // cache is not that different in practice. |
940 | 0 | res = BackwardReferencesLz77(width, height, argb, 0, hash_chain, |
941 | 0 | refs_tmp); |
942 | 0 | break; |
943 | 0 | case kLZ77Box: |
944 | 0 | if (!VP8LHashChainInit(&hash_chain_box, width * height)) goto Error; |
945 | 0 | res = BackwardReferencesLz77Box(width, height, argb, 0, hash_chain, |
946 | 0 | &hash_chain_box, refs_tmp); |
947 | 0 | break; |
948 | 0 | default: |
949 | 0 | assert(0); |
950 | 0 | } |
951 | 0 | if (!res) goto Error; |
952 | | |
953 | | // Start with the no color cache case. |
954 | 0 | for (i = 1; i >= 0; --i) { |
955 | 0 | int cache_bits = (i == 1) ? 0 : cache_bits_max; |
956 | |
957 | 0 | if (i == 1 && !do_no_cache) continue; |
958 | | |
959 | 0 | if (i == 0) { |
960 | | // Try with a color cache. |
961 | 0 | if (!CalculateBestCacheSize(argb, quality, refs_tmp, &cache_bits)) { |
962 | 0 | goto Error; |
963 | 0 | } |
964 | 0 | if (cache_bits > 0) { |
965 | 0 | if (!BackwardRefsWithLocalCache(argb, cache_bits, refs_tmp)) { |
966 | 0 | goto Error; |
967 | 0 | } |
968 | 0 | } |
969 | 0 | } |
970 | | |
971 | 0 | if (i == 0 && do_no_cache && cache_bits == 0) { |
972 | | // No need to re-compute bit_cost as it was computed at i == 1. |
973 | 0 | } else { |
974 | 0 | VP8LHistogramCreate(histo, refs_tmp, cache_bits); |
975 | 0 | bit_cost = VP8LHistogramEstimateBits(histo); |
976 | 0 | } |
977 | |
978 | 0 | if (bit_cost < bit_costs_best[i]) { |
979 | 0 | if (i == 1) { |
980 | | // Do not swap as the full cache analysis would have the wrong |
981 | | // VP8LBackwardRefs to start with. |
982 | 0 | if (!BackwardRefsClone(refs_tmp, &refs[1])) goto Error; |
983 | 0 | } else { |
984 | 0 | BackwardRefsSwap(refs_tmp, &refs[0]); |
985 | 0 | } |
986 | 0 | bit_costs_best[i] = bit_cost; |
987 | 0 | lz77_types_best[i] = lz77_type; |
988 | 0 | if (i == 0) *cache_bits_best = cache_bits; |
989 | 0 | } |
990 | 0 | } |
991 | 0 | } |
992 | 0 | assert(lz77_types_best[0] > 0); |
993 | 0 | assert(!do_no_cache || lz77_types_best[1] > 0); |
994 | | |
995 | | // Improve on simple LZ77 but only for high quality (TraceBackwards is |
996 | | // costly). |
997 | 0 | for (i = 1; i >= 0; --i) { |
998 | 0 | if (i == 1 && !do_no_cache) continue; |
999 | 0 | if ((lz77_types_best[i] == kLZ77Standard || |
1000 | 0 | lz77_types_best[i] == kLZ77Box) && |
1001 | 0 | quality >= 25) { |
1002 | 0 | const VP8LHashChain* const hash_chain_tmp = |
1003 | 0 | (lz77_types_best[i] == kLZ77Standard) ? hash_chain : &hash_chain_box; |
1004 | 0 | const int cache_bits = (i == 1) ? 0 : *cache_bits_best; |
1005 | 0 | uint64_t bit_cost_trace; |
1006 | 0 | if (!VP8LBackwardReferencesTraceBackwards(width, height, argb, cache_bits, |
1007 | 0 | hash_chain_tmp, &refs[i], |
1008 | 0 | refs_tmp)) { |
1009 | 0 | goto Error; |
1010 | 0 | } |
1011 | 0 | VP8LHistogramCreate(histo, refs_tmp, cache_bits); |
1012 | 0 | bit_cost_trace = VP8LHistogramEstimateBits(histo); |
1013 | 0 | if (bit_cost_trace < bit_costs_best[i]) { |
1014 | 0 | BackwardRefsSwap(refs_tmp, &refs[i]); |
1015 | 0 | } |
1016 | 0 | } |
1017 | | |
1018 | 0 | BackwardReferences2DLocality(width, &refs[i]); |
1019 | |
1020 | 0 | if (i == 1 && lz77_types_best[0] == lz77_types_best[1] && |
1021 | 0 | *cache_bits_best == 0) { |
1022 | | // If the best cache size is 0 and we have the same best LZ77, just copy |
1023 | | // the data over and stop here. |
1024 | 0 | if (!BackwardRefsClone(&refs[1], &refs[0])) goto Error; |
1025 | 0 | break; |
1026 | 0 | } |
1027 | 0 | } |
1028 | 0 | status = 1; |
1029 | |
1030 | 0 | Error: |
1031 | 0 | VP8LHashChainClear(&hash_chain_box); |
1032 | 0 | VP8LFreeHistogram(histo); |
1033 | 0 | return status; |
1034 | 0 | } |
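For reference, the layout of the caller-provided refs[] array as implied by the indexing above:

// refs[0]        : best references assuming a color cache of *cache_bits_best bits
// refs[1]        : best references with no color cache (only filled if do_no_cache)
// refs[1] or [2] : scratch buffer (refs_tmp); index 2 is used when do_no_cache is set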
1035 | | |
1036 | | int VP8LGetBackwardReferences( |
1037 | | int width, int height, const uint32_t* const argb, int quality, |
1038 | | int low_effort, int lz77_types_to_try, int cache_bits_max, int do_no_cache, |
1039 | | const VP8LHashChain* const hash_chain, VP8LBackwardRefs* const refs, |
1040 | | int* const cache_bits_best, const WebPPicture* const pic, int percent_range, |
1041 | 0 | int* const percent) { |
1042 | 0 | if (low_effort) { |
1043 | 0 | VP8LBackwardRefs* refs_best; |
1044 | 0 | *cache_bits_best = cache_bits_max; |
1045 | 0 | refs_best = GetBackwardReferencesLowEffort( |
1046 | 0 | width, height, argb, cache_bits_best, hash_chain, refs); |
1047 | 0 | if (refs_best == NULL) { |
1048 | 0 | return WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY); |
1049 | 0 | } |
1050 | | // Set it in first position. |
1051 | 0 | BackwardRefsSwap(refs_best, &refs[0]); |
1052 | 0 | } else { |
1053 | 0 | if (!GetBackwardReferences(width, height, argb, quality, lz77_types_to_try, |
1054 | 0 | cache_bits_max, do_no_cache, hash_chain, refs, |
1055 | 0 | cache_bits_best)) { |
1056 | 0 | return WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY); |
1057 | 0 | } |
1058 | 0 | } |
1059 | | |
1060 | 0 | return WebPReportProgress(pic, *percent + percent_range, percent); |
1061 | 0 | } |