/src/libwebp/src/enc/histogram_enc.c
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Author: Jyrki Alakuijala (jyrki@google.com)
//
#ifdef HAVE_CONFIG_H
#include "src/webp/config.h"
#endif

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/utils.h"
#include "src/webp/encode.h"
#include "src/webp/format_constants.h"
#include "src/webp/types.h"

// Number of partitions for the three dominant (literal, red and blue) symbol
// costs.
#define NUM_PARTITIONS 4
// The size of the bin-hash corresponding to the three dominant costs.
#define BIN_SIZE (NUM_PARTITIONS * NUM_PARTITIONS * NUM_PARTITIONS)
// Maximum number of histograms allowed in greedy combining algorithm.
#define MAX_HISTO_GREEDY 100

// Enum to meaningfully access the elements of the Histogram arrays.
typedef enum {
  LITERAL = 0,
  RED,
  BLUE,
  ALPHA,
  DISTANCE
} HistogramIndex;

// Return the size of the histogram for a given cache_bits.
static int GetHistogramSize(int cache_bits) {
  const int literal_size = VP8LHistogramNumCodes(cache_bits);
  const size_t total_size = sizeof(VP8LHistogram) + sizeof(int) * literal_size;
  assert(total_size <= (size_t)0x7fffffff);
  return (int)total_size;
}

static void HistogramStatsClear(VP8LHistogram* const h) {
  int i;
  for (i = 0; i < 5; ++i) {
    h->trivial_symbol[i] = VP8L_NON_TRIVIAL_SYM;
    // By default, the histogram is assumed to be used.
    h->is_used[i] = 1;
  }
  h->bit_cost = 0;
  memset(h->costs, 0, sizeof(h->costs));
}

static void HistogramClear(VP8LHistogram* const h) {
  uint32_t* const literal = h->literal;
  const int cache_bits = h->palette_code_bits;
  const int histo_size = GetHistogramSize(cache_bits);
  memset(h, 0, histo_size);
  h->palette_code_bits = cache_bits;
  h->literal = literal;
  HistogramStatsClear(h);
}

// Swap two histogram pointers.
static void HistogramSwap(VP8LHistogram** const h1, VP8LHistogram** const h2) {
  VP8LHistogram* const tmp = *h1;
  *h1 = *h2;
  *h2 = tmp;
}

static void HistogramCopy(const VP8LHistogram* const src,
                          VP8LHistogram* const dst) {
  uint32_t* const dst_literal = dst->literal;
  const int dst_cache_bits = dst->palette_code_bits;
  const int literal_size = VP8LHistogramNumCodes(dst_cache_bits);
  const int histo_size = GetHistogramSize(dst_cache_bits);
  assert(src->palette_code_bits == dst_cache_bits);
  memcpy(dst, src, histo_size);
  dst->literal = dst_literal;
  memcpy(dst->literal, src->literal, literal_size * sizeof(*dst->literal));
}

void VP8LFreeHistogram(VP8LHistogram* const h) { WebPSafeFree(h); }

void VP8LFreeHistogramSet(VP8LHistogramSet* const histograms) {
  WebPSafeFree(histograms);
}

void VP8LHistogramCreate(VP8LHistogram* const h,
                         const VP8LBackwardRefs* const refs,
                         int palette_code_bits) {
  if (palette_code_bits >= 0) {
    h->palette_code_bits = palette_code_bits;
  }
  HistogramClear(h);
  VP8LHistogramStoreRefs(refs, /*distance_modifier=*/NULL,
                         /*distance_modifier_arg0=*/0, h);
}

void VP8LHistogramInit(VP8LHistogram* const h, int palette_code_bits,
                       int init_arrays) {
  h->palette_code_bits = palette_code_bits;
  if (init_arrays) {
    HistogramClear(h);
  } else {
    HistogramStatsClear(h);
  }
}

VP8LHistogram* VP8LAllocateHistogram(int cache_bits) {
  VP8LHistogram* histo = NULL;
  const int total_size = GetHistogramSize(cache_bits);
  uint8_t* const memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
  if (memory == NULL) return NULL;
  histo = (VP8LHistogram*)memory;
  // 'literal' won't necessarily be aligned.
  histo->literal = (uint32_t*)(memory + sizeof(VP8LHistogram));
  VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 0);
  return histo;
}
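
// Illustrative usage sketch for the allocation API above (not part of the
// original source; 'refs' and 'cache_bits' stand for caller-provided values):
//
//   VP8LHistogram* const h = VP8LAllocateHistogram(cache_bits);
//   if (h != NULL) {
//     VP8LHistogramCreate(h, refs, cache_bits);         // clear + accumulate
//     const uint64_t cost = VP8LHistogramEstimateBits(h);
//     (void)cost;
//     VP8LFreeHistogram(h);
//   }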

// Resets the pointers of the histograms to point to the bit buffer in the set.
static void HistogramSetResetPointers(VP8LHistogramSet* const set,
                                      int cache_bits) {
  int i;
  const int histo_size = GetHistogramSize(cache_bits);
  uint8_t* memory = (uint8_t*) (set->histograms);
  memory += set->max_size * sizeof(*set->histograms);
  for (i = 0; i < set->max_size; ++i) {
    memory = (uint8_t*) WEBP_ALIGN(memory);
    set->histograms[i] = (VP8LHistogram*) memory;
    // 'literal' won't necessarily be aligned.
    set->histograms[i]->literal = (uint32_t*)(memory + sizeof(VP8LHistogram));
    memory += histo_size;
  }
}

// Returns the total size of the VP8LHistogramSet.
static size_t HistogramSetTotalSize(int size, int cache_bits) {
  const int histo_size = GetHistogramSize(cache_bits);
  return (sizeof(VP8LHistogramSet) + size * (sizeof(VP8LHistogram*) +
          histo_size + WEBP_ALIGN_CST));
}

VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits) {
  int i;
  VP8LHistogramSet* set;
  const size_t total_size = HistogramSetTotalSize(size, cache_bits);
  uint8_t* memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
  if (memory == NULL) return NULL;

  set = (VP8LHistogramSet*)memory;
  memory += sizeof(*set);
  set->histograms = (VP8LHistogram**)memory;
  set->max_size = size;
  set->size = size;
  HistogramSetResetPointers(set, cache_bits);
  for (i = 0; i < size; ++i) {
    VP8LHistogramInit(set->histograms[i], cache_bits, /*init_arrays=*/ 0);
  }
  return set;
}

void VP8LHistogramSetClear(VP8LHistogramSet* const set) {
  int i;
  const int cache_bits = set->histograms[0]->palette_code_bits;
  const int size = set->max_size;
  const size_t total_size = HistogramSetTotalSize(size, cache_bits);
  uint8_t* memory = (uint8_t*)set;

  memset(memory, 0, total_size);
  memory += sizeof(*set);
  set->histograms = (VP8LHistogram**)memory;
  set->max_size = size;
  set->size = size;
  HistogramSetResetPointers(set, cache_bits);
  for (i = 0; i < size; ++i) {
    set->histograms[i]->palette_code_bits = cache_bits;
  }
}

// Removes the histogram 'i' from 'set'.
static void HistogramSetRemoveHistogram(VP8LHistogramSet* const set, int i) {
  set->histograms[i] = set->histograms[set->size - 1];
  --set->size;
  assert(set->size > 0);
}

// -----------------------------------------------------------------------------

static void HistogramAddSinglePixOrCopy(
    VP8LHistogram* const histo, const PixOrCopy* const v,
    int (*const distance_modifier)(int, int), int distance_modifier_arg0) {
  if (PixOrCopyIsLiteral(v)) {
    ++histo->alpha[PixOrCopyLiteral(v, 3)];
    ++histo->red[PixOrCopyLiteral(v, 2)];
    ++histo->literal[PixOrCopyLiteral(v, 1)];
    ++histo->blue[PixOrCopyLiteral(v, 0)];
  } else if (PixOrCopyIsCacheIdx(v)) {
    const int literal_ix =
        NUM_LITERAL_CODES + NUM_LENGTH_CODES + PixOrCopyCacheIdx(v);
    assert(histo->palette_code_bits != 0);
    ++histo->literal[literal_ix];
  } else {
    int code, extra_bits;
    VP8LPrefixEncodeBits(PixOrCopyLength(v), &code, &extra_bits);
    ++histo->literal[NUM_LITERAL_CODES + code];
    if (distance_modifier == NULL) {
      VP8LPrefixEncodeBits(PixOrCopyDistance(v), &code, &extra_bits);
    } else {
      VP8LPrefixEncodeBits(
          distance_modifier(distance_modifier_arg0, PixOrCopyDistance(v)),
          &code, &extra_bits);
    }
    ++histo->distance[code];
  }
}
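
// Note on the 'literal' array layout used above: indices
// [0, NUM_LITERAL_CODES) count green/literal values, indices
// [NUM_LITERAL_CODES, NUM_LITERAL_CODES + NUM_LENGTH_CODES) count the length
// prefix codes of backward references, and the remaining entries (used only
// when the color cache is enabled, i.e. palette_code_bits != 0) count
// color-cache indices. This matches the size returned by
// VP8LHistogramNumCodes() in GetHistogramSize().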

void VP8LHistogramStoreRefs(const VP8LBackwardRefs* const refs,
                            int (*const distance_modifier)(int, int),
                            int distance_modifier_arg0,
                            VP8LHistogram* const histo) {
  VP8LRefsCursor c = VP8LRefsCursorInit(refs);
  while (VP8LRefsCursorOk(&c)) {
    HistogramAddSinglePixOrCopy(histo, c.cur_pos, distance_modifier,
                                distance_modifier_arg0);
    VP8LRefsCursorNext(&c);
  }
}

// -----------------------------------------------------------------------------
// Entropy-related functions.

static WEBP_INLINE uint64_t BitsEntropyRefine(const VP8LBitEntropy* entropy) {
  uint64_t mix;
  if (entropy->nonzeros < 5) {
    if (entropy->nonzeros <= 1) {
      return 0;
    }
    // Two symbols, they will be 0 and 1 in a Huffman code.
    // Let's mix in a bit of entropy to favor good clustering when
    // distributions of these are combined.
    if (entropy->nonzeros == 2) {
      return DivRound(99 * ((uint64_t)entropy->sum << LOG_2_PRECISION_BITS) +
                          entropy->entropy,
                      100);
    }
    // No matter what the entropy says, we cannot be better than min_limit
    // with Huffman coding. I am mixing a bit of entropy into the
    // min_limit since it produces much better (~0.5 %) compression results
    // perhaps because of better entropy clustering.
    if (entropy->nonzeros == 3) {
      mix = 950;
    } else {
      mix = 700;  // nonzeros == 4.
    }
  } else {
    mix = 627;
  }

  {
    uint64_t min_limit = (uint64_t)(2 * entropy->sum - entropy->max_val)
                         << LOG_2_PRECISION_BITS;
    min_limit =
        DivRound(mix * min_limit + (1000 - mix) * entropy->entropy, 1000);
    return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;
  }
}
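
// In other words, BitsEntropyRefine() returns max(entropy, min_limit), where
// min_limit blends a Huffman-style lower bound of (2 * sum - max_val) bits
// (for three or more symbols, every occurrence costs at least one bit and
// occurrences of non-dominant symbols cost at least two) with the Shannon
// entropy, using mix/1000 as the blend weight.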

uint64_t VP8LBitsEntropy(const uint32_t* const array, int n) {
  VP8LBitEntropy entropy;
  VP8LBitsEntropyUnrefined(array, n, &entropy);

  return BitsEntropyRefine(&entropy);
}

static uint64_t InitialHuffmanCost(void) {
  // Small bias because Huffman code length is typically not stored in
  // full length.
  static const uint64_t kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;
  // Subtract a bias of 9.1.
  return (kHuffmanCodeOfHuffmanCodeSize << LOG_2_PRECISION_BITS) -
         DivRound(91ll << LOG_2_PRECISION_BITS, 10);
}

// Finalize the Huffman cost based on streak numbers and length type (<3 or >=3)
static uint64_t FinalHuffmanCost(const VP8LStreaks* const stats) {
  // The constants in this function are empirical and got rounded from
  // their original values in 1/8 when switched to 1/1024.
  uint64_t retval = InitialHuffmanCost();
  // Second coefficient: Many zeros in the histogram are covered efficiently
  // by a run-length encode. Originally 2/8.
  uint32_t retval_extra = stats->counts[0] * 1600 + 240 * stats->streaks[0][1];
  // Third coefficient: Constant values are encoded less efficiently, but still
  // RLE'ed. Originally 6/8.
  retval_extra += stats->counts[1] * 2640 + 720 * stats->streaks[1][1];
  // 0s are usually encoded more efficiently than non-0s.
  // Originally 15/8.
  retval_extra += 1840 * stats->streaks[0][0];
  // Originally 26/8.
  retval_extra += 3360 * stats->streaks[1][0];
  return retval + ((uint64_t)retval_extra << (LOG_2_PRECISION_BITS - 10));
}
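
// Unit convention: all costs in this file are bit counts scaled by
// 2^LOG_2_PRECISION_BITS. The empirical coefficients above are expressed in
// 1/1024th of a bit, hence the final shift by (LOG_2_PRECISION_BITS - 10).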

// Get the symbol entropy for the distribution 'population'.
// Set 'trivial_sym', if there's only one symbol present in the distribution.
static uint64_t PopulationCost(const uint32_t* const population, int length,
                               uint16_t* const trivial_sym,
                               uint8_t* const is_used) {
  VP8LBitEntropy bit_entropy;
  VP8LStreaks stats;
  VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
  if (trivial_sym != NULL) {
    *trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code
                                               : VP8L_NON_TRIVIAL_SYM;
  }
  if (is_used != NULL) {
    // The histogram is used if there is at least one non-zero streak.
    *is_used = (stats.streaks[1][0] != 0 || stats.streaks[1][1] != 0);
  }

  return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
}

static WEBP_INLINE void GetPopulationInfo(const VP8LHistogram* const histo,
                                          HistogramIndex index,
                                          const uint32_t** population,
                                          int* length) {
  switch (index) {
    case LITERAL:
      *population = histo->literal;
      *length = VP8LHistogramNumCodes(histo->palette_code_bits);
      break;
    case RED:
      *population = histo->red;
      *length = NUM_LITERAL_CODES;
      break;
    case BLUE:
      *population = histo->blue;
      *length = NUM_LITERAL_CODES;
      break;
    case ALPHA:
      *population = histo->alpha;
      *length = NUM_LITERAL_CODES;
      break;
    case DISTANCE:
      *population = histo->distance;
      *length = NUM_DISTANCE_CODES;
      break;
  }
}

// The combination is trivial if each histogram has a single non-zero symbol
// and it is the same one in both; in that case the combined cost is simply
// the cost of either histogram.
// 'index' is the index of the symbol in the histogram (literal, red, blue,
// alpha, distance).
static WEBP_INLINE uint64_t GetCombinedEntropy(const VP8LHistogram* const h1,
                                               const VP8LHistogram* const h2,
                                               HistogramIndex index) {
  const uint32_t* X;
  const uint32_t* Y;
  int length;
  VP8LStreaks stats;
  VP8LBitEntropy bit_entropy;
  const int is_h1_used = h1->is_used[index];
  const int is_h2_used = h2->is_used[index];
  const int is_trivial = h1->trivial_symbol[index] != VP8L_NON_TRIVIAL_SYM &&
                         h1->trivial_symbol[index] == h2->trivial_symbol[index];

  if (is_trivial || !is_h1_used || !is_h2_used) {
    if (is_h1_used) return h1->costs[index];
    return h2->costs[index];
  }
  assert(is_h1_used && is_h2_used);

  GetPopulationInfo(h1, index, &X, &length);
  GetPopulationInfo(h2, index, &Y, &length);
  VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats);
  return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
}

// Estimates the Entropy + Huffman + other block overhead size cost.
uint64_t VP8LHistogramEstimateBits(const VP8LHistogram* const h) {
  int i;
  uint64_t cost = 0;
  for (i = 0; i < 5; ++i) {
    int length;
    const uint32_t* population;
    GetPopulationInfo(h, (HistogramIndex)i, &population, &length);
    cost += PopulationCost(population, length, /*trivial_sym=*/NULL,
                           /*is_used=*/NULL);
  }
  cost += ((uint64_t)(VP8LExtraCost(h->literal + NUM_LITERAL_CODES,
                                    NUM_LENGTH_CODES) +
                      VP8LExtraCost(h->distance, NUM_DISTANCE_CODES))
           << LOG_2_PRECISION_BITS);
  return cost;
}

// -----------------------------------------------------------------------------
// Various histogram combine/cost-eval functions

// Sets b to a + b, saturating at WEBP_INT64_MAX.
static WEBP_INLINE void SaturateAdd(uint64_t a, int64_t* b) {
  if (*b < 0 || (int64_t)a <= WEBP_INT64_MAX - *b) {
    *b += (int64_t)a;
  } else {
    *b = WEBP_INT64_MAX;
  }
}
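
// 'b' typically starts out as a (possibly negative) signed cost threshold
// while 'a' is an unsigned cost; saturating at WEBP_INT64_MAX avoids signed
// overflow when very large costs are folded into the threshold.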

// Returns 1 if the cost of the combined histogram is less than the threshold.
// Otherwise returns 0 and the cost is invalid due to early bail-out.
WEBP_NODISCARD static int GetCombinedHistogramEntropy(
    const VP8LHistogram* const a, const VP8LHistogram* const b,
    int64_t cost_threshold_in, uint64_t* cost, uint64_t costs[5]) {
  int i;
  const uint64_t cost_threshold = (uint64_t)cost_threshold_in;
  assert(a->palette_code_bits == b->palette_code_bits);
  if (cost_threshold_in <= 0) return 0;
  *cost = 0;

  // No need to add the extra cost for length and distance as it is a constant
  // that does not influence the histograms.
  for (i = 0; i < 5; ++i) {
    costs[i] = GetCombinedEntropy(a, b, (HistogramIndex)i);
    *cost += costs[i];
    if (*cost >= cost_threshold) return 0;
  }

  return 1;
}

static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const h1,
                                     const VP8LHistogram* const h2,
                                     VP8LHistogram* const hout) {
  int i;
  assert(h1->palette_code_bits == h2->palette_code_bits);

  for (i = 0; i < 5; ++i) {
    int length;
    const uint32_t *p1, *p2, *pout_const;
    uint32_t* pout;
    GetPopulationInfo(h1, (HistogramIndex)i, &p1, &length);
    GetPopulationInfo(h2, (HistogramIndex)i, &p2, &length);
    GetPopulationInfo(hout, (HistogramIndex)i, &pout_const, &length);
    pout = (uint32_t*)pout_const;
    if (h2 == hout) {
      if (h1->is_used[i]) {
        if (hout->is_used[i]) {
          VP8LAddVectorEq(p1, pout, length);
        } else {
          memcpy(pout, p1, length * sizeof(pout[0]));
        }
      }
    } else {
      if (h1->is_used[i]) {
        if (h2->is_used[i]) {
          VP8LAddVector(p1, p2, pout, length);
        } else {
          memcpy(pout, p1, length * sizeof(pout[0]));
        }
      } else if (h2->is_used[i]) {
        memcpy(pout, p2, length * sizeof(pout[0]));
      } else {
        memset(pout, 0, length * sizeof(pout[0]));
      }
    }
  }

  for (i = 0; i < 5; ++i) {
    hout->trivial_symbol[i] = h1->trivial_symbol[i] == h2->trivial_symbol[i]
                                  ? h1->trivial_symbol[i]
                                  : VP8L_NON_TRIVIAL_SYM;
    hout->is_used[i] = h1->is_used[i] || h2->is_used[i];
  }
}

static void UpdateHistogramCost(uint64_t bit_cost, uint64_t costs[5],
                                VP8LHistogram* const h) {
  int i;
  h->bit_cost = bit_cost;
  for (i = 0; i < 5; ++i) {
    h->costs[i] = costs[i];
  }
}

// Performs out = a + b, computing the cost C(a+b) - C(a) - C(b) while comparing
// to the threshold value 'cost_threshold'. The score returned is
//  Score = C(a+b) - C(a) - C(b), where C(a) + C(b) is known and fixed.
// Since the previous score passed is 'cost_threshold', we only need to compare
// the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out
// early.
// Returns 1 if the cost is less than the threshold.
// Otherwise returns 0 and the cost is invalid due to early bail-out.
WEBP_NODISCARD static int HistogramAddEval(const VP8LHistogram* const a,
                                           const VP8LHistogram* const b,
                                           VP8LHistogram* const out,
                                           int64_t cost_threshold) {
  const uint64_t sum_cost = a->bit_cost + b->bit_cost;
  uint64_t bit_cost, costs[5];
  SaturateAdd(sum_cost, &cost_threshold);
  if (!GetCombinedHistogramEntropy(a, b, cost_threshold, &bit_cost, costs)) {
    return 0;
  }

  HistogramAdd(a, b, out);
  UpdateHistogramCost(bit_cost, costs, out);
  return 1;
}

// Same as HistogramAddEval(), except that the resulting histogram
// is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit
// the term C(b) which is constant over all the evaluations.
// Returns 1 if the cost is less than the threshold.
// Otherwise returns 0 and the cost is invalid due to early bail-out.
WEBP_NODISCARD static int HistogramAddThresh(const VP8LHistogram* const a,
                                             const VP8LHistogram* const b,
                                             int64_t cost_threshold,
                                             int64_t* cost_out) {
  uint64_t cost, costs[5];
  assert(a != NULL && b != NULL);
  SaturateAdd(a->bit_cost, &cost_threshold);
  if (!GetCombinedHistogramEntropy(a, b, cost_threshold, &cost, costs)) {
    return 0;
  }

  *cost_out = (int64_t)cost - (int64_t)a->bit_cost;
  return 1;
}
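
// The bail-out algebra used by the two helpers above: the caller wants
//   C(a+b) - C(a) - C(b) < cost_threshold
// which is equivalent to
//   C(a+b) < cost_threshold + C(a) + C(b),
// so SaturateAdd() folds the fixed C(a) (and, for HistogramAddEval(), C(b))
// term into the threshold, and GetCombinedHistogramEntropy() can stop summing
// as soon as the partial combined cost reaches it.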

// -----------------------------------------------------------------------------

// The structure to keep track of cost range for the three dominant entropy
// symbols.
typedef struct {
  uint64_t literal_max;
  uint64_t literal_min;
  uint64_t red_max;
  uint64_t red_min;
  uint64_t blue_max;
  uint64_t blue_min;
} DominantCostRange;

static void DominantCostRangeInit(DominantCostRange* const c) {
  c->literal_max = 0;
  c->literal_min = WEBP_UINT64_MAX;
  c->red_max = 0;
  c->red_min = WEBP_UINT64_MAX;
  c->blue_max = 0;
  c->blue_min = WEBP_UINT64_MAX;
}

static void UpdateDominantCostRange(
    const VP8LHistogram* const h, DominantCostRange* const c) {
  if (c->literal_max < h->costs[LITERAL]) c->literal_max = h->costs[LITERAL];
  if (c->literal_min > h->costs[LITERAL]) c->literal_min = h->costs[LITERAL];
  if (c->red_max < h->costs[RED]) c->red_max = h->costs[RED];
  if (c->red_min > h->costs[RED]) c->red_min = h->costs[RED];
  if (c->blue_max < h->costs[BLUE]) c->blue_max = h->costs[BLUE];
  if (c->blue_min > h->costs[BLUE]) c->blue_min = h->costs[BLUE];
}

static void ComputeHistogramCost(VP8LHistogram* const h) {
  int i;
  // No need to add the extra cost for length and distance as it is a constant
  // that does not influence the histograms.
  for (i = 0; i < 5; ++i) {
    const uint32_t* population;
    int length;
    GetPopulationInfo(h, i, &population, &length);
    h->costs[i] = PopulationCost(population, length, &h->trivial_symbol[i],
                                 &h->is_used[i]);
  }
  h->bit_cost = h->costs[LITERAL] + h->costs[RED] + h->costs[BLUE] +
                h->costs[ALPHA] + h->costs[DISTANCE];
}

static int GetBinIdForEntropy(uint64_t min, uint64_t max, uint64_t val) {
  const uint64_t range = max - min;
  if (range > 0) {
    const uint64_t delta = val - min;
    return (int)((NUM_PARTITIONS - 1e-6) * delta / range);
  } else {
    return 0;
  }
}

static int GetHistoBinIndex(const VP8LHistogram* const h,
                            const DominantCostRange* const c, int low_effort) {
  int bin_id =
      GetBinIdForEntropy(c->literal_min, c->literal_max, h->costs[LITERAL]);
  assert(bin_id < NUM_PARTITIONS);
  if (!low_effort) {
    bin_id = bin_id * NUM_PARTITIONS +
             GetBinIdForEntropy(c->red_min, c->red_max, h->costs[RED]);
    bin_id = bin_id * NUM_PARTITIONS +
             GetBinIdForEntropy(c->blue_min, c->blue_max, h->costs[BLUE]);
    assert(bin_id < BIN_SIZE);
  }
  return bin_id;
}
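
// GetBinIdForEntropy() splits [min, max] into NUM_PARTITIONS equal intervals
// and returns the interval index of 'val' (0 at min, NUM_PARTITIONS - 1 at
// max; the 1e-6 keeps val == max from spilling into bin NUM_PARTITIONS).
// GetHistoBinIndex() then packs the literal, red and blue bins as a
// base-NUM_PARTITIONS number, so the full-effort bin id lies in [0, BIN_SIZE).
// For example, with NUM_PARTITIONS == 4, literal/red/blue bins (2, 0, 3) give
// bin_id = (2 * 4 + 0) * 4 + 3 = 35.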

// Construct the histograms from backward references.
static void HistogramBuild(
    int xsize, int histo_bits, const VP8LBackwardRefs* const backward_refs,
    VP8LHistogramSet* const image_histo) {
  int x = 0, y = 0;
  const int histo_xsize = VP8LSubSampleSize(xsize, histo_bits);
  VP8LHistogram** const histograms = image_histo->histograms;
  VP8LRefsCursor c = VP8LRefsCursorInit(backward_refs);
  assert(histo_bits > 0);
  VP8LHistogramSetClear(image_histo);
  while (VP8LRefsCursorOk(&c)) {
    const PixOrCopy* const v = c.cur_pos;
    const int ix = (y >> histo_bits) * histo_xsize + (x >> histo_bits);
    HistogramAddSinglePixOrCopy(histograms[ix], v, NULL, 0);
    x += PixOrCopyLength(v);
    while (x >= xsize) {
      x -= xsize;
      ++y;
    }
    VP8LRefsCursorNext(&c);
  }
}

// Copies the histograms and computes their bit_cost.
static void HistogramCopyAndAnalyze(VP8LHistogramSet* const orig_histo,
                                    VP8LHistogramSet* const image_histo) {
  int i;
  VP8LHistogram** const orig_histograms = orig_histo->histograms;
  VP8LHistogram** const histograms = image_histo->histograms;
  assert(image_histo->max_size == orig_histo->max_size);
  image_histo->size = 0;
  for (i = 0; i < orig_histo->max_size; ++i) {
    VP8LHistogram* const histo = orig_histograms[i];
    ComputeHistogramCost(histo);

    // Skip the histogram if it is completely empty, which can happen for tiles
    // with no information (when they are skipped because of LZ77).
    if (!histo->is_used[LITERAL] && !histo->is_used[RED] &&
        !histo->is_used[BLUE] && !histo->is_used[ALPHA] &&
        !histo->is_used[DISTANCE]) {
      // The first histogram is always used.
      assert(i > 0);
      orig_histograms[i] = NULL;
    } else {
      // Copy histograms from orig_histo[] to image_histo[].
      HistogramCopy(histo, histograms[image_histo->size]);
      ++image_histo->size;
    }
  }
}

// Partition histograms to different entropy bins for three dominant (literal,
// red and blue) symbol costs and compute the histogram aggregate bit_cost.
static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
                                       int low_effort) {
  int i;
  VP8LHistogram** const histograms = image_histo->histograms;
  const int histo_size = image_histo->size;
  DominantCostRange cost_range;
  DominantCostRangeInit(&cost_range);

  // Analyze the dominant (literal, red and blue) entropy costs.
  for (i = 0; i < histo_size; ++i) {
    UpdateDominantCostRange(histograms[i], &cost_range);
  }

  // bin-hash histograms on three of the dominant (literal, red and blue)
  // symbol costs and store the resulting bin_id for each histogram.
  for (i = 0; i < histo_size; ++i) {
    histograms[i]->bin_id =
        GetHistoBinIndex(histograms[i], &cost_range, low_effort);
  }
}

// Merges some histograms with the same bin_id together if it's advantageous,
// removing the merged-away histograms from the set.
// 'combine_cost_factor' has to be divided by 100.
static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
                                       VP8LHistogram* cur_combo, int num_bins,
                                       int32_t combine_cost_factor,
                                       int low_effort) {
  VP8LHistogram** const histograms = image_histo->histograms;
  int idx;
  struct {
    int16_t first;    // position of the histogram that accumulates all
                      // histograms with the same bin_id
    uint16_t num_combine_failures;   // number of combine failures per bin_id
  } bin_info[BIN_SIZE];

  assert(num_bins <= BIN_SIZE);
  for (idx = 0; idx < num_bins; ++idx) {
    bin_info[idx].first = -1;
    bin_info[idx].num_combine_failures = 0;
  }

  for (idx = 0; idx < image_histo->size;) {
    const int bin_id = histograms[idx]->bin_id;
    const int first = bin_info[bin_id].first;
    if (first == -1) {
      bin_info[bin_id].first = idx;
      ++idx;
    } else if (low_effort) {
      HistogramAdd(histograms[idx], histograms[first], histograms[first]);
      HistogramSetRemoveHistogram(image_histo, idx);
    } else {
      // try to merge #idx into #first (both share the same bin_id)
      const uint64_t bit_cost = histograms[idx]->bit_cost;
      const int64_t bit_cost_thresh =
          -DivRound((int64_t)bit_cost * combine_cost_factor, 100);
      if (HistogramAddEval(histograms[first], histograms[idx], cur_combo,
                           bit_cost_thresh)) {
        const int max_combine_failures = 32;
        // Try to merge two histograms only if the combo is a trivial one or
        // the two candidate histograms are already non-trivial.
        // For some images, 'try_combine' turns out to be false for a lot of
        // histogram pairs. In that case, we fall back to combining
        // histograms as usual to avoid increasing the header size.
        int try_combine =
            cur_combo->trivial_symbol[RED] != VP8L_NON_TRIVIAL_SYM &&
            cur_combo->trivial_symbol[BLUE] != VP8L_NON_TRIVIAL_SYM &&
            cur_combo->trivial_symbol[ALPHA] != VP8L_NON_TRIVIAL_SYM;
        if (!try_combine) {
          try_combine =
              histograms[idx]->trivial_symbol[RED] == VP8L_NON_TRIVIAL_SYM ||
              histograms[idx]->trivial_symbol[BLUE] == VP8L_NON_TRIVIAL_SYM ||
              histograms[idx]->trivial_symbol[ALPHA] == VP8L_NON_TRIVIAL_SYM;
          try_combine &=
              histograms[first]->trivial_symbol[RED] == VP8L_NON_TRIVIAL_SYM ||
              histograms[first]->trivial_symbol[BLUE] == VP8L_NON_TRIVIAL_SYM ||
              histograms[first]->trivial_symbol[ALPHA] == VP8L_NON_TRIVIAL_SYM;
        }
        if (try_combine ||
            bin_info[bin_id].num_combine_failures >= max_combine_failures) {
          // move the (better) merged histogram to its final slot
          HistogramSwap(&cur_combo, &histograms[first]);
          HistogramSetRemoveHistogram(image_histo, idx);
        } else {
          ++bin_info[bin_id].num_combine_failures;
          ++idx;
        }
      } else {
        ++idx;
      }
    }
  }
  if (low_effort) {
    // for low_effort case, update the final cost when everything is merged
    for (idx = 0; idx < image_histo->size; ++idx) {
      ComputeHistogramCost(histograms[idx]);
    }
  }
}

// Implements a Lehmer random number generator with a multiplicative constant
// of 48271 and a modulo constant of 2^31 - 1.
static uint32_t MyRand(uint32_t* const seed) {
  *seed = (uint32_t)(((uint64_t)(*seed) * 48271u) % 2147483647u);
  assert(*seed > 0);
  return *seed;
}
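
// With these constants the Lehmer sequence stays in [1, 2^31 - 2], so the
// assert above holds as long as the initial seed is non-zero. The caller
// below draws indices reproducibly via MyRand(&seed) % n.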

// -----------------------------------------------------------------------------
// Histogram pairs priority queue

// Pair of histograms. Negative idx1 value means that pair is out-of-date.
typedef struct {
  int idx1;
  int idx2;
  int64_t cost_diff;
  uint64_t cost_combo;
  uint64_t costs[5];
} HistogramPair;

typedef struct {
  HistogramPair* queue;
  int size;
  int max_size;
} HistoQueue;

static int HistoQueueInit(HistoQueue* const histo_queue, const int max_size) {
  histo_queue->size = 0;
  histo_queue->max_size = max_size;
  // We allocate max_size + 1 because the last element at index "size" is
  // used as temporary data (and it could be up to max_size).
  histo_queue->queue = (HistogramPair*)WebPSafeMalloc(
      histo_queue->max_size + 1, sizeof(*histo_queue->queue));
  return histo_queue->queue != NULL;
}

static void HistoQueueClear(HistoQueue* const histo_queue) {
  assert(histo_queue != NULL);
  WebPSafeFree(histo_queue->queue);
  histo_queue->size = 0;
  histo_queue->max_size = 0;
}

// Pop a specific pair in the queue by replacing it with the last one
// and shrinking the queue.
static void HistoQueuePopPair(HistoQueue* const histo_queue,
                              HistogramPair* const pair) {
  assert(pair >= histo_queue->queue &&
         pair < (histo_queue->queue + histo_queue->size));
  assert(histo_queue->size > 0);
  *pair = histo_queue->queue[histo_queue->size - 1];
  --histo_queue->size;
}

// Check whether a pair in the queue should be updated as head or not.
static void HistoQueueUpdateHead(HistoQueue* const histo_queue,
                                 HistogramPair* const pair) {
  assert(pair->cost_diff < 0);
  assert(pair >= histo_queue->queue &&
         pair < (histo_queue->queue + histo_queue->size));
  assert(histo_queue->size > 0);
  if (pair->cost_diff < histo_queue->queue[0].cost_diff) {
    // Replace the best pair.
    const HistogramPair tmp = histo_queue->queue[0];
    histo_queue->queue[0] = *pair;
    *pair = tmp;
  }
}
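
// Note that HistoQueue is not a full binary heap: only queue[0] is maintained
// as the pair with the most negative cost_diff, and the remaining entries are
// unordered. This is sufficient for the greedy and stochastic merging below,
// which only ever consume the head.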

// Replaces the bad_id with good_id in the pair.
static void HistoQueueFixPair(int bad_id, int good_id,
                              HistogramPair* const pair) {
  if (pair->idx1 == bad_id) pair->idx1 = good_id;
  if (pair->idx2 == bad_id) pair->idx2 = good_id;
  if (pair->idx1 > pair->idx2) {
    const int tmp = pair->idx1;
    pair->idx1 = pair->idx2;
    pair->idx2 = tmp;
  }
}

// Update the cost diff and combo of a pair of histograms. This needs to be
// called when the histograms have been merged with a third one.
// Returns 1 if the cost diff is less than the threshold.
// Otherwise returns 0 and the cost is invalid due to early bail-out.
WEBP_NODISCARD static int HistoQueueUpdatePair(const VP8LHistogram* const h1,
                                               const VP8LHistogram* const h2,
                                               int64_t cost_threshold,
                                               HistogramPair* const pair) {
  const int64_t sum_cost = h1->bit_cost + h2->bit_cost;
  SaturateAdd(sum_cost, &cost_threshold);
  if (!GetCombinedHistogramEntropy(h1, h2, cost_threshold, &pair->cost_combo,
                                   pair->costs)) {
    return 0;
  }
  pair->cost_diff = (int64_t)pair->cost_combo - sum_cost;
  return 1;
}

// Creates a pair from indices "idx1" and "idx2", provided its cost
// is below "threshold" (a negative entropy delta).
// Returns the cost of the pair, or 0 if it is above the threshold.
static int64_t HistoQueuePush(HistoQueue* const histo_queue,
                              VP8LHistogram** const histograms, int idx1,
                              int idx2, int64_t threshold) {
  const VP8LHistogram* h1;
  const VP8LHistogram* h2;
  HistogramPair pair;

  // Stop here if the queue is full.
  if (histo_queue->size == histo_queue->max_size) return 0;
  assert(threshold <= 0);
  if (idx1 > idx2) {
    const int tmp = idx2;
    idx2 = idx1;
    idx1 = tmp;
  }
  pair.idx1 = idx1;
  pair.idx2 = idx2;
  h1 = histograms[idx1];
  h2 = histograms[idx2];

  // Do not even consider the pair if it does not improve the entropy.
  if (!HistoQueueUpdatePair(h1, h2, threshold, &pair)) return 0;

  histo_queue->queue[histo_queue->size++] = pair;
  HistoQueueUpdateHead(histo_queue, &histo_queue->queue[histo_queue->size - 1]);

  return pair.cost_diff;
}
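
// Since 'threshold' is at most 0, a pair only enters the queue when its
// combined cost undercuts the sum of the two individual costs by at least
// |threshold|, i.e. when pair.cost_diff < threshold <= 0.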

// -----------------------------------------------------------------------------

// Combines histograms by repeatedly choosing the pair with the highest cost
// reduction.
static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo) {
  int ok = 0;
  const int image_histo_size = image_histo->size;
  int i, j;
  VP8LHistogram** const histograms = image_histo->histograms;
  // Priority queue of histogram pairs.
  HistoQueue histo_queue;

  // image_histo_size^2 for the queue size is safe. If you look at
  // HistogramCombineGreedy, and imagine that HistoQueuePush always pushes
  // data to the queue, you insert at most:
  // - image_histo_size*(image_histo_size-1)/2 (the first two for loops)
  // - image_histo_size - 1 in the last for loop at the first iteration of
  //   the while loop, image_histo_size - 2 at the second iteration ...
  //   therefore image_histo_size*(image_histo_size-1)/2 overall too
  if (!HistoQueueInit(&histo_queue, image_histo_size * image_histo_size)) {
    goto End;
  }

  // Initialize the queue.
  for (i = 0; i < image_histo_size; ++i) {
    for (j = i + 1; j < image_histo_size; ++j) {
      HistoQueuePush(&histo_queue, histograms, i, j, 0);
    }
  }

  while (histo_queue.size > 0) {
    const int idx1 = histo_queue.queue[0].idx1;
    const int idx2 = histo_queue.queue[0].idx2;
    HistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);
    UpdateHistogramCost(histo_queue.queue[0].cost_combo,
                        histo_queue.queue[0].costs, histograms[idx1]);

    // Remove merged histogram.
    HistogramSetRemoveHistogram(image_histo, idx2);

    // Remove pairs intersecting the just combined best pair.
    for (i = 0; i < histo_queue.size;) {
      HistogramPair* const p = histo_queue.queue + i;
      if (p->idx1 == idx1 || p->idx2 == idx1 ||
          p->idx1 == idx2 || p->idx2 == idx2) {
        HistoQueuePopPair(&histo_queue, p);
      } else {
        HistoQueueFixPair(image_histo->size, idx2, p);
        HistoQueueUpdateHead(&histo_queue, p);
        ++i;
      }
    }

    // Push new pairs formed with combined histogram to the queue.
    for (i = 0; i < image_histo->size; ++i) {
      if (i == idx1) continue;
      HistoQueuePush(&histo_queue, image_histo->histograms, idx1, i, 0);
    }
  }

  ok = 1;

 End:
  HistoQueueClear(&histo_queue);
  return ok;
}

// Perform histogram aggregation using a stochastic approach.
// 'do_greedy' is set to 1 if a greedy approach needs to be performed
// afterwards, 0 otherwise.
static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
                                      int min_cluster_size,
                                      int* const do_greedy) {
  int j, iter;
  uint32_t seed = 1;
  int tries_with_no_success = 0;
  const int outer_iters = image_histo->size;
  const int num_tries_no_success = outer_iters / 2;
  VP8LHistogram** const histograms = image_histo->histograms;
  // Priority queue of histogram pairs. Its size, 'kHistoQueueSize', impacts
  // the quality of the compression and the speed: the smaller it is, the
  // faster the combining runs, but the worse the compression gets.
  HistoQueue histo_queue;
  const int kHistoQueueSize = 9;
  int ok = 0;

  if (image_histo->size < min_cluster_size) {
    *do_greedy = 1;
    return 1;
  }

  if (!HistoQueueInit(&histo_queue, kHistoQueueSize)) goto End;

  // Collapse similar histograms in 'image_histo'.
  for (iter = 0; iter < outer_iters && image_histo->size >= min_cluster_size &&
                 ++tries_with_no_success < num_tries_no_success;
       ++iter) {
    int64_t best_cost =
        (histo_queue.size == 0) ? 0 : histo_queue.queue[0].cost_diff;
    int best_idx1 = -1, best_idx2 = 1;
    const uint32_t rand_range = (image_histo->size - 1) * (image_histo->size);
    // (image_histo->size) / 2 was chosen empirically. Less means faster but
    // worse compression.
    const int num_tries = (image_histo->size) / 2;

    // Pick random samples.
    for (j = 0; image_histo->size >= 2 && j < num_tries; ++j) {
      int64_t curr_cost;
      // Choose two different histograms at random and try to combine them.
      const uint32_t tmp = MyRand(&seed) % rand_range;
      uint32_t idx1 = tmp / (image_histo->size - 1);
      uint32_t idx2 = tmp % (image_histo->size - 1);
      if (idx2 >= idx1) ++idx2;

      // Calculate cost reduction on combination.
      curr_cost =
          HistoQueuePush(&histo_queue, histograms, idx1, idx2, best_cost);
      if (curr_cost < 0) {  // found a better pair?
        best_cost = curr_cost;
        // Empty the queue if we reached full capacity.
        if (histo_queue.size == histo_queue.max_size) break;
      }
    }
    if (histo_queue.size == 0) continue;

    // Get the best histograms.
    best_idx1 = histo_queue.queue[0].idx1;
    best_idx2 = histo_queue.queue[0].idx2;
    assert(best_idx1 < best_idx2);
    // Merge the histograms and remove best_idx2 from the queue.
    HistogramAdd(histograms[best_idx2], histograms[best_idx1],
                 histograms[best_idx1]);
    UpdateHistogramCost(histo_queue.queue[0].cost_combo,
                        histo_queue.queue[0].costs, histograms[best_idx1]);
    HistogramSetRemoveHistogram(image_histo, best_idx2);
    // Parse the queue and update each pair that deals with best_idx1,
    // best_idx2 or image_histo_size.
    for (j = 0; j < histo_queue.size;) {
      HistogramPair* const p = histo_queue.queue + j;
      const int is_idx1_best = p->idx1 == best_idx1 || p->idx1 == best_idx2;
      const int is_idx2_best = p->idx2 == best_idx1 || p->idx2 == best_idx2;
      // The front pair could have been duplicated by a random pick so
      // check for it all the time nevertheless.
      if (is_idx1_best && is_idx2_best) {
        HistoQueuePopPair(&histo_queue, p);
        continue;
      }
      // Any pair containing one of the two best indices should only refer to
      // best_idx1. Its cost should also be updated.
      if (is_idx1_best || is_idx2_best) {
        HistoQueueFixPair(best_idx2, best_idx1, p);
        // Re-evaluate the cost of an updated pair.
        if (!HistoQueueUpdatePair(histograms[p->idx1], histograms[p->idx2], 0,
                                  p)) {
          HistoQueuePopPair(&histo_queue, p);
          continue;
        }
      }
      HistoQueueFixPair(image_histo->size, best_idx2, p);
      HistoQueueUpdateHead(&histo_queue, p);
      ++j;
    }
    tries_with_no_success = 0;
  }
  *do_greedy = (image_histo->size <= min_cluster_size);
  ok = 1;

 End:
  HistoQueueClear(&histo_queue);
  return ok;
}
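
// Note on the random pair selection in HistogramCombineStochastic(): 'tmp' is
// drawn from [0, size * (size - 1)) and decoded as idx1 = tmp / (size - 1),
// idx2 = tmp % (size - 1), with idx2 bumped past idx1 when idx2 >= idx1. Each
// value of 'tmp' therefore maps to a distinct ordered pair of two different
// histogram indices.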

// -----------------------------------------------------------------------------
// Histogram refinement

// Find the best 'out' histogram for each of the 'in' histograms.
// At call-time, 'out' contains the histograms of the clusters.
// Note: we assume that out[]->bit_cost is already up-to-date.
static void HistogramRemap(const VP8LHistogramSet* const in,
                           VP8LHistogramSet* const out,
                           uint32_t* const symbols) {
  int i;
  VP8LHistogram** const in_histo = in->histograms;
  VP8LHistogram** const out_histo = out->histograms;
  const int in_size = out->max_size;
  const int out_size = out->size;
  if (out_size > 1) {
    for (i = 0; i < in_size; ++i) {
      int best_out = 0;
      int64_t best_bits = WEBP_INT64_MAX;
      int k;
      if (in_histo[i] == NULL) {
        // Arbitrarily set to the previous value if unused to help future LZ77.
        symbols[i] = symbols[i - 1];
        continue;
      }
      for (k = 0; k < out_size; ++k) {
        int64_t cur_bits;
        if (HistogramAddThresh(out_histo[k], in_histo[i], best_bits,
                               &cur_bits)) {
          best_bits = cur_bits;
          best_out = k;
        }
      }
      symbols[i] = best_out;
    }
  } else {
    assert(out_size == 1);
    for (i = 0; i < in_size; ++i) {
      symbols[i] = 0;
    }
  }

  // Recompute each out based on raw and symbols.
  VP8LHistogramSetClear(out);
  out->size = out_size;

  for (i = 0; i < in_size; ++i) {
    int idx;
    if (in_histo[i] == NULL) continue;
    idx = symbols[i];
    HistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]);
  }
}

static int32_t GetCombineCostFactor(int histo_size, int quality) {
  int32_t combine_cost_factor = 16;
  if (quality < 90) {
    if (histo_size > 256) combine_cost_factor /= 2;
    if (histo_size > 512) combine_cost_factor /= 2;
    if (histo_size > 1024) combine_cost_factor /= 2;
    if (quality <= 50) combine_cost_factor /= 2;
  }
  return combine_cost_factor;
}
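
// The factor is applied as a percentage of a histogram's bit_cost (see
// HistogramCombineEntropyBin()), so it ranges from 16% down to 1%. For
// example, quality 50 with 1024 tiles gives 16 / 2 / 2 / 2 = 2, i.e. a merge
// is accepted only if the combined histogram undercuts the sum of the two
// costs by at least 2% of the candidate's bit_cost.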

int VP8LGetHistoImageSymbols(int xsize, int ysize,
                             const VP8LBackwardRefs* const refs, int quality,
                             int low_effort, int histogram_bits, int cache_bits,
                             VP8LHistogramSet* const image_histo,
                             VP8LHistogram* const tmp_histo,
                             uint32_t* const histogram_symbols,
                             const WebPPicture* const pic, int percent_range,
                             int* const percent) {
  const int histo_xsize =
      histogram_bits ? VP8LSubSampleSize(xsize, histogram_bits) : 1;
  const int histo_ysize =
      histogram_bits ? VP8LSubSampleSize(ysize, histogram_bits) : 1;
  const int image_histo_raw_size = histo_xsize * histo_ysize;
  VP8LHistogramSet* const orig_histo =
      VP8LAllocateHistogramSet(image_histo_raw_size, cache_bits);
  // Don't attempt linear bin-partition heuristic for
  // histograms of small sizes (as bin_map will be very sparse) and
  // maximum quality q==100 (to preserve the compression gains at that level).
  const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE;
  int entropy_combine;
  if (orig_histo == NULL) {
    WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
    goto Error;
  }

  // Construct the histograms from backward references.
  HistogramBuild(xsize, histogram_bits, refs, orig_histo);
  HistogramCopyAndAnalyze(orig_histo, image_histo);
  entropy_combine =
      (image_histo->size > entropy_combine_num_bins * 2) && (quality < 100);

  if (entropy_combine) {
    const int32_t combine_cost_factor =
        GetCombineCostFactor(image_histo_raw_size, quality);

    HistogramAnalyzeEntropyBin(image_histo, low_effort);
    // Collapse histograms with similar entropy.
    HistogramCombineEntropyBin(image_histo, tmp_histo, entropy_combine_num_bins,
                               combine_cost_factor, low_effort);
  }

  // Don't combine the histograms using stochastic and greedy heuristics for
  // low-effort compression mode.
  if (!low_effort || !entropy_combine) {
    // cubic ramp between 1 and MAX_HISTO_GREEDY:
    const int threshold_size =
        (int)(1 + DivRound(quality * quality * quality * (MAX_HISTO_GREEDY - 1),
                           100 * 100 * 100));
    int do_greedy;
    if (!HistogramCombineStochastic(image_histo, threshold_size, &do_greedy)) {
      WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
      goto Error;
    }
    if (do_greedy) {
      if (!HistogramCombineGreedy(image_histo)) {
        WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
        goto Error;
      }
    }
  }

  // Find the optimal map from original histograms to the final ones.
  HistogramRemap(orig_histo, image_histo, histogram_symbols);

  if (!WebPReportProgress(pic, *percent + percent_range, percent)) {
    goto Error;
  }

 Error:
  VP8LFreeHistogramSet(orig_histo);
  return (pic->error_code == VP8_ENC_OK);
}