/src/libwebp/src/dsp/lossless_enc_avx2.c
Line | Count | Source |
1 | | // Copyright 2025 Google Inc. All Rights Reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style license |
4 | | // that can be found in the COPYING file in the root of the source |
5 | | // tree. An additional intellectual property rights grant can be found |
6 | | // in the file PATENTS. All contributing project authors may |
7 | | // be found in the AUTHORS file in the root of the source tree. |
8 | | // ----------------------------------------------------------------------------- |
9 | | // |
10 | | // AVX2 variant of methods for lossless encoder |
11 | | // |
12 | | // Author: Vincent Rabaud (vrabaud@google.com) |
13 | | |
14 | | #include "src/dsp/dsp.h" |
15 | | |
16 | | #if defined(WEBP_USE_AVX2) |
17 | | #include <emmintrin.h> |
18 | | #include <immintrin.h> |
19 | | |
20 | | #include <assert.h> |
21 | | #include <stddef.h> |
22 | | |
23 | | #include "src/dsp/cpu.h" |
24 | | #include "src/dsp/lossless.h" |
25 | | #include "src/dsp/lossless_common.h" |
26 | | #include "src/utils/utils.h" |
27 | | #include "src/webp/format_constants.h" |
28 | | #include "src/webp/types.h" |
29 | | |
30 | | //------------------------------------------------------------------------------ |
31 | | // Subtract-Green Transform |
32 | | |
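 | | // NOTE: subtracts the green channel (mod 256) from the red and blue channels |
 | | // of every pixel; alpha and green are left unchanged. |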
33 | | static void SubtractGreenFromBlueAndRed_AVX2(uint32_t* argb_data, |
34 | 19.0k | int num_pixels) { |
35 | 19.0k | int i; |
36 | 19.0k | const __m256i kCstShuffle = _mm256_set_epi8( |
37 | 19.0k | -1, 29, -1, 29, -1, 25, -1, 25, -1, 21, -1, 21, -1, 17, -1, 17, -1, 13, |
38 | 19.0k | -1, 13, -1, 9, -1, 9, -1, 5, -1, 5, -1, 1, -1, 1); |
39 | 6.65M | for (i = 0; i + 8 <= num_pixels; i += 8) { |
40 | 6.63M | const __m256i in = _mm256_loadu_si256((__m256i*)&argb_data[i]); // argb |
41 | 6.63M | const __m256i in_0g0g = _mm256_shuffle_epi8(in, kCstShuffle); |
42 | 6.63M | const __m256i out = _mm256_sub_epi8(in, in_0g0g); |
43 | 6.63M | _mm256_storeu_si256((__m256i*)&argb_data[i], out); |
44 | 6.63M | } |
45 | | // fallthrough and finish off with plain-SSE |
46 | 19.0k | if (i != num_pixels) { |
47 | 10.3k | VP8LSubtractGreenFromBlueAndRed_SSE(argb_data + i, num_pixels - i); |
48 | 10.3k | } |
49 | 19.0k | } |
50 | | |
51 | | //------------------------------------------------------------------------------ |
52 | | // Color Transform |
53 | | |
54 | | // For sign-extended multiplying constants, pre-shifted by 5: |
55 | | #define CST_5b(X) (((int16_t)((uint16_t)(X) << 8)) >> 5) |
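 | | // NOTE: e.g. CST_5b(0x40) == 512 and CST_5b(0xc0) == -512 (sign-extended); with the |
 | | // color byte in the high half of a 16-bit lane, _mm256_mulhi_epi16 by this |
 | | // constant reproduces the scalar ((int8_t)mult * (int8_t)color) >> 5 delta. |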
56 | | |
57 | | #define MK_CST_16(HI, LO) \ |
58 | 56.9M | _mm256_set1_epi32((int)(((uint32_t)(HI) << 16) | ((LO) & 0xffff))) |
59 | | |
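 | | // NOTE: forward color transform: red -= delta(green_to_red, green) and |
 | | // blue -= delta(green_to_blue, green) + delta(red_to_blue, red), each mod 256, |
 | | // where delta(t, c) = ((int8_t)t * (int8_t)c) >> 5. |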
60 | | static void TransformColor_AVX2(const VP8LMultipliers* WEBP_RESTRICT const m, |
61 | | uint32_t* WEBP_RESTRICT argb_data, |
62 | 5.86M | int num_pixels) { |
63 | 5.86M | const __m256i mults_rb = |
64 | 5.86M | MK_CST_16(CST_5b(m->green_to_red), CST_5b(m->green_to_blue)); |
65 | 5.86M | const __m256i mults_b2 = MK_CST_16(CST_5b(m->red_to_blue), 0); |
66 | 5.86M | const __m256i mask_rb = _mm256_set1_epi32(0x00ff00ff); // red-blue masks |
67 | 5.86M | const __m256i kCstShuffle = _mm256_set_epi8( |
68 | 5.86M | 29, -1, 29, -1, 25, -1, 25, -1, 21, -1, 21, -1, 17, -1, 17, -1, 13, -1, |
69 | 5.86M | 13, -1, 9, -1, 9, -1, 5, -1, 5, -1, 1, -1, 1, -1); |
70 | 5.86M | int i; |
71 | 10.6M | for (i = 0; i + 8 <= num_pixels; i += 8) { |
72 | 4.80M | const __m256i in = _mm256_loadu_si256((__m256i*)&argb_data[i]); // argb |
73 | 4.80M | const __m256i A = _mm256_shuffle_epi8(in, kCstShuffle); // g0g0 |
74 | 4.80M | const __m256i B = _mm256_mulhi_epi16(A, mults_rb); // x dr x db1 |
75 | 4.80M | const __m256i C = _mm256_slli_epi16(in, 8); // r 0 b 0 |
76 | 4.80M | const __m256i D = _mm256_mulhi_epi16(C, mults_b2); // x db2 0 0 |
77 | 4.80M | const __m256i E = _mm256_srli_epi32(D, 16); // 0 0 x db2 |
78 | 4.80M | const __m256i F = _mm256_add_epi8(E, B); // x dr x db |
79 | 4.80M | const __m256i G = _mm256_and_si256(F, mask_rb); // 0 dr 0 db |
80 | 4.80M | const __m256i out = _mm256_sub_epi8(in, G); |
81 | 4.80M | _mm256_storeu_si256((__m256i*)&argb_data[i], out); |
82 | 4.80M | } |
83 | | // fallthrough and finish off with plain-SSE |
84 | 5.86M | if (i != num_pixels) { |
85 | 2.21M | VP8LTransformColor_SSE(m, argb_data + i, num_pixels - i); |
86 | 2.21M | } |
87 | 5.86M | } |
88 | | |
89 | | //------------------------------------------------------------------------------ |
90 | | #define SPAN 16 |
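 | | // NOTE: CollectColorBlue/RedTransforms accumulate histograms of the transformed |
 | | // blue (resp. red) channel over a tile, for the given candidate multipliers. |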
91 | | static void CollectColorBlueTransforms_AVX2(const uint32_t* WEBP_RESTRICT argb, |
92 | | int stride, int tile_width, |
93 | | int tile_height, int green_to_blue, |
94 | 34.1M | int red_to_blue, uint32_t histo[]) { |
95 | 34.1M | const __m256i mult = |
96 | 34.1M | MK_CST_16(CST_5b(red_to_blue) + 256, CST_5b(green_to_blue)); |
97 | 34.1M | const __m256i perm = _mm256_setr_epi8( |
98 | 34.1M | -1, 1, -1, 2, -1, 5, -1, 6, -1, 9, -1, 10, -1, 13, -1, 14, -1, 17, -1, 18, |
99 | 34.1M | -1, 21, -1, 22, -1, 25, -1, 26, -1, 29, -1, 30); |
100 | 34.1M | if (tile_width >= 8) { |
101 | 15.3M | int y, i; |
102 | 142M | for (y = 0; y < tile_height; ++y) { |
103 | 126M | uint8_t values[32]; |
104 | 126M | const uint32_t* const src = argb + y * stride; |
105 | 126M | const __m256i A1 = _mm256_loadu_si256((const __m256i*)src); |
106 | 126M | const __m256i B1 = _mm256_shuffle_epi8(A1, perm); |
107 | 126M | const __m256i C1 = _mm256_mulhi_epi16(B1, mult); |
108 | 126M | const __m256i D1 = _mm256_sub_epi16(A1, C1); |
109 | 126M | __m256i E = _mm256_add_epi16(_mm256_srli_epi32(D1, 16), D1); |
110 | 126M | int x; |
111 | 164M | for (x = 8; x + 8 <= tile_width; x += 8) { |
112 | 37.4M | const __m256i A2 = _mm256_loadu_si256((const __m256i*)(src + x)); |
113 | 37.4M | __m256i B2, C2, D2; |
114 | 37.4M | _mm256_storeu_si256((__m256i*)values, E); |
115 | 337M | for (i = 0; i < 32; i += 4) ++histo[values[i]]; |
116 | 37.4M | B2 = _mm256_shuffle_epi8(A2, perm); |
117 | 37.4M | C2 = _mm256_mulhi_epi16(B2, mult); |
118 | 37.4M | D2 = _mm256_sub_epi16(A2, C2); |
119 | 37.4M | E = _mm256_add_epi16(_mm256_srli_epi32(D2, 16), D2); |
120 | 37.4M | } |
121 | 126M | _mm256_storeu_si256((__m256i*)values, E); |
122 | 1.14G | for (i = 0; i < 32; i += 4) ++histo[values[i]]; |
123 | 126M | } |
124 | 15.3M | } |
125 | 34.1M | { |
126 | 34.1M | const int left_over = tile_width & 7; |
127 | 34.1M | if (left_over > 0) { |
128 | 18.8M | VP8LCollectColorBlueTransforms_SSE(argb + tile_width - left_over, stride, |
129 | 18.8M | left_over, tile_height, green_to_blue, |
130 | 18.8M | red_to_blue, histo); |
131 | 18.8M | } |
132 | 34.1M | } |
133 | 34.1M | } |
134 | | |
135 | | static void CollectColorRedTransforms_AVX2(const uint32_t* WEBP_RESTRICT argb, |
136 | | int stride, int tile_width, |
137 | | int tile_height, int green_to_red, |
138 | 11.1M | uint32_t histo[]) { |
139 | 11.1M | const __m256i mult = MK_CST_16(0, CST_5b(green_to_red)); |
140 | 11.1M | const __m256i mask_g = _mm256_set1_epi32(0x0000ff00); |
141 | 11.1M | if (tile_width >= 8) { |
142 | 4.99M | int y, i; |
143 | 46.2M | for (y = 0; y < tile_height; ++y) { |
144 | 41.2M | uint8_t values[32]; |
145 | 41.2M | const uint32_t* const src = argb + y * stride; |
146 | 41.2M | const __m256i A1 = _mm256_loadu_si256((const __m256i*)src); |
147 | 41.2M | const __m256i B1 = _mm256_and_si256(A1, mask_g); |
148 | 41.2M | const __m256i C1 = _mm256_madd_epi16(B1, mult); |
149 | 41.2M | __m256i D = _mm256_sub_epi16(A1, C1); |
150 | 41.2M | int x; |
151 | 53.2M | for (x = 8; x + 8 <= tile_width; x += 8) { |
152 | 11.9M | const __m256i A2 = _mm256_loadu_si256((const __m256i*)(src + x)); |
153 | 11.9M | __m256i B2, C2; |
154 | 11.9M | _mm256_storeu_si256((__m256i*)values, D); |
155 | 107M | for (i = 2; i < 32; i += 4) ++histo[values[i]]; |
156 | 11.9M | B2 = _mm256_and_si256(A2, mask_g); |
157 | 11.9M | C2 = _mm256_madd_epi16(B2, mult); |
158 | 11.9M | D = _mm256_sub_epi16(A2, C2); |
159 | 11.9M | } |
160 | 41.2M | _mm256_storeu_si256((__m256i*)values, D); |
161 | 371M | for (i = 2; i < 32; i += 4) ++histo[values[i]]; |
162 | 41.2M | } |
163 | 4.99M | } |
164 | 11.1M | { |
165 | 11.1M | const int left_over = tile_width & 7; |
166 | 11.1M | if (left_over > 0) { |
167 | 6.12M | VP8LCollectColorRedTransforms_SSE(argb + tile_width - left_over, stride, |
168 | 6.12M | left_over, tile_height, green_to_red, |
169 | 6.12M | histo); |
170 | 6.12M | } |
171 | 11.1M | } |
172 | 11.1M | } |
173 | | #undef SPAN |
174 | | #undef MK_CST_16 |
175 | | |
176 | | //------------------------------------------------------------------------------ |
177 | | |
178 | | // Note we are adding uint32_t's as *signed* int32's (using _mm256_add_epi32). |
179 | | // But that's ok since the histogram values are less than 1<<28 (max picture |
180 | | // size). |
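 | | // NOTE: each pairwise sum is thus below 1 << 29, well within signed 32-bit range. |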
181 | | static void AddVector_AVX2(const uint32_t* WEBP_RESTRICT a, |
182 | | const uint32_t* WEBP_RESTRICT b, |
183 | 3.02M | uint32_t* WEBP_RESTRICT out, int size) { |
184 | 3.02M | int i = 0; |
185 | 3.02M | int aligned_size = size & ~31; |
186 | | // Size is, at minimum, NUM_DISTANCE_CODES (40) and may be as large as |
187 | | // NUM_LITERAL_CODES (256) + NUM_LENGTH_CODES (24) + (0 or a non-zero power of |
188 | | // 2). See the usage in VP8LHistogramAdd(). |
189 | 3.02M | assert(size >= 32); |
190 | 3.02M | assert(size % 2 == 0); |
191 | | |
192 | 28.7M | do { |
193 | 28.7M | const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]); |
194 | 28.7M | const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]); |
195 | 28.7M | const __m256i a2 = _mm256_loadu_si256((const __m256i*)&a[i + 16]); |
196 | 28.7M | const __m256i a3 = _mm256_loadu_si256((const __m256i*)&a[i + 24]); |
197 | 28.7M | const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i + 0]); |
198 | 28.7M | const __m256i b1 = _mm256_loadu_si256((const __m256i*)&b[i + 8]); |
199 | 28.7M | const __m256i b2 = _mm256_loadu_si256((const __m256i*)&b[i + 16]); |
200 | 28.7M | const __m256i b3 = _mm256_loadu_si256((const __m256i*)&b[i + 24]); |
201 | 28.7M | _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0)); |
202 | 28.7M | _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1)); |
203 | 28.7M | _mm256_storeu_si256((__m256i*)&out[i + 16], _mm256_add_epi32(a2, b2)); |
204 | 28.7M | _mm256_storeu_si256((__m256i*)&out[i + 24], _mm256_add_epi32(a3, b3)); |
205 | 28.7M | i += 32; |
206 | 28.7M | } while (i != aligned_size); |
207 | | |
208 | 3.02M | if ((size & 16) != 0) { |
209 | 729k | const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]); |
210 | 729k | const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]); |
211 | 729k | const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i + 0]); |
212 | 729k | const __m256i b1 = _mm256_loadu_si256((const __m256i*)&b[i + 8]); |
213 | 729k | _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0)); |
214 | 729k | _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1)); |
215 | 729k | i += 16; |
216 | 729k | } |
217 | | |
218 | 3.02M | size &= 15; |
219 | 3.02M | if (size == 8) { |
220 | 968k | const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i]); |
221 | 968k | const __m256i b0 = _mm256_loadu_si256((const __m256i*)&b[i]); |
222 | 968k | _mm256_storeu_si256((__m256i*)&out[i], _mm256_add_epi32(a0, b0)); |
223 | 2.05M | } else { |
224 | 2.46M | for (; size--; ++i) { |
225 | 412k | out[i] = a[i] + b[i]; |
226 | 412k | } |
227 | 2.05M | } |
228 | 3.02M | } |
229 | | |
230 | | static void AddVectorEq_AVX2(const uint32_t* WEBP_RESTRICT a, |
231 | 13.2M | uint32_t* WEBP_RESTRICT out, int size) { |
232 | 13.2M | int i = 0; |
233 | 13.2M | int aligned_size = size & ~31; |
234 | | // Size is, at minimum, NUM_DISTANCE_CODES (40) and may be as large as |
235 | | // NUM_LITERAL_CODES (256) + NUM_LENGTH_CODES (24) + (0 or a non-zero power of |
236 | | // 2). See the usage in VP8LHistogramAdd(). |
237 | 13.2M | assert(size >= 32); |
238 | 13.2M | assert(size % 2 == 0); |
239 | | |
240 | 165M | do { |
241 | 165M | const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]); |
242 | 165M | const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]); |
243 | 165M | const __m256i a2 = _mm256_loadu_si256((const __m256i*)&a[i + 16]); |
244 | 165M | const __m256i a3 = _mm256_loadu_si256((const __m256i*)&a[i + 24]); |
245 | 165M | const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i + 0]); |
246 | 165M | const __m256i b1 = _mm256_loadu_si256((const __m256i*)&out[i + 8]); |
247 | 165M | const __m256i b2 = _mm256_loadu_si256((const __m256i*)&out[i + 16]); |
248 | 165M | const __m256i b3 = _mm256_loadu_si256((const __m256i*)&out[i + 24]); |
249 | 165M | _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0)); |
250 | 165M | _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1)); |
251 | 165M | _mm256_storeu_si256((__m256i*)&out[i + 16], _mm256_add_epi32(a2, b2)); |
252 | 165M | _mm256_storeu_si256((__m256i*)&out[i + 24], _mm256_add_epi32(a3, b3)); |
253 | 165M | i += 32; |
254 | 165M | } while (i != aligned_size); |
255 | | |
256 | 13.2M | if ((size & 16) != 0) { |
257 | 2.64M | const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i + 0]); |
258 | 2.64M | const __m256i a1 = _mm256_loadu_si256((const __m256i*)&a[i + 8]); |
259 | 2.64M | const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i + 0]); |
260 | 2.64M | const __m256i b1 = _mm256_loadu_si256((const __m256i*)&out[i + 8]); |
261 | 2.64M | _mm256_storeu_si256((__m256i*)&out[i + 0], _mm256_add_epi32(a0, b0)); |
262 | 2.64M | _mm256_storeu_si256((__m256i*)&out[i + 8], _mm256_add_epi32(a1, b1)); |
263 | 2.64M | i += 16; |
264 | 2.64M | } |
265 | | |
266 | 13.2M | size &= 15; |
267 | 13.2M | if (size == 8) { |
268 | 3.45M | const __m256i a0 = _mm256_loadu_si256((const __m256i*)&a[i]); |
269 | 3.45M | const __m256i b0 = _mm256_loadu_si256((const __m256i*)&out[i]); |
270 | 3.45M | _mm256_storeu_si256((__m256i*)&out[i], _mm256_add_epi32(a0, b0)); |
271 | 9.79M | } else { |
272 | 12.5M | for (; size--; ++i) { |
273 | 2.77M | out[i] += a[i]; |
274 | 2.77M | } |
275 | 9.79M | } |
276 | 13.2M | } |
277 | | |
278 | | //------------------------------------------------------------------------------ |
279 | | // Entropy |
280 | | |
281 | | #if !defined(WEBP_HAVE_SLOW_CLZ_CTZ) |
282 | | |
283 | | static uint64_t CombinedShannonEntropy_AVX2(const uint32_t X[256], |
284 | 169M | const uint32_t Y[256]) { |
285 | 169M | int i; |
286 | 169M | uint64_t retval = 0; |
287 | 169M | uint32_t sumX = 0, sumXY = 0; |
288 | 169M | const __m256i zero = _mm256_setzero_si256(); |
289 | | |
290 | 1.52G | for (i = 0; i < 256; i += 32) { |
291 | 1.35G | const __m256i x0 = _mm256_loadu_si256((const __m256i*)(X + i + 0)); |
292 | 1.35G | const __m256i y0 = _mm256_loadu_si256((const __m256i*)(Y + i + 0)); |
293 | 1.35G | const __m256i x1 = _mm256_loadu_si256((const __m256i*)(X + i + 8)); |
294 | 1.35G | const __m256i y1 = _mm256_loadu_si256((const __m256i*)(Y + i + 8)); |
295 | 1.35G | const __m256i x2 = _mm256_loadu_si256((const __m256i*)(X + i + 16)); |
296 | 1.35G | const __m256i y2 = _mm256_loadu_si256((const __m256i*)(Y + i + 16)); |
297 | 1.35G | const __m256i x3 = _mm256_loadu_si256((const __m256i*)(X + i + 24)); |
298 | 1.35G | const __m256i y3 = _mm256_loadu_si256((const __m256i*)(Y + i + 24)); |
299 | 1.35G | const __m256i x4 = _mm256_packs_epi16(_mm256_packs_epi32(x0, x1), |
300 | 1.35G | _mm256_packs_epi32(x2, x3)); |
301 | 1.35G | const __m256i y4 = _mm256_packs_epi16(_mm256_packs_epi32(y0, y1), |
302 | 1.35G | _mm256_packs_epi32(y2, y3)); |
303 | | // The packs interleave bytes across 128-bit lanes; the permutes below restore plain 0..31 index order. |
304 | 1.35G | const __m256i x5 = _mm256_permutevar8x32_epi32( |
305 | 1.35G | x4, _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0)); |
306 | 1.35G | const __m256i y5 = _mm256_permutevar8x32_epi32( |
307 | 1.35G | y4, _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0)); |
308 | 1.35G | const uint32_t mx = |
309 | 1.35G | (uint32_t)_mm256_movemask_epi8(_mm256_cmpgt_epi8(x5, zero)); |
310 | 1.35G | uint32_t my = |
311 | 1.35G | (uint32_t)_mm256_movemask_epi8(_mm256_cmpgt_epi8(y5, zero)) | mx; |
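 | | // Bit j of mx (resp. my) is set iff X[i + j] (resp. X[i + j] or Y[i + j]) is |
 | | // non-zero; the loop below visits only the set bits of my. |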
312 | 5.45G | while (my) { |
313 | 4.10G | const int32_t j = BitsCtz(my); |
314 | 4.10G | uint32_t xy; |
315 | 4.10G | if ((mx >> j) & 1) { |
316 | 788M | const int x = X[i + j]; |
317 | 788M | sumXY += x; |
318 | 788M | retval += VP8LFastSLog2(x); |
319 | 788M | } |
320 | 4.10G | xy = X[i + j] + Y[i + j]; |
321 | 4.10G | sumX += xy; |
322 | 4.10G | retval += VP8LFastSLog2(xy); |
323 | 4.10G | my &= my - 1; |
324 | 4.10G | } |
325 | 1.35G | } |
326 | 169M | retval = VP8LFastSLog2(sumX) + VP8LFastSLog2(sumXY) - retval; |
327 | 169M | return retval; |
328 | 169M | } |
329 | | |
330 | | #else |
331 | | |
332 | | #define DONT_USE_COMBINED_SHANNON_ENTROPY_SSE2_FUNC // won't be faster |
333 | | |
334 | | #endif |
335 | | |
336 | | //------------------------------------------------------------------------------ |
337 | | |
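 | | // NOTE: returns the number of leading elements that are equal in array1 and |
 | | // array2 (the length of their common prefix), at most 'length'. |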
338 | | static int VectorMismatch_AVX2(const uint32_t* const array1, |
339 | 91.1M | const uint32_t* const array2, int length) { |
340 | 91.1M | int match_len; |
341 | | |
342 | 91.1M | if (length >= 24) { |
343 | 89.3M | __m256i A0 = _mm256_loadu_si256((const __m256i*)&array1[0]); |
344 | 89.3M | __m256i A1 = _mm256_loadu_si256((const __m256i*)&array2[0]); |
345 | 89.3M | match_len = 0; |
346 | 97.9M | do { |
347 | | // Loop unrolling and early load both provide a speedup of 10% for the |
348 | | // current function. Also, max_limit can be MAX_LENGTH=4096 at most. |
349 | 97.9M | const __m256i cmpA = _mm256_cmpeq_epi32(A0, A1); |
350 | 97.9M | const __m256i B0 = |
351 | 97.9M | _mm256_loadu_si256((const __m256i*)&array1[match_len + 8]); |
352 | 97.9M | const __m256i B1 = |
353 | 97.9M | _mm256_loadu_si256((const __m256i*)&array2[match_len + 8]); |
354 | 97.9M | if ((uint32_t)_mm256_movemask_epi8(cmpA) != 0xffffffff) break; |
355 | 12.0M | match_len += 8; |
356 | | |
357 | 12.0M | { |
358 | 12.0M | const __m256i cmpB = _mm256_cmpeq_epi32(B0, B1); |
359 | 12.0M | A0 = _mm256_loadu_si256((const __m256i*)&array1[match_len + 8]); |
360 | 12.0M | A1 = _mm256_loadu_si256((const __m256i*)&array2[match_len + 8]); |
361 | 12.0M | if ((uint32_t)_mm256_movemask_epi8(cmpB) != 0xffffffff) break; |
362 | 8.66M | match_len += 8; |
363 | 8.66M | } |
364 | 8.66M | } while (match_len + 24 < length); |
365 | 89.3M | } else { |
366 | 1.83M | match_len = 0; |
367 | | // Unroll the potential first two loops. |
368 | 1.83M | if (length >= 8 && |
369 | 1.83M | (uint32_t)_mm256_movemask_epi8(_mm256_cmpeq_epi32( |
370 | 681k | _mm256_loadu_si256((const __m256i*)&array1[0]), |
371 | 681k | _mm256_loadu_si256((const __m256i*)&array2[0]))) == 0xffffffff) { |
372 | 62.8k | match_len = 8; |
373 | 62.8k | if (length >= 16 && |
374 | 62.8k | (uint32_t)_mm256_movemask_epi8(_mm256_cmpeq_epi32( |
375 | 27.2k | _mm256_loadu_si256((const __m256i*)&array1[8]), |
376 | 27.2k | _mm256_loadu_si256((const __m256i*)&array2[8]))) == 0xffffffff) { |
377 | 12.5k | match_len = 16; |
378 | 12.5k | } |
379 | 62.8k | } |
380 | 1.83M | } |
381 | | |
382 | 303M | while (match_len < length && array1[match_len] == array2[match_len]) { |
383 | 212M | ++match_len; |
384 | 212M | } |
385 | 91.1M | return match_len; |
386 | 91.1M | } |
387 | | |
388 | | // Bundles multiple (1, 2, 4 or 8) pixels into a single pixel. |
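 | | // NOTE: (1 << xbits) consecutive color-map indices are packed into the green |
 | | // channel of one output pixel, each using (8 >> xbits) bits; alpha is set to 0xff. |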
389 | | static void BundleColorMap_AVX2(const uint8_t* WEBP_RESTRICT const row, |
390 | | int width, int xbits, |
391 | 1.94M | uint32_t* WEBP_RESTRICT dst) { |
392 | 1.94M | int x = 0; |
393 | 1.94M | assert(xbits >= 0); |
394 | 1.94M | assert(xbits <= 3); |
395 | 1.94M | switch (xbits) { |
396 | 766k | case 0: { |
397 | 766k | const __m256i ff = _mm256_set1_epi16((short)0xff00); |
398 | 766k | const __m256i zero = _mm256_setzero_si256(); |
399 | | // Store 0xff000000 | (row[x] << 8). |
400 | 1.48M | for (x = 0; x + 32 <= width; x += 32, dst += 32) { |
401 | 720k | const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]); |
402 | 720k | const __m256i in_lo = _mm256_unpacklo_epi8(zero, in); |
403 | 720k | const __m256i dst0 = _mm256_unpacklo_epi16(in_lo, ff); |
404 | 720k | const __m256i dst1 = _mm256_unpackhi_epi16(in_lo, ff); |
405 | 720k | const __m256i in_hi = _mm256_unpackhi_epi8(zero, in); |
406 | 720k | const __m256i dst2 = _mm256_unpacklo_epi16(in_hi, ff); |
407 | 720k | const __m256i dst3 = _mm256_unpackhi_epi16(in_hi, ff); |
408 | 720k | _mm256_storeu2_m128i((__m128i*)&dst[16], (__m128i*)&dst[0], dst0); |
409 | 720k | _mm256_storeu2_m128i((__m128i*)&dst[20], (__m128i*)&dst[4], dst1); |
410 | 720k | _mm256_storeu2_m128i((__m128i*)&dst[24], (__m128i*)&dst[8], dst2); |
411 | 720k | _mm256_storeu2_m128i((__m128i*)&dst[28], (__m128i*)&dst[12], dst3); |
412 | 720k | } |
413 | 766k | break; |
414 | 0 | } |
415 | 309k | case 1: { |
416 | 309k | const __m256i ff = _mm256_set1_epi16((short)0xff00); |
417 | 309k | const __m256i mul = _mm256_set1_epi16(0x110); |
418 | 685k | for (x = 0; x + 32 <= width; x += 32, dst += 16) { |
419 | | // 0a0b | (where a/b are 4 bits). |
420 | 376k | const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]); |
421 | 376k | const __m256i tmp = _mm256_mullo_epi16(in, mul); // aba0 |
422 | 376k | const __m256i pack = _mm256_and_si256(tmp, ff); // ab00 |
423 | 376k | const __m256i dst0 = _mm256_unpacklo_epi16(pack, ff); |
424 | 376k | const __m256i dst1 = _mm256_unpackhi_epi16(pack, ff); |
425 | 376k | _mm256_storeu2_m128i((__m128i*)&dst[8], (__m128i*)&dst[0], dst0); |
426 | 376k | _mm256_storeu2_m128i((__m128i*)&dst[12], (__m128i*)&dst[4], dst1); |
427 | 376k | } |
428 | 309k | break; |
429 | 0 | } |
430 | 323k | case 2: { |
431 | 323k | const __m256i mask_or = _mm256_set1_epi32((int)0xff000000); |
432 | 323k | const __m256i mul_cst = _mm256_set1_epi16(0x0104); |
433 | 323k | const __m256i mask_mul = _mm256_set1_epi16(0x0f00); |
434 | 806k | for (x = 0; x + 32 <= width; x += 32, dst += 8) { |
435 | | // 000a000b000c000d | (where a/b/c/d are 2 bits). |
436 | 483k | const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]); |
437 | 483k | const __m256i mul = |
438 | 483k | _mm256_mullo_epi16(in, mul_cst); // 00ab00b000cd00d0 |
439 | 483k | const __m256i tmp = |
440 | 483k | _mm256_and_si256(mul, mask_mul); // 00ab000000cd0000 |
441 | 483k | const __m256i shift = _mm256_srli_epi32(tmp, 12); // 00000000ab000000 |
442 | 483k | const __m256i pack = _mm256_or_si256(shift, tmp); // 00000000abcd0000 |
443 | | // Convert to 0xff00**00. |
444 | 483k | const __m256i res = _mm256_or_si256(pack, mask_or); |
445 | 483k | _mm256_storeu_si256((__m256i*)dst, res); |
446 | 483k | } |
447 | 323k | break; |
448 | 0 | } |
449 | 546k | default: { |
450 | 546k | assert(xbits == 3); |
451 | 981k | for (x = 0; x + 32 <= width; x += 32, dst += 4) { |
452 | | // 0000000a00000000b... | (where a/b are 1 bit). |
453 | 435k | const __m256i in = _mm256_loadu_si256((const __m256i*)&row[x]); |
454 | 435k | const __m256i shift = _mm256_slli_epi64(in, 7); |
455 | 435k | const uint32_t move = _mm256_movemask_epi8(shift); |
456 | 435k | dst[0] = 0xff000000 | ((move & 0xff) << 8); |
457 | 435k | dst[1] = 0xff000000 | (move & 0xff00); |
458 | 435k | dst[2] = 0xff000000 | ((move & 0xff0000) >> 8); |
459 | 435k | dst[3] = 0xff000000 | ((move & 0xff000000) >> 16); |
460 | 435k | } |
461 | 546k | break; |
462 | 0 | } |
463 | 1.94M | } |
464 | 1.94M | if (x != width) { |
465 | 1.64M | VP8LBundleColorMap_SSE(row + x, width - x, xbits, dst); |
466 | 1.64M | } |
467 | 1.94M | } |
468 | | |
469 | | //------------------------------------------------------------------------------ |
470 | | // Batch version of Predictor Transform subtraction |
471 | | |
472 | | static WEBP_INLINE void Average2_m256i(const __m256i* const a0, |
473 | | const __m256i* const a1, |
474 | 28.2M | __m256i* const avg) { |
475 | | // (a + b) >> 1 = ((a + b + 1) >> 1) - ((a ^ b) & 1) |
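 | | // _mm256_avg_epu8 rounds up ((a + b + 1) >> 1), so subtracting the carry bit |
 | | // ((a ^ b) & 1) yields the rounded-down per-byte average without 8-bit overflow. |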
476 | 28.2M | const __m256i ones = _mm256_set1_epi8(1); |
477 | 28.2M | const __m256i avg1 = _mm256_avg_epu8(*a0, *a1); |
478 | 28.2M | const __m256i one = _mm256_and_si256(_mm256_xor_si256(*a0, *a1), ones); |
479 | 28.2M | *avg = _mm256_sub_epi8(avg1, one); |
480 | 28.2M | } |
481 | | |
482 | | // Predictor0: ARGB_BLACK. |
483 | | static void PredictorSub0_AVX2(const uint32_t* in, const uint32_t* upper, |
484 | 5.19M | int num_pixels, uint32_t* WEBP_RESTRICT out) { |
485 | 5.19M | int i; |
486 | 5.19M | const __m256i black = _mm256_set1_epi32((int)ARGB_BLACK); |
487 | 8.37M | for (i = 0; i + 8 <= num_pixels; i += 8) { |
488 | 3.18M | const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]); |
489 | 3.18M | const __m256i res = _mm256_sub_epi8(src, black); |
490 | 3.18M | _mm256_storeu_si256((__m256i*)&out[i], res); |
491 | 3.18M | } |
492 | 5.19M | if (i != num_pixels) { |
493 | 3.41M | VP8LPredictorsSub_SSE[0](in + i, NULL, num_pixels - i, out + i); |
494 | 3.41M | } |
495 | 5.19M | (void)upper; |
496 | 5.19M | } |
497 | | |
498 | | #define GENERATE_PREDICTOR_1(X, IN) \ |
499 | | static void PredictorSub##X##_AVX2( \ |
500 | | const uint32_t* const in, const uint32_t* const upper, int num_pixels, \ |
501 | 35.3M | uint32_t* WEBP_RESTRICT const out) { \ |
502 | 35.3M | int i; \ |
503 | 50.1M | for (i = 0; i + 8 <= num_pixels; i += 8) { \ |
504 | 14.8M | const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]); \ |
505 | 14.8M | const __m256i pred = _mm256_loadu_si256((const __m256i*)&(IN)); \ |
506 | 14.8M | const __m256i res = _mm256_sub_epi8(src, pred); \ |
507 | 14.8M | _mm256_storeu_si256((__m256i*)&out[i], res); \ |
508 | 14.8M | } \ |
509 | 35.3M | if (i != num_pixels) { \ |
510 | 27.8M | VP8LPredictorsSub_SSE[(X)](in + i, WEBP_OFFSET_PTR(upper, i), \ |
511 | 27.8M | num_pixels - i, out + i); \ |
512 | 27.8M | } \ |
513 | 35.3M | }
 | | [per-instantiation coverage tables for PredictorSub1_AVX2 .. PredictorSub4_AVX2 omitted] |
514 | | |
515 | | GENERATE_PREDICTOR_1(1, in[i - 1]) // Predictor1: L |
516 | | GENERATE_PREDICTOR_1(2, upper[i]) // Predictor2: T |
517 | | GENERATE_PREDICTOR_1(3, upper[i + 1]) // Predictor3: TR |
518 | | GENERATE_PREDICTOR_1(4, upper[i - 1]) // Predictor4: TL |
519 | | #undef GENERATE_PREDICTOR_1 |
520 | | |
521 | | // Predictor5: avg2(avg2(L, TR), T) |
522 | | static void PredictorSub5_AVX2(const uint32_t* in, const uint32_t* upper, |
523 | 4.64M | int num_pixels, uint32_t* WEBP_RESTRICT out) { |
524 | 4.64M | int i; |
525 | 7.79M | for (i = 0; i + 8 <= num_pixels; i += 8) { |
526 | 3.15M | const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]); |
527 | 3.15M | const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]); |
528 | 3.15M | const __m256i TR = _mm256_loadu_si256((const __m256i*)&upper[i + 1]); |
529 | 3.15M | const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]); |
530 | 3.15M | __m256i avg, pred, res; |
531 | 3.15M | Average2_m256i(&L, &TR, &avg); |
532 | 3.15M | Average2_m256i(&avg, &T, &pred); |
533 | 3.15M | res = _mm256_sub_epi8(src, pred); |
534 | 3.15M | _mm256_storeu_si256((__m256i*)&out[i], res); |
535 | 3.15M | } |
536 | 4.64M | if (i != num_pixels) { |
537 | 3.00M | VP8LPredictorsSub_SSE[5](in + i, upper + i, num_pixels - i, out + i); |
538 | 3.00M | } |
539 | 4.64M | } |
540 | | |
541 | | #define GENERATE_PREDICTOR_2(X, A, B) \ |
542 | | static void PredictorSub##X##_AVX2(const uint32_t* in, \ |
543 | | const uint32_t* upper, int num_pixels, \ |
544 | 18.5M | uint32_t* WEBP_RESTRICT out) { \ |
545 | 18.5M | int i; \ |
546 | 31.0M | for (i = 0; i + 8 <= num_pixels; i += 8) { \ |
547 | 12.5M | const __m256i tA = _mm256_loadu_si256((const __m256i*)&(A)); \ |
548 | 12.5M | const __m256i tB = _mm256_loadu_si256((const __m256i*)&(B)); \ |
549 | 12.5M | const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]); \ |
550 | 12.5M | __m256i pred, res; \ |
551 | 12.5M | Average2_m256i(&tA, &tB, &pred); \ |
552 | 12.5M | res = _mm256_sub_epi8(src, pred); \ |
553 | 12.5M | _mm256_storeu_si256((__m256i*)&out[i], res); \ |
554 | 12.5M | } \ |
555 | 18.5M | if (i != num_pixels) { \ |
556 | 12.0M | VP8LPredictorsSub_SSE[(X)](in + i, upper + i, num_pixels - i, out + i); \ |
557 | 12.0M | } \ |
558 | 18.5M | }
 | | [per-instantiation coverage tables for PredictorSub6_AVX2 .. PredictorSub9_AVX2 omitted] |
559 | | |
560 | | GENERATE_PREDICTOR_2(6, in[i - 1], upper[i - 1]) // Predictor6: avg(L, TL) |
561 | | GENERATE_PREDICTOR_2(7, in[i - 1], upper[i]) // Predictor7: avg(L, T) |
562 | | GENERATE_PREDICTOR_2(8, upper[i - 1], upper[i]) // Predictor8: avg(TL, T) |
563 | | GENERATE_PREDICTOR_2(9, upper[i], upper[i + 1]) // Predictor9: average(T, TR) |
564 | | #undef GENERATE_PREDICTOR_2 |
565 | | |
566 | | // Predictor10: avg(avg(L,TL), avg(T, TR)). |
567 | | static void PredictorSub10_AVX2(const uint32_t* in, const uint32_t* upper, |
568 | 4.61M | int num_pixels, uint32_t* WEBP_RESTRICT out) { |
569 | 4.61M | int i; |
570 | 7.74M | for (i = 0; i + 8 <= num_pixels; i += 8) { |
571 | 3.12M | const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]); |
572 | 3.12M | const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]); |
573 | 3.12M | const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]); |
574 | 3.12M | const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]); |
575 | 3.12M | const __m256i TR = _mm256_loadu_si256((const __m256i*)&upper[i + 1]); |
576 | 3.12M | __m256i avgTTR, avgLTL, avg, res; |
577 | 3.12M | Average2_m256i(&T, &TR, &avgTTR); |
578 | 3.12M | Average2_m256i(&L, &TL, &avgLTL); |
579 | 3.12M | Average2_m256i(&avgTTR, &avgLTL, &avg); |
580 | 3.12M | res = _mm256_sub_epi8(src, avg); |
581 | 3.12M | _mm256_storeu_si256((__m256i*)&out[i], res); |
582 | 3.12M | } |
583 | 4.61M | if (i != num_pixels) { |
584 | 3.00M | VP8LPredictorsSub_SSE[10](in + i, upper + i, num_pixels - i, out + i); |
585 | 3.00M | } |
586 | 4.61M | } |
587 | | |
588 | | // Predictor11: select. |
589 | | static void GetSumAbsDiff32_AVX2(const __m256i* const A, const __m256i* const B, |
590 | 9.58M | __m256i* const out) { |
591 | | // We can unpack with any value on the upper 32 bits, provided it's the same |
592 | | // on both operands (so that their sum of abs diff is zero). Here we use *A. |
593 | 9.58M | const __m256i A_lo = _mm256_unpacklo_epi32(*A, *A); |
594 | 9.58M | const __m256i B_lo = _mm256_unpacklo_epi32(*B, *A); |
595 | 9.58M | const __m256i A_hi = _mm256_unpackhi_epi32(*A, *A); |
596 | 9.58M | const __m256i B_hi = _mm256_unpackhi_epi32(*B, *A); |
597 | 9.58M | const __m256i s_lo = _mm256_sad_epu8(A_lo, B_lo); |
598 | 9.58M | const __m256i s_hi = _mm256_sad_epu8(A_hi, B_hi); |
599 | 9.58M | *out = _mm256_packs_epi32(s_lo, s_hi); |
600 | 9.58M | } |
601 | | |
602 | | static void PredictorSub11_AVX2(const uint32_t* in, const uint32_t* upper, |
603 | 5.11M | int num_pixels, uint32_t* WEBP_RESTRICT out) { |
604 | 5.11M | int i; |
605 | 9.90M | for (i = 0; i + 8 <= num_pixels; i += 8) { |
606 | 4.79M | const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]); |
607 | 4.79M | const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]); |
608 | 4.79M | const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]); |
609 | 4.79M | const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]); |
610 | 4.79M | __m256i pa, pb; |
611 | 4.79M | GetSumAbsDiff32_AVX2(&T, &TL, &pa); // pa = sum |T-TL| |
612 | 4.79M | GetSumAbsDiff32_AVX2(&L, &TL, &pb); // pb = sum |L-TL| |
613 | 4.79M | { |
614 | 4.79M | const __m256i mask = _mm256_cmpgt_epi32(pb, pa); |
615 | 4.79M | const __m256i A = _mm256_and_si256(mask, L); |
616 | 4.79M | const __m256i B = _mm256_andnot_si256(mask, T); |
617 | 4.79M | const __m256i pred = _mm256_or_si256(A, B); // pred = (pb > pa) ? L : T
618 | 4.79M | const __m256i res = _mm256_sub_epi8(src, pred); |
619 | 4.79M | _mm256_storeu_si256((__m256i*)&out[i], res); |
620 | 4.79M | } |
621 | 4.79M | } |
622 | 5.11M | if (i != num_pixels) { |
623 | 3.16M | VP8LPredictorsSub_SSE[11](in + i, upper + i, num_pixels - i, out + i); |
624 | 3.16M | } |
625 | 5.11M | } |
626 | | |
627 | | // Predictor12: ClampedAddSubtractFull. |
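 | | // NOTE: per channel, pred = clamp(L + T - TL) to [0, 255] (packus does the clamping). |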
628 | | static void PredictorSub12_AVX2(const uint32_t* in, const uint32_t* upper, |
629 | 5.05M | int num_pixels, uint32_t* WEBP_RESTRICT out) { |
630 | 5.05M | int i; |
631 | 5.05M | const __m256i zero = _mm256_setzero_si256(); |
632 | 8.95M | for (i = 0; i + 8 <= num_pixels; i += 8) { |
633 | 3.90M | const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]); |
634 | 3.90M | const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]); |
635 | 3.90M | const __m256i L_lo = _mm256_unpacklo_epi8(L, zero); |
636 | 3.90M | const __m256i L_hi = _mm256_unpackhi_epi8(L, zero); |
637 | 3.90M | const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]); |
638 | 3.90M | const __m256i T_lo = _mm256_unpacklo_epi8(T, zero); |
639 | 3.90M | const __m256i T_hi = _mm256_unpackhi_epi8(T, zero); |
640 | 3.90M | const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]); |
641 | 3.90M | const __m256i TL_lo = _mm256_unpacklo_epi8(TL, zero); |
642 | 3.90M | const __m256i TL_hi = _mm256_unpackhi_epi8(TL, zero); |
643 | 3.90M | const __m256i diff_lo = _mm256_sub_epi16(T_lo, TL_lo); |
644 | 3.90M | const __m256i diff_hi = _mm256_sub_epi16(T_hi, TL_hi); |
645 | 3.90M | const __m256i pred_lo = _mm256_add_epi16(L_lo, diff_lo); |
646 | 3.90M | const __m256i pred_hi = _mm256_add_epi16(L_hi, diff_hi); |
647 | 3.90M | const __m256i pred = _mm256_packus_epi16(pred_lo, pred_hi); |
648 | 3.90M | const __m256i res = _mm256_sub_epi8(src, pred); |
649 | 3.90M | _mm256_storeu_si256((__m256i*)&out[i], res); |
650 | 3.90M | } |
651 | 5.05M | if (i != num_pixels) { |
652 | 3.08M | VP8LPredictorsSub_SSE[12](in + i, upper + i, num_pixels - i, out + i); |
653 | 3.08M | } |
654 | 5.05M | } |
655 | | |
656 | | // Predictor13: ClampedAddSubtractHalf |
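 | | // NOTE: per channel, pred = clamp(avg2(L, T) + (avg2(L, T) - TL) / 2), with the |
 | | // division rounding toward zero (the bit_fix terms below). |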
657 | | static void PredictorSub13_AVX2(const uint32_t* in, const uint32_t* upper, |
658 | 4.61M | int num_pixels, uint32_t* WEBP_RESTRICT out) { |
659 | 4.61M | int i; |
660 | 4.61M | const __m256i zero = _mm256_setzero_si256(); |
661 | 7.73M | for (i = 0; i + 8 <= num_pixels; i += 8) { |
662 | 3.12M | const __m256i L = _mm256_loadu_si256((const __m256i*)&in[i - 1]); |
663 | 3.12M | const __m256i src = _mm256_loadu_si256((const __m256i*)&in[i]); |
664 | 3.12M | const __m256i T = _mm256_loadu_si256((const __m256i*)&upper[i]); |
665 | 3.12M | const __m256i TL = _mm256_loadu_si256((const __m256i*)&upper[i - 1]); |
666 | | // lo. |
667 | 3.12M | const __m256i L_lo = _mm256_unpacklo_epi8(L, zero); |
668 | 3.12M | const __m256i T_lo = _mm256_unpacklo_epi8(T, zero); |
669 | 3.12M | const __m256i TL_lo = _mm256_unpacklo_epi8(TL, zero); |
670 | 3.12M | const __m256i sum_lo = _mm256_add_epi16(T_lo, L_lo); |
671 | 3.12M | const __m256i avg_lo = _mm256_srli_epi16(sum_lo, 1); |
672 | 3.12M | const __m256i A1_lo = _mm256_sub_epi16(avg_lo, TL_lo); |
673 | 3.12M | const __m256i bit_fix_lo = _mm256_cmpgt_epi16(TL_lo, avg_lo); |
674 | 3.12M | const __m256i A2_lo = _mm256_sub_epi16(A1_lo, bit_fix_lo); |
675 | 3.12M | const __m256i A3_lo = _mm256_srai_epi16(A2_lo, 1); |
676 | 3.12M | const __m256i A4_lo = _mm256_add_epi16(avg_lo, A3_lo); |
677 | | // hi. |
678 | 3.12M | const __m256i L_hi = _mm256_unpackhi_epi8(L, zero); |
679 | 3.12M | const __m256i T_hi = _mm256_unpackhi_epi8(T, zero); |
680 | 3.12M | const __m256i TL_hi = _mm256_unpackhi_epi8(TL, zero); |
681 | 3.12M | const __m256i sum_hi = _mm256_add_epi16(T_hi, L_hi); |
682 | 3.12M | const __m256i avg_hi = _mm256_srli_epi16(sum_hi, 1); |
683 | 3.12M | const __m256i A1_hi = _mm256_sub_epi16(avg_hi, TL_hi); |
684 | 3.12M | const __m256i bit_fix_hi = _mm256_cmpgt_epi16(TL_hi, avg_hi); |
685 | 3.12M | const __m256i A2_hi = _mm256_sub_epi16(A1_hi, bit_fix_hi); |
686 | 3.12M | const __m256i A3_hi = _mm256_srai_epi16(A2_hi, 1); |
687 | 3.12M | const __m256i A4_hi = _mm256_add_epi16(avg_hi, A3_hi); |
688 | | |
689 | 3.12M | const __m256i pred = _mm256_packus_epi16(A4_lo, A4_hi); |
690 | 3.12M | const __m256i res = _mm256_sub_epi8(src, pred); |
691 | 3.12M | _mm256_storeu_si256((__m256i*)&out[i], res); |
692 | 3.12M | } |
693 | 4.61M | if (i != num_pixels) { |
694 | 3.01M | VP8LPredictorsSub_SSE[13](in + i, upper + i, num_pixels - i, out + i); |
695 | 3.01M | } |
696 | 4.61M | } |
697 | | |
698 | | //------------------------------------------------------------------------------ |
699 | | // Entry point |
700 | | |
701 | | extern void VP8LEncDspInitAVX2(void); |
702 | | |
703 | 5.32k | WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitAVX2(void) { |
704 | 5.32k | VP8LSubtractGreenFromBlueAndRed = SubtractGreenFromBlueAndRed_AVX2; |
705 | 5.32k | VP8LTransformColor = TransformColor_AVX2; |
706 | 5.32k | VP8LCollectColorBlueTransforms = CollectColorBlueTransforms_AVX2; |
707 | 5.32k | VP8LCollectColorRedTransforms = CollectColorRedTransforms_AVX2; |
708 | 5.32k | VP8LAddVector = AddVector_AVX2; |
709 | 5.32k | VP8LAddVectorEq = AddVectorEq_AVX2; |
710 | 5.32k | VP8LCombinedShannonEntropy = CombinedShannonEntropy_AVX2; |
711 | 5.32k | VP8LVectorMismatch = VectorMismatch_AVX2; |
712 | 5.32k | VP8LBundleColorMap = BundleColorMap_AVX2; |
713 | | |
714 | 5.32k | VP8LPredictorsSub[0] = PredictorSub0_AVX2; |
715 | 5.32k | VP8LPredictorsSub[1] = PredictorSub1_AVX2; |
716 | 5.32k | VP8LPredictorsSub[2] = PredictorSub2_AVX2; |
717 | 5.32k | VP8LPredictorsSub[3] = PredictorSub3_AVX2; |
718 | 5.32k | VP8LPredictorsSub[4] = PredictorSub4_AVX2; |
719 | 5.32k | VP8LPredictorsSub[5] = PredictorSub5_AVX2; |
720 | 5.32k | VP8LPredictorsSub[6] = PredictorSub6_AVX2; |
721 | 5.32k | VP8LPredictorsSub[7] = PredictorSub7_AVX2; |
722 | 5.32k | VP8LPredictorsSub[8] = PredictorSub8_AVX2; |
723 | 5.32k | VP8LPredictorsSub[9] = PredictorSub9_AVX2; |
724 | 5.32k | VP8LPredictorsSub[10] = PredictorSub10_AVX2; |
725 | 5.32k | VP8LPredictorsSub[11] = PredictorSub11_AVX2; |
726 | 5.32k | VP8LPredictorsSub[12] = PredictorSub12_AVX2; |
727 | 5.32k | VP8LPredictorsSub[13] = PredictorSub13_AVX2; |
728 | 5.32k | VP8LPredictorsSub[14] = PredictorSub0_AVX2; // <- padding security sentinels |
729 | 5.32k | VP8LPredictorsSub[15] = PredictorSub0_AVX2; |
730 | 5.32k | } |
731 | | |
732 | | #else // !WEBP_USE_AVX2 |
733 | | |
734 | | WEBP_DSP_INIT_STUB(VP8LEncDspInitAVX2) |
735 | | |
736 | | #endif // WEBP_USE_AVX2 |