/src/libwebp/src/dsp/rescaler_sse2.c
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright 2015 Google Inc. All Rights Reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style license |
4 | | // that can be found in the COPYING file in the root of the source |
5 | | // tree. An additional intellectual property rights grant can be found |
6 | | // in the file PATENTS. All contributing project authors may |
7 | | // be found in the AUTHORS file in the root of the source tree. |
8 | | // ----------------------------------------------------------------------------- |
9 | | // |
10 | | // SSE2 Rescaling functions |
11 | | // |
12 | | // Author: Skal (pascal.massimino@gmail.com) |
13 | | |
14 | | #include "src/dsp/dsp.h" |
15 | | |
16 | | #if defined(WEBP_USE_SSE2) && !defined(WEBP_REDUCE_SIZE) |
17 | | #include <emmintrin.h> |
18 | | |
19 | | #include <assert.h> |
20 | | #include "src/utils/rescaler_utils.h" |
21 | | #include "src/utils/utils.h" |
22 | | |
23 | | //------------------------------------------------------------------------------ |
24 | | // Implementations of critical functions ImportRow / ExportRow |
25 | | |
// Fixed-point helpers: multiply and shift down by WEBP_RESCALER_RFIX bits.
// MULT_FIX adds half a unit (ROUNDER) first, i.e. rounds to nearest;
// MULT_FIX_FLOOR truncates. Both widen to 64 bits before the shift so the
// intermediate product cannot overflow.
26 | 0 | #define ROUNDER (WEBP_RESCALER_ONE >> 1)
27 | 0 | #define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
28 | 0 | #define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
29 | | |
30 | | // input: 8 bytes ABCDEFGH -> output: A0E0B0F0C0G0D0H0 |
31 | 0 | static void LoadTwoPixels_SSE2(const uint8_t* const src, __m128i* out) { |
32 | 0 | const __m128i zero = _mm_setzero_si128(); |
33 | 0 | const __m128i A = _mm_loadl_epi64((const __m128i*)(src)); // ABCDEFGH |
34 | 0 | const __m128i B = _mm_unpacklo_epi8(A, zero); // A0B0C0D0E0F0G0H0 |
35 | 0 | const __m128i C = _mm_srli_si128(B, 8); // E0F0G0H0 |
36 | 0 | *out = _mm_unpacklo_epi16(B, C); |
37 | 0 | } |
38 | | |
39 | | // input: 8 bytes ABCDEFGH -> output: A0B0C0D0E0F0G0H0 |
40 | 0 | static void LoadEightPixels_SSE2(const uint8_t* const src, __m128i* out) { |
41 | 0 | const __m128i zero = _mm_setzero_si128(); |
42 | 0 | const __m128i A = _mm_loadl_epi64((const __m128i*)(src)); // ABCDEFGH |
43 | 0 | *out = _mm_unpacklo_epi8(A, zero); |
44 | 0 | } |
45 | | |
// Horizontal upscale of one source row into the fixed-point work row
// wrk->frow. Each output is a linear blend of two adjacent source pixels:
// out = cur * accum + next * (x_add - accum), with 'accum' stepped down by
// x_sub and wrapped by x_add as the source pointer advances.
46 | | static void RescalerImportRowExpand_SSE2(WebPRescaler* const wrk,
47 | 0 | const uint8_t* src) {
48 | 0 | rescaler_t* frow = wrk->frow;
49 | 0 | const rescaler_t* const frow_end = frow + wrk->dst_width * wrk->num_channels;
50 | 0 | const int x_add = wrk->x_add;
51 | 0 | int accum = x_add;
52 | 0 | __m128i cur_pixels;
53 | |
54 | | // SSE2 implementation only works with 16b signed arithmetic at max.
// Fall back to the C version when the row is too short for the vector
// tail handling (< 8 pixels) or when the weights would not fit the signed
// 16-bit lanes consumed by _mm_madd_epi16.
55 | 0 | if (wrk->src_width < 8 || accum >= (1 << 15)) {
56 | 0 | WebPRescalerImportRowExpand_C(wrk, src);
57 | 0 | return;
58 | 0 | }
59 | |
60 | 0 | assert(!WebPRescalerInputDone(wrk));
61 | 0 | assert(wrk->x_expand);
62 | 0 | if (wrk->num_channels == 4) {
// 4-channel path: LoadTwoPixels interleaves the two source pixels per
// channel as (A,E)(B,F)(C,G)(D,H), so one madd with the packed weight
// pair (accum, x_add - accum) emits all four channels of one output
// pixel at once.
63 | 0 | LoadTwoPixels_SSE2(src, &cur_pixels);
64 | 0 | src += 4;
65 | 0 | while (1) {
66 | 0 | const __m128i mult = _mm_set1_epi32(((x_add - accum) << 16) | accum);
67 | 0 | const __m128i out = _mm_madd_epi16(cur_pixels, mult);
68 | 0 | _mm_storeu_si128((__m128i*)frow, out);
69 | 0 | frow += 4;
70 | 0 | if (frow >= frow_end) break;
71 | 0 | accum -= wrk->x_sub;
72 | 0 | if (accum < 0) {
// Accumulator wrapped: advance to the next source pixel pair.
73 | 0 | LoadTwoPixels_SSE2(src, &cur_pixels);
74 | 0 | src += 4;
75 | 0 | accum += x_add;
76 | 0 | }
77 | 0 | }
78 | 0 | } else {
// Single-channel path: keep up to 8 source pixels buffered in
// 'cur_pixels' (16-bit lanes); lanes 0 and 1 hold the (cur, next)
// interpolation pair. 'src += 7' (not 8) keeps a one-pixel overlap
// so the last lane becomes lane 0 of the next batch.
79 | 0 | int left;
80 | 0 | const uint8_t* const src_limit = src + wrk->src_width - 8;
81 | 0 | LoadEightPixels_SSE2(src, &cur_pixels);
82 | 0 | src += 7;
83 | 0 | left = 7;
84 | 0 | while (1) {
85 | 0 | const __m128i mult = _mm_cvtsi32_si128(((x_add - accum) << 16) | accum);
86 | 0 | const __m128i out = _mm_madd_epi16(cur_pixels, mult);
87 | 0 | assert(sizeof(*frow) == sizeof(uint32_t));
88 | 0 | WebPInt32ToMem((uint8_t*)frow, _mm_cvtsi128_si32(out));
89 | 0 | frow += 1;
90 | 0 | if (frow >= frow_end) break;
91 | 0 | accum -= wrk->x_sub;
92 | 0 | if (accum < 0) {
93 | 0 | if (--left) {
// Still have buffered pixels: slide one 16-bit lane down.
94 | 0 | cur_pixels = _mm_srli_si128(cur_pixels, 2);
95 | 0 | } else if (src <= src_limit) {
96 | 0 | LoadEightPixels_SSE2(src, &cur_pixels);
97 | 0 | src += 7;
98 | 0 | left = 7;
99 | 0 | } else { // tail
// Fewer than 8 pixels remain: feed them in one byte at a time.
100 | 0 | cur_pixels = _mm_srli_si128(cur_pixels, 2);
101 | 0 | cur_pixels = _mm_insert_epi16(cur_pixels, src[1], 1);
102 | 0 | src += 1;
103 | 0 | left = 1;
104 | 0 | }
105 | 0 | accum += x_add;
106 | 0 | }
107 | 0 | }
108 | 0 | }
// The expansion schedule consumes the row exactly: accum must land on 0.
109 | 0 | assert(accum == 0);
110 | 0 | }
111 | | |
// Horizontal downscale of one 4-channel source row into wrk->frow.
// For each output pixel it sums the contributing source pixels (all four
// channels in parallel, 16-bit lanes) and splits the boundary pixel
// fractionally between this output and the next one.
112 | | static void RescalerImportRowShrink_SSE2(WebPRescaler* const wrk,
113 | 0 | const uint8_t* src) {
114 | 0 | const int x_sub = wrk->x_sub;
115 | 0 | int accum = 0;
116 | 0 | const __m128i zero = _mm_setzero_si128();
117 | 0 | const __m128i mult0 = _mm_set1_epi16(x_sub);
118 | 0 | const __m128i mult1 = _mm_set1_epi32(wrk->fx_scale);
119 | 0 | const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
120 | 0 | __m128i sum = zero;
121 | 0 | rescaler_t* frow = wrk->frow;
122 | 0 | const rescaler_t* const frow_end = wrk->frow + 4 * wrk->dst_width;
123 |
|
// Only the 4-channel layout is vectorized here, and the reduction ratio
// must stay above 1/128 so the 16-bit accumulator cannot overflow (see
// the comment inside the loop). Otherwise defer to the C version.
124 | 0 | if (wrk->num_channels != 4 || wrk->x_add > (x_sub << 7)) {
125 | 0 | WebPRescalerImportRowShrink_C(wrk, src);
126 | 0 | return;
127 | 0 | }
128 | 0 | assert(!WebPRescalerInputDone(wrk));
129 | 0 | assert(!wrk->x_expand);
130 | |
131 | 0 | for (; frow < frow_end; frow += 4) {
132 | 0 | __m128i base = zero;
133 | 0 | accum += wrk->x_add;
// Accumulate whole source pixels until the position counter crosses the
// next output boundary. 'base' keeps the last (boundary) pixel.
134 | 0 | while (accum > 0) {
135 | 0 | const __m128i A = _mm_cvtsi32_si128(WebPMemToInt32(src));
136 | 0 | src += 4;
137 | 0 | base = _mm_unpacklo_epi8(A, zero);
138 | | // To avoid overflow, we need: base * x_add / x_sub < 32768
139 | | // => x_add < x_sub << 7. That's a 1/128 reduction ratio limit.
140 | 0 | sum = _mm_add_epi16(sum, base);
141 | 0 | accum -= x_sub;
142 | 0 | }
143 | 0 | { // Emit next horizontal pixel.
// frac = base * (-accum) is the part of the boundary pixel that
// belongs to the NEXT output; the emitted value is sum*x_sub - frac.
// The mullo/mulhi + unpack pairs widen 16x16 products to 32 bits.
144 | 0 | const __m128i mult = _mm_set1_epi16(-accum);
145 | 0 | const __m128i frac0 = _mm_mullo_epi16(base, mult); // 16b x 16b -> 32b
146 | 0 | const __m128i frac1 = _mm_mulhi_epu16(base, mult);
147 | 0 | const __m128i frac = _mm_unpacklo_epi16(frac0, frac1); // frac is 32b
148 | 0 | const __m128i A0 = _mm_mullo_epi16(sum, mult0);
149 | 0 | const __m128i A1 = _mm_mulhi_epu16(sum, mult0);
150 | 0 | const __m128i B0 = _mm_unpacklo_epi16(A0, A1); // sum * x_sub
151 | 0 | const __m128i frow_out = _mm_sub_epi32(B0, frac); // sum * x_sub - frac
// Carry sum' = MULT_FIX(frac, fx_scale) into the next output's
// accumulator: 64-bit products + ROUNDER, then the shuffle picks
// dwords 1 and 3, i.e. the >> 32 that implements the RFIX shift
// (relies on WEBP_RESCALER_RFIX being 32 — TODO(review): confirm).
152 | 0 | const __m128i D0 = _mm_srli_epi64(frac, 32);
153 | 0 | const __m128i D1 = _mm_mul_epu32(frac, mult1); // 32b x 16b -> 64b
154 | 0 | const __m128i D2 = _mm_mul_epu32(D0, mult1);
155 | 0 | const __m128i E1 = _mm_add_epi64(D1, rounder);
156 | 0 | const __m128i E2 = _mm_add_epi64(D2, rounder);
157 | 0 | const __m128i F1 = _mm_shuffle_epi32(E1, 1 | (3 << 2));
158 | 0 | const __m128i F2 = _mm_shuffle_epi32(E2, 1 | (3 << 2));
159 | 0 | const __m128i G = _mm_unpacklo_epi32(F1, F2);
160 | 0 | sum = _mm_packs_epi32(G, zero);
161 | 0 | _mm_storeu_si128((__m128i*)frow, frow_out);
162 | 0 | }
163 | 0 | }
// The shrink schedule consumes the row exactly: accum must land on 0.
164 | 0 | assert(accum == 0);
165 | 0 | }
166 | | |
167 | | //------------------------------------------------------------------------------ |
168 | | // Row export |
169 | | |
170 | | // load *src as epi64, multiply by mult and store result in [out0 ... out3] |
// Loads 8 consecutive 32-bit values from 'src' and redistributes them into
// four vectors with one value per 64-bit slot: out0/out1 get the even-indexed
// values (already in the low dword of each 64-bit lane), out2/out3 get the
// odd-indexed ones (shifted down by 32). This layout feeds _mm_mul_epu32,
// which multiplies only the low dword of each 64-bit lane. When 'mult' is
// non-NULL the 32x32->64-bit products are returned instead of the raw values.
171 | | static WEBP_INLINE void LoadDispatchAndMult_SSE2(const rescaler_t* const src,
172 | | const __m128i* const mult,
173 | | __m128i* const out0,
174 | | __m128i* const out1,
175 | | __m128i* const out2,
176 | 0 | __m128i* const out3) {
177 | 0 | const __m128i A0 = _mm_loadu_si128((const __m128i*)(src + 0));
178 | 0 | const __m128i A1 = _mm_loadu_si128((const __m128i*)(src + 4));
179 | 0 | const __m128i A2 = _mm_srli_epi64(A0, 32);
180 | 0 | const __m128i A3 = _mm_srli_epi64(A1, 32);
181 | 0 | if (mult != NULL) {
182 | 0 | *out0 = _mm_mul_epu32(A0, *mult);
183 | 0 | *out1 = _mm_mul_epu32(A1, *mult);
184 | 0 | *out2 = _mm_mul_epu32(A2, *mult);
185 | 0 | *out3 = _mm_mul_epu32(A3, *mult);
186 | 0 | } else {
187 | 0 | *out0 = A0;
188 | 0 | *out1 = A1;
189 | 0 | *out2 = A2;
190 | 0 | *out3 = A3;
191 | 0 | }
192 | 0 | }
193 | | |
// Final export stage for 8 values spread across A0..A3 (LoadDispatchAndMult
// layout): multiplies each by 'mult' with round-to-nearest (>> RFIX), merges
// the even (A0/A1) and odd (A2/A3) results back into interleaved order, and
// stores 8 bytes at 'dst' with unsigned saturation.
194 | | static WEBP_INLINE void ProcessRow_SSE2(const __m128i* const A0,
195 | | const __m128i* const A1,
196 | | const __m128i* const A2,
197 | | const __m128i* const A3,
198 | | const __m128i* const mult,
199 | 0 | uint8_t* const dst) {
200 | 0 | const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
// 'mask' keeps only the upper dword of each 64-bit lane: the odd results
// are positioned there so a simple OR interleaves them with the even ones.
201 | 0 | const __m128i mask = _mm_set_epi32(~0, 0, ~0, 0);
202 | 0 | const __m128i B0 = _mm_mul_epu32(*A0, *mult);
203 | 0 | const __m128i B1 = _mm_mul_epu32(*A1, *mult);
204 | 0 | const __m128i B2 = _mm_mul_epu32(*A2, *mult);
205 | 0 | const __m128i B3 = _mm_mul_epu32(*A3, *mult);
206 | 0 | const __m128i C0 = _mm_add_epi64(B0, rounder);
207 | 0 | const __m128i C1 = _mm_add_epi64(B1, rounder);
208 | 0 | const __m128i C2 = _mm_add_epi64(B2, rounder);
209 | 0 | const __m128i C3 = _mm_add_epi64(B3, rounder);
// Even results: shift right into the low dword of each 64-bit lane.
210 | 0 | const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX);
211 | 0 | const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX);
// Odd results: place the post-shift value in the UPPER dword instead.
// For RFIX < 32 that is a left shift by (32 - RFIX) plus masking; for
// RFIX == 32 the value is already there and only needs masking.
212 | | #if (WEBP_RESCALER_RFIX < 32)
213 | | const __m128i D2 =
214 | | _mm_and_si128(_mm_slli_epi64(C2, 32 - WEBP_RESCALER_RFIX), mask);
215 | | const __m128i D3 =
216 | | _mm_and_si128(_mm_slli_epi64(C3, 32 - WEBP_RESCALER_RFIX), mask);
217 | | #else
218 | 0 | const __m128i D2 = _mm_and_si128(C2, mask);
219 | 0 | const __m128i D3 = _mm_and_si128(C3, mask);
220 | 0 | #endif
// Interleave even/odd, then pack 32->16 (signed) and 16->8 (unsigned
// saturation, clamping to [0, 255]) before the 8-byte store.
221 | 0 | const __m128i E0 = _mm_or_si128(D0, D2);
222 | 0 | const __m128i E1 = _mm_or_si128(D1, D3);
223 | 0 | const __m128i F = _mm_packs_epi32(E0, E1);
224 | 0 | const __m128i G = _mm_packus_epi16(F, F);
225 | 0 | _mm_storel_epi64((__m128i*)dst, G);
226 | 0 | }
227 | | |
// Vertical-upscale export: writes one destination row of 8-bit samples.
// When y_accum == 0 the output is frow scaled by fy_scale; otherwise frow
// and irow are blended with fixed-point weights A and B (B is the fraction
// of y_sub consumed) before scaling. SIMD handles 8 samples per iteration;
// the scalar loops finish the remainder with the same arithmetic.
228 | 0 | static void RescalerExportRowExpand_SSE2(WebPRescaler* const wrk) {
229 | 0 | int x_out;
230 | 0 | uint8_t* const dst = wrk->dst;
231 | 0 | rescaler_t* const irow = wrk->irow;
232 | 0 | const int x_out_max = wrk->dst_width * wrk->num_channels;
233 | 0 | const rescaler_t* const frow = wrk->frow;
234 | 0 | const __m128i mult = _mm_set_epi32(0, wrk->fy_scale, 0, wrk->fy_scale);
235 |
|
236 | 0 | assert(!WebPRescalerOutputDone(wrk));
237 | 0 | assert(wrk->y_accum <= 0 && wrk->y_sub + wrk->y_accum >= 0);
238 | 0 | assert(wrk->y_expand);
239 | 0 | if (wrk->y_accum == 0) {
// Row falls exactly on a source row: dst = MULT_FIX(frow, fy_scale).
240 | 0 | for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
241 | 0 | __m128i A0, A1, A2, A3;
242 | 0 | LoadDispatchAndMult_SSE2(frow + x_out, NULL, &A0, &A1, &A2, &A3);
243 | 0 | ProcessRow_SSE2(&A0, &A1, &A2, &A3, &mult, dst + x_out);
244 | 0 | }
245 | 0 | for (; x_out < x_out_max; ++x_out) {
246 | 0 | const uint32_t J = frow[x_out];
247 | 0 | const int v = (int)MULT_FIX(J, wrk->fy_scale);
248 | 0 | dst[x_out] = (v > 255) ? 255u : (uint8_t)v;
249 | 0 | }
250 | 0 | } else {
// Blend: dst = MULT_FIX((A*frow + B*irow + ROUNDER) >> RFIX, fy_scale),
// with A + B == WEBP_RESCALER_ONE.
251 | 0 | const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub);
252 | 0 | const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B);
253 | 0 | const __m128i mA = _mm_set_epi32(0, A, 0, A);
254 | 0 | const __m128i mB = _mm_set_epi32(0, B, 0, B);
255 | 0 | const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
256 | 0 | for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
257 | 0 | __m128i A0, A1, A2, A3, B0, B1, B2, B3;
258 | 0 | LoadDispatchAndMult_SSE2(frow + x_out, &mA, &A0, &A1, &A2, &A3);
259 | 0 | LoadDispatchAndMult_SSE2(irow + x_out, &mB, &B0, &B1, &B2, &B3);
260 | 0 | {
261 | 0 | const __m128i C0 = _mm_add_epi64(A0, B0);
262 | 0 | const __m128i C1 = _mm_add_epi64(A1, B1);
263 | 0 | const __m128i C2 = _mm_add_epi64(A2, B2);
264 | 0 | const __m128i C3 = _mm_add_epi64(A3, B3);
265 | 0 | const __m128i D0 = _mm_add_epi64(C0, rounder);
266 | 0 | const __m128i D1 = _mm_add_epi64(C1, rounder);
267 | 0 | const __m128i D2 = _mm_add_epi64(C2, rounder);
268 | 0 | const __m128i D3 = _mm_add_epi64(C3, rounder);
269 | 0 | const __m128i E0 = _mm_srli_epi64(D0, WEBP_RESCALER_RFIX);
270 | 0 | const __m128i E1 = _mm_srli_epi64(D1, WEBP_RESCALER_RFIX);
271 | 0 | const __m128i E2 = _mm_srli_epi64(D2, WEBP_RESCALER_RFIX);
272 | 0 | const __m128i E3 = _mm_srli_epi64(D3, WEBP_RESCALER_RFIX);
273 | 0 | ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult, dst + x_out);
274 | 0 | }
275 | 0 | }
276 | 0 | for (; x_out < x_out_max; ++x_out) {
277 | 0 | const uint64_t I = (uint64_t)A * frow[x_out]
278 | 0 | + (uint64_t)B * irow[x_out];
279 | 0 | const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX);
280 | 0 | const int v = (int)MULT_FIX(J, wrk->fy_scale);
281 | 0 | dst[x_out] = (v > 255) ? 255u : (uint8_t)v;
282 | 0 | }
283 | 0 | }
284 | 0 | }
285 | | |
// Vertical-downscale export: emits one destination row from the running
// accumulator irow. When 'yscale' is non-zero the current source row frow
// straddles a destination boundary: frac = MULT_FIX_FLOOR(frow, yscale) is
// the portion belonging to the NEXT row, so dst gets (irow - frac) scaled
// by fxy_scale and irow is reseeded with frac. Otherwise the accumulator is
// fully consumed: dst = MULT_FIX(irow, fxy_scale) and irow is cleared.
286 | 0 | static void RescalerExportRowShrink_SSE2(WebPRescaler* const wrk) {
287 | 0 | int x_out;
288 | 0 | uint8_t* const dst = wrk->dst;
289 | 0 | rescaler_t* const irow = wrk->irow;
290 | 0 | const int x_out_max = wrk->dst_width * wrk->num_channels;
291 | 0 | const rescaler_t* const frow = wrk->frow;
292 | 0 | const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum);
293 | 0 | assert(!WebPRescalerOutputDone(wrk));
294 | 0 | assert(wrk->y_accum <= 0);
295 | 0 | assert(!wrk->y_expand);
296 | 0 | if (yscale) {
297 | 0 | const int scale_xy = wrk->fxy_scale;
298 | 0 | const __m128i mult_xy = _mm_set_epi32(0, scale_xy, 0, scale_xy);
299 | 0 | const __m128i mult_y = _mm_set_epi32(0, yscale, 0, yscale);
300 | 0 | for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
301 | 0 | __m128i A0, A1, A2, A3, B0, B1, B2, B3;
302 | 0 | LoadDispatchAndMult_SSE2(irow + x_out, NULL, &A0, &A1, &A2, &A3);
303 | 0 | LoadDispatchAndMult_SSE2(frow + x_out, &mult_y, &B0, &B1, &B2, &B3);
304 | 0 | {
// No rounder before the shift: this is the floor version, matching
// MULT_FIX_FLOOR in the scalar tail below.
305 | 0 | const __m128i D0 = _mm_srli_epi64(B0, WEBP_RESCALER_RFIX); // = frac
306 | 0 | const __m128i D1 = _mm_srli_epi64(B1, WEBP_RESCALER_RFIX);
307 | 0 | const __m128i D2 = _mm_srli_epi64(B2, WEBP_RESCALER_RFIX);
308 | 0 | const __m128i D3 = _mm_srli_epi64(B3, WEBP_RESCALER_RFIX);
309 | 0 | const __m128i E0 = _mm_sub_epi64(A0, D0); // irow[x] - frac
310 | 0 | const __m128i E1 = _mm_sub_epi64(A1, D1);
311 | 0 | const __m128i E2 = _mm_sub_epi64(A2, D2);
312 | 0 | const __m128i E3 = _mm_sub_epi64(A3, D3);
// Re-interleave the even (D0/D1) and odd (D2/D3, moved to the
// upper dwords) fracs and store them back as the new irow seed.
313 | 0 | const __m128i F2 = _mm_slli_epi64(D2, 32);
314 | 0 | const __m128i F3 = _mm_slli_epi64(D3, 32);
315 | 0 | const __m128i G0 = _mm_or_si128(D0, F2);
316 | 0 | const __m128i G1 = _mm_or_si128(D1, F3);
317 | 0 | _mm_storeu_si128((__m128i*)(irow + x_out + 0), G0);
318 | 0 | _mm_storeu_si128((__m128i*)(irow + x_out + 4), G1);
319 | 0 | ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
320 | 0 | }
321 | 0 | }
322 | 0 | for (; x_out < x_out_max; ++x_out) {
323 | 0 | const uint32_t frac = (int)MULT_FIX_FLOOR(frow[x_out], yscale);
324 | 0 | const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
325 | 0 | dst[x_out] = (v > 255) ? 255u : (uint8_t)v;
326 | 0 | irow[x_out] = frac; // new fractional start
327 | 0 | }
328 | 0 | } else {
329 | 0 | const uint32_t scale = wrk->fxy_scale;
330 | 0 | const __m128i mult = _mm_set_epi32(0, scale, 0, scale);
331 | 0 | const __m128i zero = _mm_setzero_si128();
332 | 0 | for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
333 | 0 | __m128i A0, A1, A2, A3;
334 | 0 | LoadDispatchAndMult_SSE2(irow + x_out, NULL, &A0, &A1, &A2, &A3);
335 | 0 | _mm_storeu_si128((__m128i*)(irow + x_out + 0), zero);
336 | 0 | _mm_storeu_si128((__m128i*)(irow + x_out + 4), zero);
337 | 0 | ProcessRow_SSE2(&A0, &A1, &A2, &A3, &mult, dst + x_out);
338 | 0 | }
339 | 0 | for (; x_out < x_out_max; ++x_out) {
340 | 0 | const int v = (int)MULT_FIX(irow[x_out], scale);
341 | 0 | dst[x_out] = (v > 255) ? 255u : (uint8_t)v;
342 | 0 | irow[x_out] = 0;
343 | 0 | }
344 | 0 | }
345 | 0 | }
346 | | |
347 | | #undef MULT_FIX_FLOOR |
348 | | #undef MULT_FIX |
349 | | #undef ROUNDER |
350 | | |
351 | | //------------------------------------------------------------------------------ |
352 | | |
353 | | extern void WebPRescalerDspInitSSE2(void); |
354 | | |
// Installs the SSE2 implementations into the shared rescaler function
// pointers, replacing whatever variants were previously registered.
355 | 0 | WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitSSE2(void) {
356 | 0 | WebPRescalerImportRowExpand = RescalerImportRowExpand_SSE2;
357 | 0 | WebPRescalerImportRowShrink = RescalerImportRowShrink_SSE2;
358 | 0 | WebPRescalerExportRowExpand = RescalerExportRowExpand_SSE2;
359 | 0 | WebPRescalerExportRowShrink = RescalerExportRowShrink_SSE2;
360 | 0 | }
361 | | |
362 | | #else // !WEBP_USE_SSE2 |
363 | | |
364 | | WEBP_DSP_INIT_STUB(WebPRescalerDspInitSSE2) |
365 | | |
366 | | #endif // WEBP_USE_SSE2 |