/src/libwebp/src/dsp/rescaler_sse2.c
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 Rescaling functions
//
// Author: Skal (pascal.massimino@gmail.com)

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE2) && !defined(WEBP_REDUCE_SIZE)
#include <emmintrin.h>

#include <assert.h>
#include <stddef.h>

#include "src/dsp/cpu.h"
#include "src/utils/rescaler_utils.h"
#include "src/utils/utils.h"
#include "src/webp/types.h"

//------------------------------------------------------------------------------
// Implementations of critical functions ImportRow / ExportRow

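// MULT_FIX(x, y) computes (x * y) in WEBP_RESCALER_RFIX fixed point, rounded
// to nearest; MULT_FIX_FLOOR is the same product truncated instead of rounded.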
#define ROUNDER (WEBP_RESCALER_ONE >> 1)
#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)

// input: 8 bytes ABCDEFGH -> output: A0E0B0F0C0G0D0H0
static void LoadTwoPixels_SSE2(const uint8_t* const src, __m128i* out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i A = _mm_loadl_epi64((const __m128i*)(src));  // ABCDEFGH
  const __m128i B = _mm_unpacklo_epi8(A, zero);              // A0B0C0D0E0F0G0H0
  const __m128i C = _mm_srli_si128(B, 8);                    // E0F0G0H0
  *out = _mm_unpacklo_epi16(B, C);
}

// input: 8 bytes ABCDEFGH -> output: A0B0C0D0E0F0G0H0
static void LoadEightPixels_SSE2(const uint8_t* const src, __m128i* out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i A = _mm_loadl_epi64((const __m128i*)(src));  // ABCDEFGH
  *out = _mm_unpacklo_epi8(A, zero);
}

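// Horizontal upscaling: each output sample is a blend of two adjacent source
// samples, weighted by accum and (x_add - accum).  The interleaved 16-bit
// layout produced by the loaders above lets _mm_madd_epi16 compute both
// products and their sum in a single instruction.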
static void RescalerImportRowExpand_SSE2(WebPRescaler* WEBP_RESTRICT const wrk,
                                         const uint8_t* WEBP_RESTRICT src) {
  rescaler_t* frow = wrk->frow;
  const rescaler_t* const frow_end = frow + wrk->dst_width * wrk->num_channels;
  const int x_add = wrk->x_add;
  int accum = x_add;
  __m128i cur_pixels;

  // SSE2 implementation only works with 16b signed arithmetic at max.
  if (wrk->src_width < 8 || accum >= (1 << 15)) {
    WebPRescalerImportRowExpand_C(wrk, src);
    return;
  }

  assert(!WebPRescalerInputDone(wrk));
  assert(wrk->x_expand);
  if (wrk->num_channels == 4) {
    LoadTwoPixels_SSE2(src, &cur_pixels);
    src += 4;
    while (1) {
      const __m128i mult = _mm_set1_epi32(((x_add - accum) << 16) | accum);
      const __m128i out = _mm_madd_epi16(cur_pixels, mult);
      _mm_storeu_si128((__m128i*)frow, out);
      frow += 4;
      if (frow >= frow_end) break;
      accum -= wrk->x_sub;
      if (accum < 0) {
        LoadTwoPixels_SSE2(src, &cur_pixels);
        src += 4;
        accum += x_add;
      }
    }
  } else {
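    // Single-channel path: keep eight source samples in a register and shift
    // it down by one 16-bit lane as the input position advances; 'left' counts
    // how many loaded samples remain before a reload is needed.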
    int left;
    const uint8_t* const src_limit = src + wrk->src_width - 8;
    LoadEightPixels_SSE2(src, &cur_pixels);
    src += 7;
    left = 7;
    while (1) {
      const __m128i mult = _mm_cvtsi32_si128(((x_add - accum) << 16) | accum);
      const __m128i out = _mm_madd_epi16(cur_pixels, mult);
      assert(sizeof(*frow) == sizeof(uint32_t));
      WebPInt32ToMem((uint8_t*)frow, _mm_cvtsi128_si32(out));
      frow += 1;
      if (frow >= frow_end) break;
      accum -= wrk->x_sub;
      if (accum < 0) {
        if (--left) {
          cur_pixels = _mm_srli_si128(cur_pixels, 2);
        } else if (src <= src_limit) {
          LoadEightPixels_SSE2(src, &cur_pixels);
          src += 7;
          left = 7;
        } else {   // tail
          cur_pixels = _mm_srli_si128(cur_pixels, 2);
          cur_pixels = _mm_insert_epi16(cur_pixels, src[1], 1);
          src += 1;
          left = 1;
        }
        accum += x_add;
      }
    }
  }
  assert(accum == 0);
}

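// Horizontal downscaling: whole source pixels are accumulated into 'sum' while
// 'accum' stays positive; the fractional contribution of the last pixel
// ('frac') is subtracted from the current output and carried over, scaled by
// fx_scale, as the starting 'sum' of the next output pixel.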
static void RescalerImportRowShrink_SSE2(WebPRescaler* WEBP_RESTRICT const wrk,
                                         const uint8_t* WEBP_RESTRICT src) {
  const int x_sub = wrk->x_sub;
  int accum = 0;
  const __m128i zero = _mm_setzero_si128();
  const __m128i mult0 = _mm_set1_epi16(x_sub);
  const __m128i mult1 = _mm_set1_epi32(wrk->fx_scale);
  const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
  __m128i sum = zero;
  rescaler_t* frow = wrk->frow;
  const rescaler_t* const frow_end = wrk->frow + 4 * wrk->dst_width;

  if (wrk->num_channels != 4 || wrk->x_add > (x_sub << 7)) {
    WebPRescalerImportRowShrink_C(wrk, src);
    return;
  }
  assert(!WebPRescalerInputDone(wrk));
  assert(!wrk->x_expand);

  for (; frow < frow_end; frow += 4) {
    __m128i base = zero;
    accum += wrk->x_add;
    while (accum > 0) {
      const __m128i A = _mm_cvtsi32_si128(WebPMemToInt32(src));
      src += 4;
      base = _mm_unpacklo_epi8(A, zero);
      // To avoid overflow, we need: base * x_add / x_sub < 32768
      // => x_add < x_sub << 7. That's a 1/128 reduction ratio limit.
      sum = _mm_add_epi16(sum, base);
      accum -= x_sub;
    }
    {    // Emit next horizontal pixel.
      const __m128i mult = _mm_set1_epi16(-accum);
      const __m128i frac0 = _mm_mullo_epi16(base, mult);  // 16b x 16b -> 32b
      const __m128i frac1 = _mm_mulhi_epu16(base, mult);
      const __m128i frac = _mm_unpacklo_epi16(frac0, frac1);  // frac is 32b
      const __m128i A0 = _mm_mullo_epi16(sum, mult0);
      const __m128i A1 = _mm_mulhi_epu16(sum, mult0);
      const __m128i B0 = _mm_unpacklo_epi16(A0, A1);      // sum * x_sub
      const __m128i frow_out = _mm_sub_epi32(B0, frac);   // sum * x_sub - frac
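      // The rest computes MULT_FIX(frac, fx_scale) per lane: 32x32->64-bit
      // multiplies, rounding, then gather the upper 32 bits of each product;
      // the result seeds 'sum' for the next output pixel.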
      const __m128i D0 = _mm_srli_epi64(frac, 32);
      const __m128i D1 = _mm_mul_epu32(frac, mult1);      // 32b x 16b -> 64b
      const __m128i D2 = _mm_mul_epu32(D0, mult1);
      const __m128i E1 = _mm_add_epi64(D1, rounder);
      const __m128i E2 = _mm_add_epi64(D2, rounder);
      const __m128i F1 = _mm_shuffle_epi32(E1, 1 | (3 << 2));
      const __m128i F2 = _mm_shuffle_epi32(E2, 1 | (3 << 2));
      const __m128i G = _mm_unpacklo_epi32(F1, F2);
      sum = _mm_packs_epi32(G, zero);
      _mm_storeu_si128((__m128i*)frow, frow_out);
    }
  }
  assert(accum == 0);
}

//------------------------------------------------------------------------------
// Row export

// Load 8 x 32-bit values from 'src' into the 64-bit lanes of [out0 .. out3]
// (even source lanes in out0/out1, odd lanes in out2/out3), multiplied by
// '*mult' (32b x 32b -> 64b) when 'mult' is not NULL.
static WEBP_INLINE void LoadDispatchAndMult_SSE2(
    const rescaler_t* WEBP_RESTRICT const src, const __m128i* const mult,
    __m128i* const out0, __m128i* const out1, __m128i* const out2,
    __m128i* const out3) {
  const __m128i A0 = _mm_loadu_si128((const __m128i*)(src + 0));
  const __m128i A1 = _mm_loadu_si128((const __m128i*)(src + 4));
  const __m128i A2 = _mm_srli_epi64(A0, 32);
  const __m128i A3 = _mm_srli_epi64(A1, 32);
  if (mult != NULL) {
    *out0 = _mm_mul_epu32(A0, *mult);
    *out1 = _mm_mul_epu32(A1, *mult);
    *out2 = _mm_mul_epu32(A2, *mult);
    *out3 = _mm_mul_epu32(A3, *mult);
  } else {
    *out0 = A0;
    *out1 = A1;
    *out2 = A2;
    *out3 = A3;
  }
}
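// Applies MULT_FIX(., *mult) to the eight values held in A0..A3, then packs
// the results to eight bytes with unsigned saturation and stores them at dst.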
static WEBP_INLINE void ProcessRow_SSE2(const __m128i* const A0,
                                        const __m128i* const A1,
                                        const __m128i* const A2,
                                        const __m128i* const A3,
                                        const __m128i* const mult,
                                        uint8_t* const dst) {
  const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
  const __m128i mask = _mm_set_epi32(~0, 0, ~0, 0);
  const __m128i B0 = _mm_mul_epu32(*A0, *mult);
  const __m128i B1 = _mm_mul_epu32(*A1, *mult);
  const __m128i B2 = _mm_mul_epu32(*A2, *mult);
  const __m128i B3 = _mm_mul_epu32(*A3, *mult);
  const __m128i C0 = _mm_add_epi64(B0, rounder);
  const __m128i C1 = _mm_add_epi64(B1, rounder);
  const __m128i C2 = _mm_add_epi64(B2, rounder);
  const __m128i C3 = _mm_add_epi64(B3, rounder);
  const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX);
  const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX);
#if (WEBP_RESCALER_RFIX < 32)
  const __m128i D2 =
      _mm_and_si128(_mm_slli_epi64(C2, 32 - WEBP_RESCALER_RFIX), mask);
  const __m128i D3 =
      _mm_and_si128(_mm_slli_epi64(C3, 32 - WEBP_RESCALER_RFIX), mask);
#else
  const __m128i D2 = _mm_and_si128(C2, mask);
  const __m128i D3 = _mm_and_si128(C3, mask);
#endif
  const __m128i E0 = _mm_or_si128(D0, D2);
  const __m128i E1 = _mm_or_si128(D1, D3);
  const __m128i F = _mm_packs_epi32(E0, E1);
  const __m128i G = _mm_packus_epi16(F, F);
  _mm_storel_epi64((__m128i*)dst, G);
}
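// Vertical upscaling export: the output row is the blend A * frow + B * irow,
// where B is the fractional vertical position (A + B == WEBP_RESCALER_ONE),
// then scaled by fy_scale and clamped to 8 bits. When y_accum == 0 the row
// coincides with frow and no blending is needed.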
static void RescalerExportRowExpand_SSE2(WebPRescaler* const wrk) {
  int x_out;
  uint8_t* const dst = wrk->dst;
  rescaler_t* const irow = wrk->irow;
  const int x_out_max = wrk->dst_width * wrk->num_channels;
  const rescaler_t* const frow = wrk->frow;
  const __m128i mult = _mm_set_epi32(0, wrk->fy_scale, 0, wrk->fy_scale);

  assert(!WebPRescalerOutputDone(wrk));
  assert(wrk->y_accum <= 0 && wrk->y_sub + wrk->y_accum >= 0);
  assert(wrk->y_expand);
  if (wrk->y_accum == 0) {
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3;
      LoadDispatchAndMult_SSE2(frow + x_out, NULL, &A0, &A1, &A2, &A3);
      ProcessRow_SSE2(&A0, &A1, &A2, &A3, &mult, dst + x_out);
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint32_t J = frow[x_out];
      const int v = (int)MULT_FIX(J, wrk->fy_scale);
      dst[x_out] = (v > 255) ? 255u : (uint8_t)v;
    }
  } else {
    const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub);
    const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B);
    const __m128i mA = _mm_set_epi32(0, A, 0, A);
    const __m128i mB = _mm_set_epi32(0, B, 0, B);
    const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3, B0, B1, B2, B3;
      LoadDispatchAndMult_SSE2(frow + x_out, &mA, &A0, &A1, &A2, &A3);
      LoadDispatchAndMult_SSE2(irow + x_out, &mB, &B0, &B1, &B2, &B3);
      {
        const __m128i C0 = _mm_add_epi64(A0, B0);
        const __m128i C1 = _mm_add_epi64(A1, B1);
        const __m128i C2 = _mm_add_epi64(A2, B2);
        const __m128i C3 = _mm_add_epi64(A3, B3);
        const __m128i D0 = _mm_add_epi64(C0, rounder);
        const __m128i D1 = _mm_add_epi64(C1, rounder);
        const __m128i D2 = _mm_add_epi64(C2, rounder);
        const __m128i D3 = _mm_add_epi64(C3, rounder);
        const __m128i E0 = _mm_srli_epi64(D0, WEBP_RESCALER_RFIX);
        const __m128i E1 = _mm_srli_epi64(D1, WEBP_RESCALER_RFIX);
        const __m128i E2 = _mm_srli_epi64(D2, WEBP_RESCALER_RFIX);
        const __m128i E3 = _mm_srli_epi64(D3, WEBP_RESCALER_RFIX);
        ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult, dst + x_out);
      }
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint64_t I = (uint64_t)A * frow[x_out]
                       + (uint64_t)B * irow[x_out];
      const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX);
      const int v = (int)MULT_FIX(J, wrk->fy_scale);
      dst[x_out] = (v > 255) ? 255u : (uint8_t)v;
    }
  }
}
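// Vertical downscaling export: when part of the current frow still belongs to
// the next output row (yscale != 0), that fraction ('frac') is split off and
// kept in irow for the next row, and the remainder (irow - frac) is scaled by
// fxy_scale and emitted; otherwise irow is emitted directly and reset to zero.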
static void RescalerExportRowShrink_SSE2(WebPRescaler* const wrk) {
  int x_out;
  uint8_t* const dst = wrk->dst;
  rescaler_t* const irow = wrk->irow;
  const int x_out_max = wrk->dst_width * wrk->num_channels;
  const rescaler_t* const frow = wrk->frow;
  const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum);
  assert(!WebPRescalerOutputDone(wrk));
  assert(wrk->y_accum <= 0);
  assert(!wrk->y_expand);
  if (yscale) {
    const int scale_xy = wrk->fxy_scale;
    const __m128i mult_xy = _mm_set_epi32(0, scale_xy, 0, scale_xy);
    const __m128i mult_y = _mm_set_epi32(0, yscale, 0, yscale);
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3, B0, B1, B2, B3;
      LoadDispatchAndMult_SSE2(irow + x_out, NULL, &A0, &A1, &A2, &A3);
      LoadDispatchAndMult_SSE2(frow + x_out, &mult_y, &B0, &B1, &B2, &B3);
      {
        const __m128i D0 = _mm_srli_epi64(B0, WEBP_RESCALER_RFIX);   // = frac
        const __m128i D1 = _mm_srli_epi64(B1, WEBP_RESCALER_RFIX);
        const __m128i D2 = _mm_srli_epi64(B2, WEBP_RESCALER_RFIX);
        const __m128i D3 = _mm_srli_epi64(B3, WEBP_RESCALER_RFIX);
        const __m128i E0 = _mm_sub_epi64(A0, D0);   // irow[x] - frac
        const __m128i E1 = _mm_sub_epi64(A1, D1);
        const __m128i E2 = _mm_sub_epi64(A2, D2);
        const __m128i E3 = _mm_sub_epi64(A3, D3);
        const __m128i F2 = _mm_slli_epi64(D2, 32);
        const __m128i F3 = _mm_slli_epi64(D3, 32);
        const __m128i G0 = _mm_or_si128(D0, F2);
        const __m128i G1 = _mm_or_si128(D1, F3);
        _mm_storeu_si128((__m128i*)(irow + x_out + 0), G0);
        _mm_storeu_si128((__m128i*)(irow + x_out + 4), G1);
        ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
      }
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint32_t frac = (int)MULT_FIX_FLOOR(frow[x_out], yscale);
      const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
      dst[x_out] = (v > 255) ? 255u : (uint8_t)v;
      irow[x_out] = frac;   // new fractional start
    }
  } else {
    const uint32_t scale = wrk->fxy_scale;
    const __m128i mult = _mm_set_epi32(0, scale, 0, scale);
    const __m128i zero = _mm_setzero_si128();
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3;
      LoadDispatchAndMult_SSE2(irow + x_out, NULL, &A0, &A1, &A2, &A3);
      _mm_storeu_si128((__m128i*)(irow + x_out + 0), zero);
      _mm_storeu_si128((__m128i*)(irow + x_out + 4), zero);
      ProcessRow_SSE2(&A0, &A1, &A2, &A3, &mult, dst + x_out);
    }
    for (; x_out < x_out_max; ++x_out) {
      const int v = (int)MULT_FIX(irow[x_out], scale);
      dst[x_out] = (v > 255) ? 255u : (uint8_t)v;
      irow[x_out] = 0;
    }
  }
}

#undef MULT_FIX_FLOOR
#undef MULT_FIX
#undef ROUNDER

//------------------------------------------------------------------------------

extern void WebPRescalerDspInitSSE2(void);

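// Entry point: installs the SSE2 variants above into the generic WebPRescaler
// row-function pointers.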
WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitSSE2(void) {
  WebPRescalerImportRowExpand = RescalerImportRowExpand_SSE2;
  WebPRescalerImportRowShrink = RescalerImportRowShrink_SSE2;
  WebPRescalerExportRowExpand = RescalerExportRowExpand_SSE2;
  WebPRescalerExportRowShrink = RescalerExportRowShrink_SSE2;
}

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(WebPRescalerDspInitSSE2)

#endif  // WEBP_USE_SSE2