/src/libwebp/src/dsp/enc_sse41.c
Line | Count | Source |
1 | | // Copyright 2015 Google Inc. All Rights Reserved. |
2 | | // |
3 | | // Use of this source code is governed by a BSD-style license |
4 | | // that can be found in the COPYING file in the root of the source |
5 | | // tree. An additional intellectual property rights grant can be found |
6 | | // in the file PATENTS. All contributing project authors may |
7 | | // be found in the AUTHORS file in the root of the source tree. |
8 | | // ----------------------------------------------------------------------------- |
9 | | // |
10 | | // SSE4 version of some encoding functions. |
11 | | // |
12 | | // Author: Skal (pascal.massimino@gmail.com) |
13 | | |
14 | | #include "src/dsp/dsp.h" |
15 | | |
16 | | #if defined(WEBP_USE_SSE41) |
17 | | #include <emmintrin.h> |
18 | | #include <smmintrin.h> |
19 | | #include <stdlib.h> // for abs() |
20 | | |
21 | | #include "src/dsp/common_sse2.h" |
22 | | #include "src/dsp/cpu.h" |
23 | | #include "src/enc/vp8i_enc.h" |
24 | | #include "src/webp/types.h" |
25 | | |
26 | | //------------------------------------------------------------------------------ |
27 | | // Compute susceptibility based on DCT-coeff histograms. |
28 | | |
29 | | static void CollectHistogram_SSE41(const uint8_t* WEBP_RESTRICT ref, |
30 | | const uint8_t* WEBP_RESTRICT pred, |
31 | | int start_block, int end_block, |
32 | 19.0M | VP8Histogram* WEBP_RESTRICT const histo) { |
33 | 19.0M | const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH); |
34 | 19.0M | int j; |
35 | 19.0M | int distribution[MAX_COEFF_THRESH + 1] = {0}; |
36 | 247M | for (j = start_block; j < end_block; ++j) { |
37 | 228M | int16_t out[16]; |
38 | 228M | int k; |
39 | | |
40 | 228M | VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out); |
41 | | |
42 | | // Convert coefficients to bin (within out[]). |
43 | 228M | { |
44 | | // Load. |
45 | 228M | const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]); |
46 | 228M | const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]); |
47 | | // v = abs(out) >> 3 |
48 | 228M | const __m128i abs0 = _mm_abs_epi16(out0); |
49 | 228M | const __m128i abs1 = _mm_abs_epi16(out1); |
50 | 228M | const __m128i v0 = _mm_srai_epi16(abs0, 3); |
51 | 228M | const __m128i v1 = _mm_srai_epi16(abs1, 3); |
52 | | // bin = min(v, MAX_COEFF_THRESH) |
53 | 228M | const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh); |
54 | 228M | const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh); |
55 | | // Store. |
56 | 228M | _mm_storeu_si128((__m128i*)&out[0], bin0); |
57 | 228M | _mm_storeu_si128((__m128i*)&out[8], bin1); |
58 | 228M | } |
59 | | |
60 | | // Convert coefficients to bin. |
61 | 3.88G | for (k = 0; k < 16; ++k) { |
62 | 3.65G | ++distribution[out[k]]; |
63 | 3.65G | } |
64 | 228M | } |
65 | 19.0M | VP8SetHistogramData(distribution, histo); |
66 | 19.0M | } |
67 | | |
68 | | //------------------------------------------------------------------------------ |
69 | | // Texture distortion |
70 | | // |
71 | | // We try to match the spectral content (weighted) between source and |
72 | | // reconstructed samples. |
73 | | |
74 | | // Hadamard transform |
75 | | // Returns the weighted sum of the absolute value of transformed coefficients. |
76 | | // w[] contains a row-major 4 by 4 symmetric matrix. |
static int TTransform_SSE41(const uint8_t* inA, const uint8_t* inB,
                            const uint16_t* const w) {
  int32_t sum[4];
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;

  // Load and combine inputs.
  {
    const __m128i inA_0 = _mm_loadu_si128((const __m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadu_si128((const __m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadu_si128((const __m128i*)&inA[BPS * 2]);
    // In SSE4.1, with gcc 4.8 at least (maybe other versions),
    // _mm_loadu_si128 is faster than _mm_loadl_epi64. But for the last lump
    // of inA and inB, _mm_loadl_epi64 is still used not to have an out of
    // bound read.
    const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadu_si128((const __m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadu_si128((const __m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadu_si128((const __m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]);

    // Combine inA and inB (we'll do two transforms in parallel).
    // The low 4 bytes of each A row are paired with the low 4 bytes of the
    // matching B row, then zero-extended to 16-bit lanes with pmovzxbw.
    const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3);
    tmp_0 = _mm_cvtepu8_epi16(inAB_0);
    tmp_1 = _mm_cvtepu8_epi16(inAB_1);
    tmp_2 = _mm_cvtepu8_epi16(inAB_2);
    tmp_3 = _mm_cvtepu8_epi16(inAB_3);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
  }

  // Vertical pass first to avoid a transpose (vertical and horizontal passes
  // are commutative because w/kWeightY is symmetric) and subsequent transpose.
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3);
  }

  // Horizontal pass and difference of weighted sums.
  {
    // Load all inputs.
    const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]);

    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);

    // Separate the transforms of inA and inB.
    // (A's transform sits in the low 64 bits, B's in the high 64 bits.)
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);

    A_b0 = _mm_abs_epi16(A_b0);
    A_b2 = _mm_abs_epi16(A_b2);
    B_b0 = _mm_abs_epi16(B_b0);
    B_b2 = _mm_abs_epi16(B_b2);

    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);

    // difference of weighted sums
    A_b2 = _mm_sub_epi32(A_b0, B_b0);
    _mm_storeu_si128((__m128i*)&sum[0], A_b2);
  }
  // Reduce the four 32-bit partial sums to the final scalar result.
  return sum[0] + sum[1] + sum[2] + sum[3];
}
174 | | |
175 | | static int Disto4x4_SSE41(const uint8_t* WEBP_RESTRICT const a, |
176 | | const uint8_t* WEBP_RESTRICT const b, |
177 | 790M | const uint16_t* WEBP_RESTRICT const w) { |
178 | 790M | const int diff_sum = TTransform_SSE41(a, b, w); |
179 | 790M | return abs(diff_sum) >> 5; |
180 | 790M | } |
181 | | |
182 | | static int Disto16x16_SSE41(const uint8_t* WEBP_RESTRICT const a, |
183 | | const uint8_t* WEBP_RESTRICT const b, |
184 | 19.0M | const uint16_t* WEBP_RESTRICT const w) { |
185 | 19.0M | int D = 0; |
186 | 19.0M | int x, y; |
187 | 95.1M | for (y = 0; y < 16 * BPS; y += 4 * BPS) { |
188 | 380M | for (x = 0; x < 16; x += 4) { |
189 | 304M | D += Disto4x4_SSE41(a + x + y, b + x + y, w); |
190 | 304M | } |
191 | 76.1M | } |
192 | 19.0M | return D; |
193 | 19.0M | } |
194 | | |
195 | | //------------------------------------------------------------------------------ |
196 | | // Quantization |
197 | | // |
198 | | |
// Generates a pshufb constant for shuffling 16b words.
// A..H are source word indices (0-7) for the eight destination words; an
// index of -1 produces a negative control byte, which pshufb turns into a
// zeroed output byte (high bit set => zero, per the SSSE3 PSHUFB spec).
#define PSHUFB_CST(A, B, C, D, E, F, G, H) \
  _mm_set_epi8(2 * (H) + 1, 2 * (H) + 0, 2 * (G) + 1, 2 * (G) + 0, \
               2 * (F) + 1, 2 * (F) + 0, 2 * (E) + 1, 2 * (E) + 0, \
               2 * (D) + 1, 2 * (D) + 0, 2 * (C) + 1, 2 * (C) + 0, \
               2 * (B) + 1, 2 * (B) + 0, 2 * (A) + 1, 2 * (A) + 0)
205 | | |
// Quantizes the 16 coefficients of 'in', writing the zigzag-ordered levels
// to 'out' and the dequantized (level * Q) values back into 'in'.
// 'sharpen' may be NULL; when non-NULL it is added to the coefficient
// magnitudes before quantization. Returns 1 if any level is non-zero.
static WEBP_INLINE int DoQuantizeBlock_SSE41(int16_t in[16], int16_t out[16],
                                             const uint16_t* const sharpen,
                                             const VP8Matrix* const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
  const __m128i zero = _mm_setzero_si128();
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq[0]);
  const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq[8]);
  const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q[0]);
  const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q[8]);

  // coeff = abs(in)
  __m128i coeff0 = _mm_abs_epi16(in0);
  __m128i coeff8 = _mm_abs_epi16(in8);

  // coeff = abs(in) + sharpen
  if (sharpen != NULL) {
    const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);
    const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);
    coeff0 = _mm_add_epi16(coeff0, sharpen0);
    coeff8 = _mm_add_epi16(coeff8, sharpen8);
  }

  // out = (coeff * iQ + B) >> QFIX
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ)
    // The 16x16->32 bit products are built from the separate high and low
    // halves (pmulhuw/pmullw) and re-interleaved into 32-bit lanes.
    const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // out = (coeff * iQ + B)
    const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias[0]);
    const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias[4]);
    const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias[8]);
    const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias[12]);
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = QUANTDIV(coeff, iQ, B, QFIX)
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);

    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);

    // if (coeff > 2047) coeff = 2047
    out0 = _mm_min_epi16(out0, max_coeff_2047);
    out8 = _mm_min_epi16(out8, max_coeff_2047);
  }

  // put sign back
  out0 = _mm_sign_epi16(out0, in0);
  out8 = _mm_sign_epi16(out8, in8);

  // in = out * Q  (dequantized reconstruction, stored back into 'in')
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);

  _mm_storeu_si128((__m128i*)&in[0], in0);
  _mm_storeu_si128((__m128i*)&in[8], in8);

  // zigzag the output before storing it. The re-ordering is:
  //    0 1 2 3 4 5 6 7 | 8  9 10 11 12 13 14 15
  // -> 0 1 4[8]5 2 3 6 | 9 12 13 10 [7]11 14 15
  // There's only two misplaced entries ([8] and [7]) that are crossing the
  // reg's boundaries.
  // We use pshufb instead of pshuflo/pshufhi.
  {
    // -1 indices in PSHUFB_CST produce zeroed lanes, so each half leaves a
    // hole that the other register's extracted entry fills via OR.
    const __m128i kCst_lo = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);
    const __m128i kCst_7 = PSHUFB_CST(-1, -1, -1, -1, 7, -1, -1, -1);
    const __m128i tmp_lo = _mm_shuffle_epi8(out0, kCst_lo);
    const __m128i tmp_7 = _mm_shuffle_epi8(out0, kCst_7);  // extract #7
    const __m128i kCst_hi = PSHUFB_CST(1, 4, 5, 2, -1, 3, 6, 7);
    const __m128i kCst_8 = PSHUFB_CST(-1, -1, -1, 0, -1, -1, -1, -1);
    const __m128i tmp_hi = _mm_shuffle_epi8(out8, kCst_hi);
    const __m128i tmp_8 = _mm_shuffle_epi8(out8, kCst_8);  // extract #8
    const __m128i out_z0 = _mm_or_si128(tmp_lo, tmp_8);
    const __m128i out_z8 = _mm_or_si128(tmp_hi, tmp_7);
    _mm_storeu_si128((__m128i*)&out[0], out_z0);
    _mm_storeu_si128((__m128i*)&out[8], out_z8);
    packed_out = _mm_packs_epi16(out_z0, out_z8);
  }

  // detect if all 'out' values are zeroes or not
  return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
}
306 | | |
307 | | #undef PSHUFB_CST |
308 | | |
309 | | static int QuantizeBlock_SSE41(int16_t in[16], int16_t out[16], |
310 | 486M | const VP8Matrix* WEBP_RESTRICT const mtx) { |
311 | 486M | return DoQuantizeBlock_SSE41(in, out, &mtx->sharpen[0], mtx); |
312 | 486M | } |
313 | | |
314 | | static int QuantizeBlockWHT_SSE41(int16_t in[16], int16_t out[16], |
315 | 19.0M | const VP8Matrix* WEBP_RESTRICT const mtx) { |
316 | 19.0M | return DoQuantizeBlock_SSE41(in, out, NULL, mtx); |
317 | 19.0M | } |
318 | | |
319 | | static int Quantize2Blocks_SSE41(int16_t in[32], int16_t out[32], |
320 | 228M | const VP8Matrix* WEBP_RESTRICT const mtx) { |
321 | 228M | int nz; |
322 | 228M | const uint16_t* const sharpen = &mtx->sharpen[0]; |
323 | 228M | nz = DoQuantizeBlock_SSE41(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0; |
324 | 228M | nz |= DoQuantizeBlock_SSE41(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1; |
325 | 228M | return nz; |
326 | 228M | } |
327 | | |
328 | | //------------------------------------------------------------------------------ |
329 | | // Entry point |
330 | | |
331 | | extern void VP8EncDspInitSSE41(void); |
332 | 4 | WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) { |
333 | 4 | VP8CollectHistogram = CollectHistogram_SSE41; |
334 | 4 | VP8EncQuantizeBlock = QuantizeBlock_SSE41; |
335 | 4 | VP8EncQuantize2Blocks = Quantize2Blocks_SSE41; |
336 | 4 | VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE41; |
337 | 4 | VP8TDisto4x4 = Disto4x4_SSE41; |
338 | 4 | VP8TDisto16x16 = Disto16x16_SSE41; |
339 | 4 | } |
340 | | |
341 | | #else // !WEBP_USE_SSE41 |
342 | | |
343 | | WEBP_DSP_INIT_STUB(VP8EncDspInitSSE41) |
344 | | |
345 | | #endif // WEBP_USE_SSE41 |