/src/aom/aom_dsp/x86/intrapred_ssse3.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2017, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <tmmintrin.h> |
13 | | |
14 | | #include "config/aom_dsp_rtcd.h" |
15 | | |
16 | | #include "aom_dsp/intrapred_common.h" |
17 | | |
18 | | // ----------------------------------------------------------------------------- |
19 | | // PAETH_PRED |
20 | | |
21 | | // Return 8 16-bit pixels in one row |
22 | | static inline __m128i paeth_8x1_pred(const __m128i *left, const __m128i *top, |
23 | 7.53M | const __m128i *topleft) { |
24 | 7.53M | const __m128i base = _mm_sub_epi16(_mm_add_epi16(*top, *left), *topleft); |
25 | | |
26 | 7.53M | __m128i pl = _mm_abs_epi16(_mm_sub_epi16(base, *left)); |
27 | 7.53M | __m128i pt = _mm_abs_epi16(_mm_sub_epi16(base, *top)); |
28 | 7.53M | __m128i ptl = _mm_abs_epi16(_mm_sub_epi16(base, *topleft)); |
29 | | |
30 | 7.53M | __m128i mask1 = _mm_cmpgt_epi16(pl, pt); |
31 | 7.53M | mask1 = _mm_or_si128(mask1, _mm_cmpgt_epi16(pl, ptl)); |
32 | 7.53M | __m128i mask2 = _mm_cmpgt_epi16(pt, ptl); |
33 | | |
34 | 7.53M | pl = _mm_andnot_si128(mask1, *left); |
35 | | |
36 | 7.53M | ptl = _mm_and_si128(mask2, *topleft); |
37 | 7.53M | pt = _mm_andnot_si128(mask2, *top); |
38 | 7.53M | pt = _mm_or_si128(pt, ptl); |
39 | 7.53M | pt = _mm_and_si128(mask1, pt); |
40 | | |
41 | 7.53M | return _mm_or_si128(pl, pt); |
42 | 7.53M | } |
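// Editorial sketch (not part of libaom): the scalar per-lane selection that
// paeth_8x1_pred implements branchlessly with compares and masks. Inputs are
// assumed already widened to int, as the 16-bit lanes above are.
static int paeth_scalar_sketch(int left, int top, int topleft) {
  const int base = top + left - topleft;
  const int pl = base > left ? base - left : left - base;    // |base - left|
  const int pt = base > top ? base - top : top - base;       // |base - top|
  const int ptl = base > topleft ? base - topleft : topleft - base;
  // Prefer left, then top, then topleft; ties resolve in that order.
  if (pl <= pt && pl <= ptl) return left;
  return (pt <= ptl) ? top : topleft;
}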
43 | | |
44 | | void aom_paeth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride, |
45 | 268k | const uint8_t *above, const uint8_t *left) { |
46 | 268k | __m128i l = _mm_loadl_epi64((const __m128i *)left); |
47 | 268k | const __m128i t = _mm_loadl_epi64((const __m128i *)above); |
48 | 268k | const __m128i zero = _mm_setzero_si128(); |
49 | 268k | const __m128i t16 = _mm_unpacklo_epi8(t, zero); |
50 | 268k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
51 | 268k | __m128i rep = _mm_set1_epi16((short)0x8000); |
52 | 268k | const __m128i one = _mm_set1_epi16(1); |
53 | | |
54 | 268k | int i; |
55 | 1.34M | for (i = 0; i < 4; ++i) { |
56 | 1.07M | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
57 | 1.07M | const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16); |
58 | | |
59 | 1.07M | *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row)); |
60 | 1.07M | dst += stride; |
61 | 1.07M | rep = _mm_add_epi16(rep, one); |
62 | 1.07M | } |
63 | 268k | } |
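// Editorial note on the |rep| constant used by every paeth kernel: with
// 0x8000 in each 16-bit lane, _mm_shuffle_epi8(l, rep) places byte 0 of |l|
// in the low byte of every lane and, because selector 0x80 has its sign bit
// set, zeroes the high byte, so each lane holds left[0] zero-extended to 16
// bits. Adding |one| each row advances the selector to left[1], left[2], ...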
64 | | |
65 | | void aom_paeth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride, |
66 | 58.3k | const uint8_t *above, const uint8_t *left) { |
67 | 58.3k | __m128i l = _mm_loadl_epi64((const __m128i *)left); |
68 | 58.3k | const __m128i t = _mm_loadl_epi64((const __m128i *)above); |
69 | 58.3k | const __m128i zero = _mm_setzero_si128(); |
70 | 58.3k | const __m128i t16 = _mm_unpacklo_epi8(t, zero); |
71 | 58.3k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
72 | 58.3k | __m128i rep = _mm_set1_epi16((short)0x8000); |
73 | 58.3k | const __m128i one = _mm_set1_epi16(1); |
74 | | |
75 | 58.3k | int i; |
76 | 525k | for (i = 0; i < 8; ++i) { |
77 | 467k | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
78 | 467k | const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16); |
79 | | |
80 | 467k | *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row)); |
81 | 467k | dst += stride; |
82 | 467k | rep = _mm_add_epi16(rep, one); |
83 | 467k | } |
84 | 58.3k | } |
85 | | |
86 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
87 | | void aom_paeth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
88 | 62.1k | const uint8_t *above, const uint8_t *left) { |
89 | 62.1k | __m128i l = _mm_load_si128((const __m128i *)left); |
90 | 62.1k | const __m128i t = _mm_cvtsi32_si128(((const int *)above)[0]); |
91 | 62.1k | const __m128i zero = _mm_setzero_si128(); |
92 | 62.1k | const __m128i t16 = _mm_unpacklo_epi8(t, zero); |
93 | 62.1k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
94 | 62.1k | __m128i rep = _mm_set1_epi16((short)0x8000); |
95 | 62.1k | const __m128i one = _mm_set1_epi16(1); |
96 | | |
97 | 1.05M | for (int i = 0; i < 16; ++i) { |
98 | 993k | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
99 | 993k | const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16); |
100 | | |
101 | 993k | *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row)); |
102 | 993k | dst += stride; |
103 | 993k | rep = _mm_add_epi16(rep, one); |
104 | 993k | } |
105 | 62.1k | } |
106 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
107 | | |
108 | | void aom_paeth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride, |
109 | 85.2k | const uint8_t *above, const uint8_t *left) { |
110 | 85.2k | __m128i l = _mm_loadl_epi64((const __m128i *)left); |
111 | 85.2k | const __m128i t = _mm_loadl_epi64((const __m128i *)above); |
112 | 85.2k | const __m128i zero = _mm_setzero_si128(); |
113 | 85.2k | const __m128i t16 = _mm_unpacklo_epi8(t, zero); |
114 | 85.2k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
115 | 85.2k | __m128i rep = _mm_set1_epi16((short)0x8000); |
116 | 85.2k | const __m128i one = _mm_set1_epi16(1); |
117 | | |
118 | 85.2k | int i; |
119 | 426k | for (i = 0; i < 4; ++i) { |
120 | 341k | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
121 | 341k | const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16); |
122 | | |
123 | 341k | _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row)); |
124 | 341k | dst += stride; |
125 | 341k | rep = _mm_add_epi16(rep, one); |
126 | 341k | } |
127 | 85.2k | } |
128 | | |
129 | | void aom_paeth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride, |
130 | 176k | const uint8_t *above, const uint8_t *left) { |
131 | 176k | __m128i l = _mm_loadl_epi64((const __m128i *)left); |
132 | 176k | const __m128i t = _mm_loadl_epi64((const __m128i *)above); |
133 | 176k | const __m128i zero = _mm_setzero_si128(); |
134 | 176k | const __m128i t16 = _mm_unpacklo_epi8(t, zero); |
135 | 176k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
136 | 176k | __m128i rep = _mm_set1_epi16((short)0x8000); |
137 | 176k | const __m128i one = _mm_set1_epi16(1); |
138 | | |
139 | 176k | int i; |
140 | 1.58M | for (i = 0; i < 8; ++i) { |
141 | 1.41M | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
142 | 1.41M | const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16); |
143 | | |
144 | 1.41M | _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row)); |
145 | 1.41M | dst += stride; |
146 | 1.41M | rep = _mm_add_epi16(rep, one); |
147 | 1.41M | } |
148 | 176k | } |
149 | | |
150 | | void aom_paeth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
151 | 47.2k | const uint8_t *above, const uint8_t *left) { |
152 | 47.2k | __m128i l = _mm_load_si128((const __m128i *)left); |
153 | 47.2k | const __m128i t = _mm_loadl_epi64((const __m128i *)above); |
154 | 47.2k | const __m128i zero = _mm_setzero_si128(); |
155 | 47.2k | const __m128i t16 = _mm_unpacklo_epi8(t, zero); |
156 | 47.2k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
157 | 47.2k | __m128i rep = _mm_set1_epi16((short)0x8000); |
158 | 47.2k | const __m128i one = _mm_set1_epi16(1); |
159 | | |
160 | 47.2k | int i; |
161 | 802k | for (i = 0; i < 16; ++i) { |
162 | 755k | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
163 | 755k | const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16); |
164 | | |
165 | 755k | _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row)); |
166 | 755k | dst += stride; |
167 | 755k | rep = _mm_add_epi16(rep, one); |
168 | 755k | } |
169 | 47.2k | } |
170 | | |
171 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
172 | | void aom_paeth_predictor_8x32_ssse3(uint8_t *dst, ptrdiff_t stride, |
173 | 31.2k | const uint8_t *above, const uint8_t *left) { |
174 | 31.2k | const __m128i t = _mm_loadl_epi64((const __m128i *)above); |
175 | 31.2k | const __m128i zero = _mm_setzero_si128(); |
176 | 31.2k | const __m128i t16 = _mm_unpacklo_epi8(t, zero); |
177 | 31.2k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
178 | 31.2k | const __m128i one = _mm_set1_epi16(1); |
179 | | |
180 | 93.8k | for (int j = 0; j < 2; ++j) { |
181 | 62.5k | const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16)); |
182 | 62.5k | __m128i rep = _mm_set1_epi16((short)0x8000); |
183 | 1.06M | for (int i = 0; i < 16; ++i) { |
184 | 1.00M | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
185 | 1.00M | const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16); |
186 | | |
187 | 1.00M | _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row)); |
188 | 1.00M | dst += stride; |
189 | 1.00M | rep = _mm_add_epi16(rep, one); |
190 | 1.00M | } |
191 | 62.5k | } |
192 | 31.2k | } |
193 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
194 | | |
195 | | // Return 16 8-bit pixels in one row |
196 | | static inline __m128i paeth_16x1_pred(const __m128i *left, const __m128i *top0, |
197 | | const __m128i *top1, |
198 | 745k | const __m128i *topleft) { |
199 | 745k | const __m128i p0 = paeth_8x1_pred(left, top0, topleft); |
200 | 745k | const __m128i p1 = paeth_8x1_pred(left, top1, topleft); |
201 | 745k | return _mm_packus_epi16(p0, p1); |
202 | 745k | } |
203 | | |
204 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
205 | | void aom_paeth_predictor_16x4_ssse3(uint8_t *dst, ptrdiff_t stride, |
206 | 67.5k | const uint8_t *above, const uint8_t *left) { |
207 | 67.5k | __m128i l = _mm_cvtsi32_si128(((const int *)left)[0]); |
208 | 67.5k | const __m128i t = _mm_load_si128((const __m128i *)above); |
209 | 67.5k | const __m128i zero = _mm_setzero_si128(); |
210 | 67.5k | const __m128i top0 = _mm_unpacklo_epi8(t, zero); |
211 | 67.5k | const __m128i top1 = _mm_unpackhi_epi8(t, zero); |
212 | 67.5k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
213 | 67.5k | __m128i rep = _mm_set1_epi16((short)0x8000); |
214 | 67.5k | const __m128i one = _mm_set1_epi16(1); |
215 | | |
216 | 337k | for (int i = 0; i < 4; ++i) { |
217 | 270k | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
218 | 270k | const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16); |
219 | | |
220 | 270k | _mm_store_si128((__m128i *)dst, row); |
221 | 270k | dst += stride; |
222 | 270k | rep = _mm_add_epi16(rep, one); |
223 | 270k | } |
224 | 67.5k | } |
225 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
226 | | |
227 | | void aom_paeth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride, |
228 | 0 | const uint8_t *above, const uint8_t *left) { |
229 | 0 | __m128i l = _mm_loadl_epi64((const __m128i *)left); |
230 | 0 | const __m128i t = _mm_load_si128((const __m128i *)above); |
231 | 0 | const __m128i zero = _mm_setzero_si128(); |
232 | 0 | const __m128i top0 = _mm_unpacklo_epi8(t, zero); |
233 | 0 | const __m128i top1 = _mm_unpackhi_epi8(t, zero); |
234 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
235 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
236 | 0 | const __m128i one = _mm_set1_epi16(1); |
237 | |
238 | 0 | int i; |
239 | 0 | for (i = 0; i < 8; ++i) { |
240 | 0 | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
241 | 0 | const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16); |
242 | |
243 | 0 | _mm_store_si128((__m128i *)dst, row); |
244 | 0 | dst += stride; |
245 | 0 | rep = _mm_add_epi16(rep, one); |
246 | 0 | } |
247 | 0 | } |
248 | | |
249 | | void aom_paeth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
250 | | const uint8_t *above, |
251 | 0 | const uint8_t *left) { |
252 | 0 | __m128i l = _mm_load_si128((const __m128i *)left); |
253 | 0 | const __m128i t = _mm_load_si128((const __m128i *)above); |
254 | 0 | const __m128i zero = _mm_setzero_si128(); |
255 | 0 | const __m128i top0 = _mm_unpacklo_epi8(t, zero); |
256 | 0 | const __m128i top1 = _mm_unpackhi_epi8(t, zero); |
257 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
258 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
259 | 0 | const __m128i one = _mm_set1_epi16(1); |
260 | |
261 | 0 | int i; |
262 | 0 | for (i = 0; i < 16; ++i) { |
263 | 0 | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
264 | 0 | const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16); |
265 | |
266 | 0 | _mm_store_si128((__m128i *)dst, row); |
267 | 0 | dst += stride; |
268 | 0 | rep = _mm_add_epi16(rep, one); |
269 | 0 | } |
270 | 0 | } |
271 | | |
272 | | void aom_paeth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride, |
273 | | const uint8_t *above, |
274 | 0 | const uint8_t *left) { |
275 | 0 | __m128i l = _mm_load_si128((const __m128i *)left); |
276 | 0 | const __m128i t = _mm_load_si128((const __m128i *)above); |
277 | 0 | const __m128i zero = _mm_setzero_si128(); |
278 | 0 | const __m128i top0 = _mm_unpacklo_epi8(t, zero); |
279 | 0 | const __m128i top1 = _mm_unpackhi_epi8(t, zero); |
280 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
281 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
282 | 0 | const __m128i one = _mm_set1_epi16(1); |
283 | 0 | __m128i l16; |
284 | |
285 | 0 | int i; |
286 | 0 | for (i = 0; i < 16; ++i) { |
287 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
288 | 0 | const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16); |
289 | |
290 | 0 | _mm_store_si128((__m128i *)dst, row); |
291 | 0 | dst += stride; |
292 | 0 | rep = _mm_add_epi16(rep, one); |
293 | 0 | } |
294 | |
295 | 0 | l = _mm_load_si128((const __m128i *)(left + 16)); |
296 | 0 | rep = _mm_set1_epi16((short)0x8000); |
297 | 0 | for (i = 0; i < 16; ++i) { |
298 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
299 | 0 | const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16); |
300 | |
301 | 0 | _mm_store_si128((__m128i *)dst, row); |
302 | 0 | dst += stride; |
303 | 0 | rep = _mm_add_epi16(rep, one); |
304 | 0 | } |
305 | 0 | } |
306 | | |
307 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
308 | | void aom_paeth_predictor_16x64_ssse3(uint8_t *dst, ptrdiff_t stride, |
309 | | const uint8_t *above, |
310 | 0 | const uint8_t *left) { |
311 | 0 | const __m128i t = _mm_load_si128((const __m128i *)above); |
312 | 0 | const __m128i zero = _mm_setzero_si128(); |
313 | 0 | const __m128i top0 = _mm_unpacklo_epi8(t, zero); |
314 | 0 | const __m128i top1 = _mm_unpackhi_epi8(t, zero); |
315 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
316 | 0 | const __m128i one = _mm_set1_epi16(1); |
317 | |
318 | 0 | for (int j = 0; j < 4; ++j) { |
319 | 0 | const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16)); |
320 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
321 | 0 | for (int i = 0; i < 16; ++i) { |
322 | 0 | const __m128i l16 = _mm_shuffle_epi8(l, rep); |
323 | 0 | const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16); |
324 | 0 | _mm_store_si128((__m128i *)dst, row); |
325 | 0 | dst += stride; |
326 | 0 | rep = _mm_add_epi16(rep, one); |
327 | 0 | } |
328 | 0 | } |
329 | 0 | } |
330 | | |
331 | | void aom_paeth_predictor_32x8_ssse3(uint8_t *dst, ptrdiff_t stride, |
332 | 29.6k | const uint8_t *above, const uint8_t *left) { |
333 | 29.6k | const __m128i a = _mm_load_si128((const __m128i *)above); |
334 | 29.6k | const __m128i b = _mm_load_si128((const __m128i *)(above + 16)); |
335 | 29.6k | const __m128i zero = _mm_setzero_si128(); |
336 | 29.6k | const __m128i al = _mm_unpacklo_epi8(a, zero); |
337 | 29.6k | const __m128i ah = _mm_unpackhi_epi8(a, zero); |
338 | 29.6k | const __m128i bl = _mm_unpacklo_epi8(b, zero); |
339 | 29.6k | const __m128i bh = _mm_unpackhi_epi8(b, zero); |
340 | | |
341 | 29.6k | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
342 | 29.6k | __m128i rep = _mm_set1_epi16((short)0x8000); |
343 | 29.6k | const __m128i one = _mm_set1_epi16(1); |
344 | 29.6k | const __m128i l = _mm_loadl_epi64((const __m128i *)left); |
345 | 29.6k | __m128i l16; |
346 | | |
347 | 267k | for (int i = 0; i < 8; ++i) { |
348 | 237k | l16 = _mm_shuffle_epi8(l, rep); |
349 | 237k | const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16); |
350 | 237k | const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16); |
351 | | |
352 | 237k | _mm_store_si128((__m128i *)dst, r32l); |
353 | 237k | _mm_store_si128((__m128i *)(dst + 16), r32h); |
354 | 237k | dst += stride; |
355 | 237k | rep = _mm_add_epi16(rep, one); |
356 | 237k | } |
357 | 29.6k | } |
358 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
359 | | |
360 | | void aom_paeth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
361 | | const uint8_t *above, |
362 | 0 | const uint8_t *left) { |
363 | 0 | const __m128i a = _mm_load_si128((const __m128i *)above); |
364 | 0 | const __m128i b = _mm_load_si128((const __m128i *)(above + 16)); |
365 | 0 | const __m128i zero = _mm_setzero_si128(); |
366 | 0 | const __m128i al = _mm_unpacklo_epi8(a, zero); |
367 | 0 | const __m128i ah = _mm_unpackhi_epi8(a, zero); |
368 | 0 | const __m128i bl = _mm_unpacklo_epi8(b, zero); |
369 | 0 | const __m128i bh = _mm_unpackhi_epi8(b, zero); |
370 | |
371 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
372 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
373 | 0 | const __m128i one = _mm_set1_epi16(1); |
374 | 0 | __m128i l = _mm_load_si128((const __m128i *)left); |
375 | 0 | __m128i l16; |
376 | |
377 | 0 | int i; |
378 | 0 | for (i = 0; i < 16; ++i) { |
379 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
380 | 0 | const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16); |
381 | 0 | const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16); |
382 | |
383 | 0 | _mm_store_si128((__m128i *)dst, r32l); |
384 | 0 | _mm_store_si128((__m128i *)(dst + 16), r32h); |
385 | 0 | dst += stride; |
386 | 0 | rep = _mm_add_epi16(rep, one); |
387 | 0 | } |
388 | 0 | } |
389 | | |
390 | | void aom_paeth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride, |
391 | | const uint8_t *above, |
392 | 0 | const uint8_t *left) { |
393 | 0 | const __m128i a = _mm_load_si128((const __m128i *)above); |
394 | 0 | const __m128i b = _mm_load_si128((const __m128i *)(above + 16)); |
395 | 0 | const __m128i zero = _mm_setzero_si128(); |
396 | 0 | const __m128i al = _mm_unpacklo_epi8(a, zero); |
397 | 0 | const __m128i ah = _mm_unpackhi_epi8(a, zero); |
398 | 0 | const __m128i bl = _mm_unpacklo_epi8(b, zero); |
399 | 0 | const __m128i bh = _mm_unpackhi_epi8(b, zero); |
400 | |
401 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
402 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
403 | 0 | const __m128i one = _mm_set1_epi16(1); |
404 | 0 | __m128i l = _mm_load_si128((const __m128i *)left); |
405 | 0 | __m128i l16; |
406 | |
407 | 0 | int i; |
408 | 0 | for (i = 0; i < 16; ++i) { |
409 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
410 | 0 | const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16); |
411 | 0 | const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16); |
412 | |
413 | 0 | _mm_store_si128((__m128i *)dst, r32l); |
414 | 0 | _mm_store_si128((__m128i *)(dst + 16), r32h); |
415 | 0 | dst += stride; |
416 | 0 | rep = _mm_add_epi16(rep, one); |
417 | 0 | } |
418 | |
419 | 0 | rep = _mm_set1_epi16((short)0x8000); |
420 | 0 | l = _mm_load_si128((const __m128i *)(left + 16)); |
421 | 0 | for (i = 0; i < 16; ++i) { |
422 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
423 | 0 | const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16); |
424 | 0 | const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16); |
425 | |
426 | 0 | _mm_store_si128((__m128i *)dst, r32l); |
427 | 0 | _mm_store_si128((__m128i *)(dst + 16), r32h); |
428 | 0 | dst += stride; |
429 | 0 | rep = _mm_add_epi16(rep, one); |
430 | 0 | } |
431 | 0 | } |
432 | | |
433 | | void aom_paeth_predictor_32x64_ssse3(uint8_t *dst, ptrdiff_t stride, |
434 | | const uint8_t *above, |
435 | 0 | const uint8_t *left) { |
436 | 0 | const __m128i a = _mm_load_si128((const __m128i *)above); |
437 | 0 | const __m128i b = _mm_load_si128((const __m128i *)(above + 16)); |
438 | 0 | const __m128i zero = _mm_setzero_si128(); |
439 | 0 | const __m128i al = _mm_unpacklo_epi8(a, zero); |
440 | 0 | const __m128i ah = _mm_unpackhi_epi8(a, zero); |
441 | 0 | const __m128i bl = _mm_unpacklo_epi8(b, zero); |
442 | 0 | const __m128i bh = _mm_unpackhi_epi8(b, zero); |
443 | |
444 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
445 | 0 | const __m128i one = _mm_set1_epi16(1); |
446 | 0 | __m128i l16; |
447 | |
448 | 0 | int i, j; |
449 | 0 | for (j = 0; j < 4; ++j) { |
450 | 0 | const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16)); |
451 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
452 | 0 | for (i = 0; i < 16; ++i) { |
453 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
454 | 0 | const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16); |
455 | 0 | const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16); |
456 | |
457 | 0 | _mm_store_si128((__m128i *)dst, r32l); |
458 | 0 | _mm_store_si128((__m128i *)(dst + 16), r32h); |
459 | 0 | dst += stride; |
460 | 0 | rep = _mm_add_epi16(rep, one); |
461 | 0 | } |
462 | 0 | } |
463 | 0 | } |
464 | | |
465 | | void aom_paeth_predictor_64x32_ssse3(uint8_t *dst, ptrdiff_t stride, |
466 | | const uint8_t *above, |
467 | 0 | const uint8_t *left) { |
468 | 0 | const __m128i a = _mm_load_si128((const __m128i *)above); |
469 | 0 | const __m128i b = _mm_load_si128((const __m128i *)(above + 16)); |
470 | 0 | const __m128i c = _mm_load_si128((const __m128i *)(above + 32)); |
471 | 0 | const __m128i d = _mm_load_si128((const __m128i *)(above + 48)); |
472 | 0 | const __m128i zero = _mm_setzero_si128(); |
473 | 0 | const __m128i al = _mm_unpacklo_epi8(a, zero); |
474 | 0 | const __m128i ah = _mm_unpackhi_epi8(a, zero); |
475 | 0 | const __m128i bl = _mm_unpacklo_epi8(b, zero); |
476 | 0 | const __m128i bh = _mm_unpackhi_epi8(b, zero); |
477 | 0 | const __m128i cl = _mm_unpacklo_epi8(c, zero); |
478 | 0 | const __m128i ch = _mm_unpackhi_epi8(c, zero); |
479 | 0 | const __m128i dl = _mm_unpacklo_epi8(d, zero); |
480 | 0 | const __m128i dh = _mm_unpackhi_epi8(d, zero); |
481 | |
482 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
483 | 0 | const __m128i one = _mm_set1_epi16(1); |
484 | 0 | __m128i l16; |
485 | |
486 | 0 | int i, j; |
487 | 0 | for (j = 0; j < 2; ++j) { |
488 | 0 | const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16)); |
489 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
490 | 0 | for (i = 0; i < 16; ++i) { |
491 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
492 | 0 | const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16); |
493 | 0 | const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16); |
494 | 0 | const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16); |
495 | 0 | const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16); |
496 | |
497 | 0 | _mm_store_si128((__m128i *)dst, r0); |
498 | 0 | _mm_store_si128((__m128i *)(dst + 16), r1); |
499 | 0 | _mm_store_si128((__m128i *)(dst + 32), r2); |
500 | 0 | _mm_store_si128((__m128i *)(dst + 48), r3); |
501 | 0 | dst += stride; |
502 | 0 | rep = _mm_add_epi16(rep, one); |
503 | 0 | } |
504 | 0 | } |
505 | 0 | } |
506 | | |
507 | | void aom_paeth_predictor_64x64_ssse3(uint8_t *dst, ptrdiff_t stride, |
508 | | const uint8_t *above, |
509 | 0 | const uint8_t *left) { |
510 | 0 | const __m128i a = _mm_load_si128((const __m128i *)above); |
511 | 0 | const __m128i b = _mm_load_si128((const __m128i *)(above + 16)); |
512 | 0 | const __m128i c = _mm_load_si128((const __m128i *)(above + 32)); |
513 | 0 | const __m128i d = _mm_load_si128((const __m128i *)(above + 48)); |
514 | 0 | const __m128i zero = _mm_setzero_si128(); |
515 | 0 | const __m128i al = _mm_unpacklo_epi8(a, zero); |
516 | 0 | const __m128i ah = _mm_unpackhi_epi8(a, zero); |
517 | 0 | const __m128i bl = _mm_unpacklo_epi8(b, zero); |
518 | 0 | const __m128i bh = _mm_unpackhi_epi8(b, zero); |
519 | 0 | const __m128i cl = _mm_unpacklo_epi8(c, zero); |
520 | 0 | const __m128i ch = _mm_unpackhi_epi8(c, zero); |
521 | 0 | const __m128i dl = _mm_unpacklo_epi8(d, zero); |
522 | 0 | const __m128i dh = _mm_unpackhi_epi8(d, zero); |
523 | |
524 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
525 | 0 | const __m128i one = _mm_set1_epi16(1); |
526 | 0 | __m128i l16; |
527 | |
528 | 0 | int i, j; |
529 | 0 | for (j = 0; j < 4; ++j) { |
530 | 0 | const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16)); |
531 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
532 | 0 | for (i = 0; i < 16; ++i) { |
533 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
534 | 0 | const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16); |
535 | 0 | const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16); |
536 | 0 | const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16); |
537 | 0 | const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16); |
538 | |
539 | 0 | _mm_store_si128((__m128i *)dst, r0); |
540 | 0 | _mm_store_si128((__m128i *)(dst + 16), r1); |
541 | 0 | _mm_store_si128((__m128i *)(dst + 32), r2); |
542 | 0 | _mm_store_si128((__m128i *)(dst + 48), r3); |
543 | 0 | dst += stride; |
544 | 0 | rep = _mm_add_epi16(rep, one); |
545 | 0 | } |
546 | 0 | } |
547 | 0 | } |
548 | | |
549 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
550 | | void aom_paeth_predictor_64x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
551 | | const uint8_t *above, |
552 | 0 | const uint8_t *left) { |
553 | 0 | const __m128i a = _mm_load_si128((const __m128i *)above); |
554 | 0 | const __m128i b = _mm_load_si128((const __m128i *)(above + 16)); |
555 | 0 | const __m128i c = _mm_load_si128((const __m128i *)(above + 32)); |
556 | 0 | const __m128i d = _mm_load_si128((const __m128i *)(above + 48)); |
557 | 0 | const __m128i zero = _mm_setzero_si128(); |
558 | 0 | const __m128i al = _mm_unpacklo_epi8(a, zero); |
559 | 0 | const __m128i ah = _mm_unpackhi_epi8(a, zero); |
560 | 0 | const __m128i bl = _mm_unpacklo_epi8(b, zero); |
561 | 0 | const __m128i bh = _mm_unpackhi_epi8(b, zero); |
562 | 0 | const __m128i cl = _mm_unpacklo_epi8(c, zero); |
563 | 0 | const __m128i ch = _mm_unpackhi_epi8(c, zero); |
564 | 0 | const __m128i dl = _mm_unpacklo_epi8(d, zero); |
565 | 0 | const __m128i dh = _mm_unpackhi_epi8(d, zero); |
566 | |
567 | 0 | const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]); |
568 | 0 | const __m128i one = _mm_set1_epi16(1); |
569 | 0 | __m128i l16; |
570 | |
571 | 0 | int i; |
572 | 0 | const __m128i l = _mm_load_si128((const __m128i *)left); |
573 | 0 | __m128i rep = _mm_set1_epi16((short)0x8000); |
574 | 0 | for (i = 0; i < 16; ++i) { |
575 | 0 | l16 = _mm_shuffle_epi8(l, rep); |
576 | 0 | const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16); |
577 | 0 | const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16); |
578 | 0 | const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16); |
579 | 0 | const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16); |
580 | |
581 | 0 | _mm_store_si128((__m128i *)dst, r0); |
582 | 0 | _mm_store_si128((__m128i *)(dst + 16), r1); |
583 | 0 | _mm_store_si128((__m128i *)(dst + 32), r2); |
584 | 0 | _mm_store_si128((__m128i *)(dst + 48), r3); |
585 | 0 | dst += stride; |
586 | 0 | rep = _mm_add_epi16(rep, one); |
587 | 0 | } |
588 | 0 | } |
589 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
590 | | |
591 | | // ----------------------------------------------------------------------------- |
592 | | // SMOOTH_PRED |
593 | | |
594 | | // pixels[0]: above and below_pred interleave vector |
595 | | // pixels[1]: left vector |
596 | | // pixels[2]: right_pred vector |
597 | | static inline void load_pixel_w4(const uint8_t *above, const uint8_t *left, |
598 | 296k | int height, __m128i *pixels) { |
599 | 296k | __m128i d = _mm_cvtsi32_si128(((const int *)above)[0]); |
600 | 296k | if (height == 4) |
601 | 201k | pixels[1] = _mm_cvtsi32_si128(((const int *)left)[0]); |
602 | 95.0k | else if (height == 8) |
603 | 62.6k | pixels[1] = _mm_loadl_epi64(((const __m128i *)left)); |
604 | 32.4k | else |
605 | 32.4k | pixels[1] = _mm_loadu_si128(((const __m128i *)left)); |
606 | | |
607 | 296k | pixels[2] = _mm_set1_epi16((int16_t)above[3]); |
608 | | |
609 | 296k | const __m128i bp = _mm_set1_epi16((int16_t)left[height - 1]); |
610 | 296k | const __m128i zero = _mm_setzero_si128(); |
611 | 296k | d = _mm_unpacklo_epi8(d, zero); |
612 | 296k | pixels[0] = _mm_unpacklo_epi16(d, bp); |
613 | 296k | } |
614 | | |
615 | | // weight_h[0]: weight_h vector |
616 | | // weight_h[1]: scale - weight_h vector |
617 | | // weight_h[2]: same as [0], second half for height = 16 only |
618 | | // weight_h[3]: same as [1], second half for height = 16 only |
619 | | // weight_w[0]: weights_w and scale - weights_w interleave vector |
620 | | static inline void load_weight_w4(int height, __m128i *weight_h, |
621 | 296k | __m128i *weight_w) { |
622 | 296k | const __m128i zero = _mm_setzero_si128(); |
623 | 296k | const __m128i d = _mm_set1_epi16((int16_t)(1 << SMOOTH_WEIGHT_LOG2_SCALE)); |
624 | 296k | const __m128i t = _mm_cvtsi32_si128(((const int *)smooth_weights)[0]); |
625 | 296k | weight_h[0] = _mm_unpacklo_epi8(t, zero); |
626 | 296k | weight_h[1] = _mm_sub_epi16(d, weight_h[0]); |
627 | 296k | weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]); |
628 | | |
629 | 296k | if (height == 8) { |
630 | 62.6k | const __m128i weight = _mm_loadl_epi64((const __m128i *)&smooth_weights[4]); |
631 | 62.6k | weight_h[0] = _mm_unpacklo_epi8(weight, zero); |
632 | 62.6k | weight_h[1] = _mm_sub_epi16(d, weight_h[0]); |
633 | 233k | } else if (height == 16) { |
634 | 32.4k | const __m128i weight = |
635 | 32.4k | _mm_loadu_si128((const __m128i *)&smooth_weights[12]); |
636 | 32.4k | weight_h[0] = _mm_unpacklo_epi8(weight, zero); |
637 | 32.4k | weight_h[1] = _mm_sub_epi16(d, weight_h[0]); |
638 | 32.4k | weight_h[2] = _mm_unpackhi_epi8(weight, zero); |
639 | 32.4k | weight_h[3] = _mm_sub_epi16(d, weight_h[2]); |
640 | 32.4k | } |
641 | 296k | } |
642 | | |
643 | | static inline void smooth_pred_4xh(const __m128i *pixel, const __m128i *wh, |
644 | | const __m128i *ww, int h, uint8_t *dst, |
645 | 328k | ptrdiff_t stride, int second_half) { |
646 | 328k | const __m128i round = _mm_set1_epi32((1 << SMOOTH_WEIGHT_LOG2_SCALE)); |
647 | 328k | const __m128i one = _mm_set1_epi16(1); |
648 | 328k | const __m128i inc = _mm_set1_epi16(0x202); |
649 | 328k | const __m128i gat = _mm_set1_epi32(0xc080400); |
650 | 328k | __m128i rep = second_half ? _mm_set1_epi16((short)0x8008) |
651 | 328k | : _mm_set1_epi16((short)0x8000); |
652 | 328k | __m128i d = _mm_set1_epi16(0x100); |
653 | | |
654 | 2.15M | for (int i = 0; i < h; ++i) { |
655 | 1.82M | const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d); |
656 | 1.82M | const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d); |
657 | 1.82M | const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc); |
658 | 1.82M | __m128i s = _mm_madd_epi16(pixel[0], wh_sc); |
659 | | |
660 | 1.82M | __m128i b = _mm_shuffle_epi8(pixel[1], rep); |
661 | 1.82M | b = _mm_unpacklo_epi16(b, pixel[2]); |
662 | 1.82M | __m128i sum = _mm_madd_epi16(b, ww[0]); |
663 | | |
664 | 1.82M | sum = _mm_add_epi32(s, sum); |
665 | 1.82M | sum = _mm_add_epi32(sum, round); |
666 | 1.82M | sum = _mm_srai_epi32(sum, 1 + SMOOTH_WEIGHT_LOG2_SCALE); |
667 | | |
668 | 1.82M | sum = _mm_shuffle_epi8(sum, gat); |
669 | 1.82M | *(int *)dst = _mm_cvtsi128_si32(sum); |
670 | 1.82M | dst += stride; |
671 | | |
672 | 1.82M | rep = _mm_add_epi16(rep, one); |
673 | 1.82M | d = _mm_add_epi16(d, inc); |
674 | 1.82M | } |
675 | 328k | } |
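// Editorial sketch (not libaom API): the scalar sum each smooth kernel
// evaluates per pixel, assuming SMOOTH_WEIGHT_LOG2_SCALE == 8 (scale 256).
// w_h_y and w_w_x are the smooth_weights entries for the block height and
// width; bottom_left and top_right are left[h - 1] and above[w - 1].
static uint8_t smooth_scalar_sketch(int top_x, int left_y, int bottom_left,
                                    int top_right, int w_h_y, int w_w_x) {
  const int sum = w_h_y * top_x + (256 - w_h_y) * bottom_left +
                  w_w_x * left_y + (256 - w_w_x) * top_right;
  return (uint8_t)((sum + 256) >> 9);  // round = 1 << 8, shift = 1 + 8
}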
676 | | |
677 | | void aom_smooth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride, |
678 | 201k | const uint8_t *above, const uint8_t *left) { |
679 | 201k | __m128i pixels[3]; |
680 | 201k | load_pixel_w4(above, left, 4, pixels); |
681 | | |
682 | 201k | __m128i wh[4], ww[2]; |
683 | 201k | load_weight_w4(4, wh, ww); |
684 | | |
685 | 201k | smooth_pred_4xh(pixels, wh, ww, 4, dst, stride, 0); |
686 | 201k | } |
687 | | |
688 | | void aom_smooth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride, |
689 | 62.6k | const uint8_t *above, const uint8_t *left) { |
690 | 62.6k | __m128i pixels[3]; |
691 | 62.6k | load_pixel_w4(above, left, 8, pixels); |
692 | | |
693 | 62.6k | __m128i wh[4], ww[2]; |
694 | 62.6k | load_weight_w4(8, wh, ww); |
695 | | |
696 | 62.6k | smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0); |
697 | 62.6k | } |
698 | | |
699 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
700 | | void aom_smooth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
701 | | const uint8_t *above, |
702 | 32.4k | const uint8_t *left) { |
703 | 32.4k | __m128i pixels[3]; |
704 | 32.4k | load_pixel_w4(above, left, 16, pixels); |
705 | | |
706 | 32.4k | __m128i wh[4], ww[2]; |
707 | 32.4k | load_weight_w4(16, wh, ww); |
708 | | |
709 | 32.4k | smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0); |
710 | 32.4k | dst += stride << 3; |
711 | 32.4k | smooth_pred_4xh(pixels, &wh[2], ww, 8, dst, stride, 1); |
712 | 32.4k | } |
713 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
714 | | |
715 | | // pixels[0]: above and below_pred interleave vector, first half |
716 | | // pixels[1]: above and below_pred interleave vector, second half |
717 | | // pixels[2]: left vector |
718 | | // pixels[3]: right_pred vector |
719 | | // pixels[4]: copy of pixels[0], height = 32 only
720 | | // pixels[5]: copy of pixels[1], height = 32 only
721 | | // pixels[6]: left vector + 16, height = 32 only
722 | | // pixels[7]: copy of pixels[3], height = 32 only
723 | | static inline void load_pixel_w8(const uint8_t *above, const uint8_t *left, |
724 | 347k | int height, __m128i *pixels) { |
725 | 347k | const __m128i zero = _mm_setzero_si128(); |
726 | 347k | const __m128i bp = _mm_set1_epi16((int16_t)left[height - 1]); |
727 | 347k | __m128i d = _mm_loadl_epi64((const __m128i *)above); |
728 | 347k | d = _mm_unpacklo_epi8(d, zero); |
729 | 347k | pixels[0] = _mm_unpacklo_epi16(d, bp); |
730 | 347k | pixels[1] = _mm_unpackhi_epi16(d, bp); |
731 | | |
732 | 347k | pixels[3] = _mm_set1_epi16((int16_t)above[7]); |
733 | | |
734 | 347k | if (height == 4) { |
735 | 91.3k | pixels[2] = _mm_cvtsi32_si128(((const int *)left)[0]); |
736 | 255k | } else if (height == 8) { |
737 | 186k | pixels[2] = _mm_loadl_epi64((const __m128i *)left); |
738 | 186k | } else if (height == 16) { |
739 | 50.7k | pixels[2] = _mm_load_si128((const __m128i *)left); |
740 | 50.7k | } else { |
741 | 18.6k | pixels[2] = _mm_load_si128((const __m128i *)left); |
742 | 18.6k | pixels[4] = pixels[0]; |
743 | 18.6k | pixels[5] = pixels[1]; |
744 | 18.6k | pixels[6] = _mm_load_si128((const __m128i *)(left + 16)); |
745 | 18.6k | pixels[7] = pixels[3]; |
746 | 18.6k | } |
747 | 347k | } |
748 | | |
749 | | // weight_h[0]: weight_h vector |
750 | | // weight_h[1]: scale - weight_h vector |
751 | | // weight_h[2]: same as [0], offset 8 |
752 | | // weight_h[3]: same as [1], offset 8 |
753 | | // weight_h[4]: same as [0], offset 16 |
754 | | // weight_h[5]: same as [1], offset 16 |
755 | | // weight_h[6]: same as [0], offset 24 |
756 | | // weight_h[7]: same as [1], offset 24 |
757 | | // weight_w[0]: weights_w and scale - weights_w interleave vector, first half |
758 | | // weight_w[1]: weights_w and scale - weights_w interleave vector, second half |
759 | | static inline void load_weight_w8(int height, __m128i *weight_h, |
760 | 347k | __m128i *weight_w) { |
761 | 347k | const __m128i zero = _mm_setzero_si128(); |
762 | 347k | const int we_offset = height < 8 ? 0 : 4; |
763 | 347k | __m128i we = _mm_loadu_si128((const __m128i *)&smooth_weights[we_offset]); |
764 | 347k | weight_h[0] = _mm_unpacklo_epi8(we, zero); |
765 | 347k | const __m128i d = _mm_set1_epi16((int16_t)(1 << SMOOTH_WEIGHT_LOG2_SCALE)); |
766 | 347k | weight_h[1] = _mm_sub_epi16(d, weight_h[0]); |
767 | | |
768 | 347k | if (height == 4) { |
769 | 91.3k | we = _mm_srli_si128(we, 4); |
770 | 91.3k | __m128i tmp1 = _mm_unpacklo_epi8(we, zero); |
771 | 91.3k | __m128i tmp2 = _mm_sub_epi16(d, tmp1); |
772 | 91.3k | weight_w[0] = _mm_unpacklo_epi16(tmp1, tmp2); |
773 | 91.3k | weight_w[1] = _mm_unpackhi_epi16(tmp1, tmp2); |
774 | 255k | } else { |
775 | 255k | weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]); |
776 | 255k | weight_w[1] = _mm_unpackhi_epi16(weight_h[0], weight_h[1]); |
777 | 255k | } |
778 | | |
779 | 347k | if (height == 16) { |
780 | 50.7k | we = _mm_loadu_si128((const __m128i *)&smooth_weights[12]); |
781 | 50.7k | weight_h[0] = _mm_unpacklo_epi8(we, zero); |
782 | 50.7k | weight_h[1] = _mm_sub_epi16(d, weight_h[0]); |
783 | 50.7k | weight_h[2] = _mm_unpackhi_epi8(we, zero); |
784 | 50.7k | weight_h[3] = _mm_sub_epi16(d, weight_h[2]); |
785 | 296k | } else if (height == 32) { |
786 | 18.6k | const __m128i weight_lo = |
787 | 18.6k | _mm_loadu_si128((const __m128i *)&smooth_weights[28]); |
788 | 18.6k | weight_h[0] = _mm_unpacklo_epi8(weight_lo, zero); |
789 | 18.6k | weight_h[1] = _mm_sub_epi16(d, weight_h[0]); |
790 | 18.6k | weight_h[2] = _mm_unpackhi_epi8(weight_lo, zero); |
791 | 18.6k | weight_h[3] = _mm_sub_epi16(d, weight_h[2]); |
792 | 18.6k | const __m128i weight_hi = |
793 | 18.6k | _mm_loadu_si128((const __m128i *)&smooth_weights[28 + 16]); |
794 | 18.6k | weight_h[4] = _mm_unpacklo_epi8(weight_hi, zero); |
795 | 18.6k | weight_h[5] = _mm_sub_epi16(d, weight_h[4]); |
796 | 18.6k | weight_h[6] = _mm_unpackhi_epi8(weight_hi, zero); |
797 | 18.6k | weight_h[7] = _mm_sub_epi16(d, weight_h[6]); |
798 | 18.6k | } |
799 | 347k | } |
800 | | |
801 | | static inline void smooth_pred_8xh(const __m128i *pixels, const __m128i *wh, |
802 | | const __m128i *ww, int h, uint8_t *dst, |
803 | 453k | ptrdiff_t stride, int second_half) { |
804 | 453k | const __m128i round = _mm_set1_epi32((1 << SMOOTH_WEIGHT_LOG2_SCALE)); |
805 | 453k | const __m128i one = _mm_set1_epi16(1); |
806 | 453k | const __m128i inc = _mm_set1_epi16(0x202); |
807 | 453k | const __m128i gat = _mm_set_epi32(0, 0, 0xe0c0a08, 0x6040200); |
808 | | |
809 | 453k | __m128i rep = second_half ? _mm_set1_epi16((short)0x8008) |
810 | 453k | : _mm_set1_epi16((short)0x8000); |
811 | 453k | __m128i d = _mm_set1_epi16(0x100); |
812 | | |
813 | 453k | int i; |
814 | 3.71M | for (i = 0; i < h; ++i) { |
815 | 3.26M | const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d); |
816 | 3.26M | const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d); |
817 | 3.26M | const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc); |
818 | 3.26M | __m128i s0 = _mm_madd_epi16(pixels[0], wh_sc); |
819 | 3.26M | __m128i s1 = _mm_madd_epi16(pixels[1], wh_sc); |
820 | | |
821 | 3.26M | __m128i b = _mm_shuffle_epi8(pixels[2], rep); |
822 | 3.26M | b = _mm_unpacklo_epi16(b, pixels[3]); |
823 | 3.26M | __m128i sum0 = _mm_madd_epi16(b, ww[0]); |
824 | 3.26M | __m128i sum1 = _mm_madd_epi16(b, ww[1]); |
825 | | |
826 | 3.26M | s0 = _mm_add_epi32(s0, sum0); |
827 | 3.26M | s0 = _mm_add_epi32(s0, round); |
828 | 3.26M | s0 = _mm_srai_epi32(s0, 1 + SMOOTH_WEIGHT_LOG2_SCALE); |
829 | | |
830 | 3.26M | s1 = _mm_add_epi32(s1, sum1); |
831 | 3.26M | s1 = _mm_add_epi32(s1, round); |
832 | 3.26M | s1 = _mm_srai_epi32(s1, 1 + SMOOTH_WEIGHT_LOG2_SCALE); |
833 | | |
834 | 3.26M | sum0 = _mm_packus_epi16(s0, s1); |
835 | 3.26M | sum0 = _mm_shuffle_epi8(sum0, gat); |
836 | 3.26M | _mm_storel_epi64((__m128i *)dst, sum0); |
837 | 3.26M | dst += stride; |
838 | | |
839 | 3.26M | rep = _mm_add_epi16(rep, one); |
840 | 3.26M | d = _mm_add_epi16(d, inc); |
841 | 3.26M | } |
842 | 453k | } |
843 | | |
844 | | void aom_smooth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride, |
845 | 91.3k | const uint8_t *above, const uint8_t *left) { |
846 | 91.3k | __m128i pixels[4]; |
847 | 91.3k | load_pixel_w8(above, left, 4, pixels); |
848 | | |
849 | 91.3k | __m128i wh[4], ww[2]; |
850 | 91.3k | load_weight_w8(4, wh, ww); |
851 | | |
852 | 91.3k | smooth_pred_8xh(pixels, wh, ww, 4, dst, stride, 0); |
853 | 91.3k | } |
854 | | |
855 | | void aom_smooth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride, |
856 | 186k | const uint8_t *above, const uint8_t *left) { |
857 | 186k | __m128i pixels[4]; |
858 | 186k | load_pixel_w8(above, left, 8, pixels); |
859 | | |
860 | 186k | __m128i wh[4], ww[2]; |
861 | 186k | load_weight_w8(8, wh, ww); |
862 | | |
863 | 186k | smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0); |
864 | 186k | } |
865 | | |
866 | | void aom_smooth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
867 | | const uint8_t *above, |
868 | 50.7k | const uint8_t *left) { |
869 | 50.7k | __m128i pixels[4]; |
870 | 50.7k | load_pixel_w8(above, left, 16, pixels); |
871 | | |
872 | 50.7k | __m128i wh[4], ww[2]; |
873 | 50.7k | load_weight_w8(16, wh, ww); |
874 | | |
875 | 50.7k | smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0); |
876 | 50.7k | dst += stride << 3; |
877 | 50.7k | smooth_pred_8xh(pixels, &wh[2], ww, 8, dst, stride, 1); |
878 | 50.7k | } |
879 | | |
880 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
881 | | void aom_smooth_predictor_8x32_ssse3(uint8_t *dst, ptrdiff_t stride, |
882 | | const uint8_t *above, |
883 | 18.6k | const uint8_t *left) { |
884 | 18.6k | __m128i pixels[8]; |
885 | 18.6k | load_pixel_w8(above, left, 32, pixels); |
886 | | |
887 | 18.6k | __m128i wh[8], ww[2]; |
888 | 18.6k | load_weight_w8(32, wh, ww); |
889 | | |
890 | 18.6k | smooth_pred_8xh(&pixels[0], wh, ww, 8, dst, stride, 0); |
891 | 18.6k | dst += stride << 3; |
892 | 18.6k | smooth_pred_8xh(&pixels[0], &wh[2], ww, 8, dst, stride, 1); |
893 | 18.6k | dst += stride << 3; |
894 | 18.6k | smooth_pred_8xh(&pixels[4], &wh[4], ww, 8, dst, stride, 0); |
895 | 18.6k | dst += stride << 3; |
896 | 18.6k | smooth_pred_8xh(&pixels[4], &wh[6], ww, 8, dst, stride, 1); |
897 | 18.6k | } |
898 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
899 | | |
900 | | // TODO(slavarnway): Visual Studio only supports restrict when /std:c11 |
901 | | // (available in 2019+) or greater is specified; __restrict can be used in that |
902 | | // case. This should be moved to rtcd and used consistently between the |
903 | | // function declarations and definitions to avoid warnings in Visual Studio |
904 | | // when defining LIBAOM_RESTRICT to restrict or __restrict. |
905 | | #if defined(_MSC_VER) |
906 | | #define LIBAOM_RESTRICT |
907 | | #else |
908 | | #define LIBAOM_RESTRICT restrict |
909 | | #endif |
910 | | |
911 | 491k | static AOM_FORCE_INLINE __m128i Load4(const void *src) { |
912 | | // With new compilers such as clang 8.0.0 we can use the new _mm_loadu_si32 |
913 | | // intrinsic. Both _mm_loadu_si32(src) and the code here are compiled into a |
914 | | // movss instruction. |
915 | | // |
916 | | // Until compiler support of _mm_loadu_si32 is widespread, use of |
917 | | // _mm_loadu_si32 is banned. |
918 | 491k | int val; |
919 | 491k | memcpy(&val, src, sizeof(val)); |
920 | 491k | return _mm_cvtsi32_si128(val); |
921 | 491k | } |
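// (Editorial note: the memcpy form also sidesteps the alignment and
// strict-aliasing pitfalls of dereferencing a cast pointer; compilers fold
// it into a single 32-bit load.)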
922 | | |
923 | 88.3M | static AOM_FORCE_INLINE __m128i LoadLo8(const void *a) { |
924 | 88.3M | return _mm_loadl_epi64((const __m128i *)(a)); |
925 | 88.3M | } |
926 | | |
927 | 815k | static AOM_FORCE_INLINE __m128i LoadUnaligned16(const void *a) { |
928 | 815k | return _mm_loadu_si128((const __m128i *)(a)); |
929 | 815k | } |
930 | | |
931 | 1.14M | static AOM_FORCE_INLINE void Store4(void *dst, const __m128i x) { |
932 | 1.14M | const int val = _mm_cvtsi128_si32(x); |
933 | 1.14M | memcpy(dst, &val, sizeof(val)); |
934 | 1.14M | } |
935 | | |
936 | 45.5M | static AOM_FORCE_INLINE void StoreLo8(void *a, const __m128i v) { |
937 | 45.5M | _mm_storel_epi64((__m128i *)(a), v); |
938 | 45.5M | } |
939 | | |
940 | 13.6M | static AOM_FORCE_INLINE void StoreUnaligned16(void *a, const __m128i v) { |
941 | 13.6M | _mm_storeu_si128((__m128i *)(a), v); |
942 | 13.6M | } |
943 | | |
944 | 89.7M | static AOM_FORCE_INLINE __m128i cvtepu8_epi16(__m128i x) { |
945 | 89.7M | return _mm_unpacklo_epi8((x), _mm_setzero_si128()); |
946 | 89.7M | } |
947 | | |
948 | 292k | static AOM_FORCE_INLINE __m128i cvtepu8_epi32(__m128i x) { |
949 | 292k | const __m128i tmp = _mm_unpacklo_epi8((x), _mm_setzero_si128()); |
950 | 292k | return _mm_unpacklo_epi16(tmp, _mm_setzero_si128()); |
951 | 292k | } |
952 | | |
953 | 43.7M | static AOM_FORCE_INLINE __m128i cvtepu16_epi32(__m128i x) { |
954 | 43.7M | return _mm_unpacklo_epi16((x), _mm_setzero_si128()); |
955 | 43.7M | } |
956 | | |
957 | | static void smooth_predictor_wxh(uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
958 | | const uint8_t *LIBAOM_RESTRICT top_row, |
959 | | const uint8_t *LIBAOM_RESTRICT left_column, |
960 | 505k | int width, int height) { |
961 | 505k | const uint8_t *const sm_weights_h = smooth_weights + height - 4; |
962 | 505k | const uint8_t *const sm_weights_w = smooth_weights + width - 4; |
963 | 505k | const __m128i zero = _mm_setzero_si128(); |
964 | 505k | const __m128i scale_value = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
965 | 505k | const __m128i bottom_left = _mm_cvtsi32_si128(left_column[height - 1]); |
966 | 505k | const __m128i top_right = _mm_set1_epi16(top_row[width - 1]); |
967 | 505k | const __m128i round = _mm_set1_epi32(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
968 | 10.8M | for (int y = 0; y < height; ++y) { |
969 | 10.3M | const __m128i weights_y = _mm_cvtsi32_si128(sm_weights_h[y]); |
970 | 10.3M | const __m128i left_y = _mm_cvtsi32_si128(left_column[y]); |
971 | 10.3M | const __m128i scale_m_weights_y = _mm_sub_epi16(scale_value, weights_y); |
972 | 10.3M | __m128i scaled_bottom_left = |
973 | 10.3M | _mm_mullo_epi16(scale_m_weights_y, bottom_left); |
974 | 10.3M | const __m128i weight_left_y = |
975 | 10.3M | _mm_shuffle_epi32(_mm_unpacklo_epi16(weights_y, left_y), 0); |
976 | 10.3M | scaled_bottom_left = _mm_add_epi32(scaled_bottom_left, round); |
977 | 10.3M | scaled_bottom_left = _mm_shuffle_epi32(scaled_bottom_left, 0); |
978 | 54.1M | for (int x = 0; x < width; x += 8) { |
979 | 43.7M | const __m128i top_x = LoadLo8(top_row + x); |
980 | 43.7M | const __m128i weights_x = LoadLo8(sm_weights_w + x); |
981 | 43.7M | const __m128i top_weights_x = _mm_unpacklo_epi8(top_x, weights_x); |
982 | 43.7M | const __m128i top_weights_x_lo = cvtepu8_epi16(top_weights_x); |
983 | 43.7M | const __m128i top_weights_x_hi = _mm_unpackhi_epi8(top_weights_x, zero); |
984 | | |
985 | | // Here opposite weights and pixels are multiplied, where the order of |
986 | | // interleaving is indicated in the names. |
987 | 43.7M | __m128i pred_lo = _mm_madd_epi16(top_weights_x_lo, weight_left_y); |
988 | 43.7M | __m128i pred_hi = _mm_madd_epi16(top_weights_x_hi, weight_left_y); |
989 | | |
990 | | // |scaled_bottom_left| is always scaled by the same weight each row, so |
991 | | // we only derive |scaled_top_right| values here. |
992 | 43.7M | const __m128i inverted_weights_x = |
993 | 43.7M | _mm_sub_epi16(scale_value, cvtepu8_epi16(weights_x)); |
994 | 43.7M | const __m128i scaled_top_right = |
995 | 43.7M | _mm_mullo_epi16(inverted_weights_x, top_right); |
996 | 43.7M | const __m128i scaled_top_right_lo = cvtepu16_epi32(scaled_top_right); |
997 | 43.7M | const __m128i scaled_top_right_hi = |
998 | 43.7M | _mm_unpackhi_epi16(scaled_top_right, zero); |
999 | 43.7M | pred_lo = _mm_add_epi32(pred_lo, scaled_bottom_left); |
1000 | 43.7M | pred_hi = _mm_add_epi32(pred_hi, scaled_bottom_left); |
1001 | 43.7M | pred_lo = _mm_add_epi32(pred_lo, scaled_top_right_lo); |
1002 | 43.7M | pred_hi = _mm_add_epi32(pred_hi, scaled_top_right_hi); |
1003 | | |
1004 | | // The round value for RightShiftWithRounding was added with |
1005 | | // |scaled_bottom_left|. |
1006 | 43.7M | pred_lo = _mm_srli_epi32(pred_lo, (1 + SMOOTH_WEIGHT_LOG2_SCALE)); |
1007 | 43.7M | pred_hi = _mm_srli_epi32(pred_hi, (1 + SMOOTH_WEIGHT_LOG2_SCALE)); |
1008 | 43.7M | const __m128i pred = _mm_packus_epi16(pred_lo, pred_hi); |
1009 | 43.7M | StoreLo8(dst + x, _mm_packus_epi16(pred, pred)); |
1010 | 43.7M | } |
1011 | 10.3M | dst += stride; |
1012 | 10.3M | } |
1013 | 505k | } |
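// Editorial note: the madd in the inner loop pairs the (top[x], w_w[x])
// lanes with the broadcast (w_h[y], left[y]) pair, producing
// top[x] * w_h[y] + w_w[x] * left[y] in each 32-bit lane. Adding
// |scaled_bottom_left| ((256 - w_h[y]) * bottom_left, which also carries the
// rounding constant) and |scaled_top_right| ((256 - w_w[x]) * top_right)
// completes the same sum as smooth_scalar_sketch above.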
1014 | | |
1015 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1016 | | void aom_smooth_predictor_16x4_ssse3(uint8_t *dst, ptrdiff_t stride, |
1017 | | const uint8_t *above, |
1018 | 62.4k | const uint8_t *left) { |
1019 | 62.4k | smooth_predictor_wxh(dst, stride, above, left, 16, 4); |
1020 | 62.4k | } |
1021 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1022 | | |
1023 | | void aom_smooth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride, |
1024 | | const uint8_t *above, |
1025 | 66.8k | const uint8_t *left) { |
1026 | 66.8k | smooth_predictor_wxh(dst, stride, above, left, 16, 8); |
1027 | 66.8k | } |
1028 | | |
1029 | | void aom_smooth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
1030 | | const uint8_t *above, |
1031 | 110k | const uint8_t *left) { |
1032 | 110k | smooth_predictor_wxh(dst, stride, above, left, 16, 16); |
1033 | 110k | } |
1034 | | |
1035 | | void aom_smooth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride, |
1036 | | const uint8_t *above, |
1037 | 28.1k | const uint8_t *left) { |
1038 | 28.1k | smooth_predictor_wxh(dst, stride, above, left, 16, 32); |
1039 | 28.1k | } |
1040 | | |
1041 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1042 | | void aom_smooth_predictor_16x64_ssse3(uint8_t *dst, ptrdiff_t stride, |
1043 | | const uint8_t *above, |
1044 | 5.19k | const uint8_t *left) { |
1045 | 5.19k | smooth_predictor_wxh(dst, stride, above, left, 16, 64); |
1046 | 5.19k | } |
1047 | | |
1048 | | void aom_smooth_predictor_32x8_ssse3(uint8_t *dst, ptrdiff_t stride, |
1049 | | const uint8_t *above, |
1050 | 43.9k | const uint8_t *left) { |
1051 | 43.9k | smooth_predictor_wxh(dst, stride, above, left, 32, 8); |
1052 | 43.9k | } |
1053 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1054 | | |
1055 | | void aom_smooth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
1056 | | const uint8_t *above, |
1057 | 28.1k | const uint8_t *left) { |
1058 | 28.1k | smooth_predictor_wxh(dst, stride, above, left, 32, 16); |
1059 | 28.1k | } |
1060 | | |
1061 | | void aom_smooth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride, |
1062 | | const uint8_t *above, |
1063 | 97.1k | const uint8_t *left) { |
1064 | 97.1k | smooth_predictor_wxh(dst, stride, above, left, 32, 32); |
1065 | 97.1k | } |
1066 | | |
1067 | | void aom_smooth_predictor_32x64_ssse3(uint8_t *dst, ptrdiff_t stride, |
1068 | | const uint8_t *above, |
1069 | 2.88k | const uint8_t *left) { |
1070 | 2.88k | smooth_predictor_wxh(dst, stride, above, left, 32, 64); |
1071 | 2.88k | } |
1072 | | |
1073 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1074 | | void aom_smooth_predictor_64x16_ssse3(uint8_t *dst, ptrdiff_t stride, |
1075 | | const uint8_t *above, |
1076 | 25.1k | const uint8_t *left) { |
1077 | 25.1k | smooth_predictor_wxh(dst, stride, above, left, 64, 16); |
1078 | 25.1k | } |
1079 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1080 | | |
1081 | | void aom_smooth_predictor_64x32_ssse3(uint8_t *dst, ptrdiff_t stride, |
1082 | | const uint8_t *above, |
1083 | 5.39k | const uint8_t *left) { |
1084 | 5.39k | smooth_predictor_wxh(dst, stride, above, left, 64, 32); |
1085 | 5.39k | } |
1086 | | |
1087 | | void aom_smooth_predictor_64x64_ssse3(uint8_t *dst, ptrdiff_t stride, |
1088 | | const uint8_t *above, |
1089 | 29.7k | const uint8_t *left) { |
1090 | 29.7k | smooth_predictor_wxh(dst, stride, above, left, 64, 64); |
1091 | 29.7k | } |
1092 | | |
1093 | | // ----------------------------------------------------------------------------- |
1094 | | // Smooth horizontal/vertical helper functions. |
1095 | | |
1096 | | // For Horizontal, pixels1 and pixels2 are the same repeated value. For |
1097 | | // Vertical, weights1 and weights2 are the same, and scaled_corner1 and |
1098 | | // scaled_corner2 are the same. |
1099 | | static AOM_FORCE_INLINE void write_smooth_directional_sum16( |
1100 | | uint8_t *LIBAOM_RESTRICT dst, const __m128i pixels1, const __m128i pixels2, |
1101 | | const __m128i weights1, const __m128i weights2, |
1102 | | const __m128i scaled_corner1, const __m128i scaled_corner2, |
1103 | 13.6M | const __m128i round) { |
1104 | 13.6M | const __m128i weighted_px1 = _mm_mullo_epi16(pixels1, weights1); |
1105 | 13.6M | const __m128i weighted_px2 = _mm_mullo_epi16(pixels2, weights2); |
1106 | 13.6M | const __m128i pred_sum1 = _mm_add_epi16(scaled_corner1, weighted_px1); |
1107 | 13.6M | const __m128i pred_sum2 = _mm_add_epi16(scaled_corner2, weighted_px2); |
1108 | | // Equivalent to RightShiftWithRounding(pred[x][y], 8). |
1109 | 13.6M | const __m128i pred1 = _mm_srli_epi16(_mm_add_epi16(pred_sum1, round), 8); |
1110 | 13.6M | const __m128i pred2 = _mm_srli_epi16(_mm_add_epi16(pred_sum2, round), 8); |
1111 | 13.6M | StoreUnaligned16(dst, _mm_packus_epi16(pred1, pred2)); |
1112 | 13.6M | } |
1113 | | |
1114 | | static AOM_FORCE_INLINE __m128i smooth_directional_sum8( |
1115 | 1.79M | const __m128i pixels, const __m128i weights, const __m128i scaled_corner) { |
1116 | 1.79M | const __m128i weighted_px = _mm_mullo_epi16(pixels, weights); |
1117 | 1.79M | return _mm_add_epi16(scaled_corner, weighted_px); |
1118 | 1.79M | } |
1119 | | |
1120 | | static AOM_FORCE_INLINE void write_smooth_directional_sum8( |
1121 | | uint8_t *LIBAOM_RESTRICT dst, const __m128i *pixels, const __m128i *weights, |
1122 | 1.79M | const __m128i *scaled_corner, const __m128i *round) { |
1123 | 1.79M | const __m128i pred_sum = |
1124 | 1.79M | smooth_directional_sum8(*pixels, *weights, *scaled_corner); |
1125 | | // Equivalent to RightShiftWithRounding(pred[x][y], 8). |
1126 | 1.79M | const __m128i pred = _mm_srli_epi16(_mm_add_epi16(pred_sum, *round), 8); |
1127 | 1.79M | StoreLo8(dst, _mm_packus_epi16(pred, pred)); |
1128 | 1.79M | } |
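// Editorial sketch of the scalar form both helpers reduce to, assuming the
// 256 weight scale: pred = (scaled_corner + pixel * weight + 128) >> 8.
// For SMOOTH_V the weight varies with y and scaled_corner is
// (256 - w[y]) * bottom_left; for SMOOTH_H pixel and weight swap roles and
// scaled_corner is (256 - w[x]) * top_right.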
1129 | | |
1130 | | // ----------------------------------------------------------------------------- |
1131 | | // SMOOTH_V_PRED |
1132 | | |
1133 | | static AOM_FORCE_INLINE void load_smooth_vertical_pixels4( |
1134 | | const uint8_t *LIBAOM_RESTRICT above, const uint8_t *LIBAOM_RESTRICT left, |
1135 | 62.4k | const int height, __m128i *pixels) { |
1136 | 62.4k | __m128i top = Load4(above); |
1137 | 62.4k | const __m128i bottom_left = _mm_set1_epi16(left[height - 1]); |
1138 | 62.4k | top = cvtepu8_epi16(top); |
1139 | 62.4k | pixels[0] = _mm_unpacklo_epi16(top, bottom_left); |
1140 | 62.4k | } |
1141 | | |
1142 | | // |weight_array| alternates weight vectors from the table with their inverted |
1143 | | // (256-w) counterparts. This is precomputed by the compiler when the weights |
1144 | | // table is visible to this module. Removing this visibility can cut speed by up |
1145 | | // to half in both 4xH and 8xH transforms. |
1146 | | static AOM_FORCE_INLINE void load_smooth_vertical_weights4( |
1147 | | const uint8_t *LIBAOM_RESTRICT weight_array, const int height, |
1148 | 62.4k | __m128i *weights) { |
1149 | 62.4k | const __m128i inverter = _mm_set1_epi16(256); |
1150 | | |
1151 | 62.4k | if (height == 4) { |
1152 | 39.3k | const __m128i weight = Load4(weight_array); |
1153 | 39.3k | weights[0] = cvtepu8_epi16(weight); |
1154 | 39.3k | weights[1] = _mm_sub_epi16(inverter, weights[0]); |
1155 | 39.3k | } else if (height == 8) { |
1156 | 13.4k | const __m128i weight = LoadLo8(weight_array + 4); |
1157 | 13.4k | weights[0] = cvtepu8_epi16(weight); |
1158 | 13.4k | weights[1] = _mm_sub_epi16(inverter, weights[0]); |
1159 | 13.4k | } else { |
1160 | 9.59k | const __m128i weight = LoadUnaligned16(weight_array + 12); |
1161 | 9.59k | const __m128i zero = _mm_setzero_si128(); |
1162 | 9.59k | weights[0] = cvtepu8_epi16(weight); |
1163 | 9.59k | weights[1] = _mm_sub_epi16(inverter, weights[0]); |
1164 | 9.59k | weights[2] = _mm_unpackhi_epi8(weight, zero); |
1165 | 9.59k | weights[3] = _mm_sub_epi16(inverter, weights[2]); |
1166 | 9.59k | } |
1167 | 62.4k | } |
1168 | | |
1169 | | static AOM_FORCE_INLINE void write_smooth_vertical4xh( |
1170 | | const __m128i *pixel, const __m128i *weight, const int height, |
1171 | 72.0k | uint8_t *LIBAOM_RESTRICT dst, const ptrdiff_t stride) { |
1172 | 72.0k | const __m128i pred_round = _mm_set1_epi32(128); |
1173 | 72.0k | const __m128i mask_increment = _mm_set1_epi16(0x0202); |
1174 | 72.0k | const __m128i cvtepu8_epi32 = _mm_set1_epi32(0xC080400); |
1175 | 72.0k | __m128i y_select = _mm_set1_epi16(0x0100); |
1176 | | |
1177 | 490k | for (int y = 0; y < height; ++y) { |
1178 | 418k | const __m128i weight_y = _mm_shuffle_epi8(weight[0], y_select); |
1179 | 418k | const __m128i inverted_weight_y = _mm_shuffle_epi8(weight[1], y_select); |
1180 | 418k | const __m128i alternate_weights = |
1181 | 418k | _mm_unpacklo_epi16(weight_y, inverted_weight_y); |
1182 | | // Here the pixel vector is top_row[0], bottom_left, top_row[1], bottom_left,
1183 | | // ... and the madd instruction yields four results of the form:
1184 | | // (top_row[x] * weight[y] + bottom_left * inverted_weight[y])
1185 | 418k | __m128i sum = _mm_madd_epi16(pixel[0], alternate_weights); |
1186 | 418k | sum = _mm_add_epi32(sum, pred_round); |
1187 | 418k | sum = _mm_srai_epi32(sum, 8); |
1188 | 418k | sum = _mm_shuffle_epi8(sum, cvtepi32_epi8);
1189 | 418k | Store4(dst, sum); |
1190 | 418k | dst += stride; |
1191 | 418k | y_select = _mm_add_epi16(y_select, mask_increment); |
1192 | 418k | } |
1193 | 72.0k | } |
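 | |
 | | // [Editorial sketch, not part of libaom.] The loop above, written as scalar
 | | // C under the same assumptions (SMOOTH_WEIGHT_LOG2_SCALE == 8, |w| holding
 | | // the weights chosen by load_smooth_vertical_weights4):
 | | static inline void smooth_vertical_4xh_scalar(const uint8_t *top,
 | |                                               uint8_t bottom_left,
 | |                                               const uint8_t *w, int height,
 | |                                               uint8_t *dst, ptrdiff_t stride) {
 | |   for (int y = 0; y < height; ++y) {
 | |     for (int x = 0; x < 4; ++x) {
 | |       // madd pairs top_row[x] * w[y] with bottom_left * (256 - w[y]).
 | |       const uint32_t sum = (uint32_t)top[x] * w[y] +
 | |                            (uint32_t)bottom_left * (256u - w[y]);
 | |       dst[x] = (uint8_t)((sum + 128) >> 8);
 | |     }
 | |     dst += stride;
 | |   }
 | | }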
1194 | | |
1195 | | void aom_smooth_v_predictor_4x4_ssse3( |
1196 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1197 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1198 | 39.3k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1199 | 39.3k | __m128i pixels; |
1200 | 39.3k | load_smooth_vertical_pixels4(top_row, left_column, 4, &pixels); |
1201 | | |
1202 | 39.3k | __m128i weights[2]; |
1203 | 39.3k | load_smooth_vertical_weights4(smooth_weights, 4, weights); |
1204 | | |
1205 | 39.3k | write_smooth_vertical4xh(&pixels, weights, 4, dst, stride); |
1206 | 39.3k | } |
1207 | | |
1208 | | void aom_smooth_v_predictor_4x8_ssse3( |
1209 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1210 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1211 | 13.4k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1212 | 13.4k | __m128i pixels; |
1213 | 13.4k | load_smooth_vertical_pixels4(top_row, left_column, 8, &pixels); |
1214 | | |
1215 | 13.4k | __m128i weights[2]; |
1216 | 13.4k | load_smooth_vertical_weights4(smooth_weights, 8, weights); |
1217 | | |
1218 | 13.4k | write_smooth_vertical4xh(&pixels, weights, 8, dst, stride); |
1219 | 13.4k | } |
1220 | | |
1221 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1222 | | void aom_smooth_v_predictor_4x16_ssse3( |
1223 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1224 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1225 | 9.59k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1226 | 9.59k | __m128i pixels; |
1227 | 9.59k | load_smooth_vertical_pixels4(top_row, left_column, 16, &pixels); |
1228 | | |
1229 | 9.59k | __m128i weights[4]; |
1230 | 9.59k | load_smooth_vertical_weights4(smooth_weights, 16, weights); |
1231 | | |
1232 | 9.59k | write_smooth_vertical4xh(&pixels, weights, 8, dst, stride); |
1233 | 9.59k | dst += stride << 3; |
1234 | 9.59k | write_smooth_vertical4xh(&pixels, &weights[2], 8, dst, stride); |
1235 | 9.59k | } |
1236 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1237 | | |
1238 | | void aom_smooth_v_predictor_8x4_ssse3( |
1239 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1240 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1241 | 19.6k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1242 | 19.6k | const __m128i bottom_left = _mm_set1_epi16(left_column[3]); |
1243 | 19.6k | const __m128i weights = cvtepu8_epi16(Load4(smooth_weights)); |
1244 | 19.6k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1245 | 19.6k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
1246 | 19.6k | const __m128i scaled_bottom_left = |
1247 | 19.6k | _mm_mullo_epi16(inverted_weights, bottom_left); |
1248 | 19.6k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1249 | 19.6k | __m128i y_select = _mm_set1_epi32(0x01000100); |
1250 | 19.6k | const __m128i top = cvtepu8_epi16(LoadLo8(top_row)); |
1251 | 19.6k | __m128i weights_y = _mm_shuffle_epi8(weights, y_select); |
1252 | 19.6k | __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1253 | 19.6k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1254 | 19.6k | &round); |
1255 | 19.6k | dst += stride; |
1256 | 19.6k | y_select = _mm_set1_epi32(0x03020302); |
1257 | 19.6k | weights_y = _mm_shuffle_epi8(weights, y_select); |
1258 | 19.6k | scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1259 | 19.6k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1260 | 19.6k | &round); |
1261 | 19.6k | dst += stride; |
1262 | 19.6k | y_select = _mm_set1_epi32(0x05040504); |
1263 | 19.6k | weights_y = _mm_shuffle_epi8(weights, y_select); |
1264 | 19.6k | scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1265 | 19.6k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1266 | 19.6k | &round); |
1267 | 19.6k | dst += stride; |
1268 | 19.6k | y_select = _mm_set1_epi32(0x07060706); |
1269 | 19.6k | weights_y = _mm_shuffle_epi8(weights, y_select); |
1270 | 19.6k | scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1271 | 19.6k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1272 | 19.6k | &round); |
1273 | 19.6k | } |
1274 | | |
1275 | | void aom_smooth_v_predictor_8x8_ssse3( |
1276 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1277 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1278 | 42.5k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1279 | 42.5k | const __m128i bottom_left = _mm_set1_epi16(left_column[7]); |
1280 | 42.5k | const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4)); |
1281 | 42.5k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1282 | 42.5k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
1283 | 42.5k | const __m128i scaled_bottom_left = |
1284 | 42.5k | _mm_mullo_epi16(inverted_weights, bottom_left); |
1285 | 42.5k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1286 | 42.5k | const __m128i top = cvtepu8_epi16(LoadLo8(top_row)); |
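 | | // y_mask steps through 0x01000100, 0x03020302, ..., 0x0F0E0F0E; broadcast and
 | | // fed to _mm_shuffle_epi8, each value replicates 16-bit lane y of |weights|
 | | // and |scaled_bottom_left| across the register, one row per iteration. The
 | | // same idiom recurs in every predictor below.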
1287 | 383k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1288 | 340k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1289 | 340k | const __m128i weights_y = _mm_shuffle_epi8(weights, y_select); |
1290 | 340k | const __m128i scaled_bottom_left_y = |
1291 | 340k | _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1292 | 340k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1293 | 340k | &round); |
1294 | 340k | dst += stride; |
1295 | 340k | } |
1296 | 42.5k | } |
1297 | | |
1298 | | void aom_smooth_v_predictor_8x16_ssse3( |
1299 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1300 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1301 | 13.2k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1302 | 13.2k | const __m128i bottom_left = _mm_set1_epi16(left_column[15]); |
1303 | 13.2k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
1304 | | |
1305 | 13.2k | const __m128i weights1 = cvtepu8_epi16(weights); |
1306 | 13.2k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8)); |
1307 | 13.2k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1308 | 13.2k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
1309 | 13.2k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
1310 | 13.2k | const __m128i scaled_bottom_left1 = |
1311 | 13.2k | _mm_mullo_epi16(inverted_weights1, bottom_left); |
1312 | 13.2k | const __m128i scaled_bottom_left2 = |
1313 | 13.2k | _mm_mullo_epi16(inverted_weights2, bottom_left); |
1314 | 13.2k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1315 | 13.2k | const __m128i top = cvtepu8_epi16(LoadLo8(top_row)); |
1316 | 119k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1317 | 106k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1318 | 106k | const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select); |
1319 | 106k | const __m128i scaled_bottom_left_y = |
1320 | 106k | _mm_shuffle_epi8(scaled_bottom_left1, y_select); |
1321 | 106k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1322 | 106k | &round); |
1323 | 106k | dst += stride; |
1324 | 106k | } |
1325 | 119k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1326 | 106k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1327 | 106k | const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select); |
1328 | 106k | const __m128i scaled_bottom_left_y = |
1329 | 106k | _mm_shuffle_epi8(scaled_bottom_left2, y_select); |
1330 | 106k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1331 | 106k | &round); |
1332 | 106k | dst += stride; |
1333 | 106k | } |
1334 | 13.2k | } |
1335 | | |
1336 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1337 | | void aom_smooth_v_predictor_8x32_ssse3( |
1338 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1339 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1340 | 4.73k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1341 | 4.73k | const __m128i zero = _mm_setzero_si128(); |
1342 | 4.73k | const __m128i bottom_left = _mm_set1_epi16(left_column[31]); |
1343 | 4.73k | const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28); |
1344 | 4.73k | const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44); |
1345 | 4.73k | const __m128i weights1 = cvtepu8_epi16(weights_lo); |
1346 | 4.73k | const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero); |
1347 | 4.73k | const __m128i weights3 = cvtepu8_epi16(weights_hi); |
1348 | 4.73k | const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero); |
1349 | 4.73k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1350 | 4.73k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
1351 | 4.73k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
1352 | 4.73k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
1353 | 4.73k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
1354 | 4.73k | const __m128i scaled_bottom_left1 = |
1355 | 4.73k | _mm_mullo_epi16(inverted_weights1, bottom_left); |
1356 | 4.73k | const __m128i scaled_bottom_left2 = |
1357 | 4.73k | _mm_mullo_epi16(inverted_weights2, bottom_left); |
1358 | 4.73k | const __m128i scaled_bottom_left3 = |
1359 | 4.73k | _mm_mullo_epi16(inverted_weights3, bottom_left); |
1360 | 4.73k | const __m128i scaled_bottom_left4 = |
1361 | 4.73k | _mm_mullo_epi16(inverted_weights4, bottom_left); |
1362 | 4.73k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1363 | 4.73k | const __m128i top = cvtepu8_epi16(LoadLo8(top_row)); |
1364 | 42.6k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1365 | 37.8k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1366 | 37.8k | const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select); |
1367 | 37.8k | const __m128i scaled_bottom_left_y = |
1368 | 37.8k | _mm_shuffle_epi8(scaled_bottom_left1, y_select); |
1369 | 37.8k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1370 | 37.8k | &round); |
1371 | 37.8k | dst += stride; |
1372 | 37.8k | } |
1373 | 42.6k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1374 | 37.8k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1375 | 37.8k | const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select); |
1376 | 37.8k | const __m128i scaled_bottom_left_y = |
1377 | 37.8k | _mm_shuffle_epi8(scaled_bottom_left2, y_select); |
1378 | 37.8k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1379 | 37.8k | &round); |
1380 | 37.8k | dst += stride; |
1381 | 37.8k | } |
1382 | 42.6k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1383 | 37.8k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1384 | 37.8k | const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select); |
1385 | 37.8k | const __m128i scaled_bottom_left_y = |
1386 | 37.8k | _mm_shuffle_epi8(scaled_bottom_left3, y_select); |
1387 | 37.8k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1388 | 37.8k | &round); |
1389 | 37.8k | dst += stride; |
1390 | 37.8k | } |
1391 | 42.6k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1392 | 37.8k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1393 | 37.8k | const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select); |
1394 | 37.8k | const __m128i scaled_bottom_left_y = |
1395 | 37.8k | _mm_shuffle_epi8(scaled_bottom_left4, y_select); |
1396 | 37.8k | write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y, |
1397 | 37.8k | &round); |
1398 | 37.8k | dst += stride; |
1399 | 37.8k | } |
1400 | 4.73k | } |
1401 | | |
1402 | | void aom_smooth_v_predictor_16x4_ssse3( |
1403 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1404 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1405 | 18.3k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1406 | 18.3k | const __m128i bottom_left = _mm_set1_epi16(left_column[3]); |
1407 | 18.3k | const __m128i weights = cvtepu8_epi16(Load4(smooth_weights)); |
1408 | 18.3k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1409 | 18.3k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
1410 | 18.3k | const __m128i scaled_bottom_left = |
1411 | 18.3k | _mm_mullo_epi16(inverted_weights, bottom_left); |
1412 | 18.3k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1413 | 18.3k | const __m128i top = LoadUnaligned16(top_row); |
1414 | 18.3k | const __m128i top_lo = cvtepu8_epi16(top); |
1415 | 18.3k | const __m128i top_hi = cvtepu8_epi16(_mm_srli_si128(top, 8)); |
1416 | | |
1417 | 18.3k | __m128i y_select = _mm_set1_epi32(0x01000100); |
1418 | 18.3k | __m128i weights_y = _mm_shuffle_epi8(weights, y_select); |
1419 | 18.3k | __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1420 | 18.3k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1421 | 18.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1422 | 18.3k | round); |
1423 | 18.3k | dst += stride; |
1424 | 18.3k | y_select = _mm_set1_epi32(0x03020302); |
1425 | 18.3k | weights_y = _mm_shuffle_epi8(weights, y_select); |
1426 | 18.3k | scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1427 | 18.3k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1428 | 18.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1429 | 18.3k | round); |
1430 | 18.3k | dst += stride; |
1431 | 18.3k | y_select = _mm_set1_epi32(0x05040504); |
1432 | 18.3k | weights_y = _mm_shuffle_epi8(weights, y_select); |
1433 | 18.3k | scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1434 | 18.3k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1435 | 18.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1436 | 18.3k | round); |
1437 | 18.3k | dst += stride; |
1438 | 18.3k | y_select = _mm_set1_epi32(0x07060706); |
1439 | 18.3k | weights_y = _mm_shuffle_epi8(weights, y_select); |
1440 | 18.3k | scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1441 | 18.3k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1442 | 18.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1443 | 18.3k | round); |
1444 | 18.3k | } |
1445 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1446 | | |
1447 | | void aom_smooth_v_predictor_16x8_ssse3( |
1448 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1449 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1450 | 14.0k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1451 | 14.0k | const __m128i bottom_left = _mm_set1_epi16(left_column[7]); |
1452 | 14.0k | const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4)); |
1453 | 14.0k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1454 | 14.0k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
1455 | 14.0k | const __m128i scaled_bottom_left = |
1456 | 14.0k | _mm_mullo_epi16(inverted_weights, bottom_left); |
1457 | 14.0k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1458 | 14.0k | const __m128i top = LoadUnaligned16(top_row); |
1459 | 14.0k | const __m128i top_lo = cvtepu8_epi16(top); |
1460 | 14.0k | const __m128i top_hi = cvtepu8_epi16(_mm_srli_si128(top, 8)); |
1461 | 126k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1462 | 112k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1463 | 112k | const __m128i weights_y = _mm_shuffle_epi8(weights, y_select); |
1464 | 112k | const __m128i scaled_bottom_left_y = |
1465 | 112k | _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1466 | 112k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1467 | 112k | scaled_bottom_left_y, scaled_bottom_left_y, |
1468 | 112k | round); |
1469 | 112k | dst += stride; |
1470 | 112k | } |
1471 | 14.0k | } |
1472 | | |
1473 | | void aom_smooth_v_predictor_16x16_ssse3( |
1474 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1475 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1476 | 33.2k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1477 | 33.2k | const __m128i bottom_left = _mm_set1_epi16(left_column[15]); |
1478 | 33.2k | const __m128i zero = _mm_setzero_si128(); |
1479 | 33.2k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1480 | 33.2k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
1481 | 33.2k | const __m128i weights_lo = cvtepu8_epi16(weights); |
1482 | 33.2k | const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero); |
1483 | 33.2k | const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo); |
1484 | 33.2k | const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi); |
1485 | 33.2k | const __m128i scaled_bottom_left_lo = |
1486 | 33.2k | _mm_mullo_epi16(inverted_weights_lo, bottom_left); |
1487 | 33.2k | const __m128i scaled_bottom_left_hi = |
1488 | 33.2k | _mm_mullo_epi16(inverted_weights_hi, bottom_left); |
1489 | 33.2k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1490 | | |
1491 | 33.2k | const __m128i top = LoadUnaligned16(top_row); |
1492 | 33.2k | const __m128i top_lo = cvtepu8_epi16(top); |
1493 | 33.2k | const __m128i top_hi = _mm_unpackhi_epi8(top, zero); |
1494 | 299k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1495 | 266k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1496 | 266k | const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select); |
1497 | 266k | const __m128i scaled_bottom_left_y = |
1498 | 266k | _mm_shuffle_epi8(scaled_bottom_left_lo, y_select); |
1499 | 266k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1500 | 266k | scaled_bottom_left_y, scaled_bottom_left_y, |
1501 | 266k | round); |
1502 | 266k | dst += stride; |
1503 | 266k | } |
1504 | 299k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1505 | 266k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1506 | 266k | const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select); |
1507 | 266k | const __m128i scaled_bottom_left_y = |
1508 | 266k | _mm_shuffle_epi8(scaled_bottom_left_hi, y_select); |
1509 | 266k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1510 | 266k | scaled_bottom_left_y, scaled_bottom_left_y, |
1511 | 266k | round); |
1512 | 266k | dst += stride; |
1513 | 266k | } |
1514 | 33.2k | } |
1515 | | |
1516 | | void aom_smooth_v_predictor_16x32_ssse3( |
1517 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1518 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1519 | 9.09k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1520 | 9.09k | const __m128i bottom_left = _mm_set1_epi16(left_column[31]); |
1521 | 9.09k | const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28); |
1522 | 9.09k | const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44); |
1523 | 9.09k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1524 | 9.09k | const __m128i zero = _mm_setzero_si128(); |
1525 | 9.09k | const __m128i weights1 = cvtepu8_epi16(weights_lo); |
1526 | 9.09k | const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero); |
1527 | 9.09k | const __m128i weights3 = cvtepu8_epi16(weights_hi); |
1528 | 9.09k | const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero); |
1529 | 9.09k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
1530 | 9.09k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
1531 | 9.09k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
1532 | 9.09k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
1533 | 9.09k | const __m128i scaled_bottom_left1 = |
1534 | 9.09k | _mm_mullo_epi16(inverted_weights1, bottom_left); |
1535 | 9.09k | const __m128i scaled_bottom_left2 = |
1536 | 9.09k | _mm_mullo_epi16(inverted_weights2, bottom_left); |
1537 | 9.09k | const __m128i scaled_bottom_left3 = |
1538 | 9.09k | _mm_mullo_epi16(inverted_weights3, bottom_left); |
1539 | 9.09k | const __m128i scaled_bottom_left4 = |
1540 | 9.09k | _mm_mullo_epi16(inverted_weights4, bottom_left); |
1541 | 9.09k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1542 | | |
1543 | 9.09k | const __m128i top = LoadUnaligned16(top_row); |
1544 | 9.09k | const __m128i top_lo = cvtepu8_epi16(top); |
1545 | 9.09k | const __m128i top_hi = _mm_unpackhi_epi8(top, zero); |
1546 | 81.8k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1547 | 72.7k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1548 | 72.7k | const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select); |
1549 | 72.7k | const __m128i scaled_bottom_left_y = |
1550 | 72.7k | _mm_shuffle_epi8(scaled_bottom_left1, y_select); |
1551 | 72.7k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1552 | 72.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1553 | 72.7k | round); |
1554 | 72.7k | dst += stride; |
1555 | 72.7k | } |
1556 | 81.8k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1557 | 72.7k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1558 | 72.7k | const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select); |
1559 | 72.7k | const __m128i scaled_bottom_left_y = |
1560 | 72.7k | _mm_shuffle_epi8(scaled_bottom_left2, y_select); |
1561 | 72.7k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1562 | 72.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1563 | 72.7k | round); |
1564 | 72.7k | dst += stride; |
1565 | 72.7k | } |
1566 | 81.8k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1567 | 72.7k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1568 | 72.7k | const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select); |
1569 | 72.7k | const __m128i scaled_bottom_left_y = |
1570 | 72.7k | _mm_shuffle_epi8(scaled_bottom_left3, y_select); |
1571 | 72.7k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1572 | 72.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1573 | 72.7k | round); |
1574 | 72.7k | dst += stride; |
1575 | 72.7k | } |
1576 | 81.8k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1577 | 72.7k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1578 | 72.7k | const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select); |
1579 | 72.7k | const __m128i scaled_bottom_left_y = |
1580 | 72.7k | _mm_shuffle_epi8(scaled_bottom_left4, y_select); |
1581 | 72.7k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1582 | 72.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1583 | 72.7k | round); |
1584 | 72.7k | dst += stride; |
1585 | 72.7k | } |
1586 | 9.09k | } |
1587 | | |
1588 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1589 | | void aom_smooth_v_predictor_16x64_ssse3( |
1590 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1591 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1592 | 1.77k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1593 | 1.77k | const __m128i bottom_left = _mm_set1_epi16(left_column[63]); |
1594 | 1.77k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1595 | 1.77k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1596 | 1.77k | const __m128i zero = _mm_setzero_si128(); |
1597 | 1.77k | const __m128i top = LoadUnaligned16(top_row); |
1598 | 1.77k | const __m128i top_lo = cvtepu8_epi16(top); |
1599 | 1.77k | const __m128i top_hi = _mm_unpackhi_epi8(top, zero); |
1600 | 1.77k | const uint8_t *weights_base_ptr = smooth_weights + 60; |
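 | | // The 64 rows are produced in four 16-row chunks, reloading 16 weights per
 | | // chunk from the dimension-64 block of the table.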
1601 | 8.86k | for (int left_offset = 0; left_offset < 64; left_offset += 16) { |
1602 | 7.09k | const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset); |
1603 | 7.09k | const __m128i weights_lo = cvtepu8_epi16(weights); |
1604 | 7.09k | const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero); |
1605 | 7.09k | const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo); |
1606 | 7.09k | const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi); |
1607 | 7.09k | const __m128i scaled_bottom_left_lo = |
1608 | 7.09k | _mm_mullo_epi16(inverted_weights_lo, bottom_left); |
1609 | 7.09k | const __m128i scaled_bottom_left_hi = |
1610 | 7.09k | _mm_mullo_epi16(inverted_weights_hi, bottom_left); |
1611 | | |
1612 | 63.8k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1613 | 56.7k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1614 | 56.7k | const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select); |
1615 | 56.7k | const __m128i scaled_bottom_left_y = |
1616 | 56.7k | _mm_shuffle_epi8(scaled_bottom_left_lo, y_select); |
1617 | 56.7k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1618 | 56.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1619 | 56.7k | round); |
1620 | 56.7k | dst += stride; |
1621 | 56.7k | } |
1622 | 63.8k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1623 | 56.7k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1624 | 56.7k | const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select); |
1625 | 56.7k | const __m128i scaled_bottom_left_y = |
1626 | 56.7k | _mm_shuffle_epi8(scaled_bottom_left_hi, y_select); |
1627 | 56.7k | write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y, |
1628 | 56.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1629 | 56.7k | round); |
1630 | 56.7k | dst += stride; |
1631 | 56.7k | } |
1632 | 7.09k | } |
1633 | 1.77k | } |
1634 | | |
1635 | | void aom_smooth_v_predictor_32x8_ssse3( |
1636 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1637 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1638 | 18.8k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1639 | 18.8k | const __m128i zero = _mm_setzero_si128(); |
1640 | 18.8k | const __m128i bottom_left = _mm_set1_epi16(left_column[7]); |
1641 | 18.8k | const __m128i top_lo = LoadUnaligned16(top_row); |
1642 | 18.8k | const __m128i top_hi = LoadUnaligned16(top_row + 16); |
1643 | 18.8k | const __m128i top1 = cvtepu8_epi16(top_lo); |
1644 | 18.8k | const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero); |
1645 | 18.8k | const __m128i top3 = cvtepu8_epi16(top_hi); |
1646 | 18.8k | const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero); |
1647 | 18.8k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1648 | 18.8k | const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4)); |
1649 | 18.8k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
1650 | 18.8k | const __m128i scaled_bottom_left = |
1651 | 18.8k | _mm_mullo_epi16(inverted_weights, bottom_left); |
1652 | 18.8k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1653 | 169k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1654 | 150k | const __m128i y_select = _mm_set1_epi32(y_mask);
1655 | 150k | const __m128i weights_y = _mm_shuffle_epi8(weights, y_select); |
1656 | 150k | const __m128i scaled_bottom_left_y = |
1657 | 150k | _mm_shuffle_epi8(scaled_bottom_left, y_select); |
1658 | 150k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1659 | 150k | scaled_bottom_left_y, scaled_bottom_left_y, |
1660 | 150k | round); |
1661 | 150k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1662 | 150k | scaled_bottom_left_y, scaled_bottom_left_y, |
1663 | 150k | round); |
1664 | 150k | dst += stride; |
1665 | 150k | } |
1666 | 18.8k | } |
1667 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1668 | | |
1669 | | void aom_smooth_v_predictor_32x16_ssse3( |
1670 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1671 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1672 | 9.13k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1673 | 9.13k | const __m128i zero = _mm_setzero_si128(); |
1674 | 9.13k | const __m128i bottom_left = _mm_set1_epi16(left_column[15]); |
1675 | 9.13k | const __m128i top_lo = LoadUnaligned16(top_row); |
1676 | 9.13k | const __m128i top_hi = LoadUnaligned16(top_row + 16); |
1677 | 9.13k | const __m128i top1 = cvtepu8_epi16(top_lo); |
1678 | 9.13k | const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero); |
1679 | 9.13k | const __m128i top3 = cvtepu8_epi16(top_hi); |
1680 | 9.13k | const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero); |
1681 | 9.13k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
1682 | 9.13k | const __m128i weights1 = cvtepu8_epi16(weights); |
1683 | 9.13k | const __m128i weights2 = _mm_unpackhi_epi8(weights, zero); |
1684 | 9.13k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1685 | 9.13k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
1686 | 9.13k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
1687 | 9.13k | const __m128i scaled_bottom_left1 = |
1688 | 9.13k | _mm_mullo_epi16(inverted_weights1, bottom_left); |
1689 | 9.13k | const __m128i scaled_bottom_left2 = |
1690 | 9.13k | _mm_mullo_epi16(inverted_weights2, bottom_left); |
1691 | 9.13k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1692 | 82.1k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1693 | 73.0k | const __m128i y_select = _mm_set1_epi32(y_mask);
1694 | 73.0k | const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select); |
1695 | 73.0k | const __m128i scaled_bottom_left_y = |
1696 | 73.0k | _mm_shuffle_epi8(scaled_bottom_left1, y_select); |
1697 | 73.0k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1698 | 73.0k | scaled_bottom_left_y, scaled_bottom_left_y, |
1699 | 73.0k | round); |
1700 | 73.0k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1701 | 73.0k | scaled_bottom_left_y, scaled_bottom_left_y, |
1702 | 73.0k | round); |
1703 | 73.0k | dst += stride; |
1704 | 73.0k | } |
1705 | 82.1k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1706 | 73.0k | const __m128i y_select = _mm_set1_epi32(y_mask);
1707 | 73.0k | const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select); |
1708 | 73.0k | const __m128i scaled_bottom_left_y = |
1709 | 73.0k | _mm_shuffle_epi8(scaled_bottom_left2, y_select); |
1710 | 73.0k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1711 | 73.0k | scaled_bottom_left_y, scaled_bottom_left_y, |
1712 | 73.0k | round); |
1713 | 73.0k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1714 | 73.0k | scaled_bottom_left_y, scaled_bottom_left_y, |
1715 | 73.0k | round); |
1716 | 73.0k | dst += stride; |
1717 | 73.0k | } |
1718 | 9.13k | } |
1719 | | |
1720 | | void aom_smooth_v_predictor_32x32_ssse3( |
1721 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1722 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1723 | 39.3k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1724 | 39.3k | const __m128i bottom_left = _mm_set1_epi16(left_column[31]); |
1725 | 39.3k | const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28); |
1726 | 39.3k | const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44); |
1727 | 39.3k | const __m128i zero = _mm_setzero_si128(); |
1728 | 39.3k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1729 | 39.3k | const __m128i top_lo = LoadUnaligned16(top_row); |
1730 | 39.3k | const __m128i top_hi = LoadUnaligned16(top_row + 16); |
1731 | 39.3k | const __m128i top1 = cvtepu8_epi16(top_lo); |
1732 | 39.3k | const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero); |
1733 | 39.3k | const __m128i top3 = cvtepu8_epi16(top_hi); |
1734 | 39.3k | const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero); |
1735 | 39.3k | const __m128i weights1 = cvtepu8_epi16(weights_lo); |
1736 | 39.3k | const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero); |
1737 | 39.3k | const __m128i weights3 = cvtepu8_epi16(weights_hi); |
1738 | 39.3k | const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero); |
1739 | 39.3k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
1740 | 39.3k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
1741 | 39.3k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
1742 | 39.3k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
1743 | 39.3k | const __m128i scaled_bottom_left1 = |
1744 | 39.3k | _mm_mullo_epi16(inverted_weights1, bottom_left); |
1745 | 39.3k | const __m128i scaled_bottom_left2 = |
1746 | 39.3k | _mm_mullo_epi16(inverted_weights2, bottom_left); |
1747 | 39.3k | const __m128i scaled_bottom_left3 = |
1748 | 39.3k | _mm_mullo_epi16(inverted_weights3, bottom_left); |
1749 | 39.3k | const __m128i scaled_bottom_left4 = |
1750 | 39.3k | _mm_mullo_epi16(inverted_weights4, bottom_left); |
1751 | 39.3k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1752 | 353k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1753 | 314k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1754 | 314k | const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select); |
1755 | 314k | const __m128i scaled_bottom_left_y = |
1756 | 314k | _mm_shuffle_epi8(scaled_bottom_left1, y_select); |
1757 | 314k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1758 | 314k | scaled_bottom_left_y, scaled_bottom_left_y, |
1759 | 314k | round); |
1760 | 314k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1761 | 314k | scaled_bottom_left_y, scaled_bottom_left_y, |
1762 | 314k | round); |
1763 | 314k | dst += stride; |
1764 | 314k | } |
1765 | 353k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1766 | 314k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1767 | 314k | const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select); |
1768 | 314k | const __m128i scaled_bottom_left_y = |
1769 | 314k | _mm_shuffle_epi8(scaled_bottom_left2, y_select); |
1770 | 314k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1771 | 314k | scaled_bottom_left_y, scaled_bottom_left_y, |
1772 | 314k | round); |
1773 | 314k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1774 | 314k | scaled_bottom_left_y, scaled_bottom_left_y, |
1775 | 314k | round); |
1776 | 314k | dst += stride; |
1777 | 314k | } |
1778 | 353k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1779 | 314k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1780 | 314k | const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select); |
1781 | 314k | const __m128i scaled_bottom_left_y = |
1782 | 314k | _mm_shuffle_epi8(scaled_bottom_left3, y_select); |
1783 | 314k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1784 | 314k | scaled_bottom_left_y, scaled_bottom_left_y, |
1785 | 314k | round); |
1786 | 314k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1787 | 314k | scaled_bottom_left_y, scaled_bottom_left_y, |
1788 | 314k | round); |
1789 | 314k | dst += stride; |
1790 | 314k | } |
1791 | 353k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1792 | 314k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1793 | 314k | const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select); |
1794 | 314k | const __m128i scaled_bottom_left_y = |
1795 | 314k | _mm_shuffle_epi8(scaled_bottom_left4, y_select); |
1796 | 314k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1797 | 314k | scaled_bottom_left_y, scaled_bottom_left_y, |
1798 | 314k | round); |
1799 | 314k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1800 | 314k | scaled_bottom_left_y, scaled_bottom_left_y, |
1801 | 314k | round); |
1802 | 314k | dst += stride; |
1803 | 314k | } |
1804 | 39.3k | } |
1805 | | |
1806 | | void aom_smooth_v_predictor_32x64_ssse3( |
1807 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1808 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1809 | 741 | const uint8_t *LIBAOM_RESTRICT left_column) { |
1810 | 741 | const __m128i zero = _mm_setzero_si128(); |
1811 | 741 | const __m128i bottom_left = _mm_set1_epi16(left_column[63]); |
1812 | 741 | const __m128i top_lo = LoadUnaligned16(top_row); |
1813 | 741 | const __m128i top_hi = LoadUnaligned16(top_row + 16); |
1814 | 741 | const __m128i top1 = cvtepu8_epi16(top_lo); |
1815 | 741 | const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero); |
1816 | 741 | const __m128i top3 = cvtepu8_epi16(top_hi); |
1817 | 741 | const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero); |
1818 | 741 | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1819 | 741 | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1820 | 741 | const uint8_t *weights_base_ptr = smooth_weights + 60; |
1821 | 3.70k | for (int left_offset = 0; left_offset < 64; left_offset += 16) { |
1822 | 2.96k | const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset); |
1823 | 2.96k | const __m128i weights_lo = cvtepu8_epi16(weights); |
1824 | 2.96k | const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero); |
1825 | 2.96k | const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo); |
1826 | 2.96k | const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi); |
1827 | 2.96k | const __m128i scaled_bottom_left_lo = |
1828 | 2.96k | _mm_mullo_epi16(inverted_weights_lo, bottom_left); |
1829 | 2.96k | const __m128i scaled_bottom_left_hi = |
1830 | 2.96k | _mm_mullo_epi16(inverted_weights_hi, bottom_left); |
1831 | | |
1832 | 26.6k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1833 | 23.7k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1834 | 23.7k | const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select); |
1835 | 23.7k | const __m128i scaled_bottom_left_y = |
1836 | 23.7k | _mm_shuffle_epi8(scaled_bottom_left_lo, y_select); |
1837 | 23.7k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1838 | 23.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1839 | 23.7k | round); |
1840 | 23.7k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1841 | 23.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1842 | 23.7k | round); |
1843 | 23.7k | dst += stride; |
1844 | 23.7k | } |
1845 | 26.6k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1846 | 23.7k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1847 | 23.7k | const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select); |
1848 | 23.7k | const __m128i scaled_bottom_left_y = |
1849 | 23.7k | _mm_shuffle_epi8(scaled_bottom_left_hi, y_select); |
1850 | 23.7k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1851 | 23.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1852 | 23.7k | round); |
1853 | 23.7k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1854 | 23.7k | scaled_bottom_left_y, scaled_bottom_left_y, |
1855 | 23.7k | round); |
1856 | 23.7k | dst += stride; |
1857 | 23.7k | } |
1858 | 2.96k | } |
1859 | 741 | } |
1860 | | |
1861 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1862 | | void aom_smooth_v_predictor_64x16_ssse3( |
1863 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1864 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1865 | 13.5k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1866 | 13.5k | const __m128i bottom_left = _mm_set1_epi16(left_column[15]); |
1867 | 13.5k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1868 | 13.5k | const __m128i zero = _mm_setzero_si128(); |
1869 | 13.5k | const __m128i top_lolo = LoadUnaligned16(top_row); |
1870 | 13.5k | const __m128i top_lohi = LoadUnaligned16(top_row + 16); |
1871 | 13.5k | const __m128i top1 = cvtepu8_epi16(top_lolo); |
1872 | 13.5k | const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero); |
1873 | 13.5k | const __m128i top3 = cvtepu8_epi16(top_lohi); |
1874 | 13.5k | const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero); |
1875 | | |
1876 | 13.5k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
1877 | 13.5k | const __m128i weights1 = cvtepu8_epi16(weights); |
1878 | 13.5k | const __m128i weights2 = _mm_unpackhi_epi8(weights, zero); |
1879 | 13.5k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
1880 | 13.5k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
1881 | 13.5k | const __m128i top_hilo = LoadUnaligned16(top_row + 32); |
1882 | 13.5k | const __m128i top_hihi = LoadUnaligned16(top_row + 48); |
1883 | 13.5k | const __m128i top5 = cvtepu8_epi16(top_hilo); |
1884 | 13.5k | const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero); |
1885 | 13.5k | const __m128i top7 = cvtepu8_epi16(top_hihi); |
1886 | 13.5k | const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero); |
1887 | 13.5k | const __m128i scaled_bottom_left1 = |
1888 | 13.5k | _mm_mullo_epi16(inverted_weights1, bottom_left); |
1889 | 13.5k | const __m128i scaled_bottom_left2 = |
1890 | 13.5k | _mm_mullo_epi16(inverted_weights2, bottom_left); |
1891 | 13.5k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1892 | 121k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1893 | 108k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1894 | 108k | const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select); |
1895 | 108k | const __m128i scaled_bottom_left_y = |
1896 | 108k | _mm_shuffle_epi8(scaled_bottom_left1, y_select); |
1897 | 108k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1898 | 108k | scaled_bottom_left_y, scaled_bottom_left_y, |
1899 | 108k | round); |
1900 | 108k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1901 | 108k | scaled_bottom_left_y, scaled_bottom_left_y, |
1902 | 108k | round); |
1903 | 108k | write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y, |
1904 | 108k | scaled_bottom_left_y, scaled_bottom_left_y, |
1905 | 108k | round); |
1906 | 108k | write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y, |
1907 | 108k | scaled_bottom_left_y, scaled_bottom_left_y, |
1908 | 108k | round); |
1909 | 108k | dst += stride; |
1910 | 108k | } |
1911 | 121k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1912 | 108k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1913 | 108k | const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select); |
1914 | 108k | const __m128i scaled_bottom_left_y = |
1915 | 108k | _mm_shuffle_epi8(scaled_bottom_left2, y_select); |
1916 | 108k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1917 | 108k | scaled_bottom_left_y, scaled_bottom_left_y, |
1918 | 108k | round); |
1919 | 108k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1920 | 108k | scaled_bottom_left_y, scaled_bottom_left_y, |
1921 | 108k | round); |
1922 | 108k | write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y, |
1923 | 108k | scaled_bottom_left_y, scaled_bottom_left_y, |
1924 | 108k | round); |
1925 | 108k | write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y, |
1926 | 108k | scaled_bottom_left_y, scaled_bottom_left_y, |
1927 | 108k | round); |
1928 | 108k | dst += stride; |
1929 | 108k | } |
1930 | 13.5k | } |
1931 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1932 | | |
1933 | | void aom_smooth_v_predictor_64x32_ssse3( |
1934 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
1935 | | const uint8_t *LIBAOM_RESTRICT top_row, |
1936 | 2.41k | const uint8_t *LIBAOM_RESTRICT left_column) { |
1937 | 2.41k | const __m128i zero = _mm_setzero_si128(); |
1938 | 2.41k | const __m128i bottom_left = _mm_set1_epi16(left_column[31]); |
1939 | 2.41k | const __m128i top_lolo = LoadUnaligned16(top_row); |
1940 | 2.41k | const __m128i top_lohi = LoadUnaligned16(top_row + 16); |
1941 | 2.41k | const __m128i top1 = cvtepu8_epi16(top_lolo); |
1942 | 2.41k | const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero); |
1943 | 2.41k | const __m128i top3 = cvtepu8_epi16(top_lohi); |
1944 | 2.41k | const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero); |
1945 | 2.41k | const __m128i top_hilo = LoadUnaligned16(top_row + 32); |
1946 | 2.41k | const __m128i top_hihi = LoadUnaligned16(top_row + 48); |
1947 | 2.41k | const __m128i top5 = cvtepu8_epi16(top_hilo); |
1948 | 2.41k | const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero); |
1949 | 2.41k | const __m128i top7 = cvtepu8_epi16(top_hihi); |
1950 | 2.41k | const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero); |
1951 | 2.41k | const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28); |
1952 | 2.41k | const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44); |
1953 | 2.41k | const __m128i weights1 = cvtepu8_epi16(weights_lo); |
1954 | 2.41k | const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero); |
1955 | 2.41k | const __m128i weights3 = cvtepu8_epi16(weights_hi); |
1956 | 2.41k | const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero); |
1957 | 2.41k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
1958 | 2.41k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
1959 | 2.41k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
1960 | 2.41k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
1961 | 2.41k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
1962 | 2.41k | const __m128i scaled_bottom_left1 = |
1963 | 2.41k | _mm_mullo_epi16(inverted_weights1, bottom_left); |
1964 | 2.41k | const __m128i scaled_bottom_left2 = |
1965 | 2.41k | _mm_mullo_epi16(inverted_weights2, bottom_left); |
1966 | 2.41k | const __m128i scaled_bottom_left3 = |
1967 | 2.41k | _mm_mullo_epi16(inverted_weights3, bottom_left); |
1968 | 2.41k | const __m128i scaled_bottom_left4 = |
1969 | 2.41k | _mm_mullo_epi16(inverted_weights4, bottom_left); |
1970 | 2.41k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
1971 | | |
1972 | 21.7k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1973 | 19.3k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1974 | 19.3k | const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select); |
1975 | 19.3k | const __m128i scaled_bottom_left_y = |
1976 | 19.3k | _mm_shuffle_epi8(scaled_bottom_left1, y_select); |
1977 | 19.3k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1978 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1979 | 19.3k | round); |
1980 | 19.3k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
1981 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1982 | 19.3k | round); |
1983 | 19.3k | write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y, |
1984 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1985 | 19.3k | round); |
1986 | 19.3k | write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y, |
1987 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1988 | 19.3k | round); |
1989 | 19.3k | dst += stride; |
1990 | 19.3k | } |
1991 | 21.7k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
1992 | 19.3k | const __m128i y_select = _mm_set1_epi32(y_mask); |
1993 | 19.3k | const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select); |
1994 | 19.3k | const __m128i scaled_bottom_left_y = |
1995 | 19.3k | _mm_shuffle_epi8(scaled_bottom_left2, y_select); |
1996 | 19.3k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
1997 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
1998 | 19.3k | round); |
1999 | 19.3k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
2000 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2001 | 19.3k | round); |
2002 | 19.3k | write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y, |
2003 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2004 | 19.3k | round); |
2005 | 19.3k | write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y, |
2006 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2007 | 19.3k | round); |
2008 | 19.3k | dst += stride; |
2009 | 19.3k | } |
2010 | 21.7k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2011 | 19.3k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2012 | 19.3k | const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select); |
2013 | 19.3k | const __m128i scaled_bottom_left_y = |
2014 | 19.3k | _mm_shuffle_epi8(scaled_bottom_left3, y_select); |
2015 | 19.3k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
2016 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2017 | 19.3k | round); |
2018 | 19.3k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
2019 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2020 | 19.3k | round); |
2021 | 19.3k | write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y, |
2022 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2023 | 19.3k | round); |
2024 | 19.3k | write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y, |
2025 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2026 | 19.3k | round); |
2027 | 19.3k | dst += stride; |
2028 | 19.3k | } |
2029 | 21.7k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2030 | 19.3k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2031 | 19.3k | const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select); |
2032 | 19.3k | const __m128i scaled_bottom_left_y = |
2033 | 19.3k | _mm_shuffle_epi8(scaled_bottom_left4, y_select); |
2034 | 19.3k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
2035 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2036 | 19.3k | round); |
2037 | 19.3k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
2038 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2039 | 19.3k | round); |
2040 | 19.3k | write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y, |
2041 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2042 | 19.3k | round); |
2043 | 19.3k | write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y, |
2044 | 19.3k | scaled_bottom_left_y, scaled_bottom_left_y, |
2045 | 19.3k | round); |
2046 | 19.3k | dst += stride; |
2047 | 19.3k | } |
2048 | 2.41k | } |
2049 | | |
2050 | | void aom_smooth_v_predictor_64x64_ssse3( |
2051 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2052 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2053 | 5.79k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2054 | 5.79k | const __m128i zero = _mm_setzero_si128(); |
2055 | 5.79k | const __m128i bottom_left = _mm_set1_epi16(left_column[63]); |
2056 | 5.79k | const __m128i top_lolo = LoadUnaligned16(top_row); |
2057 | 5.79k | const __m128i top_lohi = LoadUnaligned16(top_row + 16); |
2058 | 5.79k | const __m128i top1 = cvtepu8_epi16(top_lolo); |
2059 | 5.79k | const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero); |
2060 | 5.79k | const __m128i top3 = cvtepu8_epi16(top_lohi); |
2061 | 5.79k | const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero); |
2062 | 5.79k | const __m128i top_hilo = LoadUnaligned16(top_row + 32); |
2063 | 5.79k | const __m128i top_hihi = LoadUnaligned16(top_row + 48); |
2064 | 5.79k | const __m128i top5 = cvtepu8_epi16(top_hilo); |
2065 | 5.79k | const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero); |
2066 | 5.79k | const __m128i top7 = cvtepu8_epi16(top_hihi); |
2067 | 5.79k | const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero); |
2068 | 5.79k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2069 | 5.79k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2070 | 5.79k | const uint8_t *weights_base_ptr = smooth_weights + 60; |
2071 | 28.9k | for (int left_offset = 0; left_offset < 64; left_offset += 16) { |
2072 | 23.1k | const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset); |
2073 | 23.1k | const __m128i weights_lo = cvtepu8_epi16(weights); |
2074 | 23.1k | const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero); |
2075 | 23.1k | const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo); |
2076 | 23.1k | const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi); |
2077 | 23.1k | const __m128i scaled_bottom_left_lo = |
2078 | 23.1k | _mm_mullo_epi16(inverted_weights_lo, bottom_left); |
2079 | 23.1k | const __m128i scaled_bottom_left_hi = |
2080 | 23.1k | _mm_mullo_epi16(inverted_weights_hi, bottom_left); |
2081 | 208k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2082 | 185k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2083 | 185k | const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select); |
2084 | 185k | const __m128i scaled_bottom_left_y = |
2085 | 185k | _mm_shuffle_epi8(scaled_bottom_left_lo, y_select); |
2086 | 185k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
2087 | 185k | scaled_bottom_left_y, scaled_bottom_left_y, |
2088 | 185k | round); |
2089 | 185k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
2090 | 185k | scaled_bottom_left_y, scaled_bottom_left_y, |
2091 | 185k | round); |
2092 | 185k | write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y, |
2093 | 185k | scaled_bottom_left_y, scaled_bottom_left_y, |
2094 | 185k | round); |
2095 | 185k | write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y, |
2096 | 185k | scaled_bottom_left_y, scaled_bottom_left_y, |
2097 | 185k | round); |
2098 | 185k | dst += stride; |
2099 | 185k | } |
2100 | 208k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2101 | 185k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2102 | 185k | const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select); |
2103 | 185k | const __m128i scaled_bottom_left_y = |
2104 | 185k | _mm_shuffle_epi8(scaled_bottom_left_hi, y_select); |
2105 | 185k | write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y, |
2106 | 185k | scaled_bottom_left_y, scaled_bottom_left_y, |
2107 | 185k | round); |
2108 | 185k | write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y, |
2109 | 185k | scaled_bottom_left_y, scaled_bottom_left_y, |
2110 | 185k | round); |
2111 | 185k | write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y, |
2112 | 185k | scaled_bottom_left_y, scaled_bottom_left_y, |
2113 | 185k | round); |
2114 | 185k | write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y, |
2115 | 185k | scaled_bottom_left_y, scaled_bottom_left_y, |
2116 | 185k | round); |
2117 | 185k | dst += stride; |
2118 | 185k | } |
2119 | 23.1k | } |
2120 | 5.79k | } |
2121 | | |
2122 | | // ----------------------------------------------------------------------------- |
2123 | | // SMOOTH_H_PRED |
2124 | | static AOM_FORCE_INLINE void write_smooth_horizontal_sum4( |
2125 | | uint8_t *LIBAOM_RESTRICT dst, const __m128i *left_y, const __m128i *weights, |
2126 | 723k | const __m128i *scaled_top_right, const __m128i *round) { |
2127 | 723k | const __m128i weighted_left_y = _mm_mullo_epi16(*left_y, *weights); |
2128 | 723k | const __m128i pred_sum = _mm_add_epi32(*scaled_top_right, weighted_left_y); |
2129 | | // Equivalent to RightShiftWithRounding(pred[x][y], 8). |
2130 | 723k | const __m128i pred = _mm_srli_epi32(_mm_add_epi32(pred_sum, *round), 8); |
2131 | 723k | const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400); |
2132 | 723k | Store4(dst, _mm_shuffle_epi8(pred, cvtepi32_epi8)); |
2133 | 723k | } |
2134 | | |
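`write_smooth_horizontal_sum4` is the vector form of the SMOOTH_H blend. A scalar reference for a 4-wide block, assuming SMOOTH_WEIGHT_LOG2_SCALE == 8 as the shift-by-8 comment above indicates (the helper name and `height` parameter are hypothetical):

#include <stddef.h>
#include <stdint.h>

// pred[y][x] = (w[x] * left[y] + (256 - w[x]) * top_right + 128) >> 8
static void smooth_h_4xh_scalar(uint8_t *dst, ptrdiff_t stride, int height,
                                const uint8_t *top_row,
                                const uint8_t *left_column,
                                const uint8_t *w /* 4 smooth weights */) {
  const int top_right = top_row[3];
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < 4; ++x) {
      const int sum = w[x] * left_column[y] + (256 - w[x]) * top_right;
      dst[x] = (uint8_t)((sum + 128) >> 8);
    }
    dst += stride;
  }
}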
2135 | | void aom_smooth_h_predictor_4x4_ssse3( |
2136 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2137 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2138 | 74.8k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2139 | 74.8k | const __m128i top_right = _mm_set1_epi32(top_row[3]); |
2140 | 74.8k | const __m128i left = cvtepu8_epi32(Load4(left_column)); |
2141 | 74.8k | const __m128i weights = cvtepu8_epi32(Load4(smooth_weights)); |
2142 | 74.8k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2143 | 74.8k | const __m128i inverted_weights = _mm_sub_epi32(scale, weights); |
2144 | 74.8k | const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right); |
2145 | 74.8k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2146 | 74.8k | __m128i left_y = _mm_shuffle_epi32(left, 0); |
2147 | 74.8k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2148 | 74.8k | &round); |
2149 | 74.8k | dst += stride; |
2150 | 74.8k | left_y = _mm_shuffle_epi32(left, 0x55); |
2151 | 74.8k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2152 | 74.8k | &round); |
2153 | 74.8k | dst += stride; |
2154 | 74.8k | left_y = _mm_shuffle_epi32(left, 0xaa); |
2155 | 74.8k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2156 | 74.8k | &round); |
2157 | 74.8k | dst += stride; |
2158 | 74.8k | left_y = _mm_shuffle_epi32(left, 0xff); |
2159 | 74.8k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2160 | 74.8k | &round); |
2161 | 74.8k | } |
2162 | | |
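The 4x4 body above broadcasts each left pixel with `_mm_shuffle_epi32`, whose immediate packs four 2-bit source-lane selectors: 0x00, 0x55, 0xaa and 0xff replicate dword 0, 1, 2 and 3 respectively. A small standalone demonstration (assumption: compiled separately):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  const __m128i v = _mm_setr_epi32(10, 20, 30, 40);
  const __m128i b2 = _mm_shuffle_epi32(v, 0xaa);  // 0b10101010: dword 2 everywhere
  printf("%d\n", _mm_cvtsi128_si32(b2));          // prints 30
  return 0;
}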
2163 | | void aom_smooth_h_predictor_4x8_ssse3( |
2164 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2165 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2166 | 21.1k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2167 | 21.1k | const __m128i top_right = _mm_set1_epi32(top_row[3]); |
2168 | 21.1k | const __m128i weights = cvtepu8_epi32(Load4(smooth_weights)); |
2169 | 21.1k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2170 | 21.1k | const __m128i inverted_weights = _mm_sub_epi32(scale, weights); |
2171 | 21.1k | const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right); |
2172 | 21.1k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2173 | 21.1k | __m128i left = cvtepu8_epi32(Load4(left_column)); |
2174 | 21.1k | __m128i left_y = _mm_shuffle_epi32(left, 0); |
2175 | 21.1k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2176 | 21.1k | &round); |
2177 | 21.1k | dst += stride; |
2178 | 21.1k | left_y = _mm_shuffle_epi32(left, 0x55); |
2179 | 21.1k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2180 | 21.1k | &round); |
2181 | 21.1k | dst += stride; |
2182 | 21.1k | left_y = _mm_shuffle_epi32(left, 0xaa); |
2183 | 21.1k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2184 | 21.1k | &round); |
2185 | 21.1k | dst += stride; |
2186 | 21.1k | left_y = _mm_shuffle_epi32(left, 0xff); |
2187 | 21.1k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2188 | 21.1k | &round); |
2189 | 21.1k | dst += stride; |
2190 | | |
2191 | 21.1k | left = cvtepu8_epi32(Load4(left_column + 4)); |
2192 | 21.1k | left_y = _mm_shuffle_epi32(left, 0); |
2193 | 21.1k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2194 | 21.1k | &round); |
2195 | 21.1k | dst += stride; |
2196 | 21.1k | left_y = _mm_shuffle_epi32(left, 0x55); |
2197 | 21.1k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2198 | 21.1k | &round); |
2199 | 21.1k | dst += stride; |
2200 | 21.1k | left_y = _mm_shuffle_epi32(left, 0xaa); |
2201 | 21.1k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2202 | 21.1k | &round); |
2203 | 21.1k | dst += stride; |
2204 | 21.1k | left_y = _mm_shuffle_epi32(left, 0xff); |
2205 | 21.1k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2206 | 21.1k | &round); |
2207 | 21.1k | } |
2208 | | |
2209 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
2210 | | void aom_smooth_h_predictor_4x16_ssse3( |
2211 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2212 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2213 | 15.9k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2214 | 15.9k | const __m128i top_right = _mm_set1_epi32(top_row[3]); |
2215 | 15.9k | const __m128i weights = cvtepu8_epi32(Load4(smooth_weights)); |
2216 | 15.9k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2217 | 15.9k | const __m128i inverted_weights = _mm_sub_epi32(scale, weights); |
2218 | 15.9k | const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right); |
2219 | 15.9k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2220 | 15.9k | __m128i left = cvtepu8_epi32(Load4(left_column)); |
2221 | 15.9k | __m128i left_y = _mm_shuffle_epi32(left, 0); |
2222 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2223 | 15.9k | &round); |
2224 | 15.9k | dst += stride; |
2225 | 15.9k | left_y = _mm_shuffle_epi32(left, 0x55); |
2226 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2227 | 15.9k | &round); |
2228 | 15.9k | dst += stride; |
2229 | 15.9k | left_y = _mm_shuffle_epi32(left, 0xaa); |
2230 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2231 | 15.9k | &round); |
2232 | 15.9k | dst += stride; |
2233 | 15.9k | left_y = _mm_shuffle_epi32(left, 0xff); |
2234 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2235 | 15.9k | &round); |
2236 | 15.9k | dst += stride; |
2237 | | |
2238 | 15.9k | left = cvtepu8_epi32(Load4(left_column + 4)); |
2239 | 15.9k | left_y = _mm_shuffle_epi32(left, 0); |
2240 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2241 | 15.9k | &round); |
2242 | 15.9k | dst += stride; |
2243 | 15.9k | left_y = _mm_shuffle_epi32(left, 0x55); |
2244 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2245 | 15.9k | &round); |
2246 | 15.9k | dst += stride; |
2247 | 15.9k | left_y = _mm_shuffle_epi32(left, 0xaa); |
2248 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2249 | 15.9k | &round); |
2250 | 15.9k | dst += stride; |
2251 | 15.9k | left_y = _mm_shuffle_epi32(left, 0xff); |
2252 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2253 | 15.9k | &round); |
2254 | 15.9k | dst += stride; |
2255 | | |
2256 | 15.9k | left = cvtepu8_epi32(Load4(left_column + 8)); |
2257 | 15.9k | left_y = _mm_shuffle_epi32(left, 0); |
2258 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2259 | 15.9k | &round); |
2260 | 15.9k | dst += stride; |
2261 | 15.9k | left_y = _mm_shuffle_epi32(left, 0x55); |
2262 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2263 | 15.9k | &round); |
2264 | 15.9k | dst += stride; |
2265 | 15.9k | left_y = _mm_shuffle_epi32(left, 0xaa); |
2266 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2267 | 15.9k | &round); |
2268 | 15.9k | dst += stride; |
2269 | 15.9k | left_y = _mm_shuffle_epi32(left, 0xff); |
2270 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2271 | 15.9k | &round); |
2272 | 15.9k | dst += stride; |
2273 | | |
2274 | 15.9k | left = cvtepu8_epi32(Load4(left_column + 12)); |
2275 | 15.9k | left_y = _mm_shuffle_epi32(left, 0); |
2276 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2277 | 15.9k | &round); |
2278 | 15.9k | dst += stride; |
2279 | 15.9k | left_y = _mm_shuffle_epi32(left, 0x55); |
2280 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2281 | 15.9k | &round); |
2282 | 15.9k | dst += stride; |
2283 | 15.9k | left_y = _mm_shuffle_epi32(left, 0xaa); |
2284 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2285 | 15.9k | &round); |
2286 | 15.9k | dst += stride; |
2287 | 15.9k | left_y = _mm_shuffle_epi32(left, 0xff); |
2288 | 15.9k | write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right, |
2289 | 15.9k | &round); |
2290 | 15.9k | } |
2291 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
2292 | | |
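The 4x8 and 4x16 bodies above are the 4x4 body unrolled, reloading four left pixels per group; the unrolling is forced by `_mm_shuffle_epi32`, whose immediate must be a compile-time constant. A hypothetical rolled-up equivalent built on this file's `Load4`/`cvtepu8_epi32` helpers (sketch only; `height` is assumed to be a multiple of 4):

static inline void smooth_h_4xh_rolled(uint8_t *dst, ptrdiff_t stride,
                                       int height, const uint8_t *left_column,
                                       const __m128i *weights,
                                       const __m128i *scaled_top_right,
                                       const __m128i *round) {
  for (int y = 0; y < height; y += 4) {
    const __m128i left = cvtepu8_epi32(Load4(left_column + y));
    __m128i left_y = _mm_shuffle_epi32(left, 0);  // row y: broadcast dword 0
    write_smooth_horizontal_sum4(dst, &left_y, weights, scaled_top_right, round);
    dst += stride;
    left_y = _mm_shuffle_epi32(left, 0x55);  // row y + 1
    write_smooth_horizontal_sum4(dst, &left_y, weights, scaled_top_right, round);
    dst += stride;
    left_y = _mm_shuffle_epi32(left, 0xaa);  // row y + 2
    write_smooth_horizontal_sum4(dst, &left_y, weights, scaled_top_right, round);
    dst += stride;
    left_y = _mm_shuffle_epi32(left, 0xff);  // row y + 3
    write_smooth_horizontal_sum4(dst, &left_y, weights, scaled_top_right, round);
    dst += stride;
  }
}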
2293 | | // In the write_smooth_directional_sum* helpers: for SMOOTH_H, |pixels| is the
2294 | | // left value repeated across the row; for SMOOTH_V, |pixels| is a segment of
2295 | | // the top row (or the whole top row) and |weights| is repeated instead.
2296 | | void aom_smooth_h_predictor_8x4_ssse3( |
2297 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2298 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2299 | 34.9k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2300 | 34.9k | const __m128i top_right = _mm_set1_epi16(top_row[7]); |
2301 | 34.9k | const __m128i left = cvtepu8_epi16(Load4(left_column)); |
2302 | 34.9k | const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4)); |
2303 | 34.9k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2304 | 34.9k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
2305 | 34.9k | const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right); |
2306 | 34.9k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2307 | 34.9k | __m128i y_select = _mm_set1_epi32(0x01000100); |
2308 | 34.9k | __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2309 | 34.9k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2310 | 34.9k | &round); |
2311 | 34.9k | dst += stride; |
2312 | 34.9k | y_select = _mm_set1_epi32(0x03020302); |
2313 | 34.9k | left_y = _mm_shuffle_epi8(left, y_select); |
2314 | 34.9k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2315 | 34.9k | &round); |
2316 | 34.9k | dst += stride; |
2317 | 34.9k | y_select = _mm_set1_epi32(0x05040504); |
2318 | 34.9k | left_y = _mm_shuffle_epi8(left, y_select); |
2319 | 34.9k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2320 | 34.9k | &round); |
2321 | 34.9k | dst += stride; |
2322 | 34.9k | y_select = _mm_set1_epi32(0x07060706); |
2323 | 34.9k | left_y = _mm_shuffle_epi8(left, y_select); |
2324 | 34.9k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2325 | 34.9k | &round); |
2326 | 34.9k | } |
2327 | | |
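The 8-wide paths keep `left` as eight 16-bit lanes and broadcast lane y with `_mm_shuffle_epi8`, using masks such as 0x03020302 (bytes {2, 3, 2, 3}, which select lane 1). A standalone check (assumption: compiled separately):

#include <stdio.h>
#include <tmmintrin.h>

int main(void) {
  const __m128i left = _mm_setr_epi16(11, 22, 33, 44, 55, 66, 77, 88);
  const __m128i y_select = _mm_set1_epi32(0x03020302);   // bytes {2, 3, 2, 3}
  const __m128i row = _mm_shuffle_epi8(left, y_select);  // broadcast lane 1
  printf("%d\n", _mm_extract_epi16(row, 5));             // prints 22
  return 0;
}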
2328 | | void aom_smooth_h_predictor_8x8_ssse3( |
2329 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2330 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2331 | 49.1k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2332 | 49.1k | const __m128i top_right = _mm_set1_epi16(top_row[7]); |
2333 | 49.1k | const __m128i left = cvtepu8_epi16(LoadLo8(left_column)); |
2334 | 49.1k | const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4)); |
2335 | 49.1k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2336 | 49.1k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
2337 | 49.1k | const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right); |
2338 | 49.1k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2339 | 442k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2340 | 393k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2341 | 393k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2342 | 393k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2343 | 393k | &round); |
2344 | 393k | dst += stride; |
2345 | 393k | } |
2346 | 49.1k | } |
2347 | | |
2348 | | void aom_smooth_h_predictor_8x16_ssse3( |
2349 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2350 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2351 | 17.8k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2352 | 17.8k | const __m128i top_right = _mm_set1_epi16(top_row[7]); |
2353 | 17.8k | const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4)); |
2354 | 17.8k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2355 | 17.8k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
2356 | 17.8k | const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right); |
2357 | 17.8k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2358 | 17.8k | __m128i left = cvtepu8_epi16(LoadLo8(left_column)); |
2359 | 160k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2360 | 142k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2361 | 142k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2362 | 142k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2363 | 142k | &round); |
2364 | 142k | dst += stride; |
2365 | 142k | } |
2366 | 17.8k | left = cvtepu8_epi16(LoadLo8(left_column + 8)); |
2367 | 160k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2368 | 142k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2369 | 142k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2370 | 142k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2371 | 142k | &round); |
2372 | 142k | dst += stride; |
2373 | 142k | } |
2374 | 17.8k | } |
2375 | | |
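A `pshufb` mask can only address 16 bytes, i.e. eight widened left pixels, so blocks taller than 8 reload `left` once per 8-row band; 8x16 above and 8x32 below simply repeat the same inner loop with `left_column + 8`, `+ 16`, `+ 24`. A hypothetical band-loop equivalent using this file's helpers (`height` assumed to be a multiple of 8):

static inline void smooth_h_8xh_bands(uint8_t *dst, ptrdiff_t stride,
                                      int height, const uint8_t *left_column,
                                      const __m128i *weights,
                                      const __m128i *scaled_top_right,
                                      const __m128i *round) {
  for (int band = 0; band < height; band += 8) {
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + band));
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
      const __m128i y_select = _mm_set1_epi32(y_mask);
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
      write_smooth_directional_sum8(dst, &left_y, weights, scaled_top_right,
                                    round);
      dst += stride;
    }
  }
}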
2376 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
2377 | | void aom_smooth_h_predictor_8x32_ssse3( |
2378 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2379 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2380 | 6.02k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2381 | 6.02k | const __m128i top_right = _mm_set1_epi16(top_row[7]); |
2382 | 6.02k | const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4)); |
2383 | 6.02k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2384 | 6.02k | const __m128i inverted_weights = _mm_sub_epi16(scale, weights); |
2385 | 6.02k | const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right); |
2386 | 6.02k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2387 | 6.02k | __m128i left = cvtepu8_epi16(LoadLo8(left_column)); |
2388 | 54.1k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2389 | 48.1k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2390 | 48.1k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2391 | 48.1k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2392 | 48.1k | &round); |
2393 | 48.1k | dst += stride; |
2394 | 48.1k | } |
2395 | 6.02k | left = cvtepu8_epi16(LoadLo8(left_column + 8)); |
2396 | 54.1k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2397 | 48.1k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2398 | 48.1k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2399 | 48.1k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2400 | 48.1k | &round); |
2401 | 48.1k | dst += stride; |
2402 | 48.1k | } |
2403 | 6.02k | left = cvtepu8_epi16(LoadLo8(left_column + 16)); |
2404 | 54.1k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2405 | 48.1k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2406 | 48.1k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2407 | 48.1k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2408 | 48.1k | &round); |
2409 | 48.1k | dst += stride; |
2410 | 48.1k | } |
2411 | 6.02k | left = cvtepu8_epi16(LoadLo8(left_column + 24)); |
2412 | 54.1k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2413 | 48.1k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2414 | 48.1k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2415 | 48.1k | write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right, |
2416 | 48.1k | &round); |
2417 | 48.1k | dst += stride; |
2418 | 48.1k | } |
2419 | 6.02k | } |
2420 | | |
2421 | | void aom_smooth_h_predictor_16x4_ssse3( |
2422 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2423 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2424 | 23.9k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2425 | 23.9k | const __m128i top_right = _mm_set1_epi16(top_row[15]); |
2426 | 23.9k | const __m128i left = cvtepu8_epi16(Load4(left_column)); |
2427 | 23.9k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
2428 | 23.9k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2429 | 23.9k | const __m128i weights1 = cvtepu8_epi16(weights); |
2430 | 23.9k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8)); |
2431 | 23.9k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2432 | 23.9k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2433 | 23.9k | const __m128i scaled_top_right1 = |
2434 | 23.9k | _mm_mullo_epi16(inverted_weights1, top_right); |
2435 | 23.9k | const __m128i scaled_top_right2 = |
2436 | 23.9k | _mm_mullo_epi16(inverted_weights2, top_right); |
2437 | 23.9k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2438 | 23.9k | __m128i y_mask = _mm_set1_epi32(0x01000100); |
2439 | 23.9k | __m128i left_y = _mm_shuffle_epi8(left, y_mask); |
2440 | 23.9k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2441 | 23.9k | scaled_top_right1, scaled_top_right2, round); |
2442 | 23.9k | dst += stride; |
2443 | 23.9k | y_mask = _mm_set1_epi32(0x03020302); |
2444 | 23.9k | left_y = _mm_shuffle_epi8(left, y_mask); |
2445 | 23.9k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2446 | 23.9k | scaled_top_right1, scaled_top_right2, round); |
2447 | 23.9k | dst += stride; |
2448 | 23.9k | y_mask = _mm_set1_epi32(0x05040504); |
2449 | 23.9k | left_y = _mm_shuffle_epi8(left, y_mask); |
2450 | 23.9k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2451 | 23.9k | scaled_top_right1, scaled_top_right2, round); |
2452 | 23.9k | dst += stride; |
2453 | 23.9k | y_mask = _mm_set1_epi32(0x07060706); |
2454 | 23.9k | left_y = _mm_shuffle_epi8(left, y_mask); |
2455 | 23.9k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2456 | 23.9k | scaled_top_right1, scaled_top_right2, round); |
2457 | 23.9k | } |
2458 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
2459 | | |
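The 16-wide paths widen one 16-byte weights load into two registers of eight 16-bit lanes each: `cvtepu8_epi16` zero-extends the low half (equivalent to `_mm_unpacklo_epi8` against zero, as the paired `_mm_unpackhi_epi8` use near the top of this section shows), and `_mm_srli_si128(weights, 8)` moves the high half down first. A standalone sketch (assumption: compiled separately):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  const __m128i w = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                  8, 9, 10, 11, 12, 13, 14, 15);
  const __m128i zero = _mm_setzero_si128();
  const __m128i w_lo = _mm_unpacklo_epi8(w, zero);  // 16-bit lanes 0..7
  const __m128i w_hi = _mm_unpackhi_epi8(w, zero);  // 16-bit lanes 8..15
  printf("%d %d\n", _mm_extract_epi16(w_lo, 7), _mm_extract_epi16(w_hi, 0));
  // prints: 7 8
  return 0;
}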
2460 | | void aom_smooth_h_predictor_16x8_ssse3( |
2461 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2462 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2463 | 22.2k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2464 | 22.2k | const __m128i top_right = _mm_set1_epi16(top_row[15]); |
2465 | 22.2k | const __m128i left = cvtepu8_epi16(LoadLo8(left_column)); |
2466 | 22.2k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
2467 | 22.2k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2468 | 22.2k | const __m128i weights1 = cvtepu8_epi16(weights); |
2469 | 22.2k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8)); |
2470 | 22.2k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2471 | 22.2k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2472 | 22.2k | const __m128i scaled_top_right1 = |
2473 | 22.2k | _mm_mullo_epi16(inverted_weights1, top_right); |
2474 | 22.2k | const __m128i scaled_top_right2 = |
2475 | 22.2k | _mm_mullo_epi16(inverted_weights2, top_right); |
2476 | 22.2k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2477 | 200k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2478 | 178k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2479 | 178k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2480 | 178k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2481 | 178k | scaled_top_right1, scaled_top_right2, round); |
2482 | 178k | dst += stride; |
2483 | 178k | } |
2484 | 22.2k | } |
2485 | | |
2486 | | void aom_smooth_h_predictor_16x16_ssse3( |
2487 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2488 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2489 | 42.1k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2490 | 42.1k | const __m128i top_right = _mm_set1_epi16(top_row[15]); |
2491 | 42.1k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
2492 | 42.1k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2493 | 42.1k | const __m128i weights1 = cvtepu8_epi16(weights); |
2494 | 42.1k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8)); |
2495 | 42.1k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2496 | 42.1k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2497 | 42.1k | const __m128i scaled_top_right1 = |
2498 | 42.1k | _mm_mullo_epi16(inverted_weights1, top_right); |
2499 | 42.1k | const __m128i scaled_top_right2 = |
2500 | 42.1k | _mm_mullo_epi16(inverted_weights2, top_right); |
2501 | 42.1k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2502 | 42.1k | __m128i left = cvtepu8_epi16(LoadLo8(left_column)); |
2503 | 379k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2504 | 337k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2505 | 337k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2506 | 337k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2507 | 337k | scaled_top_right1, scaled_top_right2, round); |
2508 | 337k | dst += stride; |
2509 | 337k | } |
2510 | 42.1k | left = cvtepu8_epi16(LoadLo8(left_column + 8)); |
2511 | 379k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2512 | 337k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2513 | 337k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2514 | 337k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2515 | 337k | scaled_top_right1, scaled_top_right2, round); |
2516 | 337k | dst += stride; |
2517 | 337k | } |
2518 | 42.1k | } |
2519 | | |
2520 | | void aom_smooth_h_predictor_16x32_ssse3( |
2521 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2522 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2523 | 12.3k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2524 | 12.3k | const __m128i top_right = _mm_set1_epi16(top_row[15]); |
2525 | 12.3k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
2526 | 12.3k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2527 | 12.3k | const __m128i weights1 = cvtepu8_epi16(weights); |
2528 | 12.3k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8)); |
2529 | 12.3k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2530 | 12.3k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2531 | 12.3k | const __m128i scaled_top_right1 = |
2532 | 12.3k | _mm_mullo_epi16(inverted_weights1, top_right); |
2533 | 12.3k | const __m128i scaled_top_right2 = |
2534 | 12.3k | _mm_mullo_epi16(inverted_weights2, top_right); |
2535 | 12.3k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2536 | 12.3k | __m128i left = cvtepu8_epi16(LoadLo8(left_column)); |
2537 | 111k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2538 | 98.8k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2539 | 98.8k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2540 | 98.8k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2541 | 98.8k | scaled_top_right1, scaled_top_right2, round); |
2542 | 98.8k | dst += stride; |
2543 | 98.8k | } |
2544 | 12.3k | left = cvtepu8_epi16(LoadLo8(left_column + 8)); |
2545 | 111k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2546 | 98.8k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2547 | 98.8k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2548 | 98.8k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2549 | 98.8k | scaled_top_right1, scaled_top_right2, round); |
2550 | 98.8k | dst += stride; |
2551 | 98.8k | } |
2552 | 12.3k | left = cvtepu8_epi16(LoadLo8(left_column + 16)); |
2553 | 111k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2554 | 98.8k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2555 | 98.8k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2556 | 98.8k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2557 | 98.8k | scaled_top_right1, scaled_top_right2, round); |
2558 | 98.8k | dst += stride; |
2559 | 98.8k | } |
2560 | 12.3k | left = cvtepu8_epi16(LoadLo8(left_column + 24)); |
2561 | 111k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2562 | 98.8k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2563 | 98.8k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2564 | 98.8k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2565 | 98.8k | scaled_top_right1, scaled_top_right2, round); |
2566 | 98.8k | dst += stride; |
2567 | 98.8k | } |
2568 | 12.3k | } |
2569 | | |
2570 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
2571 | | void aom_smooth_h_predictor_16x64_ssse3( |
2572 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2573 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2574 | 3.03k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2575 | 3.03k | const __m128i top_right = _mm_set1_epi16(top_row[15]); |
2576 | 3.03k | const __m128i weights = LoadUnaligned16(smooth_weights + 12); |
2577 | 3.03k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2578 | 3.03k | const __m128i weights1 = cvtepu8_epi16(weights); |
2579 | 3.03k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8)); |
2580 | 3.03k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2581 | 3.03k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2582 | 3.03k | const __m128i scaled_top_right1 = |
2583 | 3.03k | _mm_mullo_epi16(inverted_weights1, top_right); |
2584 | 3.03k | const __m128i scaled_top_right2 = |
2585 | 3.03k | _mm_mullo_epi16(inverted_weights2, top_right); |
2586 | 3.03k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2587 | 27.3k | for (int left_offset = 0; left_offset < 64; left_offset += 8) { |
2588 | 24.3k | const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset)); |
2589 | 218k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2590 | 194k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2591 | 194k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2592 | 194k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2593 | 194k | scaled_top_right1, scaled_top_right2, |
2594 | 194k | round); |
2595 | 194k | dst += stride; |
2596 | 194k | } |
2597 | 24.3k | } |
2598 | 3.03k | } |
2599 | | |
2600 | | void aom_smooth_h_predictor_32x8_ssse3( |
2601 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2602 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2603 | 18.1k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2604 | 18.1k | const __m128i top_right = _mm_set1_epi16(top_row[31]); |
2605 | 18.1k | const __m128i left = cvtepu8_epi16(LoadLo8(left_column)); |
2606 | 18.1k | const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28); |
2607 | 18.1k | const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44); |
2608 | 18.1k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2609 | 18.1k | const __m128i weights1 = cvtepu8_epi16(weights_lo); |
2610 | 18.1k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8)); |
2611 | 18.1k | const __m128i weights3 = cvtepu8_epi16(weights_hi); |
2612 | 18.1k | const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8)); |
2613 | 18.1k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2614 | 18.1k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2615 | 18.1k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
2616 | 18.1k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
2617 | 18.1k | const __m128i scaled_top_right1 = |
2618 | 18.1k | _mm_mullo_epi16(inverted_weights1, top_right); |
2619 | 18.1k | const __m128i scaled_top_right2 = |
2620 | 18.1k | _mm_mullo_epi16(inverted_weights2, top_right); |
2621 | 18.1k | const __m128i scaled_top_right3 = |
2622 | 18.1k | _mm_mullo_epi16(inverted_weights3, top_right); |
2623 | 18.1k | const __m128i scaled_top_right4 = |
2624 | 18.1k | _mm_mullo_epi16(inverted_weights4, top_right); |
2625 | 18.1k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2626 | 163k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2627 | 145k | __m128i y_select = _mm_set1_epi32(y_mask); |
2628 | 145k | __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2629 | 145k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2630 | 145k | scaled_top_right1, scaled_top_right2, round); |
2631 | 145k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2632 | 145k | scaled_top_right3, scaled_top_right4, round); |
2633 | 145k | dst += stride; |
2634 | 145k | } |
2635 | 18.1k | } |
2636 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
2637 | | |
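The `smooth_weights` offsets used in this file follow from the table layout: the weight rows for dimensions 4, 8, 16, 32 and 64 are concatenated in ascending order, so the row for dimension d starts at d - 4, matching the `smooth_weights + 4 / + 12 / + 28 / + 60` loads above; the 32- and 64-wide paths then fetch the remaining halves at 16-byte steps (+ 44, and + 76 / + 92 / + 108). A hypothetical helper expressing that:

static inline const uint8_t *smooth_weights_for(int d) {
  // 4 -> 0, 8 -> 4, 16 -> 12, 32 -> 28, 64 -> 60 (= 4 + 8 + 16 + 32).
  return smooth_weights + d - 4;  // assumption: d is a power of two in [4, 64]
}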
2638 | | void aom_smooth_h_predictor_32x16_ssse3( |
2639 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2640 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2641 | 10.5k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2642 | 10.5k | const __m128i top_right = _mm_set1_epi16(top_row[31]); |
2643 | 10.5k | const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column)); |
2644 | 10.5k | const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28); |
2645 | 10.5k | const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44); |
2646 | 10.5k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2647 | 10.5k | const __m128i weights1 = cvtepu8_epi16(weights_lo); |
2648 | 10.5k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8)); |
2649 | 10.5k | const __m128i weights3 = cvtepu8_epi16(weights_hi); |
2650 | 10.5k | const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8)); |
2651 | 10.5k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2652 | 10.5k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2653 | 10.5k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
2654 | 10.5k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
2655 | 10.5k | const __m128i scaled_top_right1 = |
2656 | 10.5k | _mm_mullo_epi16(inverted_weights1, top_right); |
2657 | 10.5k | const __m128i scaled_top_right2 = |
2658 | 10.5k | _mm_mullo_epi16(inverted_weights2, top_right); |
2659 | 10.5k | const __m128i scaled_top_right3 = |
2660 | 10.5k | _mm_mullo_epi16(inverted_weights3, top_right); |
2661 | 10.5k | const __m128i scaled_top_right4 = |
2662 | 10.5k | _mm_mullo_epi16(inverted_weights4, top_right); |
2663 | 10.5k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2664 | 95.3k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2665 | 84.7k | __m128i y_select = _mm_set1_epi32(y_mask); |
2666 | 84.7k | __m128i left_y = _mm_shuffle_epi8(left1, y_select); |
2667 | 84.7k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2668 | 84.7k | scaled_top_right1, scaled_top_right2, round); |
2669 | 84.7k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2670 | 84.7k | scaled_top_right3, scaled_top_right4, round); |
2671 | 84.7k | dst += stride; |
2672 | 84.7k | } |
2673 | 10.5k | const __m128i left2 = |
2674 | 10.5k | cvtepu8_epi16(LoadLo8(left_column + 8));
2675 | 95.3k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2676 | 84.7k | __m128i y_select = _mm_set1_epi32(y_mask); |
2677 | 84.7k | __m128i left_y = _mm_shuffle_epi8(left2, y_select); |
2678 | 84.7k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2679 | 84.7k | scaled_top_right1, scaled_top_right2, round); |
2680 | 84.7k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2681 | 84.7k | scaled_top_right3, scaled_top_right4, round); |
2682 | 84.7k | dst += stride; |
2683 | 84.7k | } |
2684 | 10.5k | } |
2685 | | |
2686 | | void aom_smooth_h_predictor_32x32_ssse3( |
2687 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2688 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2689 | 41.4k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2690 | 41.4k | const __m128i top_right = _mm_set1_epi16(top_row[31]); |
2691 | 41.4k | const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28); |
2692 | 41.4k | const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44); |
2693 | 41.4k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2694 | 41.4k | const __m128i weights1 = cvtepu8_epi16(weights_lo); |
2695 | 41.4k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8)); |
2696 | 41.4k | const __m128i weights3 = cvtepu8_epi16(weights_hi); |
2697 | 41.4k | const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8)); |
2698 | 41.4k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2699 | 41.4k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2700 | 41.4k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
2701 | 41.4k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
2702 | 41.4k | const __m128i scaled_top_right1 = |
2703 | 41.4k | _mm_mullo_epi16(inverted_weights1, top_right); |
2704 | 41.4k | const __m128i scaled_top_right2 = |
2705 | 41.4k | _mm_mullo_epi16(inverted_weights2, top_right); |
2706 | 41.4k | const __m128i scaled_top_right3 = |
2707 | 41.4k | _mm_mullo_epi16(inverted_weights3, top_right); |
2708 | 41.4k | const __m128i scaled_top_right4 = |
2709 | 41.4k | _mm_mullo_epi16(inverted_weights4, top_right); |
2710 | 41.4k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2711 | 41.4k | __m128i left = cvtepu8_epi16(LoadLo8(left_column)); |
2712 | 373k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2713 | 331k | __m128i y_select = _mm_set1_epi32(y_mask); |
2714 | 331k | __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2715 | 331k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2716 | 331k | scaled_top_right1, scaled_top_right2, round); |
2717 | 331k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2718 | 331k | scaled_top_right3, scaled_top_right4, round); |
2719 | 331k | dst += stride; |
2720 | 331k | } |
2721 | 41.4k | left = cvtepu8_epi16(LoadLo8(left_column + 8)); |
2722 | 373k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2723 | 331k | __m128i y_select = _mm_set1_epi32(y_mask); |
2724 | 331k | __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2725 | 331k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2726 | 331k | scaled_top_right1, scaled_top_right2, round); |
2727 | 331k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2728 | 331k | scaled_top_right3, scaled_top_right4, round); |
2729 | 331k | dst += stride; |
2730 | 331k | } |
2731 | 41.4k | left = cvtepu8_epi16(LoadLo8(left_column + 16)); |
2732 | 373k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2733 | 331k | __m128i y_select = _mm_set1_epi32(y_mask); |
2734 | 331k | __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2735 | 331k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2736 | 331k | scaled_top_right1, scaled_top_right2, round); |
2737 | 331k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2738 | 331k | scaled_top_right3, scaled_top_right4, round); |
2739 | 331k | dst += stride; |
2740 | 331k | } |
2741 | 41.4k | left = cvtepu8_epi16(LoadLo8(left_column + 24)); |
2742 | 373k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2743 | 331k | __m128i y_select = _mm_set1_epi32(y_mask); |
2744 | 331k | __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2745 | 331k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2746 | 331k | scaled_top_right1, scaled_top_right2, round); |
2747 | 331k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2748 | 331k | scaled_top_right3, scaled_top_right4, round); |
2749 | 331k | dst += stride; |
2750 | 331k | } |
2751 | 41.4k | } |
2752 | | |
2753 | | void aom_smooth_h_predictor_32x64_ssse3( |
2754 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2755 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2756 | 815 | const uint8_t *LIBAOM_RESTRICT left_column) { |
2757 | 815 | const __m128i top_right = _mm_set1_epi16(top_row[31]); |
2758 | 815 | const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28); |
2759 | 815 | const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44); |
2760 | 815 | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2761 | 815 | const __m128i weights1 = cvtepu8_epi16(weights_lo); |
2762 | 815 | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8)); |
2763 | 815 | const __m128i weights3 = cvtepu8_epi16(weights_hi); |
2764 | 815 | const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8)); |
2765 | 815 | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2766 | 815 | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2767 | 815 | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
2768 | 815 | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
2769 | 815 | const __m128i scaled_top_right1 = |
2770 | 815 | _mm_mullo_epi16(inverted_weights1, top_right); |
2771 | 815 | const __m128i scaled_top_right2 = |
2772 | 815 | _mm_mullo_epi16(inverted_weights2, top_right); |
2773 | 815 | const __m128i scaled_top_right3 = |
2774 | 815 | _mm_mullo_epi16(inverted_weights3, top_right); |
2775 | 815 | const __m128i scaled_top_right4 = |
2776 | 815 | _mm_mullo_epi16(inverted_weights4, top_right); |
2777 | 815 | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2778 | 7.33k | for (int left_offset = 0; left_offset < 64; left_offset += 8) { |
2779 | 6.52k | const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset)); |
2780 | 58.6k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2781 | 52.1k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2782 | 52.1k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
2783 | 52.1k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2784 | 52.1k | scaled_top_right1, scaled_top_right2, |
2785 | 52.1k | round); |
2786 | 52.1k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, |
2787 | 52.1k | weights4, scaled_top_right3, |
2788 | 52.1k | scaled_top_right4, round); |
2789 | 52.1k | dst += stride; |
2790 | 52.1k | } |
2791 | 6.52k | } |
2792 | 815 | } |
2793 | | |
2794 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
2795 | | void aom_smooth_h_predictor_64x16_ssse3( |
2796 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2797 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2798 | 5.70k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2799 | 5.70k | const __m128i top_right = _mm_set1_epi16(top_row[63]); |
2800 | 5.70k | const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column)); |
2801 | 5.70k | const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60); |
2802 | 5.70k | const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76); |
2803 | 5.70k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2804 | 5.70k | const __m128i weights1 = cvtepu8_epi16(weights_lolo); |
2805 | 5.70k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8)); |
2806 | 5.70k | const __m128i weights3 = cvtepu8_epi16(weights_lohi); |
2807 | 5.70k | const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8)); |
2808 | 5.70k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2809 | 5.70k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2810 | 5.70k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
2811 | 5.70k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
2812 | 5.70k | const __m128i scaled_top_right1 = |
2813 | 5.70k | _mm_mullo_epi16(inverted_weights1, top_right); |
2814 | 5.70k | const __m128i scaled_top_right2 = |
2815 | 5.70k | _mm_mullo_epi16(inverted_weights2, top_right); |
2816 | 5.70k | const __m128i scaled_top_right3 = |
2817 | 5.70k | _mm_mullo_epi16(inverted_weights3, top_right); |
2818 | 5.70k | const __m128i scaled_top_right4 = |
2819 | 5.70k | _mm_mullo_epi16(inverted_weights4, top_right); |
2820 | 5.70k | const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92); |
2821 | 5.70k | const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108); |
2822 | 5.70k | const __m128i weights5 = cvtepu8_epi16(weights_hilo); |
2823 | 5.70k | const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8)); |
2824 | 5.70k | const __m128i weights7 = cvtepu8_epi16(weights_hihi); |
2825 | 5.70k | const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8)); |
2826 | 5.70k | const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5); |
2827 | 5.70k | const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6); |
2828 | 5.70k | const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7); |
2829 | 5.70k | const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8); |
2830 | 5.70k | const __m128i scaled_top_right5 = |
2831 | 5.70k | _mm_mullo_epi16(inverted_weights5, top_right); |
2832 | 5.70k | const __m128i scaled_top_right6 = |
2833 | 5.70k | _mm_mullo_epi16(inverted_weights6, top_right); |
2834 | 5.70k | const __m128i scaled_top_right7 = |
2835 | 5.70k | _mm_mullo_epi16(inverted_weights7, top_right); |
2836 | 5.70k | const __m128i scaled_top_right8 = |
2837 | 5.70k | _mm_mullo_epi16(inverted_weights8, top_right); |
2838 | 5.70k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2839 | 51.3k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2840 | 45.6k | __m128i y_select = _mm_set1_epi32(y_mask); |
2841 | 45.6k | __m128i left_y = _mm_shuffle_epi8(left1, y_select); |
2842 | 45.6k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2843 | 45.6k | scaled_top_right1, scaled_top_right2, round); |
2844 | 45.6k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2845 | 45.6k | scaled_top_right3, scaled_top_right4, round); |
2846 | 45.6k | write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6, |
2847 | 45.6k | scaled_top_right5, scaled_top_right6, round); |
2848 | 45.6k | write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8, |
2849 | 45.6k | scaled_top_right7, scaled_top_right8, round); |
2850 | 45.6k | dst += stride; |
2851 | 45.6k | } |
2852 | 5.70k | const __m128i left2 = cvtepu8_epi16(LoadLo8(left_column + 8)); |
2853 | 51.3k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2854 | 45.6k | __m128i y_select = _mm_set1_epi32(y_mask); |
2855 | 45.6k | __m128i left_y = _mm_shuffle_epi8(left2, y_select); |
2856 | 45.6k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2857 | 45.6k | scaled_top_right1, scaled_top_right2, round); |
2858 | 45.6k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2859 | 45.6k | scaled_top_right3, scaled_top_right4, round); |
2860 | 45.6k | write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6, |
2861 | 45.6k | scaled_top_right5, scaled_top_right6, round); |
2862 | 45.6k | write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8, |
2863 | 45.6k | scaled_top_right7, scaled_top_right8, round); |
2864 | 45.6k | dst += stride; |
2865 | 45.6k | } |
2866 | 5.70k | } |
2867 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
2868 | | |
2869 | | void aom_smooth_h_predictor_64x32_ssse3( |
2870 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2871 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2872 | 1.45k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2873 | 1.45k | const __m128i top_right = _mm_set1_epi16(top_row[63]); |
2874 | 1.45k | const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column)); |
2875 | 1.45k | const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60); |
2876 | 1.45k | const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76); |
2877 | 1.45k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2878 | 1.45k | const __m128i weights1 = cvtepu8_epi16(weights_lolo); |
2879 | 1.45k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8)); |
2880 | 1.45k | const __m128i weights3 = cvtepu8_epi16(weights_lohi); |
2881 | 1.45k | const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8)); |
2882 | 1.45k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2883 | 1.45k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2884 | 1.45k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
2885 | 1.45k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
2886 | 1.45k | const __m128i scaled_top_right1 = |
2887 | 1.45k | _mm_mullo_epi16(inverted_weights1, top_right); |
2888 | 1.45k | const __m128i scaled_top_right2 = |
2889 | 1.45k | _mm_mullo_epi16(inverted_weights2, top_right); |
2890 | 1.45k | const __m128i scaled_top_right3 = |
2891 | 1.45k | _mm_mullo_epi16(inverted_weights3, top_right); |
2892 | 1.45k | const __m128i scaled_top_right4 = |
2893 | 1.45k | _mm_mullo_epi16(inverted_weights4, top_right); |
2894 | 1.45k | const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92); |
2895 | 1.45k | const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108); |
2896 | 1.45k | const __m128i weights5 = cvtepu8_epi16(weights_hilo); |
2897 | 1.45k | const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8)); |
2898 | 1.45k | const __m128i weights7 = cvtepu8_epi16(weights_hihi); |
2899 | 1.45k | const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8)); |
2900 | 1.45k | const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5); |
2901 | 1.45k | const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6); |
2902 | 1.45k | const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7); |
2903 | 1.45k | const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8); |
2904 | 1.45k | const __m128i scaled_top_right5 = |
2905 | 1.45k | _mm_mullo_epi16(inverted_weights5, top_right); |
2906 | 1.45k | const __m128i scaled_top_right6 = |
2907 | 1.45k | _mm_mullo_epi16(inverted_weights6, top_right); |
2908 | 1.45k | const __m128i scaled_top_right7 = |
2909 | 1.45k | _mm_mullo_epi16(inverted_weights7, top_right); |
2910 | 1.45k | const __m128i scaled_top_right8 = |
2911 | 1.45k | _mm_mullo_epi16(inverted_weights8, top_right); |
2912 | 1.45k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
2913 | 13.0k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2914 | 11.6k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2915 | 11.6k | const __m128i left_y = _mm_shuffle_epi8(left1, y_select); |
2916 | 11.6k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2917 | 11.6k | scaled_top_right1, scaled_top_right2, round); |
2918 | 11.6k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2919 | 11.6k | scaled_top_right3, scaled_top_right4, round); |
2920 | 11.6k | write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6, |
2921 | 11.6k | scaled_top_right5, scaled_top_right6, round); |
2922 | 11.6k | write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8, |
2923 | 11.6k | scaled_top_right7, scaled_top_right8, round); |
2924 | 11.6k | dst += stride; |
2925 | 11.6k | } |
2926 | 1.45k | const __m128i left2 = cvtepu8_epi16(LoadLo8(left_column + 8)); |
2927 | 13.0k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2928 | 11.6k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2929 | 11.6k | const __m128i left_y = _mm_shuffle_epi8(left2, y_select); |
2930 | 11.6k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2931 | 11.6k | scaled_top_right1, scaled_top_right2, round); |
2932 | 11.6k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2933 | 11.6k | scaled_top_right3, scaled_top_right4, round); |
2934 | 11.6k | write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6, |
2935 | 11.6k | scaled_top_right5, scaled_top_right6, round); |
2936 | 11.6k | write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8, |
2937 | 11.6k | scaled_top_right7, scaled_top_right8, round); |
2938 | 11.6k | dst += stride; |
2939 | 11.6k | } |
2940 | 1.45k | const __m128i left3 = cvtepu8_epi16(LoadLo8(left_column + 16)); |
2941 | 13.0k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2942 | 11.6k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2943 | 11.6k | const __m128i left_y = _mm_shuffle_epi8(left3, y_select); |
2944 | 11.6k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2945 | 11.6k | scaled_top_right1, scaled_top_right2, round); |
2946 | 11.6k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2947 | 11.6k | scaled_top_right3, scaled_top_right4, round); |
2948 | 11.6k | write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6, |
2949 | 11.6k | scaled_top_right5, scaled_top_right6, round); |
2950 | 11.6k | write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8, |
2951 | 11.6k | scaled_top_right7, scaled_top_right8, round); |
2952 | 11.6k | dst += stride; |
2953 | 11.6k | } |
2954 | 1.45k | const __m128i left4 = cvtepu8_epi16(LoadLo8(left_column + 24)); |
2955 | 13.0k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
2956 | 11.6k | const __m128i y_select = _mm_set1_epi32(y_mask); |
2957 | 11.6k | const __m128i left_y = _mm_shuffle_epi8(left4, y_select); |
2958 | 11.6k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
2959 | 11.6k | scaled_top_right1, scaled_top_right2, round); |
2960 | 11.6k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4, |
2961 | 11.6k | scaled_top_right3, scaled_top_right4, round); |
2962 | 11.6k | write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6, |
2963 | 11.6k | scaled_top_right5, scaled_top_right6, round); |
2964 | 11.6k | write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8, |
2965 | 11.6k | scaled_top_right7, scaled_top_right8, round); |
2966 | 11.6k | dst += stride; |
2967 | 11.6k | } |
2968 | 1.45k | } |
2969 | | |
2970 | | void aom_smooth_h_predictor_64x64_ssse3( |
2971 | | uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride, |
2972 | | const uint8_t *LIBAOM_RESTRICT top_row, |
2973 | 4.67k | const uint8_t *LIBAOM_RESTRICT left_column) { |
2974 | 4.67k | const __m128i top_right = _mm_set1_epi16(top_row[63]); |
2975 | 4.67k | const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60); |
2976 | 4.67k | const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76); |
2977 | 4.67k | const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE); |
2978 | 4.67k | const __m128i weights1 = cvtepu8_epi16(weights_lolo); |
2979 | 4.67k | const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8)); |
2980 | 4.67k | const __m128i weights3 = cvtepu8_epi16(weights_lohi); |
2981 | 4.67k | const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8)); |
2982 | 4.67k | const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1); |
2983 | 4.67k | const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2); |
2984 | 4.67k | const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3); |
2985 | 4.67k | const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4); |
2986 | 4.67k | const __m128i scaled_top_right1 = |
2987 | 4.67k | _mm_mullo_epi16(inverted_weights1, top_right); |
2988 | 4.67k | const __m128i scaled_top_right2 = |
2989 | 4.67k | _mm_mullo_epi16(inverted_weights2, top_right); |
2990 | 4.67k | const __m128i scaled_top_right3 = |
2991 | 4.67k | _mm_mullo_epi16(inverted_weights3, top_right); |
2992 | 4.67k | const __m128i scaled_top_right4 = |
2993 | 4.67k | _mm_mullo_epi16(inverted_weights4, top_right); |
2994 | 4.67k | const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92); |
2995 | 4.67k | const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108); |
2996 | 4.67k | const __m128i weights5 = cvtepu8_epi16(weights_hilo); |
2997 | 4.67k | const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8)); |
2998 | 4.67k | const __m128i weights7 = cvtepu8_epi16(weights_hihi); |
2999 | 4.67k | const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8)); |
3000 | 4.67k | const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5); |
3001 | 4.67k | const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6); |
3002 | 4.67k | const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7); |
3003 | 4.67k | const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8); |
3004 | 4.67k | const __m128i scaled_top_right5 = |
3005 | 4.67k | _mm_mullo_epi16(inverted_weights5, top_right); |
3006 | 4.67k | const __m128i scaled_top_right6 = |
3007 | 4.67k | _mm_mullo_epi16(inverted_weights6, top_right); |
3008 | 4.67k | const __m128i scaled_top_right7 = |
3009 | 4.67k | _mm_mullo_epi16(inverted_weights7, top_right); |
3010 | 4.67k | const __m128i scaled_top_right8 = |
3011 | 4.67k | _mm_mullo_epi16(inverted_weights8, top_right); |
3012 | 4.67k | const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1)); |
3013 | 42.0k | for (int left_offset = 0; left_offset < 64; left_offset += 8) { |
3014 | 37.3k | const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset)); |
3015 | 336k | for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) { |
3016 | 299k | const __m128i y_select = _mm_set1_epi32(y_mask); |
3017 | 299k | const __m128i left_y = _mm_shuffle_epi8(left, y_select); |
3018 | 299k | write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2, |
3019 | 299k | scaled_top_right1, scaled_top_right2, |
3020 | 299k | round); |
3021 | 299k | write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, |
3022 | 299k | weights4, scaled_top_right3, |
3023 | 299k | scaled_top_right4, round); |
3024 | 299k | write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, |
3025 | 299k | weights6, scaled_top_right5, |
3026 | 299k | scaled_top_right6, round); |
3027 | 299k | write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, |
3028 | 299k | weights8, scaled_top_right7, |
3029 | 299k | scaled_top_right8, round); |
3030 | 299k | dst += stride; |
3031 | 299k | } |
3032 | 37.3k | } |
3033 | 4.67k | } |
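A hedged usage sketch: per the loads above, the SMOOTH_H paths read only the top-right pixel of `top_row` plus the left column, so a minimal caller (hypothetical, shown for the 4x4 variant) needs no wider border:

#include <stdint.h>

void demo_smooth_h_4x4(void) {
  static const uint8_t above[4] = { 100, 110, 120, 130 };  // only above[3] is read
  static const uint8_t left[4] = { 90, 80, 70, 60 };
  uint8_t block[4 * 4];
  aom_smooth_h_predictor_4x4_ssse3(block, /*stride=*/4, above, left);
}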