Coverage Report

Created: 2025-07-23 06:32

/src/aom/aom_dsp/x86/intrapred_ssse3.c

 Line |  Count | Source
    1 |        | /*
    2 |        |  * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
    3 |        |  *
    4 |        |  * This source code is subject to the terms of the BSD 2 Clause License and
    5 |        |  * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
    6 |        |  * was not distributed with this source code in the LICENSE file, you can
    7 |        |  * obtain it at www.aomedia.org/license/software. If the Alliance for Open
    8 |        |  * Media Patent License 1.0 was not distributed with this source code in the
    9 |        |  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
   10 |        |  */
   11 |        |
   12 |        | #include <tmmintrin.h>
   13 |        |
   14 |        | #include "config/aom_dsp_rtcd.h"
   15 |        |
   16 |        | #include "aom_dsp/intrapred_common.h"
   17 |        |
   18 |        | // -----------------------------------------------------------------------------
   19 |        | // PAETH_PRED
   20 |        |
   21 |        | // Return 8 16-bit pixels in one row
   22 |        | static inline __m128i paeth_8x1_pred(const __m128i *left, const __m128i *top,
   23 |  6.64M |                                      const __m128i *topleft) {
   24 |  6.64M |   const __m128i base = _mm_sub_epi16(_mm_add_epi16(*top, *left), *topleft);
   25 |        |
   26 |  6.64M |   __m128i pl = _mm_abs_epi16(_mm_sub_epi16(base, *left));
   27 |  6.64M |   __m128i pt = _mm_abs_epi16(_mm_sub_epi16(base, *top));
   28 |  6.64M |   __m128i ptl = _mm_abs_epi16(_mm_sub_epi16(base, *topleft));
   29 |        |
   30 |  6.64M |   __m128i mask1 = _mm_cmpgt_epi16(pl, pt);
   31 |  6.64M |   mask1 = _mm_or_si128(mask1, _mm_cmpgt_epi16(pl, ptl));
   32 |  6.64M |   __m128i mask2 = _mm_cmpgt_epi16(pt, ptl);
   33 |        |
   34 |  6.64M |   pl = _mm_andnot_si128(mask1, *left);
   35 |        |
   36 |  6.64M |   ptl = _mm_and_si128(mask2, *topleft);
   37 |  6.64M |   pt = _mm_andnot_si128(mask2, *top);
   38 |  6.64M |   pt = _mm_or_si128(pt, ptl);
   39 |  6.64M |   pt = _mm_and_si128(mask1, pt);
   40 |        |
   41 |  6.64M |   return _mm_or_si128(pl, pt);
   42 |  6.64M | }
   43 |        |
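Aside: the mask arithmetic above is easier to follow against a scalar reference. Below is a minimal sketch of the Paeth selection it implements (a hypothetical helper written here for illustration, assuming the usual AV1 definition; it is not part of this file):

#include <stdint.h>
#include <stdlib.h>

// Scalar Paeth predictor: choose whichever of left, top, topleft is closest
// to base = top + left - topleft. In the SIMD version, mask1 marks lanes
// where left loses (pl > pt or pl > ptl) and mask2 marks lanes where top
// loses to topleft (pt > ptl); the and/andnot/or sequence realizes the same
// three-way select without branches.
static uint8_t paeth_scalar(int left, int top, int topleft) {
  const int base = top + left - topleft;
  const int pl = abs(base - left);
  const int pt = abs(base - top);
  const int ptl = abs(base - topleft);
  if (pl <= pt && pl <= ptl) return (uint8_t)left;
  return (uint8_t)(pt <= ptl ? top : topleft);
}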
   44 |        | void aom_paeth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
   45 |   208k |                                    const uint8_t *above, const uint8_t *left) {
   46 |   208k |   __m128i l = _mm_loadl_epi64((const __m128i *)left);
   47 |   208k |   const __m128i t = _mm_loadl_epi64((const __m128i *)above);
   48 |   208k |   const __m128i zero = _mm_setzero_si128();
   49 |   208k |   const __m128i t16 = _mm_unpacklo_epi8(t, zero);
   50 |   208k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
   51 |   208k |   __m128i rep = _mm_set1_epi16((short)0x8000);
   52 |   208k |   const __m128i one = _mm_set1_epi16(1);
   53 |        |
   54 |   208k |   int i;
   55 |  1.04M |   for (i = 0; i < 4; ++i) {
   56 |   834k |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
   57 |   834k |     const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
   58 |        |
   59 |   834k |     *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
   60 |   834k |     dst += stride;
   61 |   834k |     rep = _mm_add_epi16(rep, one);
   62 |   834k |   }
   63 |   208k | }
   64 |        |
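An explanatory aside on the `rep` counter (an inference from the code, not a comment in the file): `_mm_set1_epi16((short)0x8000)` places the byte pair {0x00, 0x80} in every 16-bit lane. As a `_mm_shuffle_epi8` control, index byte 0x00 selects `left[0]`, while 0x80 has its most significant bit set and therefore produces a zero byte, so each lane becomes `left[0]` zero-extended to 16 bits. Adding `one` per row advances every pair to {0x01, 0x80}, broadcasting `left[1]`, and so on down the column. A standalone sketch (assumes an SSSE3 target, e.g. compiled with -mssse3):

#include <stdio.h>
#include <tmmintrin.h>

int main(void) {
  // Four left-column pixels in the low bytes of a vector.
  const __m128i l =
      _mm_setr_epi8(10, 20, 30, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
  __m128i rep = _mm_set1_epi16((short)0x8000);
  for (int i = 0; i < 4; ++i) {
    // Every 16-bit lane of l16 now holds left[i], zero-extended.
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
    printf("row %d broadcasts %d\n", i, _mm_extract_epi16(l16, 0));
    rep = _mm_add_epi16(rep, _mm_set1_epi16(1));  // next row picks left[i+1]
  }
  return 0;  // prints 10, 20, 30, 40
}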
   65 |        | void aom_paeth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
   66 |  46.9k |                                    const uint8_t *above, const uint8_t *left) {
   67 |  46.9k |   __m128i l = _mm_loadl_epi64((const __m128i *)left);
   68 |  46.9k |   const __m128i t = _mm_loadl_epi64((const __m128i *)above);
   69 |  46.9k |   const __m128i zero = _mm_setzero_si128();
   70 |  46.9k |   const __m128i t16 = _mm_unpacklo_epi8(t, zero);
   71 |  46.9k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
   72 |  46.9k |   __m128i rep = _mm_set1_epi16((short)0x8000);
   73 |  46.9k |   const __m128i one = _mm_set1_epi16(1);
   74 |        |
   75 |  46.9k |   int i;
   76 |   422k |   for (i = 0; i < 8; ++i) {
   77 |   375k |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
   78 |   375k |     const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
   79 |        |
   80 |   375k |     *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
   81 |   375k |     dst += stride;
   82 |   375k |     rep = _mm_add_epi16(rep, one);
   83 |   375k |   }
   84 |  46.9k | }
   85 |        |
   86 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
   87 |        | void aom_paeth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride,
   88 |  45.2k |                                     const uint8_t *above, const uint8_t *left) {
   89 |  45.2k |   __m128i l = _mm_load_si128((const __m128i *)left);
   90 |  45.2k |   const __m128i t = _mm_cvtsi32_si128(((const int *)above)[0]);
   91 |  45.2k |   const __m128i zero = _mm_setzero_si128();
   92 |  45.2k |   const __m128i t16 = _mm_unpacklo_epi8(t, zero);
   93 |  45.2k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
   94 |  45.2k |   __m128i rep = _mm_set1_epi16((short)0x8000);
   95 |  45.2k |   const __m128i one = _mm_set1_epi16(1);
   96 |        |
   97 |   768k |   for (int i = 0; i < 16; ++i) {
   98 |   723k |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
   99 |   723k |     const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  100 |        |
  101 |   723k |     *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
  102 |   723k |     dst += stride;
  103 |   723k |     rep = _mm_add_epi16(rep, one);
  104 |   723k |   }
  105 |  45.2k | }
  106 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  107 |        |
  108 |        | void aom_paeth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride,
  109 |  62.9k |                                    const uint8_t *above, const uint8_t *left) {
  110 |  62.9k |   __m128i l = _mm_loadl_epi64((const __m128i *)left);
  111 |  62.9k |   const __m128i t = _mm_loadl_epi64((const __m128i *)above);
  112 |  62.9k |   const __m128i zero = _mm_setzero_si128();
  113 |  62.9k |   const __m128i t16 = _mm_unpacklo_epi8(t, zero);
  114 |  62.9k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  115 |  62.9k |   __m128i rep = _mm_set1_epi16((short)0x8000);
  116 |  62.9k |   const __m128i one = _mm_set1_epi16(1);
  117 |        |
  118 |  62.9k |   int i;
  119 |   314k |   for (i = 0; i < 4; ++i) {
  120 |   251k |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
  121 |   251k |     const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  122 |        |
  123 |   251k |     _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
  124 |   251k |     dst += stride;
  125 |   251k |     rep = _mm_add_epi16(rep, one);
  126 |   251k |   }
  127 |  62.9k | }
  128 |        |
  129 |        | void aom_paeth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  130 |   157k |                                    const uint8_t *above, const uint8_t *left) {
  131 |   157k |   __m128i l = _mm_loadl_epi64((const __m128i *)left);
  132 |   157k |   const __m128i t = _mm_loadl_epi64((const __m128i *)above);
  133 |   157k |   const __m128i zero = _mm_setzero_si128();
  134 |   157k |   const __m128i t16 = _mm_unpacklo_epi8(t, zero);
  135 |   157k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  136 |   157k |   __m128i rep = _mm_set1_epi16((short)0x8000);
  137 |   157k |   const __m128i one = _mm_set1_epi16(1);
  138 |        |
  139 |   157k |   int i;
  140 |  1.41M |   for (i = 0; i < 8; ++i) {
  141 |  1.25M |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
  142 |  1.25M |     const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  143 |        |
  144 |  1.25M |     _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
  145 |  1.25M |     dst += stride;
  146 |  1.25M |     rep = _mm_add_epi16(rep, one);
  147 |  1.25M |   }
  148 |   157k | }
  149 |        |
  150 |        | void aom_paeth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  151 |  44.1k |                                     const uint8_t *above, const uint8_t *left) {
  152 |  44.1k |   __m128i l = _mm_load_si128((const __m128i *)left);
  153 |  44.1k |   const __m128i t = _mm_loadl_epi64((const __m128i *)above);
  154 |  44.1k |   const __m128i zero = _mm_setzero_si128();
  155 |  44.1k |   const __m128i t16 = _mm_unpacklo_epi8(t, zero);
  156 |  44.1k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  157 |  44.1k |   __m128i rep = _mm_set1_epi16((short)0x8000);
  158 |  44.1k |   const __m128i one = _mm_set1_epi16(1);
  159 |        |
  160 |  44.1k |   int i;
  161 |   749k |   for (i = 0; i < 16; ++i) {
  162 |   705k |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
  163 |   705k |     const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  164 |        |
  165 |   705k |     _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
  166 |   705k |     dst += stride;
  167 |   705k |     rep = _mm_add_epi16(rep, one);
  168 |   705k |   }
  169 |  44.1k | }
  170 |        |
  171 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  172 |        | void aom_paeth_predictor_8x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  173 |  36.0k |                                     const uint8_t *above, const uint8_t *left) {
  174 |  36.0k |   const __m128i t = _mm_loadl_epi64((const __m128i *)above);
  175 |  36.0k |   const __m128i zero = _mm_setzero_si128();
  176 |  36.0k |   const __m128i t16 = _mm_unpacklo_epi8(t, zero);
  177 |  36.0k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  178 |  36.0k |   const __m128i one = _mm_set1_epi16(1);
  179 |        |
  180 |   108k |   for (int j = 0; j < 2; ++j) {
  181 |  72.1k |     const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  182 |  72.1k |     __m128i rep = _mm_set1_epi16((short)0x8000);
  183 |  1.22M |     for (int i = 0; i < 16; ++i) {
  184 |  1.15M |       const __m128i l16 = _mm_shuffle_epi8(l, rep);
  185 |  1.15M |       const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  186 |        |
  187 |  1.15M |       _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
  188 |  1.15M |       dst += stride;
  189 |  1.15M |       rep = _mm_add_epi16(rep, one);
  190 |  1.15M |     }
  191 |  72.1k |   }
  192 |  36.0k | }
  193 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  194 |        |
  195 |        | // Return 16 8-bit pixels in one row
  196 |        | static inline __m128i paeth_16x1_pred(const __m128i *left, const __m128i *top0,
  197 |        |                                       const __m128i *top1,
  198 |   670k |                                       const __m128i *topleft) {
  199 |   670k |   const __m128i p0 = paeth_8x1_pred(left, top0, topleft);
  200 |   670k |   const __m128i p1 = paeth_8x1_pred(left, top1, topleft);
  201 |   670k |   return _mm_packus_epi16(p0, p1);
  202 |   670k | }
  203 |        |
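A note inferred from the code (not a comment in the file) on why rows are processed as eight 16-bit lanes:

/* With 8-bit inputs,
 *   base = top + left - topleft  lies in [0 + 0 - 255, 255 + 255 - 0] = [-255, 510],
 * and the distances |base - p| can reach 510, so the Paeth arithmetic
 * overflows 8-bit lanes but fits comfortably in signed 16-bit lanes.
 * paeth_8x1_pred therefore handles eight widened pixels per call, and
 * paeth_16x1_pred stitches a 16-pixel row from two such halves, packing
 * back to bytes with _mm_packus_epi16.
 */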
  204 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  205 |        | void aom_paeth_predictor_16x4_ssse3(uint8_t *dst, ptrdiff_t stride,
  206 |  58.9k |                                     const uint8_t *above, const uint8_t *left) {
  207 |  58.9k |   __m128i l = _mm_cvtsi32_si128(((const int *)left)[0]);
  208 |  58.9k |   const __m128i t = _mm_load_si128((const __m128i *)above);
  209 |  58.9k |   const __m128i zero = _mm_setzero_si128();
  210 |  58.9k |   const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  211 |  58.9k |   const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  212 |  58.9k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  213 |  58.9k |   __m128i rep = _mm_set1_epi16((short)0x8000);
  214 |  58.9k |   const __m128i one = _mm_set1_epi16(1);
  215 |        |
  216 |   294k |   for (int i = 0; i < 4; ++i) {
  217 |   235k |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
  218 |   235k |     const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  219 |        |
  220 |   235k |     _mm_store_si128((__m128i *)dst, row);
  221 |   235k |     dst += stride;
  222 |   235k |     rep = _mm_add_epi16(rep, one);
  223 |   235k |   }
  224 |  58.9k | }
  225 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  226 |        |
  227 |        | void aom_paeth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  228 |      0 |                                     const uint8_t *above, const uint8_t *left) {
  229 |      0 |   __m128i l = _mm_loadl_epi64((const __m128i *)left);
  230 |      0 |   const __m128i t = _mm_load_si128((const __m128i *)above);
  231 |      0 |   const __m128i zero = _mm_setzero_si128();
  232 |      0 |   const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  233 |      0 |   const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  234 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  235 |      0 |   __m128i rep = _mm_set1_epi16((short)0x8000);
  236 |      0 |   const __m128i one = _mm_set1_epi16(1);
  237 |        |
  238 |      0 |   int i;
  239 |      0 |   for (i = 0; i < 8; ++i) {
  240 |      0 |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
  241 |      0 |     const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  242 |        |
  243 |      0 |     _mm_store_si128((__m128i *)dst, row);
  244 |      0 |     dst += stride;
  245 |      0 |     rep = _mm_add_epi16(rep, one);
  246 |      0 |   }
  247 |      0 | }
  248 |        |
  249 |        | void aom_paeth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  250 |        |                                      const uint8_t *above,
  251 |      0 |                                      const uint8_t *left) {
  252 |      0 |   __m128i l = _mm_load_si128((const __m128i *)left);
  253 |      0 |   const __m128i t = _mm_load_si128((const __m128i *)above);
  254 |      0 |   const __m128i zero = _mm_setzero_si128();
  255 |      0 |   const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  256 |      0 |   const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  257 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  258 |      0 |   __m128i rep = _mm_set1_epi16((short)0x8000);
  259 |      0 |   const __m128i one = _mm_set1_epi16(1);
  260 |        |
  261 |      0 |   int i;
  262 |      0 |   for (i = 0; i < 16; ++i) {
  263 |      0 |     const __m128i l16 = _mm_shuffle_epi8(l, rep);
  264 |      0 |     const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  265 |        |
  266 |      0 |     _mm_store_si128((__m128i *)dst, row);
  267 |      0 |     dst += stride;
  268 |      0 |     rep = _mm_add_epi16(rep, one);
  269 |      0 |   }
  270 |      0 | }
  271 |        |
  272 |        | void aom_paeth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  273 |        |                                      const uint8_t *above,
  274 |      0 |                                      const uint8_t *left) {
  275 |      0 |   __m128i l = _mm_load_si128((const __m128i *)left);
  276 |      0 |   const __m128i t = _mm_load_si128((const __m128i *)above);
  277 |      0 |   const __m128i zero = _mm_setzero_si128();
  278 |      0 |   const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  279 |      0 |   const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  280 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  281 |      0 |   __m128i rep = _mm_set1_epi16((short)0x8000);
  282 |      0 |   const __m128i one = _mm_set1_epi16(1);
  283 |      0 |   __m128i l16;
  284 |        |
  285 |      0 |   int i;
  286 |      0 |   for (i = 0; i < 16; ++i) {
  287 |      0 |     l16 = _mm_shuffle_epi8(l, rep);
  288 |      0 |     const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  289 |        |
  290 |      0 |     _mm_store_si128((__m128i *)dst, row);
  291 |      0 |     dst += stride;
  292 |      0 |     rep = _mm_add_epi16(rep, one);
  293 |      0 |   }
  294 |        |
  295 |      0 |   l = _mm_load_si128((const __m128i *)(left + 16));
  296 |      0 |   rep = _mm_set1_epi16((short)0x8000);
  297 |      0 |   for (i = 0; i < 16; ++i) {
  298 |      0 |     l16 = _mm_shuffle_epi8(l, rep);
  299 |      0 |     const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  300 |        |
  301 |      0 |     _mm_store_si128((__m128i *)dst, row);
  302 |      0 |     dst += stride;
  303 |      0 |     rep = _mm_add_epi16(rep, one);
  304 |      0 |   }
  305 |      0 | }
  306 |        |
  307 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  308 |        | void aom_paeth_predictor_16x64_ssse3(uint8_t *dst, ptrdiff_t stride,
  309 |        |                                      const uint8_t *above,
  310 |      0 |                                      const uint8_t *left) {
  311 |      0 |   const __m128i t = _mm_load_si128((const __m128i *)above);
  312 |      0 |   const __m128i zero = _mm_setzero_si128();
  313 |      0 |   const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  314 |      0 |   const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  315 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  316 |      0 |   const __m128i one = _mm_set1_epi16(1);
  317 |        |
  318 |      0 |   for (int j = 0; j < 4; ++j) {
  319 |      0 |     const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  320 |      0 |     __m128i rep = _mm_set1_epi16((short)0x8000);
  321 |      0 |     for (int i = 0; i < 16; ++i) {
  322 |      0 |       const __m128i l16 = _mm_shuffle_epi8(l, rep);
  323 |      0 |       const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  324 |      0 |       _mm_store_si128((__m128i *)dst, row);
  325 |      0 |       dst += stride;
  326 |      0 |       rep = _mm_add_epi16(rep, one);
  327 |      0 |     }
  328 |      0 |   }
  329 |      0 | }
  330 |        |
  331 |        | void aom_paeth_predictor_32x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  332 |  27.1k |                                     const uint8_t *above, const uint8_t *left) {
  333 |  27.1k |   const __m128i a = _mm_load_si128((const __m128i *)above);
  334 |  27.1k |   const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  335 |  27.1k |   const __m128i zero = _mm_setzero_si128();
  336 |  27.1k |   const __m128i al = _mm_unpacklo_epi8(a, zero);
  337 |  27.1k |   const __m128i ah = _mm_unpackhi_epi8(a, zero);
  338 |  27.1k |   const __m128i bl = _mm_unpacklo_epi8(b, zero);
  339 |  27.1k |   const __m128i bh = _mm_unpackhi_epi8(b, zero);
  340 |        |
  341 |  27.1k |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  342 |  27.1k |   __m128i rep = _mm_set1_epi16((short)0x8000);
  343 |  27.1k |   const __m128i one = _mm_set1_epi16(1);
  344 |  27.1k |   const __m128i l = _mm_loadl_epi64((const __m128i *)left);
  345 |  27.1k |   __m128i l16;
  346 |        |
  347 |   244k |   for (int i = 0; i < 8; ++i) {
  348 |   217k |     l16 = _mm_shuffle_epi8(l, rep);
  349 |   217k |     const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  350 |   217k |     const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  351 |        |
  352 |   217k |     _mm_store_si128((__m128i *)dst, r32l);
  353 |   217k |     _mm_store_si128((__m128i *)(dst + 16), r32h);
  354 |   217k |     dst += stride;
  355 |   217k |     rep = _mm_add_epi16(rep, one);
  356 |   217k |   }
  357 |  27.1k | }
  358 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  359 |        |
  360 |        | void aom_paeth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  361 |        |                                      const uint8_t *above,
  362 |      0 |                                      const uint8_t *left) {
  363 |      0 |   const __m128i a = _mm_load_si128((const __m128i *)above);
  364 |      0 |   const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  365 |      0 |   const __m128i zero = _mm_setzero_si128();
  366 |      0 |   const __m128i al = _mm_unpacklo_epi8(a, zero);
  367 |      0 |   const __m128i ah = _mm_unpackhi_epi8(a, zero);
  368 |      0 |   const __m128i bl = _mm_unpacklo_epi8(b, zero);
  369 |      0 |   const __m128i bh = _mm_unpackhi_epi8(b, zero);
  370 |        |
  371 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  372 |      0 |   __m128i rep = _mm_set1_epi16((short)0x8000);
  373 |      0 |   const __m128i one = _mm_set1_epi16(1);
  374 |      0 |   __m128i l = _mm_load_si128((const __m128i *)left);
  375 |      0 |   __m128i l16;
  376 |        |
  377 |      0 |   int i;
  378 |      0 |   for (i = 0; i < 16; ++i) {
  379 |      0 |     l16 = _mm_shuffle_epi8(l, rep);
  380 |      0 |     const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  381 |      0 |     const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  382 |        |
  383 |      0 |     _mm_store_si128((__m128i *)dst, r32l);
  384 |      0 |     _mm_store_si128((__m128i *)(dst + 16), r32h);
  385 |      0 |     dst += stride;
  386 |      0 |     rep = _mm_add_epi16(rep, one);
  387 |      0 |   }
  388 |      0 | }
  389 |        |
  390 |        | void aom_paeth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  391 |        |                                      const uint8_t *above,
  392 |      0 |                                      const uint8_t *left) {
  393 |      0 |   const __m128i a = _mm_load_si128((const __m128i *)above);
  394 |      0 |   const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  395 |      0 |   const __m128i zero = _mm_setzero_si128();
  396 |      0 |   const __m128i al = _mm_unpacklo_epi8(a, zero);
  397 |      0 |   const __m128i ah = _mm_unpackhi_epi8(a, zero);
  398 |      0 |   const __m128i bl = _mm_unpacklo_epi8(b, zero);
  399 |      0 |   const __m128i bh = _mm_unpackhi_epi8(b, zero);
  400 |        |
  401 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  402 |      0 |   __m128i rep = _mm_set1_epi16((short)0x8000);
  403 |      0 |   const __m128i one = _mm_set1_epi16(1);
  404 |      0 |   __m128i l = _mm_load_si128((const __m128i *)left);
  405 |      0 |   __m128i l16;
  406 |        |
  407 |      0 |   int i;
  408 |      0 |   for (i = 0; i < 16; ++i) {
  409 |      0 |     l16 = _mm_shuffle_epi8(l, rep);
  410 |      0 |     const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  411 |      0 |     const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  412 |        |
  413 |      0 |     _mm_store_si128((__m128i *)dst, r32l);
  414 |      0 |     _mm_store_si128((__m128i *)(dst + 16), r32h);
  415 |      0 |     dst += stride;
  416 |      0 |     rep = _mm_add_epi16(rep, one);
  417 |      0 |   }
  418 |        |
  419 |      0 |   rep = _mm_set1_epi16((short)0x8000);
  420 |      0 |   l = _mm_load_si128((const __m128i *)(left + 16));
  421 |      0 |   for (i = 0; i < 16; ++i) {
  422 |      0 |     l16 = _mm_shuffle_epi8(l, rep);
  423 |      0 |     const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  424 |      0 |     const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  425 |        |
  426 |      0 |     _mm_store_si128((__m128i *)dst, r32l);
  427 |      0 |     _mm_store_si128((__m128i *)(dst + 16), r32h);
  428 |      0 |     dst += stride;
  429 |      0 |     rep = _mm_add_epi16(rep, one);
  430 |      0 |   }
  431 |      0 | }
  432 |        |
  433 |        | void aom_paeth_predictor_32x64_ssse3(uint8_t *dst, ptrdiff_t stride,
  434 |        |                                      const uint8_t *above,
  435 |      0 |                                      const uint8_t *left) {
  436 |      0 |   const __m128i a = _mm_load_si128((const __m128i *)above);
  437 |      0 |   const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  438 |      0 |   const __m128i zero = _mm_setzero_si128();
  439 |      0 |   const __m128i al = _mm_unpacklo_epi8(a, zero);
  440 |      0 |   const __m128i ah = _mm_unpackhi_epi8(a, zero);
  441 |      0 |   const __m128i bl = _mm_unpacklo_epi8(b, zero);
  442 |      0 |   const __m128i bh = _mm_unpackhi_epi8(b, zero);
  443 |        |
  444 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  445 |      0 |   const __m128i one = _mm_set1_epi16(1);
  446 |      0 |   __m128i l16;
  447 |        |
  448 |      0 |   int i, j;
  449 |      0 |   for (j = 0; j < 4; ++j) {
  450 |      0 |     const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  451 |      0 |     __m128i rep = _mm_set1_epi16((short)0x8000);
  452 |      0 |     for (i = 0; i < 16; ++i) {
  453 |      0 |       l16 = _mm_shuffle_epi8(l, rep);
  454 |      0 |       const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  455 |      0 |       const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  456 |        |
  457 |      0 |       _mm_store_si128((__m128i *)dst, r32l);
  458 |      0 |       _mm_store_si128((__m128i *)(dst + 16), r32h);
  459 |      0 |       dst += stride;
  460 |      0 |       rep = _mm_add_epi16(rep, one);
  461 |      0 |     }
  462 |      0 |   }
  463 |      0 | }
  464 |        |
  465 |        | void aom_paeth_predictor_64x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  466 |        |                                      const uint8_t *above,
  467 |      0 |                                      const uint8_t *left) {
  468 |      0 |   const __m128i a = _mm_load_si128((const __m128i *)above);
  469 |      0 |   const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  470 |      0 |   const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
  471 |      0 |   const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
  472 |      0 |   const __m128i zero = _mm_setzero_si128();
  473 |      0 |   const __m128i al = _mm_unpacklo_epi8(a, zero);
  474 |      0 |   const __m128i ah = _mm_unpackhi_epi8(a, zero);
  475 |      0 |   const __m128i bl = _mm_unpacklo_epi8(b, zero);
  476 |      0 |   const __m128i bh = _mm_unpackhi_epi8(b, zero);
  477 |      0 |   const __m128i cl = _mm_unpacklo_epi8(c, zero);
  478 |      0 |   const __m128i ch = _mm_unpackhi_epi8(c, zero);
  479 |      0 |   const __m128i dl = _mm_unpacklo_epi8(d, zero);
  480 |      0 |   const __m128i dh = _mm_unpackhi_epi8(d, zero);
  481 |        |
  482 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  483 |      0 |   const __m128i one = _mm_set1_epi16(1);
  484 |      0 |   __m128i l16;
  485 |        |
  486 |      0 |   int i, j;
  487 |      0 |   for (j = 0; j < 2; ++j) {
  488 |      0 |     const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  489 |      0 |     __m128i rep = _mm_set1_epi16((short)0x8000);
  490 |      0 |     for (i = 0; i < 16; ++i) {
  491 |      0 |       l16 = _mm_shuffle_epi8(l, rep);
  492 |      0 |       const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  493 |      0 |       const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  494 |      0 |       const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
  495 |      0 |       const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
  496 |        |
  497 |      0 |       _mm_store_si128((__m128i *)dst, r0);
  498 |      0 |       _mm_store_si128((__m128i *)(dst + 16), r1);
  499 |      0 |       _mm_store_si128((__m128i *)(dst + 32), r2);
  500 |      0 |       _mm_store_si128((__m128i *)(dst + 48), r3);
  501 |      0 |       dst += stride;
  502 |      0 |       rep = _mm_add_epi16(rep, one);
  503 |      0 |     }
  504 |      0 |   }
  505 |      0 | }
  506 |        |
  507 |        | void aom_paeth_predictor_64x64_ssse3(uint8_t *dst, ptrdiff_t stride,
  508 |        |                                      const uint8_t *above,
  509 |      0 |                                      const uint8_t *left) {
  510 |      0 |   const __m128i a = _mm_load_si128((const __m128i *)above);
  511 |      0 |   const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  512 |      0 |   const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
  513 |      0 |   const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
  514 |      0 |   const __m128i zero = _mm_setzero_si128();
  515 |      0 |   const __m128i al = _mm_unpacklo_epi8(a, zero);
  516 |      0 |   const __m128i ah = _mm_unpackhi_epi8(a, zero);
  517 |      0 |   const __m128i bl = _mm_unpacklo_epi8(b, zero);
  518 |      0 |   const __m128i bh = _mm_unpackhi_epi8(b, zero);
  519 |      0 |   const __m128i cl = _mm_unpacklo_epi8(c, zero);
  520 |      0 |   const __m128i ch = _mm_unpackhi_epi8(c, zero);
  521 |      0 |   const __m128i dl = _mm_unpacklo_epi8(d, zero);
  522 |      0 |   const __m128i dh = _mm_unpackhi_epi8(d, zero);
  523 |        |
  524 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  525 |      0 |   const __m128i one = _mm_set1_epi16(1);
  526 |      0 |   __m128i l16;
  527 |        |
  528 |      0 |   int i, j;
  529 |      0 |   for (j = 0; j < 4; ++j) {
  530 |      0 |     const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  531 |      0 |     __m128i rep = _mm_set1_epi16((short)0x8000);
  532 |      0 |     for (i = 0; i < 16; ++i) {
  533 |      0 |       l16 = _mm_shuffle_epi8(l, rep);
  534 |      0 |       const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  535 |      0 |       const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  536 |      0 |       const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
  537 |      0 |       const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
  538 |        |
  539 |      0 |       _mm_store_si128((__m128i *)dst, r0);
  540 |      0 |       _mm_store_si128((__m128i *)(dst + 16), r1);
  541 |      0 |       _mm_store_si128((__m128i *)(dst + 32), r2);
  542 |      0 |       _mm_store_si128((__m128i *)(dst + 48), r3);
  543 |      0 |       dst += stride;
  544 |      0 |       rep = _mm_add_epi16(rep, one);
  545 |      0 |     }
  546 |      0 |   }
  547 |      0 | }
  548 |        |
  549 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  550 |        | void aom_paeth_predictor_64x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  551 |        |                                      const uint8_t *above,
  552 |      0 |                                      const uint8_t *left) {
  553 |      0 |   const __m128i a = _mm_load_si128((const __m128i *)above);
  554 |      0 |   const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  555 |      0 |   const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
  556 |      0 |   const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
  557 |      0 |   const __m128i zero = _mm_setzero_si128();
  558 |      0 |   const __m128i al = _mm_unpacklo_epi8(a, zero);
  559 |      0 |   const __m128i ah = _mm_unpackhi_epi8(a, zero);
  560 |      0 |   const __m128i bl = _mm_unpacklo_epi8(b, zero);
  561 |      0 |   const __m128i bh = _mm_unpackhi_epi8(b, zero);
  562 |      0 |   const __m128i cl = _mm_unpacklo_epi8(c, zero);
  563 |      0 |   const __m128i ch = _mm_unpackhi_epi8(c, zero);
  564 |      0 |   const __m128i dl = _mm_unpacklo_epi8(d, zero);
  565 |      0 |   const __m128i dh = _mm_unpackhi_epi8(d, zero);
  566 |        |
  567 |      0 |   const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  568 |      0 |   const __m128i one = _mm_set1_epi16(1);
  569 |      0 |   __m128i l16;
  570 |        |
  571 |      0 |   int i;
  572 |      0 |   const __m128i l = _mm_load_si128((const __m128i *)left);
  573 |      0 |   __m128i rep = _mm_set1_epi16((short)0x8000);
  574 |      0 |   for (i = 0; i < 16; ++i) {
  575 |      0 |     l16 = _mm_shuffle_epi8(l, rep);
  576 |      0 |     const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  577 |      0 |     const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  578 |      0 |     const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
  579 |      0 |     const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
  580 |        |
  581 |      0 |     _mm_store_si128((__m128i *)dst, r0);
  582 |      0 |     _mm_store_si128((__m128i *)(dst + 16), r1);
  583 |      0 |     _mm_store_si128((__m128i *)(dst + 32), r2);
  584 |      0 |     _mm_store_si128((__m128i *)(dst + 48), r3);
  585 |      0 |     dst += stride;
  586 |      0 |     rep = _mm_add_epi16(rep, one);
  587 |      0 |   }
  588 |      0 | }
  589 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  590 |        |
  591 |        | // -----------------------------------------------------------------------------
  592 |        | // SMOOTH_PRED
  593 |        |
  594 |        | // pixels[0]: above and below_pred interleave vector
  595 |        | // pixels[1]: left vector
  596 |        | // pixels[2]: right_pred vector
  597 |        | static inline void load_pixel_w4(const uint8_t *above, const uint8_t *left,
  598 |   229k |                                  int height, __m128i *pixels) {
  599 |   229k |   __m128i d = _mm_cvtsi32_si128(((const int *)above)[0]);
  600 |   229k |   if (height == 4)
  601 |   145k |     pixels[1] = _mm_cvtsi32_si128(((const int *)left)[0]);
  602 |  84.4k |   else if (height == 8)
  603 |  56.0k |     pixels[1] = _mm_loadl_epi64(((const __m128i *)left));
  604 |  28.3k |   else
  605 |  28.3k |     pixels[1] = _mm_loadu_si128(((const __m128i *)left));
  606 |        |
  607 |   229k |   pixels[2] = _mm_set1_epi16((int16_t)above[3]);
  608 |        |
  609 |   229k |   const __m128i bp = _mm_set1_epi16((int16_t)left[height - 1]);
  610 |   229k |   const __m128i zero = _mm_setzero_si128();
  611 |   229k |   d = _mm_unpacklo_epi8(d, zero);
  612 |   229k |   pixels[0] = _mm_unpacklo_epi16(d, bp);
  613 |   229k | }
  614 |        |
  615 |        | // weight_h[0]: weight_h vector
  616 |        | // weight_h[1]: scale - weight_h vector
  617 |        | // weight_h[2]: same as [0], second half for height = 16 only
  618 |        | // weight_h[3]: same as [1], second half for height = 16 only
  619 |        | // weight_w[0]: weights_w and scale - weights_w interleave vector
  620 |        | static inline void load_weight_w4(int height, __m128i *weight_h,
  621 |   229k |                                   __m128i *weight_w) {
  622 |   229k |   const __m128i zero = _mm_setzero_si128();
  623 |   229k |   const __m128i d = _mm_set1_epi16((int16_t)(1 << SMOOTH_WEIGHT_LOG2_SCALE));
  624 |   229k |   const __m128i t = _mm_cvtsi32_si128(((const int *)smooth_weights)[0]);
  625 |   229k |   weight_h[0] = _mm_unpacklo_epi8(t, zero);
  626 |   229k |   weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  627 |   229k |   weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
  628 |        |
  629 |   229k |   if (height == 8) {
  630 |  56.0k |     const __m128i weight = _mm_loadl_epi64((const __m128i *)&smooth_weights[4]);
  631 |  56.0k |     weight_h[0] = _mm_unpacklo_epi8(weight, zero);
  632 |  56.0k |     weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  633 |   173k |   } else if (height == 16) {
  634 |  28.3k |     const __m128i weight =
  635 |  28.3k |         _mm_loadu_si128((const __m128i *)&smooth_weights[12]);
  636 |  28.3k |     weight_h[0] = _mm_unpacklo_epi8(weight, zero);
  637 |  28.3k |     weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  638 |  28.3k |     weight_h[2] = _mm_unpackhi_epi8(weight, zero);
  639 |  28.3k |     weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
  640 |  28.3k |   }
  641 |   229k | }
  642 |        |
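A worked example of the resulting layout (an illustration, assuming SMOOTH_WEIGHT_LOG2_SCALE == 8 and that the size-4 entries of smooth_weights are {255, 149, 85, 64}, as in the AV1 smooth weight table):

/* After load_weight_w4(4, wh, ww):
 *   wh[0] = { 255, 149,  85,  64, ... }             // w[y] per 16-bit lane
 *   wh[1] = {   1, 107, 171, 192, ... }             // 256 - w[y]
 *   ww[0] = { 255, 1, 149, 107, 85, 171, 64, 192 }  // (w[x], 256 - w[x]) pairs
 * The ww interleave lets a single _mm_madd_epi16 against an interleaved
 * (left[y], right_pred) pair produce
 *   w[x] * left[y] + (256 - w[x]) * right_pred
 * in each 32-bit lane.
 */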
  643 |        | static inline void smooth_pred_4xh(const __m128i *pixel, const __m128i *wh,
  644 |        |                                    const __m128i *ww, int h, uint8_t *dst,
  645 |   258k |                                    ptrdiff_t stride, int second_half) {
  646 |   258k |   const __m128i round = _mm_set1_epi32((1 << SMOOTH_WEIGHT_LOG2_SCALE));
  647 |   258k |   const __m128i one = _mm_set1_epi16(1);
  648 |   258k |   const __m128i inc = _mm_set1_epi16(0x202);
  649 |   258k |   const __m128i gat = _mm_set1_epi32(0xc080400);
  650 |   258k |   __m128i rep = second_half ? _mm_set1_epi16((short)0x8008)
  651 |   258k |                             : _mm_set1_epi16((short)0x8000);
  652 |   258k |   __m128i d = _mm_set1_epi16(0x100);
  653 |        |
  654 |  1.74M |   for (int i = 0; i < h; ++i) {
  655 |  1.48M |     const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
  656 |  1.48M |     const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
  657 |  1.48M |     const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
  658 |  1.48M |     __m128i s = _mm_madd_epi16(pixel[0], wh_sc);
  659 |        |
  660 |  1.48M |     __m128i b = _mm_shuffle_epi8(pixel[1], rep);
  661 |  1.48M |     b = _mm_unpacklo_epi16(b, pixel[2]);
  662 |  1.48M |     __m128i sum = _mm_madd_epi16(b, ww[0]);
  663 |        |
  664 |  1.48M |     sum = _mm_add_epi32(s, sum);
  665 |  1.48M |     sum = _mm_add_epi32(sum, round);
  666 |  1.48M |     sum = _mm_srai_epi32(sum, 1 + SMOOTH_WEIGHT_LOG2_SCALE);
  667 |        |
  668 |  1.48M |     sum = _mm_shuffle_epi8(sum, gat);
  669 |  1.48M |     *(int *)dst = _mm_cvtsi128_si32(sum);
  670 |  1.48M |     dst += stride;
  671 |        |
  672 |  1.48M |     rep = _mm_add_epi16(rep, one);
  673 |  1.48M |     d = _mm_add_epi16(d, inc);
  674 |  1.48M |   }
  675 |   258k | }
  676 |        |
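For orientation, a scalar sketch of the SMOOTH_PRED blend that smooth_pred_4xh vectorizes (illustrative only, written from the structure above with scale S = 1 << SMOOTH_WEIGHT_LOG2_SCALE assumed to be 256; `smooth_scalar` is a hypothetical helper, not part of this file):

#include <stdint.h>

// Each output pixel blends the column's top pixel toward bottom_left using
// the row weight w_h[y], and the row's left pixel toward top_right using the
// column weight w_w[x]; the two blends are summed and divided by 2 * S.
static uint8_t smooth_scalar(const uint8_t *above, const uint8_t *left,
                             const uint8_t *w_w, const uint8_t *w_h,
                             int w, int h, int x, int y) {
  const int S = 256;  // assumed 1 << SMOOTH_WEIGHT_LOG2_SCALE
  const int sum = w_h[y] * above[x] + (S - w_h[y]) * left[h - 1] +
                  w_w[x] * left[y] + (S - w_w[x]) * above[w - 1];
  return (uint8_t)((sum + S) >> 9);  // matches round = S, shift = 1 + log2(S)
}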
  677 |        | void aom_smooth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
  678 |   145k |                                     const uint8_t *above, const uint8_t *left) {
  679 |   145k |   __m128i pixels[3];
  680 |   145k |   load_pixel_w4(above, left, 4, pixels);
  681 |        |
  682 |   145k |   __m128i wh[4], ww[2];
  683 |   145k |   load_weight_w4(4, wh, ww);
  684 |        |
  685 |   145k |   smooth_pred_4xh(pixels, wh, ww, 4, dst, stride, 0);
  686 |   145k | }
  687 |        |
  688 |        | void aom_smooth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  689 |  56.0k |                                     const uint8_t *above, const uint8_t *left) {
  690 |  56.0k |   __m128i pixels[3];
  691 |  56.0k |   load_pixel_w4(above, left, 8, pixels);
  692 |        |
  693 |  56.0k |   __m128i wh[4], ww[2];
  694 |  56.0k |   load_weight_w4(8, wh, ww);
  695 |        |
  696 |  56.0k |   smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0);
  697 |  56.0k | }
  698 |        |
  699 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  700 |        | void aom_smooth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  701 |        |                                      const uint8_t *above,
  702 |  28.3k |                                      const uint8_t *left) {
  703 |  28.3k |   __m128i pixels[3];
  704 |  28.3k |   load_pixel_w4(above, left, 16, pixels);
  705 |        |
  706 |  28.3k |   __m128i wh[4], ww[2];
  707 |  28.3k |   load_weight_w4(16, wh, ww);
  708 |        |
  709 |  28.3k |   smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0);
  710 |  28.3k |   dst += stride << 3;
  711 |  28.3k |   smooth_pred_4xh(pixels, &wh[2], ww, 8, dst, stride, 1);
  712 |  28.3k | }
  713 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  714 |        |
  715 |        | // pixels[0]: above and below_pred interleave vector, first half
  716 |        | // pixels[1]: above and below_pred interleave vector, second half
  717 |        | // pixels[2]: left vector
  718 |        | // pixels[3]: right_pred vector
  719 |        | // pixels[4]: above and below_pred interleave vector, first half
  720 |        | // pixels[5]: above and below_pred interleave vector, second half
  721 |        | // pixels[6]: left vector + 16
  722 |        | // pixels[7]: right_pred vector
  723 |        | static inline void load_pixel_w8(const uint8_t *above, const uint8_t *left,
  724 |   300k |                                  int height, __m128i *pixels) {
  725 |   300k |   const __m128i zero = _mm_setzero_si128();
  726 |   300k |   const __m128i bp = _mm_set1_epi16((int16_t)left[height - 1]);
  727 |   300k |   __m128i d = _mm_loadl_epi64((const __m128i *)above);
  728 |   300k |   d = _mm_unpacklo_epi8(d, zero);
  729 |   300k |   pixels[0] = _mm_unpacklo_epi16(d, bp);
  730 |   300k |   pixels[1] = _mm_unpackhi_epi16(d, bp);
  731 |        |
  732 |   300k |   pixels[3] = _mm_set1_epi16((int16_t)above[7]);
  733 |        |
  734 |   300k |   if (height == 4) {
  735 |  78.3k |     pixels[2] = _mm_cvtsi32_si128(((const int *)left)[0]);
  736 |   222k |   } else if (height == 8) {
  737 |   162k |     pixels[2] = _mm_loadl_epi64((const __m128i *)left);
  738 |   162k |   } else if (height == 16) {
  739 |  43.8k |     pixels[2] = _mm_load_si128((const __m128i *)left);
  740 |  43.8k |   } else {
  741 |  15.7k |     pixels[2] = _mm_load_si128((const __m128i *)left);
  742 |  15.7k |     pixels[4] = pixels[0];
  743 |  15.7k |     pixels[5] = pixels[1];
  744 |  15.7k |     pixels[6] = _mm_load_si128((const __m128i *)(left + 16));
  745 |  15.7k |     pixels[7] = pixels[3];
  746 |  15.7k |   }
  747 |   300k | }
  748 |        |
  749 |        | // weight_h[0]: weight_h vector
  750 |        | // weight_h[1]: scale - weight_h vector
  751 |        | // weight_h[2]: same as [0], offset 8
  752 |        | // weight_h[3]: same as [1], offset 8
  753 |        | // weight_h[4]: same as [0], offset 16
  754 |        | // weight_h[5]: same as [1], offset 16
  755 |        | // weight_h[6]: same as [0], offset 24
  756 |        | // weight_h[7]: same as [1], offset 24
  757 |        | // weight_w[0]: weights_w and scale - weights_w interleave vector, first half
  758 |        | // weight_w[1]: weights_w and scale - weights_w interleave vector, second half
  759 |        | static inline void load_weight_w8(int height, __m128i *weight_h,
  760 |   300k |                                   __m128i *weight_w) {
  761 |   300k |   const __m128i zero = _mm_setzero_si128();
  762 |   300k |   const int we_offset = height < 8 ? 0 : 4;
  763 |   300k |   __m128i we = _mm_loadu_si128((const __m128i *)&smooth_weights[we_offset]);
  764 |   300k |   weight_h[0] = _mm_unpacklo_epi8(we, zero);
  765 |   300k |   const __m128i d = _mm_set1_epi16((int16_t)(1 << SMOOTH_WEIGHT_LOG2_SCALE));
  766 |   300k |   weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  767 |        |
  768 |   300k |   if (height == 4) {
  769 |  78.3k |     we = _mm_srli_si128(we, 4);
  770 |  78.3k |     __m128i tmp1 = _mm_unpacklo_epi8(we, zero);
  771 |  78.3k |     __m128i tmp2 = _mm_sub_epi16(d, tmp1);
  772 |  78.3k |     weight_w[0] = _mm_unpacklo_epi16(tmp1, tmp2);
  773 |  78.3k |     weight_w[1] = _mm_unpackhi_epi16(tmp1, tmp2);
  774 |   222k |   } else {
  775 |   222k |     weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
  776 |   222k |     weight_w[1] = _mm_unpackhi_epi16(weight_h[0], weight_h[1]);
  777 |   222k |   }
  778 |        |
  779 |   300k |   if (height == 16) {
  780 |  43.8k |     we = _mm_loadu_si128((const __m128i *)&smooth_weights[12]);
  781 |  43.8k |     weight_h[0] = _mm_unpacklo_epi8(we, zero);
  782 |  43.8k |     weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  783 |  43.8k |     weight_h[2] = _mm_unpackhi_epi8(we, zero);
  784 |  43.8k |     weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
  785 |   256k |   } else if (height == 32) {
  786 |  15.7k |     const __m128i weight_lo =
  787 |  15.7k |         _mm_loadu_si128((const __m128i *)&smooth_weights[28]);
  788 |  15.7k |     weight_h[0] = _mm_unpacklo_epi8(weight_lo, zero);
  789 |  15.7k |     weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  790 |  15.7k |     weight_h[2] = _mm_unpackhi_epi8(weight_lo, zero);
  791 |  15.7k |     weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
  792 |  15.7k |     const __m128i weight_hi =
  793 |  15.7k |         _mm_loadu_si128((const __m128i *)&smooth_weights[28 + 16]);
  794 |  15.7k |     weight_h[4] = _mm_unpacklo_epi8(weight_hi, zero);
  795 |  15.7k |     weight_h[5] = _mm_sub_epi16(d, weight_h[4]);
  796 |  15.7k |     weight_h[6] = _mm_unpackhi_epi8(weight_hi, zero);
  797 |  15.7k |     weight_h[7] = _mm_sub_epi16(d, weight_h[6]);
  798 |  15.7k |   }
  799 |   300k | }
  800 |        |
  801 |        | static inline void smooth_pred_8xh(const __m128i *pixels, const __m128i *wh,
  802 |        |                                    const __m128i *ww, int h, uint8_t *dst,
  803 |   391k |                                    ptrdiff_t stride, int second_half) {
  804 |   391k |   const __m128i round = _mm_set1_epi32((1 << SMOOTH_WEIGHT_LOG2_SCALE));
  805 |   391k |   const __m128i one = _mm_set1_epi16(1);
  806 |   391k |   const __m128i inc = _mm_set1_epi16(0x202);
  807 |   391k |   const __m128i gat = _mm_set_epi32(0, 0, 0xe0c0a08, 0x6040200);
  808 |        |
  809 |   391k |   __m128i rep = second_half ? _mm_set1_epi16((short)0x8008)
  810 |   391k |                             : _mm_set1_epi16((short)0x8000);
  811 |   391k |   __m128i d = _mm_set1_epi16(0x100);
  812 |        |
  813 |   391k |   int i;
  814 |  3.21M |   for (i = 0; i < h; ++i) {
  815 |  2.81M |     const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
  816 |  2.81M |     const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
  817 |  2.81M |     const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
  818 |  2.81M |     __m128i s0 = _mm_madd_epi16(pixels[0], wh_sc);
  819 |  2.81M |     __m128i s1 = _mm_madd_epi16(pixels[1], wh_sc);
  820 |        |
  821 |  2.81M |     __m128i b = _mm_shuffle_epi8(pixels[2], rep);
  822 |  2.81M |     b = _mm_unpacklo_epi16(b, pixels[3]);
  823 |  2.81M |     __m128i sum0 = _mm_madd_epi16(b, ww[0]);
  824 |  2.81M |     __m128i sum1 = _mm_madd_epi16(b, ww[1]);
  825 |        |
  826 |  2.81M |     s0 = _mm_add_epi32(s0, sum0);
  827 |  2.81M |     s0 = _mm_add_epi32(s0, round);
  828 |  2.81M |     s0 = _mm_srai_epi32(s0, 1 + SMOOTH_WEIGHT_LOG2_SCALE);
  829 |        |
  830 |  2.81M |     s1 = _mm_add_epi32(s1, sum1);
  831 |  2.81M |     s1 = _mm_add_epi32(s1, round);
  832 |  2.81M |     s1 = _mm_srai_epi32(s1, 1 + SMOOTH_WEIGHT_LOG2_SCALE);
  833 |        |
  834 |  2.81M |     sum0 = _mm_packus_epi16(s0, s1);
  835 |  2.81M |     sum0 = _mm_shuffle_epi8(sum0, gat);
  836 |  2.81M |     _mm_storel_epi64((__m128i *)dst, sum0);
  837 |  2.81M |     dst += stride;
  838 |        |
  839 |  2.81M |     rep = _mm_add_epi16(rep, one);
  840 |  2.81M |     d = _mm_add_epi16(d, inc);
  841 |  2.81M |   }
  842 |   391k | }
  843 |        |
  844 |        | void aom_smooth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride,
  845 |  78.3k |                                     const uint8_t *above, const uint8_t *left) {
  846 |  78.3k |   __m128i pixels[4];
  847 |  78.3k |   load_pixel_w8(above, left, 4, pixels);
  848 |        |
  849 |  78.3k |   __m128i wh[4], ww[2];
  850 |  78.3k |   load_weight_w8(4, wh, ww);
  851 |        |
  852 |  78.3k |   smooth_pred_8xh(pixels, wh, ww, 4, dst, stride, 0);
  853 |  78.3k | }
  854 |        |
  855 |        | void aom_smooth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  856 |   162k |                                     const uint8_t *above, const uint8_t *left) {
  857 |   162k |   __m128i pixels[4];
  858 |   162k |   load_pixel_w8(above, left, 8, pixels);
  859 |        |
  860 |   162k |   __m128i wh[4], ww[2];
  861 |   162k |   load_weight_w8(8, wh, ww);
  862 |        |
  863 |   162k |   smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0);
  864 |   162k | }
  865 |        |
  866 |        | void aom_smooth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  867 |        |                                      const uint8_t *above,
  868 |  43.8k |                                      const uint8_t *left) {
  869 |  43.8k |   __m128i pixels[4];
  870 |  43.8k |   load_pixel_w8(above, left, 16, pixels);
  871 |        |
  872 |  43.8k |   __m128i wh[4], ww[2];
  873 |  43.8k |   load_weight_w8(16, wh, ww);
  874 |        |
  875 |  43.8k |   smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0);
  876 |  43.8k |   dst += stride << 3;
  877 |  43.8k |   smooth_pred_8xh(pixels, &wh[2], ww, 8, dst, stride, 1);
  878 |  43.8k | }
  879 |        |
  880 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  881 |        | void aom_smooth_predictor_8x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  882 |        |                                      const uint8_t *above,
  883 |  15.7k |                                      const uint8_t *left) {
  884 |  15.7k |   __m128i pixels[8];
  885 |  15.7k |   load_pixel_w8(above, left, 32, pixels);
  886 |        |
  887 |  15.7k |   __m128i wh[8], ww[2];
  888 |  15.7k |   load_weight_w8(32, wh, ww);
  889 |        |
  890 |  15.7k |   smooth_pred_8xh(&pixels[0], wh, ww, 8, dst, stride, 0);
  891 |  15.7k |   dst += stride << 3;
  892 |  15.7k |   smooth_pred_8xh(&pixels[0], &wh[2], ww, 8, dst, stride, 1);
  893 |  15.7k |   dst += stride << 3;
  894 |  15.7k |   smooth_pred_8xh(&pixels[4], &wh[4], ww, 8, dst, stride, 0);
  895 |  15.7k |   dst += stride << 3;
  896 |  15.7k |   smooth_pred_8xh(&pixels[4], &wh[6], ww, 8, dst, stride, 1);
  897 |  15.7k | }
  898 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  899 |        |
  900 |        | // TODO(slavarnway): Visual Studio only supports restrict when /std:c11
  901 |        | // (available in 2019+) or greater is specified; __restrict can be used in that
  902 |        | // case. This should be moved to rtcd and used consistently between the
  903 |        | // function declarations and definitions to avoid warnings in Visual Studio
  904 |        | // when defining LIBAOM_RESTRICT to restrict or __restrict.
  905 |        | #if defined(_MSC_VER)
  906 |        | #define LIBAOM_RESTRICT
  907 |        | #else
  908 |        | #define LIBAOM_RESTRICT restrict
  909 |        | #endif
  910 |        |
  911 |   400k | static AOM_FORCE_INLINE __m128i Load4(const void *src) {
  912 |        |   // With new compilers such as clang 8.0.0 we can use the new _mm_loadu_si32
  913 |        |   // intrinsic. Both _mm_loadu_si32(src) and the code here are compiled into a
  914 |        |   // movss instruction.
  915 |        |   //
  916 |        |   // Until compiler support of _mm_loadu_si32 is widespread, use of
  917 |        |   // _mm_loadu_si32 is banned.
  918 |   400k |   int val;
  919 |   400k |   memcpy(&val, src, sizeof(val));
  920 |   400k |   return _mm_cvtsi32_si128(val);
  921 |   400k | }
  922 |        |
  923 |  77.3M | static AOM_FORCE_INLINE __m128i LoadLo8(const void *a) {
  924 |  77.3M |   return _mm_loadl_epi64((const __m128i *)(a));
  925 |  77.3M | }
  926 |        |
  927 |   744k | static AOM_FORCE_INLINE __m128i LoadUnaligned16(const void *a) {
  928 |   744k |   return _mm_loadu_si128((const __m128i *)(a));
  929 |   744k | }
  930 |        |
  931 |   918k | static AOM_FORCE_INLINE void Store4(void *dst, const __m128i x) {
  932 |   918k |   const int val = _mm_cvtsi128_si32(x);
  933 |   918k |   memcpy(dst, &val, sizeof(val));
  934 |   918k | }
  935 |        |
  936 |  39.8M | static AOM_FORCE_INLINE void StoreLo8(void *a, const __m128i v) {
  937 |  39.8M |   _mm_storel_epi64((__m128i *)(a), v);
  938 |  39.8M | }
  939 |        |
  940 |  12.8M | static AOM_FORCE_INLINE void StoreUnaligned16(void *a, const __m128i v) {
  941 |  12.8M |   _mm_storeu_si128((__m128i *)(a), v);
  942 |  12.8M | }
  943 |        |
  944 |  78.5M | static AOM_FORCE_INLINE __m128i cvtepu8_epi16(__m128i x) {
  945 |  78.5M |   return _mm_unpacklo_epi8((x), _mm_setzero_si128());
  946 |  78.5M | }
  947 |        |
  948 |   253k | static AOM_FORCE_INLINE __m128i cvtepu8_epi32(__m128i x) {
  949 |   253k |   const __m128i tmp = _mm_unpacklo_epi8((x), _mm_setzero_si128());
  950 |   253k |   return _mm_unpacklo_epi16(tmp, _mm_setzero_si128());
  951 |   253k | }
  952 |        |
  953 |  38.3M | static AOM_FORCE_INLINE __m128i cvtepu16_epi32(__m128i x) {
  954 |  38.3M |   return _mm_unpacklo_epi16((x), _mm_setzero_si128());
  955 |  38.3M | }
  956 |        |
  957 |        | static void smooth_predictor_wxh(uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
  958 |        |                                  const uint8_t *LIBAOM_RESTRICT top_row,
  959 |        |                                  const uint8_t *LIBAOM_RESTRICT left_column,
  960 |   441k |                                  int width, int height) {
  961 |   441k |   const uint8_t *const sm_weights_h = smooth_weights + height - 4;
  962 |   441k |   const uint8_t *const sm_weights_w = smooth_weights + width - 4;
  963 |   441k |   const __m128i zero = _mm_setzero_si128();
  964 |   441k |   const __m128i scale_value = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  965 |   441k |   const __m128i bottom_left = _mm_cvtsi32_si128(left_column[height - 1]);
  966 |   441k |   const __m128i top_right = _mm_set1_epi16(top_row[width - 1]);
  967 |   441k |   const __m128i round = _mm_set1_epi32(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  968 |  9.54M |   for (int y = 0; y < height; ++y) {
  969 |  9.10M |     const __m128i weights_y = _mm_cvtsi32_si128(sm_weights_h[y]);
  970 |  9.10M |     const __m128i left_y = _mm_cvtsi32_si128(left_column[y]);
  971 |  9.10M |     const __m128i scale_m_weights_y = _mm_sub_epi16(scale_value, weights_y);
  972 |  9.10M |     __m128i scaled_bottom_left =
  973 |  9.10M |         _mm_mullo_epi16(scale_m_weights_y, bottom_left);
  974 |  9.10M |     const __m128i weight_left_y =
  975 |  9.10M |         _mm_shuffle_epi32(_mm_unpacklo_epi16(weights_y, left_y), 0);
  976 |  9.10M |     scaled_bottom_left = _mm_add_epi32(scaled_bottom_left, round);
  977 |  9.10M |     scaled_bottom_left = _mm_shuffle_epi32(scaled_bottom_left, 0);
  978 |  47.4M |     for (int x = 0; x < width; x += 8) {
  979 |  38.3M |       const __m128i top_x = LoadLo8(top_row + x);
  980 |  38.3M |       const __m128i weights_x = LoadLo8(sm_weights_w + x);
  981 |  38.3M |       const __m128i top_weights_x = _mm_unpacklo_epi8(top_x, weights_x);
  982 |  38.3M |       const __m128i top_weights_x_lo = cvtepu8_epi16(top_weights_x);
  983 |  38.3M |       const __m128i top_weights_x_hi = _mm_unpackhi_epi8(top_weights_x, zero);
  984 |        |
  985 |        |       // Here opposite weights and pixels are multiplied, where the order of
  986 |        |       // interleaving is indicated in the names.
  987 |  38.3M |       __m128i pred_lo = _mm_madd_epi16(top_weights_x_lo, weight_left_y);
  988 |  38.3M |       __m128i pred_hi = _mm_madd_epi16(top_weights_x_hi, weight_left_y);
  989 |        |
  990 |        |       // |scaled_bottom_left| is always scaled by the same weight each row, so
  991 |        |       // we only derive |scaled_top_right| values here.
  992 |  38.3M |       const __m128i inverted_weights_x =
  993 |  38.3M |           _mm_sub_epi16(scale_value, cvtepu8_epi16(weights_x));
  994 |  38.3M |       const __m128i scaled_top_right =
  995 |  38.3M |           _mm_mullo_epi16(inverted_weights_x, top_right);
  996 |  38.3M |       const __m128i scaled_top_right_lo = cvtepu16_epi32(scaled_top_right);
  997 |  38.3M |       const __m128i scaled_top_right_hi =
  998 |  38.3M |           _mm_unpackhi_epi16(scaled_top_right, zero);
  999 |  38.3M |       pred_lo = _mm_add_epi32(pred_lo, scaled_bottom_left);
 1000 |  38.3M |       pred_hi = _mm_add_epi32(pred_hi, scaled_bottom_left);
 1001 |  38.3M |       pred_lo = _mm_add_epi32(pred_lo, scaled_top_right_lo);
 1002 |  38.3M |       pred_hi = _mm_add_epi32(pred_hi, scaled_top_right_hi);
 1003 |        |
 1004 |        |       // The round value for RightShiftWithRounding was added with
 1005 |        |       // |scaled_bottom_left|.
 1006 |  38.3M |       pred_lo = _mm_srli_epi32(pred_lo, (1 + SMOOTH_WEIGHT_LOG2_SCALE));
 1007 |  38.3M |       pred_hi = _mm_srli_epi32(pred_hi, (1 + SMOOTH_WEIGHT_LOG2_SCALE));
 1008 |  38.3M |       const __m128i pred = _mm_packus_epi16(pred_lo, pred_hi);
 1009 |  38.3M |       StoreLo8(dst + x, _mm_packus_epi16(pred, pred));
 1010 |  38.3M |     }
 1011 |  9.10M |     dst += stride;
 1012 |  9.10M |   }
 1013 |   441k | }
 1014 |        |
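The heart of the inner loop is the interleave-and-madd identity described by the "opposite weights and pixels" comment. A small standalone demonstration (illustrative only; values chosen arbitrarily, SSE2 intrinsics):

#include <stdio.h>
#include <emmintrin.h>

int main(void) {
  // Pairs (top[x], w_w[x]) for four columns, as 16-bit lanes.
  const __m128i top_weights_x =
      _mm_setr_epi16(100, 255, 50, 149, 25, 85, 10, 64);
  // Every 32-bit lane holds the pair (w_h[y], left[y]) = (200, 120).
  const __m128i weight_left_y = _mm_set1_epi32((120 << 16) | 200);
  // Each 32-bit result is top[x] * w_h[y] + w_w[x] * left[y]: the two
  // "opposite" products of the smooth blend in a single instruction.
  const __m128i sums = _mm_madd_epi16(top_weights_x, weight_left_y);
  printf("%d\n", _mm_cvtsi128_si32(sums));  // 100*200 + 255*120 = 50600
  return 0;
}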
 1015 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1016 |        | void aom_smooth_predictor_16x4_ssse3(uint8_t *dst, ptrdiff_t stride,
 1017 |        |                                      const uint8_t *above,
 1018 |  56.5k |                                      const uint8_t *left) {
 1019 |  56.5k |   smooth_predictor_wxh(dst, stride, above, left, 16, 4);
 1020 |  56.5k | }
 1021 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1022 |        |
 1023 |        | void aom_smooth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride,
 1024 |        |                                      const uint8_t *above,
 1025 |  57.1k |                                      const uint8_t *left) {
 1026 |  57.1k |   smooth_predictor_wxh(dst, stride, above, left, 16, 8);
 1027 |  57.1k | }
 1028 |        |
 1029 |        | void aom_smooth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride,
 1030 |        |                                       const uint8_t *above,
 1031 |  97.1k |                                       const uint8_t *left) {
 1032 |  97.1k |   smooth_predictor_wxh(dst, stride, above, left, 16, 16);
 1033 |  97.1k | }
 1034 |        |
 1035 |        | void aom_smooth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride,
 1036 |        |                                       const uint8_t *above,
 1037 |  25.4k |                                       const uint8_t *left) {
 1038 |  25.4k |   smooth_predictor_wxh(dst, stride, above, left, 16, 32);
 1039 |  25.4k | }
 1040 |        |
 1041 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1042 |        | void aom_smooth_predictor_16x64_ssse3(uint8_t *dst, ptrdiff_t stride,
 1043 |        |                                       const uint8_t *above,
 1044 |  4.59k |                                       const uint8_t *left) {
 1045 |  4.59k |   smooth_predictor_wxh(dst, stride, above, left, 16, 64);
 1046 |  4.59k | }
 1047 |        |
 1048 |        | void aom_smooth_predictor_32x8_ssse3(uint8_t *dst, ptrdiff_t stride,
 1049 |        |                                      const uint8_t *above,
 1050 |  37.4k |                                      const uint8_t *left) {
 1051 |  37.4k |   smooth_predictor_wxh(dst, stride, above, left, 32, 8);
 1052 |  37.4k | }
 1053 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1054 |        |
 1055 |        | void aom_smooth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride,
 1056 |        |                                       const uint8_t *above,
 1057 |  21.8k |                                       const uint8_t *left) {
 1058 |  21.8k |   smooth_predictor_wxh(dst, stride, above, left, 32, 16);
 1059 |  21.8k | }
 1060 |        |
 1061 |        | void aom_smooth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride,
 1062 |        |                                       const uint8_t *above,
 1063 |  87.5k |                                       const uint8_t *left) {
 1064 |  87.5k |   smooth_predictor_wxh(dst, stride, above, left, 32, 32);
 1065 |  87.5k | }
 1066 |        |
 1067 |        | void aom_smooth_predictor_32x64_ssse3(uint8_t *dst, ptrdiff_t stride,
 1068 |        |                                       const uint8_t *above,
 1069 |  2.48k |                                       const uint8_t *left) {
 1070 |  2.48k |   smooth_predictor_wxh(dst, stride, above, left, 32, 64);
 1071 |  2.48k | }
 1072 |        |
 1073 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1074 |        | void aom_smooth_predictor_64x16_ssse3(uint8_t *dst, ptrdiff_t stride,
 1075 |        |                                       const uint8_t *above,
 1076 |  19.9k |                                       const uint8_t *left) {
 1077 |  19.9k |   smooth_predictor_wxh(dst, stride, above, left, 64, 16);
 1078 |  19.9k | }
 1079 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1080 |        |
 1081 |        | void aom_smooth_predictor_64x32_ssse3(uint8_t *dst, ptrdiff_t stride,
 1082 |        |                                       const uint8_t *above,
 1083 |  4.56k |                                       const uint8_t *left) {
 1084 |  4.56k |   smooth_predictor_wxh(dst, stride, above, left, 64, 32);
 1085 |  4.56k | }
 1086 |        |
 1087 |        | void aom_smooth_predictor_64x64_ssse3(uint8_t *dst, ptrdiff_t stride,
 1088 |        |                                       const uint8_t *above,
 1089 |  26.3k |                                       const uint8_t *left) {
 1090 |  26.3k |   smooth_predictor_wxh(dst, stride, above, left, 64, 64);
 1091 |  26.3k | }
 1092 |        |
 1093 |        | // -----------------------------------------------------------------------------
 1094 |        | // Smooth horizontal/vertical helper functions.
 1095 |        |
 1096 |        | // For Horizontal, pixels1 and pixels2 are the same repeated value. For
 1097 |        | // Vertical, weights1 and weights2 are the same, and scaled_corner1 and
 1098 |        | // scaled_corner2 are the same.
 1099 |        | static AOM_FORCE_INLINE void write_smooth_directional_sum16(
 1100 |        |     uint8_t *LIBAOM_RESTRICT dst, const __m128i pixels1, const __m128i pixels2,
 1101 |        |     const __m128i weights1, const __m128i weights2,
 1102 |        |     const __m128i scaled_corner1, const __m128i scaled_corner2,
 1103 |  12.8M |     const __m128i round) {
 1104 |  12.8M |   const __m128i weighted_px1 = _mm_mullo_epi16(pixels1, weights1);
 1105 |  12.8M |   const __m128i weighted_px2 = _mm_mullo_epi16(pixels2, weights2);
 1106 |  12.8M |   const __m128i pred_sum1 = _mm_add_epi16(scaled_corner1, weighted_px1);
 1107 |  12.8M |   const __m128i pred_sum2 = _mm_add_epi16(scaled_corner2, weighted_px2);
 1108 |        |   // Equivalent to RightShiftWithRounding(pred[x][y], 8).
 1109 |  12.8M |   const __m128i pred1 = _mm_srli_epi16(_mm_add_epi16(pred_sum1, round), 8);
 1110 |  12.8M |   const __m128i pred2 = _mm_srli_epi16(_mm_add_epi16(pred_sum2, round), 8);
 1111 |  12.8M |   StoreUnaligned16(dst, _mm_packus_epi16(pred1, pred2));
 1112 |  12.8M | }
 1113 |        |
 1114 |        | static AOM_FORCE_INLINE __m128i smooth_directional_sum8(
 1115 |  1.52M |     const __m128i pixels, const __m128i weights, const __m128i scaled_corner) {
 1116 |  1.52M |   const __m128i weighted_px = _mm_mullo_epi16(pixels, weights);
 1117 |  1.52M |   return _mm_add_epi16(scaled_corner, weighted_px);
 1118 |  1.52M | }
 1119 |        |
 1120 |        | static AOM_FORCE_INLINE void write_smooth_directional_sum8(
 1121 |        |     uint8_t *LIBAOM_RESTRICT dst, const __m128i *pixels, const __m128i *weights,
 1122 |  1.52M |     const __m128i *scaled_corner, const __m128i *round) {
 1123 |  1.52M |   const __m128i pred_sum =
 1124 |  1.52M |       smooth_directional_sum8(*pixels, *weights, *scaled_corner);
 1125 |        |   // Equivalent to RightShiftWithRounding(pred[x][y], 8).
 1126 |  1.52M |   const __m128i pred = _mm_srli_epi16(_mm_add_epi16(pred_sum, *round), 8);
 1127 |  1.52M |   StoreLo8(dst, _mm_packus_epi16(pred, pred));
 1128 |  1.52M | }
 1129 |        |
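A scalar sketch of the directional blend these helpers compute (illustrative; in SMOOTH_V the weighted pixel comes from the row above with the bottom-left corner pre-scaled, in SMOOTH_H from the left column with the top-right corner pre-scaled):

#include <stdint.h>

// Hypothetical scalar equivalent, assuming weight scale S = 256.
static uint8_t smooth_directional_scalar(int pixel, int weight, int corner) {
  const int S = 256;
  const int pred_sum = (S - weight) * corner + weight * pixel;
  return (uint8_t)((pred_sum + (S >> 1)) >> 8);  // RightShiftWithRounding
}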
 1130 |        | // -----------------------------------------------------------------------------
 1131 |        | // SMOOTH_V_PRED
 1132 |        |
 1133 |        | static AOM_FORCE_INLINE void load_smooth_vertical_pixels4(
 1134 |        |     const uint8_t *LIBAOM_RESTRICT above, const uint8_t *LIBAOM_RESTRICT left,
 1135 |  40.4k |     const int height, __m128i *pixels) {
 1136 |  40.4k |   __m128i top = Load4(above);
 1137 |  40.4k |   const __m128i bottom_left = _mm_set1_epi16(left[height - 1]);
 1138 |  40.4k |   top = cvtepu8_epi16(top);
 1139 |  40.4k |   pixels[0] = _mm_unpacklo_epi16(top, bottom_left);
 1140 |  40.4k | }
 1141 |        |
 1142 |        | // |weight_array| alternates weight vectors from the table with their inverted
 1143 |        | // (256-w) counterparts. This is precomputed by the compiler when the weights
 1144 |        | // table is visible to this module. Removing this visibility can cut speed by up
 1145 |        | // to half in both 4xH and 8xH transforms.
 1146 |        | static AOM_FORCE_INLINE void load_smooth_vertical_weights4(
 1147 |        |     const uint8_t *LIBAOM_RESTRICT weight_array, const int height,
 1148 |  40.4k |     __m128i *weights) {
 1149 |  40.4k |   const __m128i inverter = _mm_set1_epi16(256);
 1150 |        |
 1151 |  40.4k |   if (height == 4) {
 1152 |  23.6k |     const __m128i weight = Load4(weight_array);
 1153 |  23.6k |     weights[0] = cvtepu8_epi16(weight);
 1154 |  23.6k |     weights[1] = _mm_sub_epi16(inverter, weights[0]);
 1155 |  23.6k |   } else if (height == 8) {
 1156 |  10.0k |     const __m128i weight = LoadLo8(weight_array + 4);
 1157 |  10.0k |     weights[0] = cvtepu8_epi16(weight);
 1158 |  10.0k |     weights[1] = _mm_sub_epi16(inverter, weights[0]);
 1159 |  10.0k |   } else {
 1160 |  6.79k |     const __m128i weight = LoadUnaligned16(weight_array + 12);
 1161 |  6.79k |     const __m128i zero = _mm_setzero_si128();
 1162 |  6.79k |     weights[0] = cvtepu8_epi16(weight);
 1163 |  6.79k |     weights[1] = _mm_sub_epi16(inverter, weights[0]);
 1164 |  6.79k |     weights[2] = _mm_unpackhi_epi8(weight, zero);
 1165 |  6.79k |     weights[3] = _mm_sub_epi16(inverter, weights[2]);
 1166 |  6.79k |   }
 1167 |  40.4k | }
 1168 |        |
 1169 |        | static AOM_FORCE_INLINE void write_smooth_vertical4xh(
 1170 |        |     const __m128i *pixel, const __m128i *weight, const int height,
 1171 |  47.2k |     uint8_t *LIBAOM_RESTRICT dst, const ptrdiff_t stride) {
 1172 |  47.2k |   const __m128i pred_round = _mm_set1_epi32(128);
 1173 |  47.2k |   const __m128i mask_increment = _mm_set1_epi16(0x0202);
 1174 |  47.2k |   const __m128i cvtepu8_epi32 = _mm_set1_epi32(0xC080400);
 1175 |  47.2k |   __m128i y_select = _mm_set1_epi16(0x0100);
 1176 |        |
 1177 |   330k |   for (int y = 0; y < height; ++y) {
 1178 |   283k |     const __m128i weight_y = _mm_shuffle_epi8(weight[0], y_select);
 1179 |   283k |     const __m128i inverted_weight_y = _mm_shuffle_epi8(weight[1], y_select);
 1180 |   283k |     const __m128i alternate_weights =
 1181 |   283k |         _mm_unpacklo_epi16(weight_y, inverted_weight_y);
 1182 |        |     // Here the pixel vector is top_row[0], corner, top_row[1], corner, ...
 1183 |        |     // The madd instruction yields four results of the form:
 1184 |        |     // (top_row[x] * weight[y] + corner * inverted_weight[y])
 1185 |   283k |     __m128i sum = _mm_madd_epi16(pixel[0], alternate_weights);
 1186 |   283k |     sum = _mm_add_epi32(sum, pred_round);
 1187 |   283k |     sum = _mm_srai_epi32(sum, 8);
 1188 |   283k |     sum = _mm_shuffle_epi8(sum, cvtepu8_epi32);
 1189 |   283k |     Store4(dst, sum);
 1190 |   283k |     dst += stride;
 1191 |   283k |     y_select = _mm_add_epi16(y_select, mask_increment);
 1192 |   283k |   }
 1193 |  47.2k | }
 1194 |        |
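For reference, the scalar form of SMOOTH_V that the 4xH path implements (a sketch under the same S = 256 assumption; not part of the file):

#include <stdint.h>

// Every row y blends the pixel directly above with the bottom-left corner,
// independent of x:
//   pred(x, y) = (w_h[y] * above[x] + (S - w_h[y]) * left[h - 1] + S / 2) >> 8
static uint8_t smooth_v_scalar(const uint8_t *above, const uint8_t *left,
                               const uint8_t *w_h, int h, int x, int y) {
  const int S = 256;
  const int sum = w_h[y] * above[x] + (S - w_h[y]) * left[h - 1];
  return (uint8_t)((sum + (S >> 1)) >> 8);
}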
 1195 |        | void aom_smooth_v_predictor_4x4_ssse3(
 1196 |        |     uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
 1197 |        |     const uint8_t *LIBAOM_RESTRICT top_row,
 1198 |  23.6k |     const uint8_t *LIBAOM_RESTRICT left_column) {
 1199 |  23.6k |   __m128i pixels;
 1200 |  23.6k |   load_smooth_vertical_pixels4(top_row, left_column, 4, &pixels);
 1201 |        |
 1202 |  23.6k |   __m128i weights[2];
 1203 |  23.6k |   load_smooth_vertical_weights4(smooth_weights, 4, weights);
 1204 |        |
 1205 |  23.6k |   write_smooth_vertical4xh(&pixels, weights, 4, dst, stride);
 1206 |  23.6k | }
 1207 |        |
 1208 |        | void aom_smooth_v_predictor_4x8_ssse3(
 1209 |        |     uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
 1210 |        |     const uint8_t *LIBAOM_RESTRICT top_row,
 1211 |  10.0k |     const uint8_t *LIBAOM_RESTRICT left_column) {
 1212 |  10.0k |   __m128i pixels;
 1213 |  10.0k |   load_smooth_vertical_pixels4(top_row, left_column, 8, &pixels);
 1214 |        |
 1215 |  10.0k |   __m128i weights[2];
 1216 |  10.0k |   load_smooth_vertical_weights4(smooth_weights, 8, weights);
 1217 |        |
 1218 |  10.0k |   write_smooth_vertical4xh(&pixels, weights, 8, dst, stride);
 1219 |  10.0k | }
 1220 |        |
 1221 |        | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1222 |        | void aom_smooth_v_predictor_4x16_ssse3(
 1223 |        |     uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
 1224 |        |     const uint8_t *LIBAOM_RESTRICT top_row,
 1225 |  6.79k |     const uint8_t *LIBAOM_RESTRICT left_column) {
 1226 |  6.79k |   __m128i pixels;
 1227 |  6.79k |   load_smooth_vertical_pixels4(top_row, left_column, 16, &pixels);
 1228 |        |
 1229 |  6.79k |   __m128i weights[4];
 1230 |  6.79k |   load_smooth_vertical_weights4(smooth_weights, 16, weights);
 1231 |        |
 1232 |  6.79k |   write_smooth_vertical4xh(&pixels, weights, 8, dst, stride);
 1233 |  6.79k |   dst += stride << 3;
 1234 |  6.79k |   write_smooth_vertical4xh(&pixels, &weights[2], 8, dst, stride);
 1235 |  6.79k | }
 1236 |        | #endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1237 |        |
1238
void aom_smooth_v_predictor_8x4_ssse3(
1239
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1240
    const uint8_t *LIBAOM_RESTRICT top_row,
1241
14.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1242
14.5k
  const __m128i bottom_left = _mm_set1_epi16(left_column[3]);
1243
14.5k
  const __m128i weights = cvtepu8_epi16(Load4(smooth_weights));
1244
14.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1245
14.5k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1246
14.5k
  const __m128i scaled_bottom_left =
1247
14.5k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1248
14.5k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1249
14.5k
  __m128i y_select = _mm_set1_epi32(0x01000100);
1250
14.5k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1251
14.5k
  __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1252
14.5k
  __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1253
14.5k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1254
14.5k
                                &round);
1255
14.5k
  dst += stride;
1256
14.5k
  y_select = _mm_set1_epi32(0x03020302);
1257
14.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1258
14.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1259
14.5k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1260
14.5k
                                &round);
1261
14.5k
  dst += stride;
1262
14.5k
  y_select = _mm_set1_epi32(0x05040504);
1263
14.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1264
14.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1265
14.5k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1266
14.5k
                                &round);
1267
14.5k
  dst += stride;
1268
14.5k
  y_select = _mm_set1_epi32(0x07060706);
1269
14.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1270
14.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1271
14.5k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1272
14.5k
                                &round);
1273
14.5k
}
1274
1275
void aom_smooth_v_predictor_8x8_ssse3(
1276
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1277
    const uint8_t *LIBAOM_RESTRICT top_row,
1278
35.1k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1279
35.1k
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
1280
35.1k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
1281
35.1k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1282
35.1k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1283
35.1k
  const __m128i scaled_bottom_left =
1284
35.1k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1285
35.1k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1286
35.1k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1287
315k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1288
280k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1289
280k
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1290
280k
    const __m128i scaled_bottom_left_y =
1291
280k
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
1292
280k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1293
280k
                                  &round);
1294
280k
    dst += stride;
1295
280k
  }
1296
35.1k
}
1297
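
The y_mask loop above is a lane-broadcast idiom: starting at 0x01000100 and
stepping by 0x02020202 generates, for y = 0..7, a pshufb pattern that
repeats bytes 2*y and 2*y+1, i.e. broadcasts the y-th 16-bit lane of the
weights register to all eight lanes. The bound 0x0F0E0F0F admits exactly
those eight masks. A standalone demonstration (illustrative only):

#include <stdio.h>

// Prints the eight shuffle masks and the byte pair each one selects.
int main(void) {
  unsigned mask = 0x01000100;
  for (int y = 0; y < 8; ++y, mask += 0x02020202) {
    printf("y=%d mask=0x%08X selects bytes %d,%d\n", y, mask, 2 * y,
           2 * y + 1);
  }
  return 0;
}
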
1298
void aom_smooth_v_predictor_8x16_ssse3(
1299
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1300
    const uint8_t *LIBAOM_RESTRICT top_row,
1301
9.85k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1302
9.85k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1303
9.85k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1304
1305
9.85k
  const __m128i weights1 = cvtepu8_epi16(weights);
1306
9.85k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
1307
9.85k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1308
9.85k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1309
9.85k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1310
9.85k
  const __m128i scaled_bottom_left1 =
1311
9.85k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1312
9.85k
  const __m128i scaled_bottom_left2 =
1313
9.85k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1314
9.85k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1315
9.85k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1316
88.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1317
78.8k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1318
78.8k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1319
78.8k
    const __m128i scaled_bottom_left_y =
1320
78.8k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1321
78.8k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1322
78.8k
                                  &round);
1323
78.8k
    dst += stride;
1324
78.8k
  }
1325
88.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1326
78.8k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1327
78.8k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1328
78.8k
    const __m128i scaled_bottom_left_y =
1329
78.8k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1330
78.8k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1331
78.8k
                                  &round);
1332
78.8k
    dst += stride;
1333
78.8k
  }
1334
9.85k
}
1335
1336
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1337
void aom_smooth_v_predictor_8x32_ssse3(
1338
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1339
    const uint8_t *LIBAOM_RESTRICT top_row,
1340
3.65k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1341
3.65k
  const __m128i zero = _mm_setzero_si128();
1342
3.65k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1343
3.65k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1344
3.65k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1345
3.65k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1346
3.65k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1347
3.65k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1348
3.65k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1349
3.65k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1350
3.65k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1351
3.65k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1352
3.65k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1353
3.65k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1354
3.65k
  const __m128i scaled_bottom_left1 =
1355
3.65k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1356
3.65k
  const __m128i scaled_bottom_left2 =
1357
3.65k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1358
3.65k
  const __m128i scaled_bottom_left3 =
1359
3.65k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1360
3.65k
  const __m128i scaled_bottom_left4 =
1361
3.65k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1362
3.65k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1363
3.65k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1364
32.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1365
29.2k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1366
29.2k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1367
29.2k
    const __m128i scaled_bottom_left_y =
1368
29.2k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1369
29.2k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1370
29.2k
                                  &round);
1371
29.2k
    dst += stride;
1372
29.2k
  }
1373
32.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1374
29.2k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1375
29.2k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1376
29.2k
    const __m128i scaled_bottom_left_y =
1377
29.2k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1378
29.2k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1379
29.2k
                                  &round);
1380
29.2k
    dst += stride;
1381
29.2k
  }
1382
32.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1383
29.2k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1384
29.2k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1385
29.2k
    const __m128i scaled_bottom_left_y =
1386
29.2k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1387
29.2k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1388
29.2k
                                  &round);
1389
29.2k
    dst += stride;
1390
29.2k
  }
1391
32.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1392
29.2k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1393
29.2k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1394
29.2k
    const __m128i scaled_bottom_left_y =
1395
29.2k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1396
29.2k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1397
29.2k
                                  &round);
1398
29.2k
    dst += stride;
1399
29.2k
  }
1400
3.65k
}
1401
1402
void aom_smooth_v_predictor_16x4_ssse3(
1403
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1404
    const uint8_t *LIBAOM_RESTRICT top_row,
1405
14.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1406
14.5k
  const __m128i bottom_left = _mm_set1_epi16(left_column[3]);
1407
14.5k
  const __m128i weights = cvtepu8_epi16(Load4(smooth_weights));
1408
14.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1409
14.5k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1410
14.5k
  const __m128i scaled_bottom_left =
1411
14.5k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1412
14.5k
  const __m128i round = _mm_set1_epi16(128);
1413
14.5k
  const __m128i top = LoadUnaligned16(top_row);
1414
14.5k
  const __m128i top_lo = cvtepu8_epi16(top);
1415
14.5k
  const __m128i top_hi = cvtepu8_epi16(_mm_srli_si128(top, 8));
1416
1417
14.5k
  __m128i y_select = _mm_set1_epi32(0x01000100);
1418
14.5k
  __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1419
14.5k
  __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1420
14.5k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1421
14.5k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1422
14.5k
                                 round);
1423
14.5k
  dst += stride;
1424
14.5k
  y_select = _mm_set1_epi32(0x03020302);
1425
14.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1426
14.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1427
14.5k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1428
14.5k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1429
14.5k
                                 round);
1430
14.5k
  dst += stride;
1431
14.5k
  y_select = _mm_set1_epi32(0x05040504);
1432
14.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1433
14.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1434
14.5k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1435
14.5k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1436
14.5k
                                 round);
1437
14.5k
  dst += stride;
1438
14.5k
  y_select = _mm_set1_epi32(0x07060706);
1439
14.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1440
14.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1441
14.5k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1442
14.5k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1443
14.5k
                                 round);
1444
14.5k
}
1445
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1446
1447
void aom_smooth_v_predictor_16x8_ssse3(
1448
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1449
    const uint8_t *LIBAOM_RESTRICT top_row,
1450
10.8k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1451
10.8k
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
1452
10.8k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
1453
10.8k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1454
10.8k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1455
10.8k
  const __m128i scaled_bottom_left =
1456
10.8k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1457
10.8k
  const __m128i round = _mm_set1_epi16(128);
1458
10.8k
  const __m128i top = LoadUnaligned16(top_row);
1459
10.8k
  const __m128i top_lo = cvtepu8_epi16(top);
1460
10.8k
  const __m128i top_hi = cvtepu8_epi16(_mm_srli_si128(top, 8));
1461
97.5k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1462
86.7k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1463
86.7k
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1464
86.7k
    const __m128i scaled_bottom_left_y =
1465
86.7k
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
1466
86.7k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1467
86.7k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1468
86.7k
                                   round);
1469
86.7k
    dst += stride;
1470
86.7k
  }
1471
10.8k
}
1472
1473
void aom_smooth_v_predictor_16x16_ssse3(
1474
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1475
    const uint8_t *LIBAOM_RESTRICT top_row,
1476
28.4k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1477
28.4k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1478
28.4k
  const __m128i zero = _mm_setzero_si128();
1479
28.4k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1480
28.4k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1481
28.4k
  const __m128i weights_lo = cvtepu8_epi16(weights);
1482
28.4k
  const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1483
28.4k
  const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1484
28.4k
  const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1485
28.4k
  const __m128i scaled_bottom_left_lo =
1486
28.4k
      _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1487
28.4k
  const __m128i scaled_bottom_left_hi =
1488
28.4k
      _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1489
28.4k
  const __m128i round = _mm_set1_epi16(128);
1490
1491
28.4k
  const __m128i top = LoadUnaligned16(top_row);
1492
28.4k
  const __m128i top_lo = cvtepu8_epi16(top);
1493
28.4k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1494
255k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1495
227k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1496
227k
    const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1497
227k
    const __m128i scaled_bottom_left_y =
1498
227k
        _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1499
227k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1500
227k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1501
227k
                                   round);
1502
227k
    dst += stride;
1503
227k
  }
1504
255k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1505
227k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1506
227k
    const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1507
227k
    const __m128i scaled_bottom_left_y =
1508
227k
        _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1509
227k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1510
227k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1511
227k
                                   round);
1512
227k
    dst += stride;
1513
227k
  }
1514
28.4k
}
1515
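
All 16-wide paths widen each 16-byte load into two 8-lane 16-bit registers
before multiplying; cvtepu8_epi16 in this file appears to be a local helper
with the same effect as the low-half unpack. The pattern in isolation
(sketch, not from this file):

#include <tmmintrin.h>

// Widens one 16-byte register into two 8x16-bit halves, as used by every
// 16-wide predictor above.
static void widen_u8x16(__m128i v, __m128i *lo, __m128i *hi) {
  const __m128i zero = _mm_setzero_si128();
  *lo = _mm_unpacklo_epi8(v, zero);  // bytes 0..7  -> 16-bit lanes
  *hi = _mm_unpackhi_epi8(v, zero);  // bytes 8..15 -> 16-bit lanes
}
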
1516
void aom_smooth_v_predictor_16x32_ssse3(
1517
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1518
    const uint8_t *LIBAOM_RESTRICT top_row,
1519
7.71k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1520
7.71k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1521
7.71k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1522
7.71k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1523
7.71k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1524
7.71k
  const __m128i zero = _mm_setzero_si128();
1525
7.71k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1526
7.71k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1527
7.71k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1528
7.71k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1529
7.71k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1530
7.71k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1531
7.71k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1532
7.71k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1533
7.71k
  const __m128i scaled_bottom_left1 =
1534
7.71k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1535
7.71k
  const __m128i scaled_bottom_left2 =
1536
7.71k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1537
7.71k
  const __m128i scaled_bottom_left3 =
1538
7.71k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1539
7.71k
  const __m128i scaled_bottom_left4 =
1540
7.71k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1541
7.71k
  const __m128i round = _mm_set1_epi16(128);
1542
1543
7.71k
  const __m128i top = LoadUnaligned16(top_row);
1544
7.71k
  const __m128i top_lo = cvtepu8_epi16(top);
1545
7.71k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1546
69.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1547
61.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1548
61.6k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1549
61.6k
    const __m128i scaled_bottom_left_y =
1550
61.6k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1551
61.6k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1552
61.6k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1553
61.6k
                                   round);
1554
61.6k
    dst += stride;
1555
61.6k
  }
1556
69.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1557
61.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1558
61.6k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1559
61.6k
    const __m128i scaled_bottom_left_y =
1560
61.6k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1561
61.6k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1562
61.6k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1563
61.6k
                                   round);
1564
61.6k
    dst += stride;
1565
61.6k
  }
1566
69.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1567
61.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1568
61.6k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1569
61.6k
    const __m128i scaled_bottom_left_y =
1570
61.6k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1571
61.6k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1572
61.6k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1573
61.6k
                                   round);
1574
61.6k
    dst += stride;
1575
61.6k
  }
1576
69.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1577
61.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1578
61.6k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1579
61.6k
    const __m128i scaled_bottom_left_y =
1580
61.6k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1581
61.6k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1582
61.6k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1583
61.6k
                                   round);
1584
61.6k
    dst += stride;
1585
61.6k
  }
1586
7.71k
}
1587
1588
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1589
void aom_smooth_v_predictor_16x64_ssse3(
1590
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1591
    const uint8_t *LIBAOM_RESTRICT top_row,
1592
1.40k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1593
1.40k
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
1594
1.40k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1595
1.40k
  const __m128i round = _mm_set1_epi16(128);
1596
1.40k
  const __m128i zero = _mm_setzero_si128();
1597
1.40k
  const __m128i top = LoadUnaligned16(top_row);
1598
1.40k
  const __m128i top_lo = cvtepu8_epi16(top);
1599
1.40k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1600
1.40k
  const uint8_t *weights_base_ptr = smooth_weights + 60;
1601
7.03k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
1602
5.62k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
1603
5.62k
    const __m128i weights_lo = cvtepu8_epi16(weights);
1604
5.62k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1605
5.62k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1606
5.62k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1607
5.62k
    const __m128i scaled_bottom_left_lo =
1608
5.62k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1609
5.62k
    const __m128i scaled_bottom_left_hi =
1610
5.62k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1611
1612
50.6k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1613
45.0k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1614
45.0k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1615
45.0k
      const __m128i scaled_bottom_left_y =
1616
45.0k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1617
45.0k
      write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1618
45.0k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1619
45.0k
                                     round);
1620
45.0k
      dst += stride;
1621
45.0k
    }
1622
50.6k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1623
45.0k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1624
45.0k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1625
45.0k
      const __m128i scaled_bottom_left_y =
1626
45.0k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1627
45.0k
      write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1628
45.0k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1629
45.0k
                                     round);
1630
45.0k
      dst += stride;
1631
45.0k
    }
1632
5.62k
  }
1633
1.40k
}
1634
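
The offsets into smooth_weights used across this file (0, +4, +12, +28, and
+60, as in weights_base_ptr above) are consistent with the table packing the
weight vectors for block sizes 4, 8, 16, 32, and 64 back to back, so the
offset of the size-N vector is the sum of all smaller sizes. A sketch under
that assumption:

// Assumes smooth_weights[] stores the per-size vectors contiguously in
// increasing size order; the cumulative offset then telescopes to N - 4:
// 4 -> 0, 8 -> 4, 16 -> 12, 32 -> 28, 64 -> 60.
static int smooth_weight_offset(int n) { return n - 4; }
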
1635
void aom_smooth_v_predictor_32x8_ssse3(
1636
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1637
    const uint8_t *LIBAOM_RESTRICT top_row,
1638
15.2k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1639
15.2k
  const __m128i zero = _mm_setzero_si128();
1640
15.2k
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
1641
15.2k
  const __m128i top_lo = LoadUnaligned16(top_row);
1642
15.2k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1643
15.2k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1644
15.2k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1645
15.2k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1646
15.2k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1647
15.2k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1648
15.2k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
1649
15.2k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1650
15.2k
  const __m128i scaled_bottom_left =
1651
15.2k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1652
15.2k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1653
137k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1654
122k
    __m128i y_select = _mm_set1_epi32(y_mask);
1655
122k
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1656
122k
    const __m128i scaled_bottom_left_y =
1657
122k
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
1658
122k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1659
122k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1660
122k
                                   round);
1661
122k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1662
122k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1663
122k
                                   round);
1664
122k
    dst += stride;
1665
122k
  }
1666
15.2k
}
1667
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1668
1669
void aom_smooth_v_predictor_32x16_ssse3(
1670
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1671
    const uint8_t *LIBAOM_RESTRICT top_row,
1672
7.25k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1673
7.25k
  const __m128i zero = _mm_setzero_si128();
1674
7.25k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1675
7.25k
  const __m128i top_lo = LoadUnaligned16(top_row);
1676
7.25k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1677
7.25k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1678
7.25k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1679
7.25k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1680
7.25k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1681
7.25k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1682
7.25k
  const __m128i weights1 = cvtepu8_epi16(weights);
1683
7.25k
  const __m128i weights2 = _mm_unpackhi_epi8(weights, zero);
1684
7.25k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1685
7.25k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1686
7.25k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1687
7.25k
  const __m128i scaled_bottom_left1 =
1688
7.25k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1689
7.25k
  const __m128i scaled_bottom_left2 =
1690
7.25k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1691
7.25k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1692
65.2k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1693
58.0k
    __m128i y_select = _mm_set1_epi32(y_mask);
1694
58.0k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1695
58.0k
    const __m128i scaled_bottom_left_y =
1696
58.0k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1697
58.0k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1698
58.0k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1699
58.0k
                                   round);
1700
58.0k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1701
58.0k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1702
58.0k
                                   round);
1703
58.0k
    dst += stride;
1704
58.0k
  }
1705
65.2k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1706
58.0k
    __m128i y_select = _mm_set1_epi32(y_mask);
1707
58.0k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1708
58.0k
    const __m128i scaled_bottom_left_y =
1709
58.0k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1710
58.0k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1711
58.0k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1712
58.0k
                                   round);
1713
58.0k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1714
58.0k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1715
58.0k
                                   round);
1716
58.0k
    dst += stride;
1717
58.0k
  }
1718
7.25k
}
1719
1720
void aom_smooth_v_predictor_32x32_ssse3(
1721
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1722
    const uint8_t *LIBAOM_RESTRICT top_row,
1723
43.3k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1724
43.3k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1725
43.3k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1726
43.3k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1727
43.3k
  const __m128i zero = _mm_setzero_si128();
1728
43.3k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1729
43.3k
  const __m128i top_lo = LoadUnaligned16(top_row);
1730
43.3k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1731
43.3k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1732
43.3k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1733
43.3k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1734
43.3k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1735
43.3k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1736
43.3k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1737
43.3k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1738
43.3k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1739
43.3k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1740
43.3k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1741
43.3k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1742
43.3k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1743
43.3k
  const __m128i scaled_bottom_left1 =
1744
43.3k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1745
43.3k
  const __m128i scaled_bottom_left2 =
1746
43.3k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1747
43.3k
  const __m128i scaled_bottom_left3 =
1748
43.3k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1749
43.3k
  const __m128i scaled_bottom_left4 =
1750
43.3k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1751
43.3k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1752
389k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1753
346k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1754
346k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1755
346k
    const __m128i scaled_bottom_left_y =
1756
346k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1757
346k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1758
346k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1759
346k
                                   round);
1760
346k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1761
346k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1762
346k
                                   round);
1763
346k
    dst += stride;
1764
346k
  }
1765
389k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1766
346k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1767
346k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1768
346k
    const __m128i scaled_bottom_left_y =
1769
346k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1770
346k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1771
346k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1772
346k
                                   round);
1773
346k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1774
346k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1775
346k
                                   round);
1776
346k
    dst += stride;
1777
346k
  }
1778
389k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1779
346k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1780
346k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1781
346k
    const __m128i scaled_bottom_left_y =
1782
346k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1783
346k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1784
346k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1785
346k
                                   round);
1786
346k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1787
346k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1788
346k
                                   round);
1789
346k
    dst += stride;
1790
346k
  }
1791
389k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1792
346k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1793
346k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1794
346k
    const __m128i scaled_bottom_left_y =
1795
346k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1796
346k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1797
346k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1798
346k
                                   round);
1799
346k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1800
346k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1801
346k
                                   round);
1802
346k
    dst += stride;
1803
346k
  }
1804
43.3k
}
1805
1806
void aom_smooth_v_predictor_32x64_ssse3(
1807
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1808
    const uint8_t *LIBAOM_RESTRICT top_row,
1809
679
    const uint8_t *LIBAOM_RESTRICT left_column) {
1810
679
  const __m128i zero = _mm_setzero_si128();
1811
679
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
1812
679
  const __m128i top_lo = LoadUnaligned16(top_row);
1813
679
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1814
679
  const __m128i top1 = cvtepu8_epi16(top_lo);
1815
679
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1816
679
  const __m128i top3 = cvtepu8_epi16(top_hi);
1817
679
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1818
679
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1819
679
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1820
679
  const uint8_t *weights_base_ptr = smooth_weights + 60;
1821
3.39k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
1822
2.71k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
1823
2.71k
    const __m128i weights_lo = cvtepu8_epi16(weights);
1824
2.71k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1825
2.71k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1826
2.71k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1827
2.71k
    const __m128i scaled_bottom_left_lo =
1828
2.71k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1829
2.71k
    const __m128i scaled_bottom_left_hi =
1830
2.71k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1831
1832
24.4k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1833
21.7k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1834
21.7k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1835
21.7k
      const __m128i scaled_bottom_left_y =
1836
21.7k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1837
21.7k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1838
21.7k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1839
21.7k
                                     round);
1840
21.7k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1841
21.7k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1842
21.7k
                                     round);
1843
21.7k
      dst += stride;
1844
21.7k
    }
1845
24.4k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1846
21.7k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1847
21.7k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1848
21.7k
      const __m128i scaled_bottom_left_y =
1849
21.7k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1850
21.7k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1851
21.7k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1852
21.7k
                                     round);
1853
21.7k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1854
21.7k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1855
21.7k
                                     round);
1856
21.7k
      dst += stride;
1857
21.7k
    }
1858
2.71k
  }
1859
679
}
1860
1861
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1862
void aom_smooth_v_predictor_64x16_ssse3(
1863
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1864
    const uint8_t *LIBAOM_RESTRICT top_row,
1865
10.8k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1866
10.8k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1867
10.8k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1868
10.8k
  const __m128i zero = _mm_setzero_si128();
1869
10.8k
  const __m128i top_lolo = LoadUnaligned16(top_row);
1870
10.8k
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
1871
10.8k
  const __m128i top1 = cvtepu8_epi16(top_lolo);
1872
10.8k
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
1873
10.8k
  const __m128i top3 = cvtepu8_epi16(top_lohi);
1874
10.8k
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
1875
1876
10.8k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1877
10.8k
  const __m128i weights1 = cvtepu8_epi16(weights);
1878
10.8k
  const __m128i weights2 = _mm_unpackhi_epi8(weights, zero);
1879
10.8k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1880
10.8k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1881
10.8k
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
1882
10.8k
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
1883
10.8k
  const __m128i top5 = cvtepu8_epi16(top_hilo);
1884
10.8k
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
1885
10.8k
  const __m128i top7 = cvtepu8_epi16(top_hihi);
1886
10.8k
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
1887
10.8k
  const __m128i scaled_bottom_left1 =
1888
10.8k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1889
10.8k
  const __m128i scaled_bottom_left2 =
1890
10.8k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1891
10.8k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1892
97.7k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1893
86.9k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1894
86.9k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1895
86.9k
    const __m128i scaled_bottom_left_y =
1896
86.9k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1897
86.9k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1898
86.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1899
86.9k
                                   round);
1900
86.9k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1901
86.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1902
86.9k
                                   round);
1903
86.9k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
1904
86.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1905
86.9k
                                   round);
1906
86.9k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
1907
86.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1908
86.9k
                                   round);
1909
86.9k
    dst += stride;
1910
86.9k
  }
1911
97.7k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1912
86.9k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1913
86.9k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1914
86.9k
    const __m128i scaled_bottom_left_y =
1915
86.9k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1916
86.9k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1917
86.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1918
86.9k
                                   round);
1919
86.9k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1920
86.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1921
86.9k
                                   round);
1922
86.9k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
1923
86.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1924
86.9k
                                   round);
1925
86.9k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
1926
86.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1927
86.9k
                                   round);
1928
86.9k
    dst += stride;
1929
86.9k
  }
1930
10.8k
}
1931
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1932
1933
void aom_smooth_v_predictor_64x32_ssse3(
1934
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1935
    const uint8_t *LIBAOM_RESTRICT top_row,
1936
1.93k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1937
1.93k
  const __m128i zero = _mm_setzero_si128();
1938
1.93k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1939
1.93k
  const __m128i top_lolo = LoadUnaligned16(top_row);
1940
1.93k
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
1941
1.93k
  const __m128i top1 = cvtepu8_epi16(top_lolo);
1942
1.93k
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
1943
1.93k
  const __m128i top3 = cvtepu8_epi16(top_lohi);
1944
1.93k
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
1945
1.93k
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
1946
1.93k
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
1947
1.93k
  const __m128i top5 = cvtepu8_epi16(top_hilo);
1948
1.93k
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
1949
1.93k
  const __m128i top7 = cvtepu8_epi16(top_hihi);
1950
1.93k
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
1951
1.93k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1952
1.93k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1953
1.93k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1954
1.93k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1955
1.93k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1956
1.93k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1957
1.93k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1958
1.93k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1959
1.93k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1960
1.93k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1961
1.93k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1962
1.93k
  const __m128i scaled_bottom_left1 =
1963
1.93k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1964
1.93k
  const __m128i scaled_bottom_left2 =
1965
1.93k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1966
1.93k
  const __m128i scaled_bottom_left3 =
1967
1.93k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1968
1.93k
  const __m128i scaled_bottom_left4 =
1969
1.93k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1970
1.93k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1971
1972
17.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1973
15.5k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1974
15.5k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1975
15.5k
    const __m128i scaled_bottom_left_y =
1976
15.5k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1977
15.5k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1978
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1979
15.5k
                                   round);
1980
15.5k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1981
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1982
15.5k
                                   round);
1983
15.5k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
1984
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1985
15.5k
                                   round);
1986
15.5k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
1987
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1988
15.5k
                                   round);
1989
15.5k
    dst += stride;
1990
15.5k
  }
1991
17.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1992
15.5k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1993
15.5k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1994
15.5k
    const __m128i scaled_bottom_left_y =
1995
15.5k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1996
15.5k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1997
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1998
15.5k
                                   round);
1999
15.5k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2000
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2001
15.5k
                                   round);
2002
15.5k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2003
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2004
15.5k
                                   round);
2005
15.5k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2006
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2007
15.5k
                                   round);
2008
15.5k
    dst += stride;
2009
15.5k
  }
2010
17.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2011
15.5k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2012
15.5k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
2013
15.5k
    const __m128i scaled_bottom_left_y =
2014
15.5k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
2015
15.5k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2016
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2017
15.5k
                                   round);
2018
15.5k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2019
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2020
15.5k
                                   round);
2021
15.5k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2022
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2023
15.5k
                                   round);
2024
15.5k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2025
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2026
15.5k
                                   round);
2027
15.5k
    dst += stride;
2028
15.5k
  }
2029
17.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2030
15.5k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2031
15.5k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
2032
15.5k
    const __m128i scaled_bottom_left_y =
2033
15.5k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
2034
15.5k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2035
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2036
15.5k
                                   round);
2037
15.5k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2038
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2039
15.5k
                                   round);
2040
15.5k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2041
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2042
15.5k
                                   round);
2043
15.5k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2044
15.5k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2045
15.5k
                                   round);
2046
15.5k
    dst += stride;
2047
15.5k
  }
2048
1.93k
}
2049
2050
void aom_smooth_v_predictor_64x64_ssse3(
2051
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2052
    const uint8_t *LIBAOM_RESTRICT top_row,
2053
5.49k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2054
5.49k
  const __m128i zero = _mm_setzero_si128();
2055
5.49k
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
2056
5.49k
  const __m128i top_lolo = LoadUnaligned16(top_row);
2057
5.49k
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
2058
5.49k
  const __m128i top1 = cvtepu8_epi16(top_lolo);
2059
5.49k
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
2060
5.49k
  const __m128i top3 = cvtepu8_epi16(top_lohi);
2061
5.49k
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
2062
5.49k
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
2063
5.49k
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
2064
5.49k
  const __m128i top5 = cvtepu8_epi16(top_hilo);
2065
5.49k
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
2066
5.49k
  const __m128i top7 = cvtepu8_epi16(top_hihi);
2067
5.49k
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
2068
5.49k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2069
5.49k
  const __m128i round = _mm_set1_epi16(128);
2070
5.49k
  const uint8_t *weights_base_ptr = smooth_weights + 60;
2071
27.4k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
2072
21.9k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
2073
21.9k
    const __m128i weights_lo = cvtepu8_epi16(weights);
2074
21.9k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
2075
21.9k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
2076
21.9k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
2077
21.9k
    const __m128i scaled_bottom_left_lo =
2078
21.9k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
2079
21.9k
    const __m128i scaled_bottom_left_hi =
2080
21.9k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
2081
197k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2082
175k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2083
175k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
2084
175k
      const __m128i scaled_bottom_left_y =
2085
175k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
2086
175k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2087
175k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2088
175k
                                     round);
2089
175k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2090
175k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2091
175k
                                     round);
2092
175k
      write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2093
175k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2094
175k
                                     round);
2095
175k
      write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2096
175k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2097
175k
                                     round);
2098
175k
      dst += stride;
2099
175k
    }
2100
197k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2101
175k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2102
175k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
2103
175k
      const __m128i scaled_bottom_left_y =
2104
175k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
2105
175k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2106
175k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2107
175k
                                     round);
2108
175k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2109
175k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2110
175k
                                     round);
2111
175k
      write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2112
175k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2113
175k
                                     round);
2114
175k
      write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2115
175k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2116
175k
                                     round);
2117
175k
      dst += stride;
2118
175k
    }
2119
21.9k
  }
2120
5.49k
}
2121
2122
// -----------------------------------------------------------------------------
2123
// SMOOTH_H_PRED
2124
static AOM_FORCE_INLINE void write_smooth_horizontal_sum4(
2125
    uint8_t *LIBAOM_RESTRICT dst, const __m128i *left_y, const __m128i *weights,
2126
634k
    const __m128i *scaled_top_right, const __m128i *round) {
2127
634k
  const __m128i weighted_left_y = _mm_mullo_epi16(*left_y, *weights);
2128
634k
  const __m128i pred_sum = _mm_add_epi32(*scaled_top_right, weighted_left_y);
2129
  // Equivalent to RightShiftWithRounding(pred[x][y], 8).
2130
634k
  const __m128i pred = _mm_srli_epi32(_mm_add_epi32(pred_sum, *round), 8);
2131
634k
  const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400);
2132
634k
  Store4(dst, _mm_shuffle_epi8(pred, cvtepi32_epi8));
2133
634k
}
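// The _mm_shuffle_epi8 with 0x0C080400 above gathers byte 0 of each 32-bit
// lane (source bytes 0, 4, 8, 12) into the low dword, narrowing four 32-bit
// sums to four pixels in one pshufb; after the rounding shift each lane fits
// in 8 bits, so taking the low byte is lossless. A standalone check (compile
// with -mssse3):
#include <stdint.h>
#include <stdio.h>
#include <tmmintrin.h>
int main(void) {
  const __m128i pred = _mm_setr_epi32(10, 20, 30, 40);
  const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400);
  const uint32_t packed =
      (uint32_t)_mm_cvtsi128_si32(_mm_shuffle_epi8(pred, cvtepi32_epi8));
  printf("0x%08x\n", packed);  // prints 0x281e140a: bytes 10, 20, 30, 40
  return 0;
}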
2134
2135
void aom_smooth_h_predictor_4x4_ssse3(
2136
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2137
    const uint8_t *LIBAOM_RESTRICT top_row,
2138
60.6k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2139
60.6k
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
2140
60.6k
  const __m128i left = cvtepu8_epi32(Load4(left_column));
2141
60.6k
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
2142
60.6k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2143
60.6k
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
2144
60.6k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2145
60.6k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2146
60.6k
  __m128i left_y = _mm_shuffle_epi32(left, 0);
2147
60.6k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2148
60.6k
                               &round);
2149
60.6k
  dst += stride;
2150
60.6k
  left_y = _mm_shuffle_epi32(left, 0x55);
2151
60.6k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2152
60.6k
                               &round);
2153
60.6k
  dst += stride;
2154
60.6k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2155
60.6k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2156
60.6k
                               &round);
2157
60.6k
  dst += stride;
2158
60.6k
  left_y = _mm_shuffle_epi32(left, 0xff);
2159
60.6k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2160
60.6k
                               &round);
2161
60.6k
}
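// The four _mm_shuffle_epi32 immediates above (0, 0x55, 0xaa, 0xff)
// broadcast 32-bit lane 0, 1, 2 and 3 respectively: each 2-bit field of the
// immediate picks the source lane for one destination lane, so
// 0xaa == 0b10101010 selects lane 2 four times. A standalone check (SSE2
// suffices for pshufd):
#include <emmintrin.h>
#include <stdio.h>
int main(void) {
  const __m128i left = _mm_setr_epi32(11, 22, 33, 44);
  const __m128i row2 = _mm_shuffle_epi32(left, 0xaa);  // lane 2 in all lanes
  printf("%d\n", _mm_cvtsi128_si32(row2));             // prints 33
  return 0;
}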
2162
2163
void aom_smooth_h_predictor_4x8_ssse3(
2164
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2165
    const uint8_t *LIBAOM_RESTRICT top_row,
2166
18.8k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2167
18.8k
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
2168
18.8k
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
2169
18.8k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2170
18.8k
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
2171
18.8k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2172
18.8k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2173
18.8k
  __m128i left = cvtepu8_epi32(Load4(left_column));
2174
18.8k
  __m128i left_y = _mm_shuffle_epi32(left, 0);
2175
18.8k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2176
18.8k
                               &round);
2177
18.8k
  dst += stride;
2178
18.8k
  left_y = _mm_shuffle_epi32(left, 0x55);
2179
18.8k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2180
18.8k
                               &round);
2181
18.8k
  dst += stride;
2182
18.8k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2183
18.8k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2184
18.8k
                               &round);
2185
18.8k
  dst += stride;
2186
18.8k
  left_y = _mm_shuffle_epi32(left, 0xff);
2187
18.8k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2188
18.8k
                               &round);
2189
18.8k
  dst += stride;
2190
2191
18.8k
  left = cvtepu8_epi32(Load4(left_column + 4));
2192
18.8k
  left_y = _mm_shuffle_epi32(left, 0);
2193
18.8k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2194
18.8k
                               &round);
2195
18.8k
  dst += stride;
2196
18.8k
  left_y = _mm_shuffle_epi32(left, 0x55);
2197
18.8k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2198
18.8k
                               &round);
2199
18.8k
  dst += stride;
2200
18.8k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2201
18.8k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2202
18.8k
                               &round);
2203
18.8k
  dst += stride;
2204
18.8k
  left_y = _mm_shuffle_epi32(left, 0xff);
2205
18.8k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2206
18.8k
                               &round);
2207
18.8k
}
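// These 4-wide kernels are fully unrolled, in part because
// _mm_shuffle_epi32 takes a compile-time immediate; the 8- and 16-wide
// kernels below switch to _mm_shuffle_epi8, whose control mask lives in a
// register and can therefore be stepped inside a loop. A sketch of a
// loopable epi8 equivalent of the pshufd broadcast (illustrative helper,
// not used by this file; requires SSSE3 via <tmmintrin.h>):
static __m128i broadcast_epi32_lane(const __m128i v, int lane) {
  const char b = (char)(lane * 4);  // first byte of the selected 32-bit lane
  const __m128i mask = _mm_setr_epi8(b, b + 1, b + 2, b + 3, b, b + 1, b + 2,
                                     b + 3, b, b + 1, b + 2, b + 3, b, b + 1,
                                     b + 2, b + 3);
  return _mm_shuffle_epi8(v, mask);
}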
2208
2209
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2210
void aom_smooth_h_predictor_4x16_ssse3(
2211
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2212
    const uint8_t *LIBAOM_RESTRICT top_row,
2213
15.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2214
15.0k
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
2215
15.0k
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
2216
15.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2217
15.0k
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
2218
15.0k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2219
15.0k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2220
15.0k
  __m128i left = cvtepu8_epi32(Load4(left_column));
2221
15.0k
  __m128i left_y = _mm_shuffle_epi32(left, 0);
2222
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2223
15.0k
                               &round);
2224
15.0k
  dst += stride;
2225
15.0k
  left_y = _mm_shuffle_epi32(left, 0x55);
2226
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2227
15.0k
                               &round);
2228
15.0k
  dst += stride;
2229
15.0k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2230
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2231
15.0k
                               &round);
2232
15.0k
  dst += stride;
2233
15.0k
  left_y = _mm_shuffle_epi32(left, 0xff);
2234
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2235
15.0k
                               &round);
2236
15.0k
  dst += stride;
2237
2238
15.0k
  left = cvtepu8_epi32(Load4(left_column + 4));
2239
15.0k
  left_y = _mm_shuffle_epi32(left, 0);
2240
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2241
15.0k
                               &round);
2242
15.0k
  dst += stride;
2243
15.0k
  left_y = _mm_shuffle_epi32(left, 0x55);
2244
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2245
15.0k
                               &round);
2246
15.0k
  dst += stride;
2247
15.0k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2248
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2249
15.0k
                               &round);
2250
15.0k
  dst += stride;
2251
15.0k
  left_y = _mm_shuffle_epi32(left, 0xff);
2252
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2253
15.0k
                               &round);
2254
15.0k
  dst += stride;
2255
2256
15.0k
  left = cvtepu8_epi32(Load4(left_column + 8));
2257
15.0k
  left_y = _mm_shuffle_epi32(left, 0);
2258
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2259
15.0k
                               &round);
2260
15.0k
  dst += stride;
2261
15.0k
  left_y = _mm_shuffle_epi32(left, 0x55);
2262
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2263
15.0k
                               &round);
2264
15.0k
  dst += stride;
2265
15.0k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2266
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2267
15.0k
                               &round);
2268
15.0k
  dst += stride;
2269
15.0k
  left_y = _mm_shuffle_epi32(left, 0xff);
2270
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2271
15.0k
                               &round);
2272
15.0k
  dst += stride;
2273
2274
15.0k
  left = cvtepu8_epi32(Load4(left_column + 12));
2275
15.0k
  left_y = _mm_shuffle_epi32(left, 0);
2276
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2277
15.0k
                               &round);
2278
15.0k
  dst += stride;
2279
15.0k
  left_y = _mm_shuffle_epi32(left, 0x55);
2280
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2281
15.0k
                               &round);
2282
15.0k
  dst += stride;
2283
15.0k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2284
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2285
15.0k
                               &round);
2286
15.0k
  dst += stride;
2287
15.0k
  left_y = _mm_shuffle_epi32(left, 0xff);
2288
15.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2289
15.0k
                               &round);
2290
15.0k
}
2291
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2292
2293
// In the write_smooth_directional_sum8/16 helpers: for SMOOTH_H, |pixels| is
2294
// the repeated left value for the row; for SMOOTH_V, |pixels| is a segment of
2295
// the top row or the whole top row, and |weights| is repeated.
2296
void aom_smooth_h_predictor_8x4_ssse3(
2297
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2298
    const uint8_t *LIBAOM_RESTRICT top_row,
2299
31.8k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2300
31.8k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2301
31.8k
  const __m128i left = cvtepu8_epi16(Load4(left_column));
2302
31.8k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2303
31.8k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2304
31.8k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2305
31.8k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2306
31.8k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2307
31.8k
  __m128i y_select = _mm_set1_epi32(0x01000100);
2308
31.8k
  __m128i left_y = _mm_shuffle_epi8(left, y_select);
2309
31.8k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2310
31.8k
                                &round);
2311
31.8k
  dst += stride;
2312
31.8k
  y_select = _mm_set1_epi32(0x03020302);
2313
31.8k
  left_y = _mm_shuffle_epi8(left, y_select);
2314
31.8k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2315
31.8k
                                &round);
2316
31.8k
  dst += stride;
2317
31.8k
  y_select = _mm_set1_epi32(0x05040504);
2318
31.8k
  left_y = _mm_shuffle_epi8(left, y_select);
2319
31.8k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2320
31.8k
                                &round);
2321
31.8k
  dst += stride;
2322
31.8k
  y_select = _mm_set1_epi32(0x07060706);
2323
31.8k
  left_y = _mm_shuffle_epi8(left, y_select);
2324
31.8k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2325
31.8k
                                &round);
2326
31.8k
}
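// Each y_select constant above (0x01000100, 0x03020302, ...) names a byte
// pair: _mm_set1_epi32(0x05040504) repeats bytes {4, 5}, so the pshufb
// replicates the third 16-bit left pixel across all eight lanes. A
// standalone check (compile with -mssse3):
#include <stdio.h>
#include <tmmintrin.h>
int main(void) {
  const __m128i left = _mm_setr_epi16(10, 20, 30, 40, 0, 0, 0, 0);
  const __m128i y_select = _mm_set1_epi32(0x05040504);  // word at bytes 4..5
  const __m128i left_y = _mm_shuffle_epi8(left, y_select);
  printf("%d\n", _mm_extract_epi16(left_y, 7));  // prints 30
  return 0;
}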
2327
2328
void aom_smooth_h_predictor_8x8_ssse3(
2329
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2330
    const uint8_t *LIBAOM_RESTRICT top_row,
2331
44.7k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2332
44.7k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2333
44.7k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2334
44.7k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2335
44.7k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2336
44.7k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2337
44.7k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2338
44.7k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2339
403k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2340
358k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2341
358k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2342
358k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2343
358k
                                  &round);
2344
358k
    dst += stride;
2345
358k
  }
2346
44.7k
}
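// The loop header above is a counter trick: starting from 0x01000100 and
// adding 0x02020202 advances every byte of the pshufb mask by two, stepping
// through the eight row masks 0x01000100, 0x03020302, ..., 0x0F0E0F0E; the
// exclusive bound 0x0F0E0F0F stops after the eighth row. Standalone check:
#include <stdio.h>
int main(void) {
  int rows = 0;
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    printf("row %d: mask 0x%08x\n", rows, y_mask);
    ++rows;
  }
  printf("%d rows\n", rows);  // prints 8 rows
  return 0;
}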
2347
2348
void aom_smooth_h_predictor_8x16_ssse3(
2349
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2350
    const uint8_t *LIBAOM_RESTRICT top_row,
2351
15.6k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2352
15.6k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2353
15.6k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2354
15.6k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2355
15.6k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2356
15.6k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2357
15.6k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2358
15.6k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2359
140k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2360
124k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2361
124k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2362
124k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2363
124k
                                  &round);
2364
124k
    dst += stride;
2365
124k
  }
2366
15.6k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2367
140k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2368
124k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2369
124k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2370
124k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2371
124k
                                  &round);
2372
124k
    dst += stride;
2373
124k
  }
2374
15.6k
}
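// 8x16 is two 8-row passes with |left_column| advanced by 8, and 8x32 below
// repeats the same pass four times. A hypothetical helper capturing that
// repeated unit (a sketch built on write_smooth_directional_sum8 from
// earlier in this file; the shipped code inlines it instead):
static void smooth_h_8x8_pass(uint8_t *dst, ptrdiff_t stride,
                              const __m128i *left,  // 8 left pixels as epi16
                              const __m128i *weights,
                              const __m128i *scaled_top_right,
                              const __m128i *round) {
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    const __m128i y_select = _mm_set1_epi32(y_mask);
    const __m128i left_y = _mm_shuffle_epi8(*left, y_select);
    write_smooth_directional_sum8(dst, &left_y, weights, scaled_top_right,
                                  round);
    dst += stride;
  }
}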
2375
2376
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2377
void aom_smooth_h_predictor_8x32_ssse3(
2378
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2379
    const uint8_t *LIBAOM_RESTRICT top_row,
2380
5.52k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2381
5.52k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2382
5.52k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2383
5.52k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2384
5.52k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2385
5.52k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2386
5.52k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2387
5.52k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2388
49.7k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2389
44.2k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2390
44.2k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2391
44.2k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2392
44.2k
                                  &round);
2393
44.2k
    dst += stride;
2394
44.2k
  }
2395
5.52k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2396
49.7k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2397
44.2k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2398
44.2k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2399
44.2k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2400
44.2k
                                  &round);
2401
44.2k
    dst += stride;
2402
44.2k
  }
2403
5.52k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2404
49.7k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2405
44.2k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2406
44.2k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2407
44.2k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2408
44.2k
                                  &round);
2409
44.2k
    dst += stride;
2410
44.2k
  }
2411
5.52k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2412
49.7k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2413
44.2k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2414
44.2k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2415
44.2k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2416
44.2k
                                  &round);
2417
44.2k
    dst += stride;
2418
44.2k
  }
2419
5.52k
}
2420
2421
void aom_smooth_h_predictor_16x4_ssse3(
2422
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2423
    const uint8_t *LIBAOM_RESTRICT top_row,
2424
22.6k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2425
22.6k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2426
22.6k
  const __m128i left = cvtepu8_epi16(Load4(left_column));
2427
22.6k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2428
22.6k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2429
22.6k
  const __m128i weights1 = cvtepu8_epi16(weights);
2430
22.6k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2431
22.6k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2432
22.6k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2433
22.6k
  const __m128i scaled_top_right1 =
2434
22.6k
      _mm_mullo_epi16(inverted_weights1, top_right);
2435
22.6k
  const __m128i scaled_top_right2 =
2436
22.6k
      _mm_mullo_epi16(inverted_weights2, top_right);
2437
22.6k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2438
22.6k
  __m128i y_mask = _mm_set1_epi32(0x01000100);
2439
22.6k
  __m128i left_y = _mm_shuffle_epi8(left, y_mask);
2440
22.6k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2441
22.6k
                                 scaled_top_right1, scaled_top_right2, round);
2442
22.6k
  dst += stride;
2443
22.6k
  y_mask = _mm_set1_epi32(0x03020302);
2444
22.6k
  left_y = _mm_shuffle_epi8(left, y_mask);
2445
22.6k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2446
22.6k
                                 scaled_top_right1, scaled_top_right2, round);
2447
22.6k
  dst += stride;
2448
22.6k
  y_mask = _mm_set1_epi32(0x05040504);
2449
22.6k
  left_y = _mm_shuffle_epi8(left, y_mask);
2450
22.6k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2451
22.6k
                                 scaled_top_right1, scaled_top_right2, round);
2452
22.6k
  dst += stride;
2453
22.6k
  y_mask = _mm_set1_epi32(0x07060706);
2454
22.6k
  left_y = _mm_shuffle_epi8(left, y_mask);
2455
22.6k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2456
22.6k
                                 scaled_top_right1, scaled_top_right2, round);
2457
22.6k
}
2458
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
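// write_smooth_directional_sum16 (defined earlier in this file) handles a
// 16-pixel row as two epi16 halves. A plausible shape of that helper,
// reconstructed from its call sites here (a sketch, not a verbatim copy):
static inline void write_smooth_directional_sum16_sketch(
    uint8_t *dst, const __m128i pixels1, const __m128i pixels2,
    const __m128i weights1, const __m128i weights2,
    const __m128i scaled_corner1, const __m128i scaled_corner2,
    const __m128i round) {
  const __m128i weighted_px1 = _mm_mullo_epi16(pixels1, weights1);
  const __m128i weighted_px2 = _mm_mullo_epi16(pixels2, weights2);
  // w * px + (256 - w) * corner <= 256 * 255, so the 16-bit lanes cannot
  // overflow even after adding the rounding constant.
  const __m128i pred_sum1 = _mm_add_epi16(scaled_corner1, weighted_px1);
  const __m128i pred_sum2 = _mm_add_epi16(scaled_corner2, weighted_px2);
  const __m128i pred1 = _mm_srli_epi16(_mm_add_epi16(pred_sum1, round), 8);
  const __m128i pred2 = _mm_srli_epi16(_mm_add_epi16(pred_sum2, round), 8);
  _mm_storeu_si128((__m128i *)dst, _mm_packus_epi16(pred1, pred2));
}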
2459
2460
void aom_smooth_h_predictor_16x8_ssse3(
2461
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2462
    const uint8_t *LIBAOM_RESTRICT top_row,
2463
20.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2464
20.0k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2465
20.0k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2466
20.0k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2467
20.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2468
20.0k
  const __m128i weights1 = cvtepu8_epi16(weights);
2469
20.0k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2470
20.0k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2471
20.0k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2472
20.0k
  const __m128i scaled_top_right1 =
2473
20.0k
      _mm_mullo_epi16(inverted_weights1, top_right);
2474
20.0k
  const __m128i scaled_top_right2 =
2475
20.0k
      _mm_mullo_epi16(inverted_weights2, top_right);
2476
20.0k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2477
180k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2478
160k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2479
160k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2480
160k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2481
160k
                                   scaled_top_right1, scaled_top_right2, round);
2482
160k
    dst += stride;
2483
160k
  }
2484
20.0k
}
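// The load offsets into smooth_weights follow one rule: the weights for
// width w start at smooth_weights + (w - 4), because the table concatenates
// the size-4, 8, 16, 32 and 64 rows (4 + 8 + 16 + ... = w - 4). Hence the
// +12 above for width 16, and the +28/+44 and +60/+76/+92/+108 loads in the
// wider kernels below. A sketch of the indexing rule:
static const uint8_t *smooth_weights_for_width(int width) {
  // width 4 -> +0, 8 -> +4, 16 -> +12, 32 -> +28, 64 -> +60.
  return smooth_weights + (width - 4);
}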
2485
2486
void aom_smooth_h_predictor_16x16_ssse3(
2487
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2488
    const uint8_t *LIBAOM_RESTRICT top_row,
2489
39.9k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2490
39.9k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2491
39.9k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2492
39.9k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2493
39.9k
  const __m128i weights1 = cvtepu8_epi16(weights);
2494
39.9k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2495
39.9k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2496
39.9k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2497
39.9k
  const __m128i scaled_top_right1 =
2498
39.9k
      _mm_mullo_epi16(inverted_weights1, top_right);
2499
39.9k
  const __m128i scaled_top_right2 =
2500
39.9k
      _mm_mullo_epi16(inverted_weights2, top_right);
2501
39.9k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2502
39.9k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2503
359k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2504
319k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2505
319k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2506
319k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2507
319k
                                   scaled_top_right1, scaled_top_right2, round);
2508
319k
    dst += stride;
2509
319k
  }
2510
39.9k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2511
359k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2512
319k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2513
319k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2514
319k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2515
319k
                                   scaled_top_right1, scaled_top_right2, round);
2516
319k
    dst += stride;
2517
319k
  }
2518
39.9k
}
2519
2520
void aom_smooth_h_predictor_16x32_ssse3(
2521
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2522
    const uint8_t *LIBAOM_RESTRICT top_row,
2523
11.4k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2524
11.4k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2525
11.4k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2526
11.4k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2527
11.4k
  const __m128i weights1 = cvtepu8_epi16(weights);
2528
11.4k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2529
11.4k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2530
11.4k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2531
11.4k
  const __m128i scaled_top_right1 =
2532
11.4k
      _mm_mullo_epi16(inverted_weights1, top_right);
2533
11.4k
  const __m128i scaled_top_right2 =
2534
11.4k
      _mm_mullo_epi16(inverted_weights2, top_right);
2535
11.4k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2536
11.4k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2537
103k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2538
91.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2539
91.6k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2540
91.6k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2541
91.6k
                                   scaled_top_right1, scaled_top_right2, round);
2542
91.6k
    dst += stride;
2543
91.6k
  }
2544
11.4k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2545
103k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2546
91.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2547
91.6k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2548
91.6k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2549
91.6k
                                   scaled_top_right1, scaled_top_right2, round);
2550
91.6k
    dst += stride;
2551
91.6k
  }
2552
11.4k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2553
103k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2554
91.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2555
91.6k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2556
91.6k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2557
91.6k
                                   scaled_top_right1, scaled_top_right2, round);
2558
91.6k
    dst += stride;
2559
91.6k
  }
2560
11.4k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2561
103k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2562
91.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2563
91.6k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2564
91.6k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2565
91.6k
                                   scaled_top_right1, scaled_top_right2, round);
2566
91.6k
    dst += stride;
2567
91.6k
  }
2568
11.4k
}
2569
2570
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2571
void aom_smooth_h_predictor_16x64_ssse3(
2572
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2573
    const uint8_t *LIBAOM_RESTRICT top_row,
2574
2.70k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2575
2.70k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2576
2.70k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2577
2.70k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2578
2.70k
  const __m128i weights1 = cvtepu8_epi16(weights);
2579
2.70k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2580
2.70k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2581
2.70k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2582
2.70k
  const __m128i scaled_top_right1 =
2583
2.70k
      _mm_mullo_epi16(inverted_weights1, top_right);
2584
2.70k
  const __m128i scaled_top_right2 =
2585
2.70k
      _mm_mullo_epi16(inverted_weights2, top_right);
2586
2.70k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2587
24.3k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
2588
21.6k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
2589
194k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2590
172k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2591
172k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2592
172k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2593
172k
                                     scaled_top_right1, scaled_top_right2,
2594
172k
                                     round);
2595
172k
      dst += stride;
2596
172k
    }
2597
21.6k
  }
2598
2.70k
}
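// At height 64 the unrolled per-8-row copies would get unwieldy, so 16x64
// wraps the 8-row pass in an outer loop over |left_offset| instead. The same
// structure as a generic-height sketch (hypothetical helper reusing this
// file's LoadLo8/cvtepu8_epi16 and write_smooth_directional_sum16):
static void smooth_h_16xh_sketch(uint8_t *dst, ptrdiff_t stride, int height,
                                 const uint8_t *left_column,
                                 const __m128i weights1,
                                 const __m128i weights2,
                                 const __m128i scaled_top_right1,
                                 const __m128i scaled_top_right2,
                                 const __m128i round) {
  for (int left_offset = 0; left_offset < height; left_offset += 8) {
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
      const __m128i y_select = _mm_set1_epi32(y_mask);
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
                                     scaled_top_right1, scaled_top_right2,
                                     round);
      dst += stride;
    }
  }
}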
2599
2600
void aom_smooth_h_predictor_32x8_ssse3(
2601
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2602
    const uint8_t *LIBAOM_RESTRICT top_row,
2603
15.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2604
15.5k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2605
15.5k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2606
15.5k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2607
15.5k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2608
15.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2609
15.5k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2610
15.5k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2611
15.5k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2612
15.5k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2613
15.5k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2614
15.5k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2615
15.5k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2616
15.5k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2617
15.5k
  const __m128i scaled_top_right1 =
2618
15.5k
      _mm_mullo_epi16(inverted_weights1, top_right);
2619
15.5k
  const __m128i scaled_top_right2 =
2620
15.5k
      _mm_mullo_epi16(inverted_weights2, top_right);
2621
15.5k
  const __m128i scaled_top_right3 =
2622
15.5k
      _mm_mullo_epi16(inverted_weights3, top_right);
2623
15.5k
  const __m128i scaled_top_right4 =
2624
15.5k
      _mm_mullo_epi16(inverted_weights4, top_right);
2625
15.5k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2626
140k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2627
124k
    __m128i y_select = _mm_set1_epi32(y_mask);
2628
124k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2629
124k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2630
124k
                                   scaled_top_right1, scaled_top_right2, round);
2631
124k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2632
124k
                                   scaled_top_right3, scaled_top_right4, round);
2633
124k
    dst += stride;
2634
124k
  }
2635
15.5k
}
2636
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2637
2638
void aom_smooth_h_predictor_32x16_ssse3(
2639
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2640
    const uint8_t *LIBAOM_RESTRICT top_row,
2641
8.81k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2642
8.81k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2643
8.81k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2644
8.81k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2645
8.81k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2646
8.81k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2647
8.81k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2648
8.81k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2649
8.81k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2650
8.81k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2651
8.81k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2652
8.81k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2653
8.81k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2654
8.81k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2655
8.81k
  const __m128i scaled_top_right1 =
2656
8.81k
      _mm_mullo_epi16(inverted_weights1, top_right);
2657
8.81k
  const __m128i scaled_top_right2 =
2658
8.81k
      _mm_mullo_epi16(inverted_weights2, top_right);
2659
8.81k
  const __m128i scaled_top_right3 =
2660
8.81k
      _mm_mullo_epi16(inverted_weights3, top_right);
2661
8.81k
  const __m128i scaled_top_right4 =
2662
8.81k
      _mm_mullo_epi16(inverted_weights4, top_right);
2663
8.81k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2664
79.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2665
70.5k
    __m128i y_select = _mm_set1_epi32(y_mask);
2666
70.5k
    __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2667
70.5k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2668
70.5k
                                   scaled_top_right1, scaled_top_right2, round);
2669
70.5k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2670
70.5k
                                   scaled_top_right3, scaled_top_right4, round);
2671
70.5k
    dst += stride;
2672
70.5k
  }
2673
8.81k
  const __m128i left2 =
2674
8.81k
      cvtepu8_epi16(LoadLo8((const uint8_t *)left_column + 8));
2675
79.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2676
70.5k
    __m128i y_select = _mm_set1_epi32(y_mask);
2677
70.5k
    __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2678
70.5k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2679
70.5k
                                   scaled_top_right1, scaled_top_right2, round);
2680
70.5k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2681
70.5k
                                   scaled_top_right3, scaled_top_right4, round);
2682
70.5k
    dst += stride;
2683
70.5k
  }
2684
8.81k
}
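// A 32-wide row is two independent 16-pixel stores: weights1/2 with
// scaled_top_right1/2 feed dst, and weights3/4 with scaled_top_right3/4 feed
// dst + 16 (the 64-wide kernels extend this to four stores per row). One
// such row, factored out as a sketch:
static void smooth_h_row32_sketch(uint8_t *dst, const __m128i left_y,
                                  const __m128i w[4], const __m128i str[4],
                                  const __m128i round) {
  write_smooth_directional_sum16(dst, left_y, left_y, w[0], w[1], str[0],
                                 str[1], round);
  write_smooth_directional_sum16(dst + 16, left_y, left_y, w[2], w[3], str[2],
                                 str[3], round);
}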
2685
2686
void aom_smooth_h_predictor_32x32_ssse3(
2687
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2688
    const uint8_t *LIBAOM_RESTRICT top_row,
2689
39.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2690
39.5k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2691
39.5k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2692
39.5k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2693
39.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2694
39.5k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2695
39.5k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2696
39.5k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2697
39.5k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2698
39.5k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2699
39.5k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2700
39.5k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2701
39.5k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2702
39.5k
  const __m128i scaled_top_right1 =
2703
39.5k
      _mm_mullo_epi16(inverted_weights1, top_right);
2704
39.5k
  const __m128i scaled_top_right2 =
2705
39.5k
      _mm_mullo_epi16(inverted_weights2, top_right);
2706
39.5k
  const __m128i scaled_top_right3 =
2707
39.5k
      _mm_mullo_epi16(inverted_weights3, top_right);
2708
39.5k
  const __m128i scaled_top_right4 =
2709
39.5k
      _mm_mullo_epi16(inverted_weights4, top_right);
2710
39.5k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2711
39.5k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2712
355k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2713
316k
    __m128i y_select = _mm_set1_epi32(y_mask);
2714
316k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2715
316k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2716
316k
                                   scaled_top_right1, scaled_top_right2, round);
2717
316k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2718
316k
                                   scaled_top_right3, scaled_top_right4, round);
2719
316k
    dst += stride;
2720
316k
  }
2721
39.5k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2722
355k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2723
316k
    __m128i y_select = _mm_set1_epi32(y_mask);
2724
316k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2725
316k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2726
316k
                                   scaled_top_right1, scaled_top_right2, round);
2727
316k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2728
316k
                                   scaled_top_right3, scaled_top_right4, round);
2729
316k
    dst += stride;
2730
316k
  }
2731
39.5k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2732
355k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2733
316k
    __m128i y_select = _mm_set1_epi32(y_mask);
2734
316k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2735
316k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2736
316k
                                   scaled_top_right1, scaled_top_right2, round);
2737
316k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2738
316k
                                   scaled_top_right3, scaled_top_right4, round);
2739
316k
    dst += stride;
2740
316k
  }
2741
39.5k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2742
355k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2743
316k
    __m128i y_select = _mm_set1_epi32(y_mask);
2744
316k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2745
316k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2746
316k
                                   scaled_top_right1, scaled_top_right2, round);
2747
316k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2748
316k
                                   scaled_top_right3, scaled_top_right4, round);
2749
316k
    dst += stride;
2750
316k
  }
2751
39.5k
}
2752
2753
void aom_smooth_h_predictor_32x64_ssse3(
2754
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2755
    const uint8_t *LIBAOM_RESTRICT top_row,
2756
769
    const uint8_t *LIBAOM_RESTRICT left_column) {
2757
769
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2758
769
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2759
769
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2760
769
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2761
769
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2762
769
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2763
769
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2764
769
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2765
769
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2766
769
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2767
769
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2768
769
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2769
769
  const __m128i scaled_top_right1 =
2770
769
      _mm_mullo_epi16(inverted_weights1, top_right);
2771
769
  const __m128i scaled_top_right2 =
2772
769
      _mm_mullo_epi16(inverted_weights2, top_right);
2773
769
  const __m128i scaled_top_right3 =
2774
769
      _mm_mullo_epi16(inverted_weights3, top_right);
2775
769
  const __m128i scaled_top_right4 =
2776
769
      _mm_mullo_epi16(inverted_weights4, top_right);
2777
769
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2778
6.92k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
2779
6.15k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
2780
55.3k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2781
49.2k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2782
49.2k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2783
49.2k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2784
49.2k
                                     scaled_top_right1, scaled_top_right2,
2785
49.2k
                                     round);
2786
49.2k
      write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3,
2787
49.2k
                                     weights4, scaled_top_right3,
2788
49.2k
                                     scaled_top_right4, round);
2789
49.2k
      dst += stride;
2790
49.2k
    }
2791
6.15k
  }
2792
769
}
2793
2794
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2795
void aom_smooth_h_predictor_64x16_ssse3(
2796
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2797
    const uint8_t *LIBAOM_RESTRICT top_row,
2798
4.79k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2799
4.79k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2800
4.79k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2801
4.79k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2802
4.79k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2803
4.79k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2804
4.79k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2805
4.79k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2806
4.79k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2807
4.79k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2808
4.79k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2809
4.79k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2810
4.79k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2811
4.79k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2812
4.79k
  const __m128i scaled_top_right1 =
2813
4.79k
      _mm_mullo_epi16(inverted_weights1, top_right);
2814
4.79k
  const __m128i scaled_top_right2 =
2815
4.79k
      _mm_mullo_epi16(inverted_weights2, top_right);
2816
4.79k
  const __m128i scaled_top_right3 =
2817
4.79k
      _mm_mullo_epi16(inverted_weights3, top_right);
2818
4.79k
  const __m128i scaled_top_right4 =
2819
4.79k
      _mm_mullo_epi16(inverted_weights4, top_right);
2820
4.79k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2821
4.79k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2822
4.79k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2823
4.79k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2824
4.79k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2825
4.79k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
2826
4.79k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
2827
4.79k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
2828
4.79k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
2829
4.79k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
2830
4.79k
  const __m128i scaled_top_right5 =
2831
4.79k
      _mm_mullo_epi16(inverted_weights5, top_right);
2832
4.79k
  const __m128i scaled_top_right6 =
2833
4.79k
      _mm_mullo_epi16(inverted_weights6, top_right);
2834
4.79k
  const __m128i scaled_top_right7 =
2835
4.79k
      _mm_mullo_epi16(inverted_weights7, top_right);
2836
4.79k
  const __m128i scaled_top_right8 =
2837
4.79k
      _mm_mullo_epi16(inverted_weights8, top_right);
2838
4.79k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2839
43.1k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2840
38.3k
    __m128i y_select = _mm_set1_epi32(y_mask);
2841
38.3k
    __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2842
38.3k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2843
38.3k
                                   scaled_top_right1, scaled_top_right2, round);
2844
38.3k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2845
38.3k
                                   scaled_top_right3, scaled_top_right4, round);
2846
38.3k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2847
38.3k
                                   scaled_top_right5, scaled_top_right6, round);
2848
38.3k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2849
38.3k
                                   scaled_top_right7, scaled_top_right8, round);
2850
38.3k
    dst += stride;
2851
38.3k
  }
2852
4.79k
  const __m128i left2 = cvtepu8_epi16(LoadLo8(left_column + 8));
2853
43.1k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2854
38.3k
    __m128i y_select = _mm_set1_epi32(y_mask);
2855
38.3k
    __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2856
38.3k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2857
38.3k
                                   scaled_top_right1, scaled_top_right2, round);
2858
38.3k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2859
38.3k
                                   scaled_top_right3, scaled_top_right4, round);
2860
38.3k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2861
38.3k
                                   scaled_top_right5, scaled_top_right6, round);
2862
38.3k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2863
38.3k
                                   scaled_top_right7, scaled_top_right8, round);
2864
38.3k
    dst += stride;
2865
38.3k
  }
2866
4.79k
}
2867
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2868
2869
void aom_smooth_h_predictor_64x32_ssse3(
2870
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2871
    const uint8_t *LIBAOM_RESTRICT top_row,
2872
1.17k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2873
1.17k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2874
1.17k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2875
1.17k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2876
1.17k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2877
1.17k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2878
1.17k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2879
1.17k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2880
1.17k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2881
1.17k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2882
1.17k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2883
1.17k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2884
1.17k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2885
1.17k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2886
1.17k
  const __m128i scaled_top_right1 =
2887
1.17k
      _mm_mullo_epi16(inverted_weights1, top_right);
2888
1.17k
  const __m128i scaled_top_right2 =
2889
1.17k
      _mm_mullo_epi16(inverted_weights2, top_right);
2890
1.17k
  const __m128i scaled_top_right3 =
2891
1.17k
      _mm_mullo_epi16(inverted_weights3, top_right);
2892
1.17k
  const __m128i scaled_top_right4 =
2893
1.17k
      _mm_mullo_epi16(inverted_weights4, top_right);
2894
1.17k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2895
1.17k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2896
1.17k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2897
1.17k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2898
1.17k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2899
1.17k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
2900
1.17k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
2901
1.17k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
2902
1.17k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
2903
1.17k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
2904
1.17k
  const __m128i scaled_top_right5 =
2905
1.17k
      _mm_mullo_epi16(inverted_weights5, top_right);
2906
1.17k
  const __m128i scaled_top_right6 =
2907
1.17k
      _mm_mullo_epi16(inverted_weights6, top_right);
2908
1.17k
  const __m128i scaled_top_right7 =
2909
1.17k
      _mm_mullo_epi16(inverted_weights7, top_right);
2910
1.17k
  const __m128i scaled_top_right8 =
2911
1.17k
      _mm_mullo_epi16(inverted_weights8, top_right);
2912
1.17k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2913
10.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2914
9.43k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2915
9.43k
    const __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2916
9.43k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2917
9.43k
                                   scaled_top_right1, scaled_top_right2, round);
2918
9.43k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2919
9.43k
                                   scaled_top_right3, scaled_top_right4, round);
2920
9.43k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2921
9.43k
                                   scaled_top_right5, scaled_top_right6, round);
2922
9.43k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2923
9.43k
                                   scaled_top_right7, scaled_top_right8, round);
2924
9.43k
    dst += stride;
2925
9.43k
  }
2926
1.17k
  const __m128i left2 = cvtepu8_epi16(LoadLo8(left_column + 8));
2927
10.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2928
9.43k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2929
9.43k
    const __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2930
9.43k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2931
9.43k
                                   scaled_top_right1, scaled_top_right2, round);
2932
9.43k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2933
9.43k
                                   scaled_top_right3, scaled_top_right4, round);
2934
9.43k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2935
9.43k
                                   scaled_top_right5, scaled_top_right6, round);
2936
9.43k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2937
9.43k
                                   scaled_top_right7, scaled_top_right8, round);
2938
9.43k
    dst += stride;
2939
9.43k
  }
2940
1.17k
  const __m128i left3 = cvtepu8_epi16(LoadLo8(left_column + 16));
2941
10.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2942
9.43k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2943
9.43k
    const __m128i left_y = _mm_shuffle_epi8(left3, y_select);
2944
9.43k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2945
9.43k
                                   scaled_top_right1, scaled_top_right2, round);
2946
9.43k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2947
9.43k
                                   scaled_top_right3, scaled_top_right4, round);
2948
9.43k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2949
9.43k
                                   scaled_top_right5, scaled_top_right6, round);
2950
9.43k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2951
9.43k
                                   scaled_top_right7, scaled_top_right8, round);
2952
9.43k
    dst += stride;
2953
9.43k
  }
2954
1.17k
  const __m128i left4 = cvtepu8_epi16(LoadLo8(left_column + 24));
2955
10.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2956
9.43k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2957
9.43k
    const __m128i left_y = _mm_shuffle_epi8(left4, y_select);
2958
9.43k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2959
9.43k
                                   scaled_top_right1, scaled_top_right2, round);
2960
9.43k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2961
9.43k
                                   scaled_top_right3, scaled_top_right4, round);
2962
9.43k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2963
9.43k
                                   scaled_top_right5, scaled_top_right6, round);
2964
9.43k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2965
9.43k
                                   scaled_top_right7, scaled_top_right8, round);
2966
9.43k
    dst += stride;
2967
9.43k
  }
2968
1.17k
}
2969
2970
void aom_smooth_h_predictor_64x64_ssse3(
2971
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2972
    const uint8_t *LIBAOM_RESTRICT top_row,
2973
4.55k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2974
4.55k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2975
4.55k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2976
4.55k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2977
4.55k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2978
4.55k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2979
4.55k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2980
4.55k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2981
4.55k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2982
4.55k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2983
4.55k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2984
4.55k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2985
4.55k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2986
4.55k
  const __m128i scaled_top_right1 =
2987
4.55k
      _mm_mullo_epi16(inverted_weights1, top_right);
2988
4.55k
  const __m128i scaled_top_right2 =
2989
4.55k
      _mm_mullo_epi16(inverted_weights2, top_right);
2990
4.55k
  const __m128i scaled_top_right3 =
2991
4.55k
      _mm_mullo_epi16(inverted_weights3, top_right);
2992
4.55k
  const __m128i scaled_top_right4 =
2993
4.55k
      _mm_mullo_epi16(inverted_weights4, top_right);
2994
4.55k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2995
4.55k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2996
4.55k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2997
4.55k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2998
4.55k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2999
4.55k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
3000
4.55k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
3001
4.55k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
3002
4.55k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
3003
4.55k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
3004
4.55k
  const __m128i scaled_top_right5 =
3005
4.55k
      _mm_mullo_epi16(inverted_weights5, top_right);
3006
4.55k
  const __m128i scaled_top_right6 =
3007
4.55k
      _mm_mullo_epi16(inverted_weights6, top_right);
3008
4.55k
  const __m128i scaled_top_right7 =
3009
4.55k
      _mm_mullo_epi16(inverted_weights7, top_right);
3010
4.55k
  const __m128i scaled_top_right8 =
3011
4.55k
      _mm_mullo_epi16(inverted_weights8, top_right);
3012
4.55k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
3013
40.9k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
3014
36.4k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
3015
327k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
3016
291k
      const __m128i y_select = _mm_set1_epi32(y_mask);
3017
291k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
3018
291k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
3019
291k
                                     scaled_top_right1, scaled_top_right2,
3020
291k
                                     round);
3021
291k
      write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3,
3022
291k
                                     weights4, scaled_top_right3,
3023
291k
                                     scaled_top_right4, round);
3024
291k
      write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5,
3025
291k
                                     weights6, scaled_top_right5,
3026
291k
                                     scaled_top_right6, round);
3027
291k
      write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7,
3028
291k
                                     weights8, scaled_top_right7,
3029
291k
                                     scaled_top_right8, round);
3030
291k
      dst += stride;
3031
291k
    }
3032
36.4k
  }
3033
4.55k
}
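// Every SMOOTH_H kernel above computes the same per-pixel blend, just tiled
// into 16-pixel stores with precomputed per-column weights. A minimal scalar
// reference for any WxH block (a sketch assuming
// SMOOTH_WEIGHT_LOG2_SCALE == 8 and the smooth_weights layout noted above):
#include <stddef.h>
#include <stdint.h>
static void smooth_h_scalar(uint8_t *dst, ptrdiff_t stride, int width,
                            int height, const uint8_t *top_row,
                            const uint8_t *left_column) {
  const int top_right = top_row[width - 1];
  const uint8_t *weights = smooth_weights + (width - 4);
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      // RightShiftWithRounding(w * left + (256 - w) * top_right, 8)
      dst[x] = (uint8_t)((weights[x] * left_column[y] +
                          (256 - weights[x]) * top_right + 128) >> 8);
    }
    dst += stride;
  }
}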