Coverage Report

Created: 2025-11-16 07:09

/src/aom/aom_dsp/x86/intrapred_ssse3.c

 Line|  Count|Source
    1|       |/*
    2|       | * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
    3|       | *
    4|       | * This source code is subject to the terms of the BSD 2 Clause License and
    5|       | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
    6|       | * was not distributed with this source code in the LICENSE file, you can
    7|       | * obtain it at www.aomedia.org/license/software. If the Alliance for Open
    8|       | * Media Patent License 1.0 was not distributed with this source code in the
    9|       | * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
   10|       | */
   11|       |
   12|       |#include <tmmintrin.h>
   13|       |
   14|       |#include "config/aom_dsp_rtcd.h"
   15|       |
   16|       |#include "aom_dsp/intrapred_common.h"
   17|       |
   18|       |// -----------------------------------------------------------------------------
   19|       |// PAETH_PRED
   20|       |
   21|       |// Return 8 16-bit pixels in one row
   22|       |static inline __m128i paeth_8x1_pred(const __m128i *left, const __m128i *top,
   23|  9.19M|                                     const __m128i *topleft) {
   24|  9.19M|  const __m128i base = _mm_sub_epi16(_mm_add_epi16(*top, *left), *topleft);
   25|       |
   26|  9.19M|  __m128i pl = _mm_abs_epi16(_mm_sub_epi16(base, *left));
   27|  9.19M|  __m128i pt = _mm_abs_epi16(_mm_sub_epi16(base, *top));
   28|  9.19M|  __m128i ptl = _mm_abs_epi16(_mm_sub_epi16(base, *topleft));
   29|       |
   30|  9.19M|  __m128i mask1 = _mm_cmpgt_epi16(pl, pt);
   31|  9.19M|  mask1 = _mm_or_si128(mask1, _mm_cmpgt_epi16(pl, ptl));
   32|  9.19M|  __m128i mask2 = _mm_cmpgt_epi16(pt, ptl);
   33|       |
   34|  9.19M|  pl = _mm_andnot_si128(mask1, *left);
   35|       |
   36|  9.19M|  ptl = _mm_and_si128(mask2, *topleft);
   37|  9.19M|  pt = _mm_andnot_si128(mask2, *top);
   38|  9.19M|  pt = _mm_or_si128(pt, ptl);
   39|  9.19M|  pt = _mm_and_si128(mask1, pt);
   40|       |
   41|  9.19M|  return _mm_or_si128(pl, pt);
   42|  9.19M|}
   43|       |
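Note: as a reading aid, here is a scalar sketch of the selection rule that paeth_8x1_pred() implements branchlessly with the three masks above. This is a re-derivation for this report, not code from the file; paeth_scalar() is a hypothetical helper.

#include <stdlib.h>  // abs(); assumed include for this sketch

// One lane of paeth_8x1_pred(): predict from whichever of left, top and
// topleft is closest to base = top + left - topleft.
static int paeth_scalar(int left, int top, int topleft) {
  const int base = top + left - topleft;
  const int p_left = abs(base - left);
  const int p_top = abs(base - top);
  const int p_topleft = abs(base - topleft);
  // mask1 = (p_left > p_top) | (p_left > p_topleft): clear means left wins.
  // mask2 = (p_top > p_topleft): picks topleft over top when set.
  if (p_left <= p_top && p_left <= p_topleft) return left;
  if (p_top <= p_topleft) return top;
  return topleft;
}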
   44|       |void aom_paeth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
   45|   678k|                                   const uint8_t *above, const uint8_t *left) {
   46|   678k|  __m128i l = _mm_loadl_epi64((const __m128i *)left);
   47|   678k|  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
   48|   678k|  const __m128i zero = _mm_setzero_si128();
   49|   678k|  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
   50|   678k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
   51|   678k|  __m128i rep = _mm_set1_epi16((short)0x8000);
   52|   678k|  const __m128i one = _mm_set1_epi16(1);
   53|       |
   54|   678k|  int i;
   55|  3.39M|  for (i = 0; i < 4; ++i) {
   56|  2.71M|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
   57|  2.71M|    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
   58|       |
   59|  2.71M|    *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
   60|  2.71M|    dst += stride;
   61|  2.71M|    rep = _mm_add_epi16(rep, one);
   62|  2.71M|  }
   63|   678k|}
   64|       |
   65|       |void aom_paeth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
   66|  53.9k|                                   const uint8_t *above, const uint8_t *left) {
   67|  53.9k|  __m128i l = _mm_loadl_epi64((const __m128i *)left);
   68|  53.9k|  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
   69|  53.9k|  const __m128i zero = _mm_setzero_si128();
   70|  53.9k|  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
   71|  53.9k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
   72|  53.9k|  __m128i rep = _mm_set1_epi16((short)0x8000);
   73|  53.9k|  const __m128i one = _mm_set1_epi16(1);
   74|       |
   75|  53.9k|  int i;
   76|   485k|  for (i = 0; i < 8; ++i) {
   77|   431k|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
   78|   431k|    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
   79|       |
   80|   431k|    *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
   81|   431k|    dst += stride;
   82|   431k|    rep = _mm_add_epi16(rep, one);
   83|   431k|  }
   84|  53.9k|}
   85|       |
   86|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
   87|       |void aom_paeth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride,
   88|  62.3k|                                    const uint8_t *above, const uint8_t *left) {
   89|  62.3k|  __m128i l = _mm_load_si128((const __m128i *)left);
   90|  62.3k|  const __m128i t = _mm_cvtsi32_si128(((const int *)above)[0]);
   91|  62.3k|  const __m128i zero = _mm_setzero_si128();
   92|  62.3k|  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
   93|  62.3k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
   94|  62.3k|  __m128i rep = _mm_set1_epi16((short)0x8000);
   95|  62.3k|  const __m128i one = _mm_set1_epi16(1);
   96|       |
   97|  1.05M|  for (int i = 0; i < 16; ++i) {
   98|   996k|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
   99|   996k|    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  100|       |
  101|   996k|    *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
  102|   996k|    dst += stride;
  103|   996k|    rep = _mm_add_epi16(rep, one);
  104|   996k|  }
  105|  62.3k|}
  106|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  107|       |
  108|       |void aom_paeth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride,
  109|  79.9k|                                   const uint8_t *above, const uint8_t *left) {
  110|  79.9k|  __m128i l = _mm_loadl_epi64((const __m128i *)left);
  111|  79.9k|  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
  112|  79.9k|  const __m128i zero = _mm_setzero_si128();
  113|  79.9k|  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
  114|  79.9k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  115|  79.9k|  __m128i rep = _mm_set1_epi16((short)0x8000);
  116|  79.9k|  const __m128i one = _mm_set1_epi16(1);
  117|       |
  118|  79.9k|  int i;
  119|   399k|  for (i = 0; i < 4; ++i) {
  120|   319k|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
  121|   319k|    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  122|       |
  123|   319k|    _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
  124|   319k|    dst += stride;
  125|   319k|    rep = _mm_add_epi16(rep, one);
  126|   319k|  }
  127|  79.9k|}
  128|       |
  129|       |void aom_paeth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  130|   172k|                                   const uint8_t *above, const uint8_t *left) {
  131|   172k|  __m128i l = _mm_loadl_epi64((const __m128i *)left);
  132|   172k|  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
  133|   172k|  const __m128i zero = _mm_setzero_si128();
  134|   172k|  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
  135|   172k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  136|   172k|  __m128i rep = _mm_set1_epi16((short)0x8000);
  137|   172k|  const __m128i one = _mm_set1_epi16(1);
  138|       |
  139|   172k|  int i;
  140|  1.55M|  for (i = 0; i < 8; ++i) {
  141|  1.38M|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
  142|  1.38M|    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  143|       |
  144|  1.38M|    _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
  145|  1.38M|    dst += stride;
  146|  1.38M|    rep = _mm_add_epi16(rep, one);
  147|  1.38M|  }
  148|   172k|}
  149|       |
  150|       |void aom_paeth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  151|  48.9k|                                    const uint8_t *above, const uint8_t *left) {
  152|  48.9k|  __m128i l = _mm_load_si128((const __m128i *)left);
  153|  48.9k|  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
  154|  48.9k|  const __m128i zero = _mm_setzero_si128();
  155|  48.9k|  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
  156|  48.9k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  157|  48.9k|  __m128i rep = _mm_set1_epi16((short)0x8000);
  158|  48.9k|  const __m128i one = _mm_set1_epi16(1);
  159|       |
  160|  48.9k|  int i;
  161|   831k|  for (i = 0; i < 16; ++i) {
  162|   782k|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
  163|   782k|    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  164|       |
  165|   782k|    _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
  166|   782k|    dst += stride;
  167|   782k|    rep = _mm_add_epi16(rep, one);
  168|   782k|  }
  169|  48.9k|}
  170|       |
  171|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  172|       |void aom_paeth_predictor_8x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  173|  32.2k|                                    const uint8_t *above, const uint8_t *left) {
  174|  32.2k|  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
  175|  32.2k|  const __m128i zero = _mm_setzero_si128();
  176|  32.2k|  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
  177|  32.2k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  178|  32.2k|  const __m128i one = _mm_set1_epi16(1);
  179|       |
  180|  96.7k|  for (int j = 0; j < 2; ++j) {
  181|  64.5k|    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  182|  64.5k|    __m128i rep = _mm_set1_epi16((short)0x8000);
  183|  1.09M|    for (int i = 0; i < 16; ++i) {
  184|  1.03M|      const __m128i l16 = _mm_shuffle_epi8(l, rep);
  185|  1.03M|      const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
  186|       |
  187|  1.03M|      _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
  188|  1.03M|      dst += stride;
  189|  1.03M|      rep = _mm_add_epi16(rep, one);
  190|  1.03M|    }
  191|  64.5k|  }
  192|  32.2k|}
  193|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  194|       |
  195|       |// Return 16 8-bit pixels in one row
  196|       |static inline __m128i paeth_16x1_pred(const __m128i *left, const __m128i *top0,
  197|       |                                      const __m128i *top1,
  198|   768k|                                      const __m128i *topleft) {
  199|   768k|  const __m128i p0 = paeth_8x1_pred(left, top0, topleft);
  200|   768k|  const __m128i p1 = paeth_8x1_pred(left, top1, topleft);
  201|   768k|  return _mm_packus_epi16(p0, p1);
  202|   768k|}
  203|       |
  204|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  205|       |void aom_paeth_predictor_16x4_ssse3(uint8_t *dst, ptrdiff_t stride,
  206|  70.6k|                                    const uint8_t *above, const uint8_t *left) {
  207|  70.6k|  __m128i l = _mm_cvtsi32_si128(((const int *)left)[0]);
  208|  70.6k|  const __m128i t = _mm_load_si128((const __m128i *)above);
  209|  70.6k|  const __m128i zero = _mm_setzero_si128();
  210|  70.6k|  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  211|  70.6k|  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  212|  70.6k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  213|  70.6k|  __m128i rep = _mm_set1_epi16((short)0x8000);
  214|  70.6k|  const __m128i one = _mm_set1_epi16(1);
  215|       |
  216|   353k|  for (int i = 0; i < 4; ++i) {
  217|   282k|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
  218|   282k|    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  219|       |
  220|   282k|    _mm_store_si128((__m128i *)dst, row);
  221|   282k|    dst += stride;
  222|   282k|    rep = _mm_add_epi16(rep, one);
  223|   282k|  }
  224|  70.6k|}
  225|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  226|       |
  227|       |void aom_paeth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  228|      0|                                    const uint8_t *above, const uint8_t *left) {
  229|      0|  __m128i l = _mm_loadl_epi64((const __m128i *)left);
  230|      0|  const __m128i t = _mm_load_si128((const __m128i *)above);
  231|      0|  const __m128i zero = _mm_setzero_si128();
  232|      0|  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  233|      0|  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  234|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  235|      0|  __m128i rep = _mm_set1_epi16((short)0x8000);
  236|      0|  const __m128i one = _mm_set1_epi16(1);
  237|       |
  238|      0|  int i;
  239|      0|  for (i = 0; i < 8; ++i) {
  240|      0|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
  241|      0|    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  242|       |
  243|      0|    _mm_store_si128((__m128i *)dst, row);
  244|      0|    dst += stride;
  245|      0|    rep = _mm_add_epi16(rep, one);
  246|      0|  }
  247|      0|}
  248|       |
  249|       |void aom_paeth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  250|       |                                     const uint8_t *above,
  251|      0|                                     const uint8_t *left) {
  252|      0|  __m128i l = _mm_load_si128((const __m128i *)left);
  253|      0|  const __m128i t = _mm_load_si128((const __m128i *)above);
  254|      0|  const __m128i zero = _mm_setzero_si128();
  255|      0|  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  256|      0|  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  257|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  258|      0|  __m128i rep = _mm_set1_epi16((short)0x8000);
  259|      0|  const __m128i one = _mm_set1_epi16(1);
  260|       |
  261|      0|  int i;
  262|      0|  for (i = 0; i < 16; ++i) {
  263|      0|    const __m128i l16 = _mm_shuffle_epi8(l, rep);
  264|      0|    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  265|       |
  266|      0|    _mm_store_si128((__m128i *)dst, row);
  267|      0|    dst += stride;
  268|      0|    rep = _mm_add_epi16(rep, one);
  269|      0|  }
  270|      0|}
  271|       |
  272|       |void aom_paeth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  273|       |                                     const uint8_t *above,
  274|      0|                                     const uint8_t *left) {
  275|      0|  __m128i l = _mm_load_si128((const __m128i *)left);
  276|      0|  const __m128i t = _mm_load_si128((const __m128i *)above);
  277|      0|  const __m128i zero = _mm_setzero_si128();
  278|      0|  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  279|      0|  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  280|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  281|      0|  __m128i rep = _mm_set1_epi16((short)0x8000);
  282|      0|  const __m128i one = _mm_set1_epi16(1);
  283|      0|  __m128i l16;
  284|       |
  285|      0|  int i;
  286|      0|  for (i = 0; i < 16; ++i) {
  287|      0|    l16 = _mm_shuffle_epi8(l, rep);
  288|      0|    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  289|       |
  290|      0|    _mm_store_si128((__m128i *)dst, row);
  291|      0|    dst += stride;
  292|      0|    rep = _mm_add_epi16(rep, one);
  293|      0|  }
  294|       |
  295|      0|  l = _mm_load_si128((const __m128i *)(left + 16));
  296|      0|  rep = _mm_set1_epi16((short)0x8000);
  297|      0|  for (i = 0; i < 16; ++i) {
  298|      0|    l16 = _mm_shuffle_epi8(l, rep);
  299|      0|    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  300|       |
  301|      0|    _mm_store_si128((__m128i *)dst, row);
  302|      0|    dst += stride;
  303|      0|    rep = _mm_add_epi16(rep, one);
  304|      0|  }
  305|      0|}
  306|       |
  307|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  308|       |void aom_paeth_predictor_16x64_ssse3(uint8_t *dst, ptrdiff_t stride,
  309|       |                                     const uint8_t *above,
  310|      0|                                     const uint8_t *left) {
  311|      0|  const __m128i t = _mm_load_si128((const __m128i *)above);
  312|      0|  const __m128i zero = _mm_setzero_si128();
  313|      0|  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
  314|      0|  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
  315|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  316|      0|  const __m128i one = _mm_set1_epi16(1);
  317|       |
  318|      0|  for (int j = 0; j < 4; ++j) {
  319|      0|    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  320|      0|    __m128i rep = _mm_set1_epi16((short)0x8000);
  321|      0|    for (int i = 0; i < 16; ++i) {
  322|      0|      const __m128i l16 = _mm_shuffle_epi8(l, rep);
  323|      0|      const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
  324|      0|      _mm_store_si128((__m128i *)dst, row);
  325|      0|      dst += stride;
  326|      0|      rep = _mm_add_epi16(rep, one);
  327|      0|    }
  328|      0|  }
  329|      0|}
  330|       |
  331|       |void aom_paeth_predictor_32x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  332|  30.3k|                                    const uint8_t *above, const uint8_t *left) {
  333|  30.3k|  const __m128i a = _mm_load_si128((const __m128i *)above);
  334|  30.3k|  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  335|  30.3k|  const __m128i zero = _mm_setzero_si128();
  336|  30.3k|  const __m128i al = _mm_unpacklo_epi8(a, zero);
  337|  30.3k|  const __m128i ah = _mm_unpackhi_epi8(a, zero);
  338|  30.3k|  const __m128i bl = _mm_unpacklo_epi8(b, zero);
  339|  30.3k|  const __m128i bh = _mm_unpackhi_epi8(b, zero);
  340|       |
  341|  30.3k|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  342|  30.3k|  __m128i rep = _mm_set1_epi16((short)0x8000);
  343|  30.3k|  const __m128i one = _mm_set1_epi16(1);
  344|  30.3k|  const __m128i l = _mm_loadl_epi64((const __m128i *)left);
  345|  30.3k|  __m128i l16;
  346|       |
  347|   273k|  for (int i = 0; i < 8; ++i) {
  348|   243k|    l16 = _mm_shuffle_epi8(l, rep);
  349|   243k|    const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  350|   243k|    const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  351|       |
  352|   243k|    _mm_store_si128((__m128i *)dst, r32l);
  353|   243k|    _mm_store_si128((__m128i *)(dst + 16), r32h);
  354|   243k|    dst += stride;
  355|   243k|    rep = _mm_add_epi16(rep, one);
  356|   243k|  }
  357|  30.3k|}
  358|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  359|       |
  360|       |void aom_paeth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  361|       |                                     const uint8_t *above,
  362|      0|                                     const uint8_t *left) {
  363|      0|  const __m128i a = _mm_load_si128((const __m128i *)above);
  364|      0|  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  365|      0|  const __m128i zero = _mm_setzero_si128();
  366|      0|  const __m128i al = _mm_unpacklo_epi8(a, zero);
  367|      0|  const __m128i ah = _mm_unpackhi_epi8(a, zero);
  368|      0|  const __m128i bl = _mm_unpacklo_epi8(b, zero);
  369|      0|  const __m128i bh = _mm_unpackhi_epi8(b, zero);
  370|       |
  371|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  372|      0|  __m128i rep = _mm_set1_epi16((short)0x8000);
  373|      0|  const __m128i one = _mm_set1_epi16(1);
  374|      0|  __m128i l = _mm_load_si128((const __m128i *)left);
  375|      0|  __m128i l16;
  376|       |
  377|      0|  int i;
  378|      0|  for (i = 0; i < 16; ++i) {
  379|      0|    l16 = _mm_shuffle_epi8(l, rep);
  380|      0|    const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  381|      0|    const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  382|       |
  383|      0|    _mm_store_si128((__m128i *)dst, r32l);
  384|      0|    _mm_store_si128((__m128i *)(dst + 16), r32h);
  385|      0|    dst += stride;
  386|      0|    rep = _mm_add_epi16(rep, one);
  387|      0|  }
  388|      0|}
  389|       |
  390|       |void aom_paeth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  391|       |                                     const uint8_t *above,
  392|      0|                                     const uint8_t *left) {
  393|      0|  const __m128i a = _mm_load_si128((const __m128i *)above);
  394|      0|  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  395|      0|  const __m128i zero = _mm_setzero_si128();
  396|      0|  const __m128i al = _mm_unpacklo_epi8(a, zero);
  397|      0|  const __m128i ah = _mm_unpackhi_epi8(a, zero);
  398|      0|  const __m128i bl = _mm_unpacklo_epi8(b, zero);
  399|      0|  const __m128i bh = _mm_unpackhi_epi8(b, zero);
  400|       |
  401|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  402|      0|  __m128i rep = _mm_set1_epi16((short)0x8000);
  403|      0|  const __m128i one = _mm_set1_epi16(1);
  404|      0|  __m128i l = _mm_load_si128((const __m128i *)left);
  405|      0|  __m128i l16;
  406|       |
  407|      0|  int i;
  408|      0|  for (i = 0; i < 16; ++i) {
  409|      0|    l16 = _mm_shuffle_epi8(l, rep);
  410|      0|    const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  411|      0|    const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  412|       |
  413|      0|    _mm_store_si128((__m128i *)dst, r32l);
  414|      0|    _mm_store_si128((__m128i *)(dst + 16), r32h);
  415|      0|    dst += stride;
  416|      0|    rep = _mm_add_epi16(rep, one);
  417|      0|  }
  418|       |
  419|      0|  rep = _mm_set1_epi16((short)0x8000);
  420|      0|  l = _mm_load_si128((const __m128i *)(left + 16));
  421|      0|  for (i = 0; i < 16; ++i) {
  422|      0|    l16 = _mm_shuffle_epi8(l, rep);
  423|      0|    const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  424|      0|    const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  425|       |
  426|      0|    _mm_store_si128((__m128i *)dst, r32l);
  427|      0|    _mm_store_si128((__m128i *)(dst + 16), r32h);
  428|      0|    dst += stride;
  429|      0|    rep = _mm_add_epi16(rep, one);
  430|      0|  }
  431|      0|}
  432|       |
  433|       |void aom_paeth_predictor_32x64_ssse3(uint8_t *dst, ptrdiff_t stride,
  434|       |                                     const uint8_t *above,
  435|      0|                                     const uint8_t *left) {
  436|      0|  const __m128i a = _mm_load_si128((const __m128i *)above);
  437|      0|  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  438|      0|  const __m128i zero = _mm_setzero_si128();
  439|      0|  const __m128i al = _mm_unpacklo_epi8(a, zero);
  440|      0|  const __m128i ah = _mm_unpackhi_epi8(a, zero);
  441|      0|  const __m128i bl = _mm_unpacklo_epi8(b, zero);
  442|      0|  const __m128i bh = _mm_unpackhi_epi8(b, zero);
  443|       |
  444|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  445|      0|  const __m128i one = _mm_set1_epi16(1);
  446|      0|  __m128i l16;
  447|       |
  448|      0|  int i, j;
  449|      0|  for (j = 0; j < 4; ++j) {
  450|      0|    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  451|      0|    __m128i rep = _mm_set1_epi16((short)0x8000);
  452|      0|    for (i = 0; i < 16; ++i) {
  453|      0|      l16 = _mm_shuffle_epi8(l, rep);
  454|      0|      const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  455|      0|      const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  456|       |
  457|      0|      _mm_store_si128((__m128i *)dst, r32l);
  458|      0|      _mm_store_si128((__m128i *)(dst + 16), r32h);
  459|      0|      dst += stride;
  460|      0|      rep = _mm_add_epi16(rep, one);
  461|      0|    }
  462|      0|  }
  463|      0|}
  464|       |
  465|       |void aom_paeth_predictor_64x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  466|       |                                     const uint8_t *above,
  467|      0|                                     const uint8_t *left) {
  468|      0|  const __m128i a = _mm_load_si128((const __m128i *)above);
  469|      0|  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  470|      0|  const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
  471|      0|  const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
  472|      0|  const __m128i zero = _mm_setzero_si128();
  473|      0|  const __m128i al = _mm_unpacklo_epi8(a, zero);
  474|      0|  const __m128i ah = _mm_unpackhi_epi8(a, zero);
  475|      0|  const __m128i bl = _mm_unpacklo_epi8(b, zero);
  476|      0|  const __m128i bh = _mm_unpackhi_epi8(b, zero);
  477|      0|  const __m128i cl = _mm_unpacklo_epi8(c, zero);
  478|      0|  const __m128i ch = _mm_unpackhi_epi8(c, zero);
  479|      0|  const __m128i dl = _mm_unpacklo_epi8(d, zero);
  480|      0|  const __m128i dh = _mm_unpackhi_epi8(d, zero);
  481|       |
  482|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  483|      0|  const __m128i one = _mm_set1_epi16(1);
  484|      0|  __m128i l16;
  485|       |
  486|      0|  int i, j;
  487|      0|  for (j = 0; j < 2; ++j) {
  488|      0|    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  489|      0|    __m128i rep = _mm_set1_epi16((short)0x8000);
  490|      0|    for (i = 0; i < 16; ++i) {
  491|      0|      l16 = _mm_shuffle_epi8(l, rep);
  492|      0|      const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  493|      0|      const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  494|      0|      const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
  495|      0|      const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
  496|       |
  497|      0|      _mm_store_si128((__m128i *)dst, r0);
  498|      0|      _mm_store_si128((__m128i *)(dst + 16), r1);
  499|      0|      _mm_store_si128((__m128i *)(dst + 32), r2);
  500|      0|      _mm_store_si128((__m128i *)(dst + 48), r3);
  501|      0|      dst += stride;
  502|      0|      rep = _mm_add_epi16(rep, one);
  503|      0|    }
  504|      0|  }
  505|      0|}
  506|       |
  507|       |void aom_paeth_predictor_64x64_ssse3(uint8_t *dst, ptrdiff_t stride,
  508|       |                                     const uint8_t *above,
  509|      0|                                     const uint8_t *left) {
  510|      0|  const __m128i a = _mm_load_si128((const __m128i *)above);
  511|      0|  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  512|      0|  const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
  513|      0|  const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
  514|      0|  const __m128i zero = _mm_setzero_si128();
  515|      0|  const __m128i al = _mm_unpacklo_epi8(a, zero);
  516|      0|  const __m128i ah = _mm_unpackhi_epi8(a, zero);
  517|      0|  const __m128i bl = _mm_unpacklo_epi8(b, zero);
  518|      0|  const __m128i bh = _mm_unpackhi_epi8(b, zero);
  519|      0|  const __m128i cl = _mm_unpacklo_epi8(c, zero);
  520|      0|  const __m128i ch = _mm_unpackhi_epi8(c, zero);
  521|      0|  const __m128i dl = _mm_unpacklo_epi8(d, zero);
  522|      0|  const __m128i dh = _mm_unpackhi_epi8(d, zero);
  523|       |
  524|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  525|      0|  const __m128i one = _mm_set1_epi16(1);
  526|      0|  __m128i l16;
  527|       |
  528|      0|  int i, j;
  529|      0|  for (j = 0; j < 4; ++j) {
  530|      0|    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
  531|      0|    __m128i rep = _mm_set1_epi16((short)0x8000);
  532|      0|    for (i = 0; i < 16; ++i) {
  533|      0|      l16 = _mm_shuffle_epi8(l, rep);
  534|      0|      const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  535|      0|      const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  536|      0|      const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
  537|      0|      const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
  538|       |
  539|      0|      _mm_store_si128((__m128i *)dst, r0);
  540|      0|      _mm_store_si128((__m128i *)(dst + 16), r1);
  541|      0|      _mm_store_si128((__m128i *)(dst + 32), r2);
  542|      0|      _mm_store_si128((__m128i *)(dst + 48), r3);
  543|      0|      dst += stride;
  544|      0|      rep = _mm_add_epi16(rep, one);
  545|      0|    }
  546|      0|  }
  547|      0|}
  548|       |
  549|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  550|       |void aom_paeth_predictor_64x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  551|       |                                     const uint8_t *above,
  552|      0|                                     const uint8_t *left) {
  553|      0|  const __m128i a = _mm_load_si128((const __m128i *)above);
  554|      0|  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
  555|      0|  const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
  556|      0|  const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
  557|      0|  const __m128i zero = _mm_setzero_si128();
  558|      0|  const __m128i al = _mm_unpacklo_epi8(a, zero);
  559|      0|  const __m128i ah = _mm_unpackhi_epi8(a, zero);
  560|      0|  const __m128i bl = _mm_unpacklo_epi8(b, zero);
  561|      0|  const __m128i bh = _mm_unpackhi_epi8(b, zero);
  562|      0|  const __m128i cl = _mm_unpacklo_epi8(c, zero);
  563|      0|  const __m128i ch = _mm_unpackhi_epi8(c, zero);
  564|      0|  const __m128i dl = _mm_unpacklo_epi8(d, zero);
  565|      0|  const __m128i dh = _mm_unpackhi_epi8(d, zero);
  566|       |
  567|      0|  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
  568|      0|  const __m128i one = _mm_set1_epi16(1);
  569|      0|  __m128i l16;
  570|       |
  571|      0|  int i;
  572|      0|  const __m128i l = _mm_load_si128((const __m128i *)left);
  573|      0|  __m128i rep = _mm_set1_epi16((short)0x8000);
  574|      0|  for (i = 0; i < 16; ++i) {
  575|      0|    l16 = _mm_shuffle_epi8(l, rep);
  576|      0|    const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
  577|      0|    const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
  578|      0|    const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
  579|      0|    const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
  580|       |
  581|      0|    _mm_store_si128((__m128i *)dst, r0);
  582|      0|    _mm_store_si128((__m128i *)(dst + 16), r1);
  583|      0|    _mm_store_si128((__m128i *)(dst + 32), r2);
  584|      0|    _mm_store_si128((__m128i *)(dst + 48), r3);
  585|      0|    dst += stride;
  586|      0|    rep = _mm_add_epi16(rep, one);
  587|      0|  }
  588|      0|}
  589|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  590|       |
  591|       |// -----------------------------------------------------------------------------
  592|       |// SMOOTH_PRED
  593|       |
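Note: each SMOOTH output pixel blends the top row toward the bottom-left reference (vertical term) and the left column toward the top-right reference (horizontal term). A minimal scalar sketch of that blend, assuming SMOOTH_WEIGHT_LOG2_SCALE == 8 and the smooth_weights[] table from aom_dsp/intrapred_common.h; smooth_scalar() is a hypothetical helper, not part of this file.

#include <stdint.h>  // assumed include for this sketch

static uint8_t smooth_scalar(const uint8_t *above, const uint8_t *left,
                             const uint8_t *w_h, const uint8_t *w_w,
                             int x, int y, int width, int height) {
  const int scale = 256;  // 1 << SMOOTH_WEIGHT_LOG2_SCALE, assumed
  // Vertical blend: top row vs. the bottom-left reference pixel.
  const int v = w_h[y] * above[x] + (scale - w_h[y]) * left[height - 1];
  // Horizontal blend: left column vs. the top-right reference pixel.
  const int h = w_w[x] * left[y] + (scale - w_w[x]) * above[width - 1];
  // Sum of both blends, rounded and divided by 2 * scale, matching the
  // vector code: add round (256), then shift by 1 + 8.
  return (uint8_t)((v + h + scale) >> 9);
}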
  594|       |// pixels[0]: above and below_pred interleave vector
  595|       |// pixels[1]: left vector
  596|       |// pixels[2]: right_pred vector
  597|       |static inline void load_pixel_w4(const uint8_t *above, const uint8_t *left,
  598|   278k|                                 int height, __m128i *pixels) {
  599|   278k|  __m128i d = _mm_cvtsi32_si128(((const int *)above)[0]);
  600|   278k|  if (height == 4)
  601|   180k|    pixels[1] = _mm_cvtsi32_si128(((const int *)left)[0]);
  602|  98.0k|  else if (height == 8)
  603|  62.9k|    pixels[1] = _mm_loadl_epi64(((const __m128i *)left));
  604|  35.1k|  else
  605|  35.1k|    pixels[1] = _mm_loadu_si128(((const __m128i *)left));
  606|       |
  607|   278k|  pixels[2] = _mm_set1_epi16((int16_t)above[3]);
  608|       |
  609|   278k|  const __m128i bp = _mm_set1_epi16((int16_t)left[height - 1]);
  610|   278k|  const __m128i zero = _mm_setzero_si128();
  611|   278k|  d = _mm_unpacklo_epi8(d, zero);
  612|   278k|  pixels[0] = _mm_unpacklo_epi16(d, bp);
  613|   278k|}
  614|       |
  615|       |// weight_h[0]: weight_h vector
  616|       |// weight_h[1]: scale - weight_h vector
  617|       |// weight_h[2]: same as [0], second half for height = 16 only
  618|       |// weight_h[3]: same as [1], second half for height = 16 only
  619|       |// weight_w[0]: weights_w and scale - weights_w interleave vector
  620|       |static inline void load_weight_w4(int height, __m128i *weight_h,
  621|   278k|                                  __m128i *weight_w) {
  622|   278k|  const __m128i zero = _mm_setzero_si128();
  623|   278k|  const __m128i d = _mm_set1_epi16((int16_t)(1 << SMOOTH_WEIGHT_LOG2_SCALE));
  624|   278k|  const __m128i t = _mm_cvtsi32_si128(((const int *)smooth_weights)[0]);
  625|   278k|  weight_h[0] = _mm_unpacklo_epi8(t, zero);
  626|   278k|  weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  627|   278k|  weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
  628|       |
  629|   278k|  if (height == 8) {
  630|  62.9k|    const __m128i weight = _mm_loadl_epi64((const __m128i *)&smooth_weights[4]);
  631|  62.9k|    weight_h[0] = _mm_unpacklo_epi8(weight, zero);
  632|  62.9k|    weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  633|   216k|  } else if (height == 16) {
  634|  35.1k|    const __m128i weight =
  635|  35.1k|        _mm_loadu_si128((const __m128i *)&smooth_weights[12]);
  636|  35.1k|    weight_h[0] = _mm_unpacklo_epi8(weight, zero);
  637|  35.1k|    weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  638|  35.1k|    weight_h[2] = _mm_unpackhi_epi8(weight, zero);
  639|  35.1k|    weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
  640|  35.1k|  }
  641|   278k|}
  642|       |
  643|       |static inline void smooth_pred_4xh(const __m128i *pixel, const __m128i *wh,
  644|       |                                   const __m128i *ww, int h, uint8_t *dst,
  645|   314k|                                   ptrdiff_t stride, int second_half) {
  646|   314k|  const __m128i round = _mm_set1_epi32((1 << SMOOTH_WEIGHT_LOG2_SCALE));
  647|   314k|  const __m128i one = _mm_set1_epi16(1);
  648|   314k|  const __m128i inc = _mm_set1_epi16(0x202);
  649|   314k|  const __m128i gat = _mm_set1_epi32(0xc080400);
  650|   314k|  __m128i rep = second_half ? _mm_set1_epi16((short)0x8008)
  651|   314k|                            : _mm_set1_epi16((short)0x8000);
  652|   314k|  __m128i d = _mm_set1_epi16(0x100);
  653|       |
  654|  2.10M|  for (int i = 0; i < h; ++i) {
  655|  1.78M|    const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
  656|  1.78M|    const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
  657|  1.78M|    const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
  658|  1.78M|    __m128i s = _mm_madd_epi16(pixel[0], wh_sc);
  659|       |
  660|  1.78M|    __m128i b = _mm_shuffle_epi8(pixel[1], rep);
  661|  1.78M|    b = _mm_unpacklo_epi16(b, pixel[2]);
  662|  1.78M|    __m128i sum = _mm_madd_epi16(b, ww[0]);
  663|       |
  664|  1.78M|    sum = _mm_add_epi32(s, sum);
  665|  1.78M|    sum = _mm_add_epi32(sum, round);
  666|  1.78M|    sum = _mm_srai_epi32(sum, 1 + SMOOTH_WEIGHT_LOG2_SCALE);
  667|       |
  668|  1.78M|    sum = _mm_shuffle_epi8(sum, gat);
  669|  1.78M|    *(int *)dst = _mm_cvtsi128_si32(sum);
  670|  1.78M|    dst += stride;
  671|       |
  672|  1.78M|    rep = _mm_add_epi16(rep, one);
  673|  1.78M|    d = _mm_add_epi16(d, inc);
  674|  1.78M|  }
  675|   314k|}
  676|       |
  677|       |void aom_smooth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
  678|   180k|                                    const uint8_t *above, const uint8_t *left) {
  679|   180k|  __m128i pixels[3];
  680|   180k|  load_pixel_w4(above, left, 4, pixels);
  681|       |
  682|   180k|  __m128i wh[4], ww[2];
  683|   180k|  load_weight_w4(4, wh, ww);
  684|       |
  685|   180k|  smooth_pred_4xh(pixels, wh, ww, 4, dst, stride, 0);
  686|   180k|}
  687|       |
  688|       |void aom_smooth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  689|  62.9k|                                    const uint8_t *above, const uint8_t *left) {
  690|  62.9k|  __m128i pixels[3];
  691|  62.9k|  load_pixel_w4(above, left, 8, pixels);
  692|       |
  693|  62.9k|  __m128i wh[4], ww[2];
  694|  62.9k|  load_weight_w4(8, wh, ww);
  695|       |
  696|  62.9k|  smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0);
  697|  62.9k|}
  698|       |
  699|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  700|       |void aom_smooth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  701|       |                                     const uint8_t *above,
  702|  35.1k|                                     const uint8_t *left) {
  703|  35.1k|  __m128i pixels[3];
  704|  35.1k|  load_pixel_w4(above, left, 16, pixels);
  705|       |
  706|  35.1k|  __m128i wh[4], ww[2];
  707|  35.1k|  load_weight_w4(16, wh, ww);
  708|       |
  709|  35.1k|  smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0);
  710|  35.1k|  dst += stride << 3;
  711|  35.1k|  smooth_pred_4xh(pixels, &wh[2], ww, 8, dst, stride, 1);
  712|  35.1k|}
  713|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  714|       |
  715|       |// pixels[0]: above and below_pred interleave vector, first half
  716|       |// pixels[1]: above and below_pred interleave vector, second half
  717|       |// pixels[2]: left vector
  718|       |// pixels[3]: right_pred vector
  719|       |// pixels[4]: above and below_pred interleave vector, first half
  720|       |// pixels[5]: above and below_pred interleave vector, second half
  721|       |// pixels[6]: left vector + 16
  722|       |// pixels[7]: right_pred vector
  723|       |static inline void load_pixel_w8(const uint8_t *above, const uint8_t *left,
  724|   350k|                                 int height, __m128i *pixels) {
  725|   350k|  const __m128i zero = _mm_setzero_si128();
  726|   350k|  const __m128i bp = _mm_set1_epi16((int16_t)left[height - 1]);
  727|   350k|  __m128i d = _mm_loadl_epi64((const __m128i *)above);
  728|   350k|  d = _mm_unpacklo_epi8(d, zero);
  729|   350k|  pixels[0] = _mm_unpacklo_epi16(d, bp);
  730|   350k|  pixels[1] = _mm_unpackhi_epi16(d, bp);
  731|       |
  732|   350k|  pixels[3] = _mm_set1_epi16((int16_t)above[7]);
  733|       |
  734|   350k|  if (height == 4) {
  735|  97.3k|    pixels[2] = _mm_cvtsi32_si128(((const int *)left)[0]);
  736|   253k|  } else if (height == 8) {
  737|   188k|    pixels[2] = _mm_loadl_epi64((const __m128i *)left);
  738|   188k|  } else if (height == 16) {
  739|  47.4k|    pixels[2] = _mm_load_si128((const __m128i *)left);
  740|  47.4k|  } else {
  741|  16.6k|    pixels[2] = _mm_load_si128((const __m128i *)left);
  742|  16.6k|    pixels[4] = pixels[0];
  743|  16.6k|    pixels[5] = pixels[1];
  744|  16.6k|    pixels[6] = _mm_load_si128((const __m128i *)(left + 16));
  745|  16.6k|    pixels[7] = pixels[3];
  746|  16.6k|  }
  747|   350k|}
  748|       |
  749|       |// weight_h[0]: weight_h vector
  750|       |// weight_h[1]: scale - weight_h vector
  751|       |// weight_h[2]: same as [0], offset 8
  752|       |// weight_h[3]: same as [1], offset 8
  753|       |// weight_h[4]: same as [0], offset 16
  754|       |// weight_h[5]: same as [1], offset 16
  755|       |// weight_h[6]: same as [0], offset 24
  756|       |// weight_h[7]: same as [1], offset 24
  757|       |// weight_w[0]: weights_w and scale - weights_w interleave vector, first half
  758|       |// weight_w[1]: weights_w and scale - weights_w interleave vector, second half
  759|       |static inline void load_weight_w8(int height, __m128i *weight_h,
  760|   350k|                                  __m128i *weight_w) {
  761|   350k|  const __m128i zero = _mm_setzero_si128();
  762|   350k|  const int we_offset = height < 8 ? 0 : 4;
  763|   350k|  __m128i we = _mm_loadu_si128((const __m128i *)&smooth_weights[we_offset]);
  764|   350k|  weight_h[0] = _mm_unpacklo_epi8(we, zero);
  765|   350k|  const __m128i d = _mm_set1_epi16((int16_t)(1 << SMOOTH_WEIGHT_LOG2_SCALE));
  766|   350k|  weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  767|       |
  768|   350k|  if (height == 4) {
  769|  97.3k|    we = _mm_srli_si128(we, 4);
  770|  97.3k|    __m128i tmp1 = _mm_unpacklo_epi8(we, zero);
  771|  97.3k|    __m128i tmp2 = _mm_sub_epi16(d, tmp1);
  772|  97.3k|    weight_w[0] = _mm_unpacklo_epi16(tmp1, tmp2);
  773|  97.3k|    weight_w[1] = _mm_unpackhi_epi16(tmp1, tmp2);
  774|   253k|  } else {
  775|   253k|    weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
  776|   253k|    weight_w[1] = _mm_unpackhi_epi16(weight_h[0], weight_h[1]);
  777|   253k|  }
  778|       |
  779|   350k|  if (height == 16) {
  780|  47.4k|    we = _mm_loadu_si128((const __m128i *)&smooth_weights[12]);
  781|  47.4k|    weight_h[0] = _mm_unpacklo_epi8(we, zero);
  782|  47.4k|    weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  783|  47.4k|    weight_h[2] = _mm_unpackhi_epi8(we, zero);
  784|  47.4k|    weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
  785|   302k|  } else if (height == 32) {
  786|  16.6k|    const __m128i weight_lo =
  787|  16.6k|        _mm_loadu_si128((const __m128i *)&smooth_weights[28]);
  788|  16.6k|    weight_h[0] = _mm_unpacklo_epi8(weight_lo, zero);
  789|  16.6k|    weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
  790|  16.6k|    weight_h[2] = _mm_unpackhi_epi8(weight_lo, zero);
  791|  16.6k|    weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
  792|  16.6k|    const __m128i weight_hi =
  793|  16.6k|        _mm_loadu_si128((const __m128i *)&smooth_weights[28 + 16]);
  794|  16.6k|    weight_h[4] = _mm_unpacklo_epi8(weight_hi, zero);
  795|  16.6k|    weight_h[5] = _mm_sub_epi16(d, weight_h[4]);
  796|  16.6k|    weight_h[6] = _mm_unpackhi_epi8(weight_hi, zero);
  797|  16.6k|    weight_h[7] = _mm_sub_epi16(d, weight_h[6]);
  798|  16.6k|  }
  799|   350k|}
  800|       |
  801|       |static inline void smooth_pred_8xh(const __m128i *pixels, const __m128i *wh,
  802|       |                                   const __m128i *ww, int h, uint8_t *dst,
  803|   447k|                                   ptrdiff_t stride, int second_half) {
  804|   447k|  const __m128i round = _mm_set1_epi32((1 << SMOOTH_WEIGHT_LOG2_SCALE));
  805|   447k|  const __m128i one = _mm_set1_epi16(1);
  806|   447k|  const __m128i inc = _mm_set1_epi16(0x202);
  807|   447k|  const __m128i gat = _mm_set_epi32(0, 0, 0xe0c0a08, 0x6040200);
  808|       |
  809|   447k|  __m128i rep = second_half ? _mm_set1_epi16((short)0x8008)
  810|   447k|                            : _mm_set1_epi16((short)0x8000);
  811|   447k|  __m128i d = _mm_set1_epi16(0x100);
  812|       |
  813|   447k|  int i;
  814|  3.64M|  for (i = 0; i < h; ++i) {
  815|  3.19M|    const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
  816|  3.19M|    const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
  817|  3.19M|    const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
  818|  3.19M|    __m128i s0 = _mm_madd_epi16(pixels[0], wh_sc);
  819|  3.19M|    __m128i s1 = _mm_madd_epi16(pixels[1], wh_sc);
  820|       |
  821|  3.19M|    __m128i b = _mm_shuffle_epi8(pixels[2], rep);
  822|  3.19M|    b = _mm_unpacklo_epi16(b, pixels[3]);
  823|  3.19M|    __m128i sum0 = _mm_madd_epi16(b, ww[0]);
  824|  3.19M|    __m128i sum1 = _mm_madd_epi16(b, ww[1]);
  825|       |
  826|  3.19M|    s0 = _mm_add_epi32(s0, sum0);
  827|  3.19M|    s0 = _mm_add_epi32(s0, round);
  828|  3.19M|    s0 = _mm_srai_epi32(s0, 1 + SMOOTH_WEIGHT_LOG2_SCALE);
  829|       |
  830|  3.19M|    s1 = _mm_add_epi32(s1, sum1);
  831|  3.19M|    s1 = _mm_add_epi32(s1, round);
  832|  3.19M|    s1 = _mm_srai_epi32(s1, 1 + SMOOTH_WEIGHT_LOG2_SCALE);
  833|       |
  834|  3.19M|    sum0 = _mm_packus_epi16(s0, s1);
  835|  3.19M|    sum0 = _mm_shuffle_epi8(sum0, gat);
  836|  3.19M|    _mm_storel_epi64((__m128i *)dst, sum0);
  837|  3.19M|    dst += stride;
  838|       |
  839|  3.19M|    rep = _mm_add_epi16(rep, one);
  840|  3.19M|    d = _mm_add_epi16(d, inc);
  841|  3.19M|  }
  842|   447k|}
  843|       |
  844|       |void aom_smooth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride,
  845|  97.3k|                                    const uint8_t *above, const uint8_t *left) {
  846|  97.3k|  __m128i pixels[4];
  847|  97.3k|  load_pixel_w8(above, left, 4, pixels);
  848|       |
  849|  97.3k|  __m128i wh[4], ww[2];
  850|  97.3k|  load_weight_w8(4, wh, ww);
  851|       |
  852|  97.3k|  smooth_pred_8xh(pixels, wh, ww, 4, dst, stride, 0);
  853|  97.3k|}
  854|       |
  855|       |void aom_smooth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride,
  856|   188k|                                    const uint8_t *above, const uint8_t *left) {
  857|   188k|  __m128i pixels[4];
  858|   188k|  load_pixel_w8(above, left, 8, pixels);
  859|       |
  860|   188k|  __m128i wh[4], ww[2];
  861|   188k|  load_weight_w8(8, wh, ww);
  862|       |
  863|   188k|  smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0);
  864|   188k|}
  865|       |
  866|       |void aom_smooth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride,
  867|       |                                     const uint8_t *above,
  868|  47.4k|                                     const uint8_t *left) {
  869|  47.4k|  __m128i pixels[4];
  870|  47.4k|  load_pixel_w8(above, left, 16, pixels);
  871|       |
  872|  47.4k|  __m128i wh[4], ww[2];
  873|  47.4k|  load_weight_w8(16, wh, ww);
  874|       |
  875|  47.4k|  smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0);
  876|  47.4k|  dst += stride << 3;
  877|  47.4k|  smooth_pred_8xh(pixels, &wh[2], ww, 8, dst, stride, 1);
  878|  47.4k|}
  879|       |
  880|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  881|       |void aom_smooth_predictor_8x32_ssse3(uint8_t *dst, ptrdiff_t stride,
  882|       |                                     const uint8_t *above,
  883|  16.6k|                                     const uint8_t *left) {
  884|  16.6k|  __m128i pixels[8];
  885|  16.6k|  load_pixel_w8(above, left, 32, pixels);
  886|       |
  887|  16.6k|  __m128i wh[8], ww[2];
  888|  16.6k|  load_weight_w8(32, wh, ww);
  889|       |
  890|  16.6k|  smooth_pred_8xh(&pixels[0], wh, ww, 8, dst, stride, 0);
  891|  16.6k|  dst += stride << 3;
  892|  16.6k|  smooth_pred_8xh(&pixels[0], &wh[2], ww, 8, dst, stride, 1);
  893|  16.6k|  dst += stride << 3;
  894|  16.6k|  smooth_pred_8xh(&pixels[4], &wh[4], ww, 8, dst, stride, 0);
  895|  16.6k|  dst += stride << 3;
  896|  16.6k|  smooth_pred_8xh(&pixels[4], &wh[6], ww, 8, dst, stride, 1);
  897|  16.6k|}
  898|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
  899|       |
  900|       |// TODO(slavarnway): Visual Studio only supports restrict when /std:c11
  901|       |// (available in 2019+) or greater is specified; __restrict can be used in that
  902|       |// case. This should be moved to rtcd and used consistently between the
  903|       |// function declarations and definitions to avoid warnings in Visual Studio
  904|       |// when defining LIBAOM_RESTRICT to restrict or __restrict.
  905|       |#if defined(_MSC_VER)
  906|       |#define LIBAOM_RESTRICT
  907|       |#else
  908|       |#define LIBAOM_RESTRICT restrict
  909|       |#endif
  910|       |
  911|   411k|static AOM_FORCE_INLINE __m128i Load4(const void *src) {
  912|       |  // With new compilers such as clang 8.0.0 we can use the new _mm_loadu_si32
  913|       |  // intrinsic. Both _mm_loadu_si32(src) and the code here are compiled into a
  914|       |  // movss instruction.
  915|       |  //
  916|       |  // Until compiler support of _mm_loadu_si32 is widespread, use of
  917|       |  // _mm_loadu_si32 is banned.
  918|   411k|  int val;
  919|   411k|  memcpy(&val, src, sizeof(val));
  920|   411k|  return _mm_cvtsi32_si128(val);
  921|   411k|}
  922|       |
  923|  85.4M|static AOM_FORCE_INLINE __m128i LoadLo8(const void *a) {
  924|  85.4M|  return _mm_loadl_epi64((const __m128i *)(a));
  925|  85.4M|}
  926|       |
  927|   803k|static AOM_FORCE_INLINE __m128i LoadUnaligned16(const void *a) {
  928|   803k|  return _mm_loadu_si128((const __m128i *)(a));
  929|   803k|}
  930|       |
  931|   953k|static AOM_FORCE_INLINE void Store4(void *dst, const __m128i x) {
  932|   953k|  const int val = _mm_cvtsi128_si32(x);
  933|   953k|  memcpy(dst, &val, sizeof(val));
  934|   953k|}
  935|       |
  936|  43.9M|static AOM_FORCE_INLINE void StoreLo8(void *a, const __m128i v) {
  937|  43.9M|  _mm_storel_epi64((__m128i *)(a), v);
  938|  43.9M|}
  939|       |
  940|  13.5M|static AOM_FORCE_INLINE void StoreUnaligned16(void *a, const __m128i v) {
  941|  13.5M|  _mm_storeu_si128((__m128i *)(a), v);
  942|  13.5M|}
  943|       |
  944|  86.7M|static AOM_FORCE_INLINE __m128i cvtepu8_epi16(__m128i x) {
  945|  86.7M|  return _mm_unpacklo_epi8((x), _mm_setzero_si128());
  946|  86.7M|}
  947|       |
  948|   235k|static AOM_FORCE_INLINE __m128i cvtepu8_epi32(__m128i x) {
  949|   235k|  const __m128i tmp = _mm_unpacklo_epi8((x), _mm_setzero_si128());
  950|   235k|  return _mm_unpacklo_epi16(tmp, _mm_setzero_si128());
  951|   235k|}
  952|       |
  953|  42.3M|static AOM_FORCE_INLINE __m128i cvtepu16_epi32(__m128i x) {
  954|  42.3M|  return _mm_unpacklo_epi16((x), _mm_setzero_si128());
  955|  42.3M|}
  956|       |
  957|       |static void smooth_predictor_wxh(uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
  958|       |                                 const uint8_t *LIBAOM_RESTRICT top_row,
  959|       |                                 const uint8_t *LIBAOM_RESTRICT left_column,
  960|   500k|                                 int width, int height) {
  961|   500k|  const uint8_t *const sm_weights_h = smooth_weights + height - 4;
  962|   500k|  const uint8_t *const sm_weights_w = smooth_weights + width - 4;
  963|   500k|  const __m128i zero = _mm_setzero_si128();
  964|   500k|  const __m128i scale_value = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  965|   500k|  const __m128i bottom_left = _mm_cvtsi32_si128(left_column[height - 1]);
  966|   500k|  const __m128i top_right = _mm_set1_epi16(top_row[width - 1]);
  967|   500k|  const __m128i round = _mm_set1_epi32(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  968|  10.6M|  for (int y = 0; y < height; ++y) {
  969|  10.1M|    const __m128i weights_y = _mm_cvtsi32_si128(sm_weights_h[y]);
  970|  10.1M|    const __m128i left_y = _mm_cvtsi32_si128(left_column[y]);
  971|  10.1M|    const __m128i scale_m_weights_y = _mm_sub_epi16(scale_value, weights_y);
  972|  10.1M|    __m128i scaled_bottom_left =
  973|  10.1M|        _mm_mullo_epi16(scale_m_weights_y, bottom_left);
  974|  10.1M|    const __m128i weight_left_y =
  975|  10.1M|        _mm_shuffle_epi32(_mm_unpacklo_epi16(weights_y, left_y), 0);
  976|  10.1M|    scaled_bottom_left = _mm_add_epi32(scaled_bottom_left, round);
  977|  10.1M|    scaled_bottom_left = _mm_shuffle_epi32(scaled_bottom_left, 0);
  978|  52.5M|    for (int x = 0; x < width; x += 8) {
  979|  42.3M|      const __m128i top_x = LoadLo8(top_row + x);
  980|  42.3M|      const __m128i weights_x = LoadLo8(sm_weights_w + x);
  981|  42.3M|      const __m128i top_weights_x = _mm_unpacklo_epi8(top_x, weights_x);
  982|  42.3M|      const __m128i top_weights_x_lo = cvtepu8_epi16(top_weights_x);
  983|  42.3M|      const __m128i top_weights_x_hi = _mm_unpackhi_epi8(top_weights_x, zero);
  984|       |
  985|       |      // Here opposite weights and pixels are multiplied, where the order of
  986|       |      // interleaving is indicated in the names.
  987|  42.3M|      __m128i pred_lo = _mm_madd_epi16(top_weights_x_lo, weight_left_y);
  988|  42.3M|      __m128i pred_hi = _mm_madd_epi16(top_weights_x_hi, weight_left_y);
  989|       |
  990|       |      // |scaled_bottom_left| is always scaled by the same weight each row, so
  991|       |      // we only derive |scaled_top_right| values here.
  992|  42.3M|      const __m128i inverted_weights_x =
  993|  42.3M|          _mm_sub_epi16(scale_value, cvtepu8_epi16(weights_x));
  994|  42.3M|      const __m128i scaled_top_right =
  995|  42.3M|          _mm_mullo_epi16(inverted_weights_x, top_right);
  996|  42.3M|      const __m128i scaled_top_right_lo = cvtepu16_epi32(scaled_top_right);
  997|  42.3M|      const __m128i scaled_top_right_hi =
  998|  42.3M|          _mm_unpackhi_epi16(scaled_top_right, zero);
  999|  42.3M|      pred_lo = _mm_add_epi32(pred_lo, scaled_bottom_left);
 1000|  42.3M|      pred_hi = _mm_add_epi32(pred_hi, scaled_bottom_left);
 1001|  42.3M|      pred_lo = _mm_add_epi32(pred_lo, scaled_top_right_lo);
 1002|  42.3M|      pred_hi = _mm_add_epi32(pred_hi, scaled_top_right_hi);
 1003|       |
 1004|       |      // The round value for RightShiftWithRounding was added with
 1005|       |      // |scaled_bottom_left|.
 1006|  42.3M|      pred_lo = _mm_srli_epi32(pred_lo, (1 + SMOOTH_WEIGHT_LOG2_SCALE));
 1007|  42.3M|      pred_hi = _mm_srli_epi32(pred_hi, (1 + SMOOTH_WEIGHT_LOG2_SCALE));
 1008|  42.3M|      const __m128i pred = _mm_packus_epi16(pred_lo, pred_hi);
 1009|  42.3M|      StoreLo8(dst + x, _mm_packus_epi16(pred, pred));
 1010|  42.3M|    }
 1011|  10.1M|    dst += stride;
 1012|  10.1M|  }
 1013|   500k|}
 1014|       |
 1015|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1016|       |void aom_smooth_predictor_16x4_ssse3(uint8_t *dst, ptrdiff_t stride,
 1017|       |                                     const uint8_t *above,
 1018|  65.8k|                                     const uint8_t *left) {
 1019|  65.8k|  smooth_predictor_wxh(dst, stride, above, left, 16, 4);
 1020|  65.8k|}
 1021|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1022|       |
 1023|       |void aom_smooth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride,
 1024|       |                                     const uint8_t *above,
 1025|  65.3k|                                     const uint8_t *left) {
 1026|  65.3k|  smooth_predictor_wxh(dst, stride, above, left, 16, 8);
 1027|  65.3k|}
 1028|       |
 1029|       |void aom_smooth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride,
 1030|       |                                      const uint8_t *above,
 1031|   109k|                                      const uint8_t *left) {
 1032|   109k|  smooth_predictor_wxh(dst, stride, above, left, 16, 16);
 1033|   109k|}
 1034|       |
 1035|       |void aom_smooth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride,
 1036|       |                                      const uint8_t *above,
 1037|  30.5k|                                      const uint8_t *left) {
 1038|  30.5k|  smooth_predictor_wxh(dst, stride, above, left, 16, 32);
 1039|  30.5k|}
 1040|       |
 1041|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1042|       |void aom_smooth_predictor_16x64_ssse3(uint8_t *dst, ptrdiff_t stride,
 1043|       |                                      const uint8_t *above,
 1044|  5.70k|                                      const uint8_t *left) {
 1045|  5.70k|  smooth_predictor_wxh(dst, stride, above, left, 16, 64);
 1046|  5.70k|}
 1047|       |
 1048|       |void aom_smooth_predictor_32x8_ssse3(uint8_t *dst, ptrdiff_t stride,
 1049|       |                                     const uint8_t *above,
 1050|  45.1k|                                     const uint8_t *left) {
 1051|  45.1k|  smooth_predictor_wxh(dst, stride, above, left, 32, 8);
 1052|  45.1k|}
 1053|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1054|       |
 1055|       |void aom_smooth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride,
 1056|       |                                      const uint8_t *above,
 1057|  27.9k|                                      const uint8_t *left) {
 1058|  27.9k|  smooth_predictor_wxh(dst, stride, above, left, 32, 16);
 1059|  27.9k|}
 1060|       |
 1061|       |void aom_smooth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride,
 1062|       |                                      const uint8_t *above,
 1063|  91.6k|                                      const uint8_t *left) {
 1064|  91.6k|  smooth_predictor_wxh(dst, stride, above, left, 32, 32);
 1065|  91.6k|}
 1066|       |
 1067|       |void aom_smooth_predictor_32x64_ssse3(uint8_t *dst, ptrdiff_t stride,
 1068|       |                                      const uint8_t *above,
 1069|  2.39k|                                      const uint8_t *left) {
 1070|  2.39k|  smooth_predictor_wxh(dst, stride, above, left, 32, 64);
 1071|  2.39k|}
 1072|       |
 1073|       |#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1074|       |void aom_smooth_predictor_64x16_ssse3(uint8_t *dst, ptrdiff_t stride,
 1075|       |                                      const uint8_t *above,
 1076|  23.2k|                                      const uint8_t *left) {
 1077|  23.2k|  smooth_predictor_wxh(dst, stride, above, left, 64, 16);
 1078|  23.2k|}
 1079|       |#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
 1080|       |
 1081|       |void aom_smooth_predictor_64x32_ssse3(uint8_t *dst, ptrdiff_t stride,
 1082|       |                                      const uint8_t *above,
 1083|  4.56k|                                      const uint8_t *left) {
 1084|  4.56k|  smooth_predictor_wxh(dst, stride, above, left, 64, 32);
 1085|  4.56k|}
 1086|       |
 1087|       |void aom_smooth_predictor_64x64_ssse3(uint8_t *dst, ptrdiff_t stride,
 1088|       |                                      const uint8_t *above,
 1089|  29.0k|                                      const uint8_t *left) {
 1090|  29.0k|  smooth_predictor_wxh(dst, stride, above, left, 64, 64);
 1091|  29.0k|}
 1092|       |
 1093|       |// -----------------------------------------------------------------------------
 1094|       |// Smooth horizontal/vertical helper functions.
 1095|       |
 1096|       |// For Horizontal, pixels1 and pixels2 are the same repeated value. For
 1097|       |// Vertical, weights1 and weights2 are the same, and scaled_corner1 and
 1098|       |// scaled_corner2 are the same.
 1099|       |static AOM_FORCE_INLINE void write_smooth_directional_sum16(
 1100|       |    uint8_t *LIBAOM_RESTRICT dst, const __m128i pixels1, const __m128i pixels2,
 1101|       |    const __m128i weights1, const __m128i weights2,
 1102|       |    const __m128i scaled_corner1, const __m128i scaled_corner2,
 1103|  13.5M|    const __m128i round) {
 1104|  13.5M|  const __m128i weighted_px1 = _mm_mullo_epi16(pixels1, weights1);
 1105|  13.5M|  const __m128i weighted_px2 = _mm_mullo_epi16(pixels2, weights2);
 1106|  13.5M|  const __m128i pred_sum1 = _mm_add_epi16(scaled_corner1, weighted_px1);
 1107|  13.5M|  const __m128i pred_sum2 = _mm_add_epi16(scaled_corner2, weighted_px2);
 1108|       |  // Equivalent to RightShiftWithRounding(pred[x][y], 8).
 1109|  13.5M|  const __m128i pred1 = _mm_srli_epi16(_mm_add_epi16(pred_sum1, round), 8);
 1110|  13.5M|  const __m128i pred2 = _mm_srli_epi16(_mm_add_epi16(pred_sum2, round), 8);
 1111|  13.5M|  StoreUnaligned16(dst, _mm_packus_epi16(pred1, pred2));
 1112|  13.5M|}
 1113|       |
 1114|       |static AOM_FORCE_INLINE __m128i smooth_directional_sum8(
 1115|  1.61M|    const __m128i pixels, const __m128i weights, const __m128i scaled_corner) {
 1116|  1.61M|  const __m128i weighted_px = _mm_mullo_epi16(pixels, weights);
 1117|  1.61M|  return _mm_add_epi16(scaled_corner, weighted_px);
 1118|  1.61M|}
 1119|       |
 1120|       |static AOM_FORCE_INLINE void write_smooth_directional_sum8(
 1121|       |    uint8_t *LIBAOM_RESTRICT dst, const __m128i *pixels, const __m128i *weights,
 1122|  1.61M|    const __m128i *scaled_corner, const __m128i *round) {
 1123|  1.61M|  const __m128i pred_sum =
 1124|  1.61M|      smooth_directional_sum8(*pixels, *weights, *scaled_corner);
 1125|       |  // Equivalent to RightShiftWithRounding(pred[x][y], 8).
 1126|  1.61M|  const __m128i pred = _mm_srli_epi16(_mm_add_epi16(pred_sum, *round), 8);
 1127|  1.61M|  StoreLo8(dst, _mm_packus_epi16(pred, pred));
 1128|  1.61M|}
 1129|       |
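Note: write_smooth_directional_sum8() and write_smooth_directional_sum16() share the same per-lane arithmetic; a minimal scalar sketch of that final step, assuming SMOOTH_WEIGHT_LOG2_SCALE == 8 so the round constant is 128 (smooth_directional_scalar() is a hypothetical helper for illustration only):

// Weight the pixel, add the pre-scaled corner term, then round-shift by 8;
// this is the "Equivalent to RightShiftWithRounding(pred[x][y], 8)" step.
static unsigned char smooth_directional_scalar(int pixel, int weight,
                                               int scaled_corner) {
  const int pred_sum = scaled_corner + pixel * weight;
  return (unsigned char)((pred_sum + 128) >> 8);
}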
 1130|       |// -----------------------------------------------------------------------------
 1131|       |// SMOOTH_V_PRED
 1132|       |
 1133|       |static AOM_FORCE_INLINE void load_smooth_vertical_pixels4(
 1134|       |    const uint8_t *LIBAOM_RESTRICT above, const uint8_t *LIBAOM_RESTRICT left,
 1135|  53.1k|    const int height, __m128i *pixels) {
 1136|  53.1k|  __m128i top = Load4(above);
 1137|  53.1k|  const __m128i bottom_left = _mm_set1_epi16(left[height - 1]);
 1138|  53.1k|  top = cvtepu8_epi16(top);
 1139|  53.1k|  pixels[0] = _mm_unpacklo_epi16(top, bottom_left);
 1140|  53.1k|}
 1141|       |
 1142|       |// |weight_array| alternates weight vectors from the table with their inverted
 1143|       |// (256-w) counterparts. This is precomputed by the compiler when the weights
 1144|       |// table is visible to this module. Removing this visibility can cut speed by up
 1145|       |// to half in both 4xH and 8xH transforms.
 1146|       |static AOM_FORCE_INLINE void load_smooth_vertical_weights4(
 1147|       |    const uint8_t *LIBAOM_RESTRICT weight_array, const int height,
 1148|  53.1k|    __m128i *weights) {
 1149|  53.1k|  const __m128i inverter = _mm_set1_epi16(256);
 1150|       |
 1151|  53.1k|  if (height == 4) {
 1152|  34.3k|    const __m128i weight = Load4(weight_array);
 1153|  34.3k|    weights[0] = cvtepu8_epi16(weight);
 1154|  34.3k|    weights[1] = _mm_sub_epi16(inverter, weights[0]);
 1155|  34.3k|  } else if (height == 8) {
 1156|  11.1k|    const __m128i weight = LoadLo8(weight_array + 4);
 1157|  11.1k|    weights[0] = cvtepu8_epi16(weight);
 1158|  11.1k|    weights[1] = _mm_sub_epi16(inverter, weights[0]);
 1159|  11.1k|  } else {
 1160|  7.65k|    const __m128i weight = LoadUnaligned16(weight_array + 12);
 1161|  7.65k|    const __m128i zero = _mm_setzero_si128();
 1162|  7.65k|    weights[0] = cvtepu8_epi16(weight);
 1163|  7.65k|    weights[1] = _mm_sub_epi16(inverter, weights[0]);
 1164|  7.65k|    weights[2] = _mm_unpackhi_epi8(weight, zero);
 1165|  7.65k|    weights[3] = _mm_sub_epi16(inverter, weights[2]);
 1166|  7.65k|  }
 1167|  53.1k|}
 1168|       |
 1169|       |static AOM_FORCE_INLINE void write_smooth_vertical4xh(
 1170|       |    const __m128i *pixel, const __m128i *weight, const int height,
 1171|  60.7k|    uint8_t *LIBAOM_RESTRICT dst, const ptrdiff_t stride) {
 1172|  60.7k|  const __m128i pred_round = _mm_set1_epi32(128);
 1173|  60.7k|  const __m128i mask_increment = _mm_set1_epi16(0x0202);
 1174|  60.7k|  const __m128i cvtepu8_epi32 = _mm_set1_epi32(0xC080400);
 1175|  60.7k|  __m128i y_select = _mm_set1_epi16(0x0100);
 1176|       |
 1177|   409k|  for (int y = 0; y < height; ++y) {
 1178|   348k|    const __m128i weight_y = _mm_shuffle_epi8(weight[0], y_select);
 1179|   348k|    const __m128i inverted_weight_y = _mm_shuffle_epi8(weight[1], y_select);
 1180|   348k|    const __m128i alternate_weights =
 1181|   348k|        _mm_unpacklo_epi16(weight_y, inverted_weight_y);
 1182|       |    // Here the pixel vector is top_row[0], corner, top_row[1], corner, ...
 1183|       |    // The madd instruction yields four results of the form:
 1184|       |    // (top_row[x] * weight[y] + corner * inverted_weight[y])
 1185|   348k|    __m128i sum = _mm_madd_epi16(pixel[0], alternate_weights);
 1186|   348k|    sum = _mm_add_epi32(sum, pred_round);
 1187|   348k|    sum = _mm_srai_epi32(sum, 8);
 1188|   348k|    sum = _mm_shuffle_epi8(sum, cvtepu8_epi32);
 1189|   348k|    Store4(dst, sum);
 1190|   348k|    dst += stride;
 1191|   348k|    y_select = _mm_add_epi16(y_select, mask_increment);
 1192|   348k|  }
 1193|  60.7k|}
 1194|       |
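Note: SMOOTH_V drops the horizontal term of the blend sketched in the SMOOTH_PRED section above: every output row is a weighted average of the top row and the bottom-left pixel only. A scalar sketch under the same scale and weight-table assumptions (smooth_v_scalar() is a hypothetical helper):

static uint8_t smooth_v_scalar(const uint8_t *above, const uint8_t *left,
                               const uint8_t *w_h, int x, int y, int height) {
  const int scale = 256;  // 1 << SMOOTH_WEIGHT_LOG2_SCALE, assumed
  const int sum = w_h[y] * above[x] + (scale - w_h[y]) * left[height - 1];
  return (uint8_t)((sum + 128) >> 8);  // RightShiftWithRounding(sum, 8)
}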
1195
void aom_smooth_v_predictor_4x4_ssse3(
1196
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1197
    const uint8_t *LIBAOM_RESTRICT top_row,
1198
34.3k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1199
34.3k
  __m128i pixels;
1200
34.3k
  load_smooth_vertical_pixels4(top_row, left_column, 4, &pixels);
1201
1202
34.3k
  __m128i weights[2];
1203
34.3k
  load_smooth_vertical_weights4(smooth_weights, 4, weights);
1204
1205
34.3k
  write_smooth_vertical4xh(&pixels, weights, 4, dst, stride);
1206
34.3k
}
1207
1208
void aom_smooth_v_predictor_4x8_ssse3(
1209
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1210
    const uint8_t *LIBAOM_RESTRICT top_row,
1211
11.1k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1212
11.1k
  __m128i pixels;
1213
11.1k
  load_smooth_vertical_pixels4(top_row, left_column, 8, &pixels);
1214
1215
11.1k
  __m128i weights[2];
1216
11.1k
  load_smooth_vertical_weights4(smooth_weights, 8, weights);
1217
1218
11.1k
  write_smooth_vertical4xh(&pixels, weights, 8, dst, stride);
1219
11.1k
}
1220
1221
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1222
void aom_smooth_v_predictor_4x16_ssse3(
1223
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1224
    const uint8_t *LIBAOM_RESTRICT top_row,
1225
7.65k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1226
7.65k
  __m128i pixels;
1227
7.65k
  load_smooth_vertical_pixels4(top_row, left_column, 16, &pixels);
1228
1229
7.65k
  __m128i weights[4];
1230
7.65k
  load_smooth_vertical_weights4(smooth_weights, 16, weights);
1231
1232
7.65k
  write_smooth_vertical4xh(&pixels, weights, 8, dst, stride);
1233
7.65k
  dst += stride << 3;
1234
7.65k
  write_smooth_vertical4xh(&pixels, &weights[2], 8, dst, stride);
1235
7.65k
}
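// Note on the 16-row case above: load_smooth_vertical_weights4 split the
// height-16 weights into two 8-row halves (weights[0..1] and weights[2..3]),
// so the predictor writes rows 0-7, advances dst by eight rows
// (stride << 3), and then writes rows 8-15 from the upper half.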
1236
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1237
1238
void aom_smooth_v_predictor_8x4_ssse3(
1239
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1240
    const uint8_t *LIBAOM_RESTRICT top_row,
1241
16.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1242
16.5k
  const __m128i bottom_left = _mm_set1_epi16(left_column[3]);
1243
16.5k
  const __m128i weights = cvtepu8_epi16(Load4(smooth_weights));
1244
16.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1245
16.5k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1246
16.5k
  const __m128i scaled_bottom_left =
1247
16.5k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1248
16.5k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1249
16.5k
  __m128i y_select = _mm_set1_epi32(0x01000100);
1250
16.5k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1251
16.5k
  __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1252
16.5k
  __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1253
16.5k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1254
16.5k
                                &round);
1255
16.5k
  dst += stride;
1256
16.5k
  y_select = _mm_set1_epi32(0x03020302);
1257
16.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1258
16.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1259
16.5k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1260
16.5k
                                &round);
1261
16.5k
  dst += stride;
1262
16.5k
  y_select = _mm_set1_epi32(0x05040504);
1263
16.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1264
16.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1265
16.5k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1266
16.5k
                                &round);
1267
16.5k
  dst += stride;
1268
16.5k
  y_select = _mm_set1_epi32(0x07060706);
1269
16.5k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1270
16.5k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1271
16.5k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1272
16.5k
                                &round);
1273
16.5k
}
1274
1275
void aom_smooth_v_predictor_8x8_ssse3(
1276
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1277
    const uint8_t *LIBAOM_RESTRICT top_row,
1278
40.9k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1279
40.9k
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
1280
40.9k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
1281
40.9k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1282
40.9k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1283
40.9k
  const __m128i scaled_bottom_left =
1284
40.9k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1285
40.9k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1286
40.9k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1287
368k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1288
327k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1289
327k
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1290
327k
    const __m128i scaled_bottom_left_y =
1291
327k
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
1292
327k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1293
327k
                                  &round);
1294
327k
    dst += stride;
1295
327k
  }
1296
40.9k
}
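// The loop bounds above encode a pshufb broadcast: y_mask starts at
// 0x01000100 (byte indices 1,0 = 16-bit lane 0) and each step adds
// 0x02020202, advancing every byte index by two, i.e. to the next 16-bit
// lane. A small demonstration of the mask sequence (illustration only):

#include <stdio.h>

int main(void) {
  for (int y = 0; y < 8; ++y) {
    // _mm_set1_epi32(mask) followed by _mm_shuffle_epi8 replicates 16-bit
    // lane y of the weight vector across all eight lanes.
    const unsigned mask = 0x01000100u + 0x02020202u * (unsigned)y;
    printf("row %d: y_select = 0x%08X\n", y, mask);
  }
  return 0;
}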
1297
1298
void aom_smooth_v_predictor_8x16_ssse3(
1299
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1300
    const uint8_t *LIBAOM_RESTRICT top_row,
1301
11.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1302
11.5k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1303
11.5k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1304
1305
11.5k
  const __m128i weights1 = cvtepu8_epi16(weights);
1306
11.5k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
1307
11.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1308
11.5k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1309
11.5k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1310
11.5k
  const __m128i scaled_bottom_left1 =
1311
11.5k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1312
11.5k
  const __m128i scaled_bottom_left2 =
1313
11.5k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1314
11.5k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1315
11.5k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1316
103k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1317
92.3k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1318
92.3k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1319
92.3k
    const __m128i scaled_bottom_left_y =
1320
92.3k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1321
92.3k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1322
92.3k
                                  &round);
1323
92.3k
    dst += stride;
1324
92.3k
  }
1325
103k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1326
92.3k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1327
92.3k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1328
92.3k
    const __m128i scaled_bottom_left_y =
1329
92.3k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1330
92.3k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1331
92.3k
                                  &round);
1332
92.3k
    dst += stride;
1333
92.3k
  }
1334
11.5k
}
1335
1336
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1337
void aom_smooth_v_predictor_8x32_ssse3(
1338
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1339
    const uint8_t *LIBAOM_RESTRICT top_row,
1340
4.05k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1341
4.05k
  const __m128i zero = _mm_setzero_si128();
1342
4.05k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1343
4.05k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1344
4.05k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1345
4.05k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1346
4.05k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1347
4.05k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1348
4.05k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1349
4.05k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1350
4.05k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1351
4.05k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1352
4.05k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1353
4.05k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1354
4.05k
  const __m128i scaled_bottom_left1 =
1355
4.05k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1356
4.05k
  const __m128i scaled_bottom_left2 =
1357
4.05k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1358
4.05k
  const __m128i scaled_bottom_left3 =
1359
4.05k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1360
4.05k
  const __m128i scaled_bottom_left4 =
1361
4.05k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1362
4.05k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1363
4.05k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1364
36.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1365
32.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1366
32.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1367
32.4k
    const __m128i scaled_bottom_left_y =
1368
32.4k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1369
32.4k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1370
32.4k
                                  &round);
1371
32.4k
    dst += stride;
1372
32.4k
  }
1373
36.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1374
32.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1375
32.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1376
32.4k
    const __m128i scaled_bottom_left_y =
1377
32.4k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1378
32.4k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1379
32.4k
                                  &round);
1380
32.4k
    dst += stride;
1381
32.4k
  }
1382
36.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1383
32.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1384
32.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1385
32.4k
    const __m128i scaled_bottom_left_y =
1386
32.4k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1387
32.4k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1388
32.4k
                                  &round);
1389
32.4k
    dst += stride;
1390
32.4k
  }
1391
36.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1392
32.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1393
32.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1394
32.4k
    const __m128i scaled_bottom_left_y =
1395
32.4k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1396
32.4k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1397
32.4k
                                  &round);
1398
32.4k
    dst += stride;
1399
32.4k
  }
1400
4.05k
}
1401
1402
void aom_smooth_v_predictor_16x4_ssse3(
1403
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1404
    const uint8_t *LIBAOM_RESTRICT top_row,
1405
17.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1406
17.0k
  const __m128i bottom_left = _mm_set1_epi16(left_column[3]);
1407
17.0k
  const __m128i weights = cvtepu8_epi16(Load4(smooth_weights));
1408
17.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1409
17.0k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1410
17.0k
  const __m128i scaled_bottom_left =
1411
17.0k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1412
17.0k
  const __m128i round = _mm_set1_epi16(128);
1413
17.0k
  const __m128i top = LoadUnaligned16(top_row);
1414
17.0k
  const __m128i top_lo = cvtepu8_epi16(top);
1415
17.0k
  const __m128i top_hi = cvtepu8_epi16(_mm_srli_si128(top, 8));
1416
1417
17.0k
  __m128i y_select = _mm_set1_epi32(0x01000100);
1418
17.0k
  __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1419
17.0k
  __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1420
17.0k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1421
17.0k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1422
17.0k
                                 round);
1423
17.0k
  dst += stride;
1424
17.0k
  y_select = _mm_set1_epi32(0x03020302);
1425
17.0k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1426
17.0k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1427
17.0k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1428
17.0k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1429
17.0k
                                 round);
1430
17.0k
  dst += stride;
1431
17.0k
  y_select = _mm_set1_epi32(0x05040504);
1432
17.0k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1433
17.0k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1434
17.0k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1435
17.0k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1436
17.0k
                                 round);
1437
17.0k
  dst += stride;
1438
17.0k
  y_select = _mm_set1_epi32(0x07060706);
1439
17.0k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1440
17.0k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1441
17.0k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1442
17.0k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1443
17.0k
                                 round);
1444
17.0k
}
1445
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1446
1447
void aom_smooth_v_predictor_16x8_ssse3(
1448
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1449
    const uint8_t *LIBAOM_RESTRICT top_row,
1450
13.4k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1451
13.4k
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
1452
13.4k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
1453
13.4k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1454
13.4k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1455
13.4k
  const __m128i scaled_bottom_left =
1456
13.4k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1457
13.4k
  const __m128i round = _mm_set1_epi16(128);
1458
13.4k
  const __m128i top = LoadUnaligned16(top_row);
1459
13.4k
  const __m128i top_lo = cvtepu8_epi16(top);
1460
13.4k
  const __m128i top_hi = cvtepu8_epi16(_mm_srli_si128(top, 8));
1461
120k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1462
107k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1463
107k
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1464
107k
    const __m128i scaled_bottom_left_y =
1465
107k
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
1466
107k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1467
107k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1468
107k
                                   round);
1469
107k
    dst += stride;
1470
107k
  }
1471
13.4k
}
1472
1473
void aom_smooth_v_predictor_16x16_ssse3(
1474
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1475
    const uint8_t *LIBAOM_RESTRICT top_row,
1476
32.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1477
32.0k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1478
32.0k
  const __m128i zero = _mm_setzero_si128();
1479
32.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1480
32.0k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1481
32.0k
  const __m128i weights_lo = cvtepu8_epi16(weights);
1482
32.0k
  const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1483
32.0k
  const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1484
32.0k
  const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1485
32.0k
  const __m128i scaled_bottom_left_lo =
1486
32.0k
      _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1487
32.0k
  const __m128i scaled_bottom_left_hi =
1488
32.0k
      _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1489
32.0k
  const __m128i round = _mm_set1_epi16(128);
1490
1491
32.0k
  const __m128i top = LoadUnaligned16(top_row);
1492
32.0k
  const __m128i top_lo = cvtepu8_epi16(top);
1493
32.0k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1494
288k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1495
256k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1496
256k
    const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1497
256k
    const __m128i scaled_bottom_left_y =
1498
256k
        _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1499
256k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1500
256k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1501
256k
                                   round);
1502
256k
    dst += stride;
1503
256k
  }
1504
288k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1505
256k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1506
256k
    const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1507
256k
    const __m128i scaled_bottom_left_y =
1508
256k
        _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1509
256k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1510
256k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1511
256k
                                   round);
1512
256k
    dst += stride;
1513
256k
  }
1514
32.0k
}
1515
1516
void aom_smooth_v_predictor_16x32_ssse3(
1517
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1518
    const uint8_t *LIBAOM_RESTRICT top_row,
1519
8.70k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1520
8.70k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1521
8.70k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1522
8.70k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1523
8.70k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1524
8.70k
  const __m128i zero = _mm_setzero_si128();
1525
8.70k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1526
8.70k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1527
8.70k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1528
8.70k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1529
8.70k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1530
8.70k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1531
8.70k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1532
8.70k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1533
8.70k
  const __m128i scaled_bottom_left1 =
1534
8.70k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1535
8.70k
  const __m128i scaled_bottom_left2 =
1536
8.70k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1537
8.70k
  const __m128i scaled_bottom_left3 =
1538
8.70k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1539
8.70k
  const __m128i scaled_bottom_left4 =
1540
8.70k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1541
8.70k
  const __m128i round = _mm_set1_epi16(128);
1542
1543
8.70k
  const __m128i top = LoadUnaligned16(top_row);
1544
8.70k
  const __m128i top_lo = cvtepu8_epi16(top);
1545
8.70k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1546
78.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1547
69.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1548
69.6k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1549
69.6k
    const __m128i scaled_bottom_left_y =
1550
69.6k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1551
69.6k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1552
69.6k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1553
69.6k
                                   round);
1554
69.6k
    dst += stride;
1555
69.6k
  }
1556
78.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1557
69.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1558
69.6k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1559
69.6k
    const __m128i scaled_bottom_left_y =
1560
69.6k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1561
69.6k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1562
69.6k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1563
69.6k
                                   round);
1564
69.6k
    dst += stride;
1565
69.6k
  }
1566
78.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1567
69.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1568
69.6k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1569
69.6k
    const __m128i scaled_bottom_left_y =
1570
69.6k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1571
69.6k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1572
69.6k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1573
69.6k
                                   round);
1574
69.6k
    dst += stride;
1575
69.6k
  }
1576
78.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1577
69.6k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1578
69.6k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1579
69.6k
    const __m128i scaled_bottom_left_y =
1580
69.6k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1581
69.6k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1582
69.6k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1583
69.6k
                                   round);
1584
69.6k
    dst += stride;
1585
69.6k
  }
1586
8.70k
}
1587
1588
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1589
void aom_smooth_v_predictor_16x64_ssse3(
1590
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1591
    const uint8_t *LIBAOM_RESTRICT top_row,
1592
1.67k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1593
1.67k
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
1594
1.67k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1595
1.67k
  const __m128i round = _mm_set1_epi16(128);
1596
1.67k
  const __m128i zero = _mm_setzero_si128();
1597
1.67k
  const __m128i top = LoadUnaligned16(top_row);
1598
1.67k
  const __m128i top_lo = cvtepu8_epi16(top);
1599
1.67k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1600
1.67k
  const uint8_t *weights_base_ptr = smooth_weights + 60;
1601
8.37k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
1602
6.69k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
1603
6.69k
    const __m128i weights_lo = cvtepu8_epi16(weights);
1604
6.69k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1605
6.69k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1606
6.69k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1607
6.69k
    const __m128i scaled_bottom_left_lo =
1608
6.69k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1609
6.69k
    const __m128i scaled_bottom_left_hi =
1610
6.69k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1611
1612
60.2k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1613
53.5k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1614
53.5k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1615
53.5k
      const __m128i scaled_bottom_left_y =
1616
53.5k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1617
53.5k
      write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1618
53.5k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1619
53.5k
                                     round);
1620
53.5k
      dst += stride;
1621
53.5k
    }
1622
60.2k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1623
53.5k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1624
53.5k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1625
53.5k
      const __m128i scaled_bottom_left_y =
1626
53.5k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1627
53.5k
      write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1628
53.5k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1629
53.5k
                                     round);
1630
53.5k
      dst += stride;
1631
53.5k
    }
1632
6.69k
  }
1633
1.67k
}
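// Note on the 64-row predictors: the height-64 table starts at
// smooth_weights + 60 and is consumed 16 weights per LoadUnaligned16, each
// chunk widened into a low and a high 8-lane half. A scalar sketch of the
// indexing (hypothetical helper, not libaom API):

#include <stdint.h>

static int weight_for_row64(const uint8_t *w64, int y) {
  const int chunk = y & ~15;  // which LoadUnaligned16 chunk (0, 16, 32, 48)
  const int lane = y & 15;    // which of the 16 widened lanes in the chunk
  return w64[chunk + lane];   // == w64[y]; split here to mirror the loops
}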
1634
1635
void aom_smooth_v_predictor_32x8_ssse3(
1636
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1637
    const uint8_t *LIBAOM_RESTRICT top_row,
1638
18.9k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1639
18.9k
  const __m128i zero = _mm_setzero_si128();
1640
18.9k
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
1641
18.9k
  const __m128i top_lo = LoadUnaligned16(top_row);
1642
18.9k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1643
18.9k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1644
18.9k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1645
18.9k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1646
18.9k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1647
18.9k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1648
18.9k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
1649
18.9k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1650
18.9k
  const __m128i scaled_bottom_left =
1651
18.9k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1652
18.9k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1653
170k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1654
151k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1655
151k
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1656
151k
    const __m128i scaled_bottom_left_y =
1657
151k
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
1658
151k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1659
151k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1660
151k
                                   round);
1661
151k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1662
151k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1663
151k
                                   round);
1664
151k
    dst += stride;
1665
151k
  }
1666
18.9k
}
1667
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1668
1669
void aom_smooth_v_predictor_32x16_ssse3(
1670
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1671
    const uint8_t *LIBAOM_RESTRICT top_row,
1672
8.76k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1673
8.76k
  const __m128i zero = _mm_setzero_si128();
1674
8.76k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1675
8.76k
  const __m128i top_lo = LoadUnaligned16(top_row);
1676
8.76k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1677
8.76k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1678
8.76k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1679
8.76k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1680
8.76k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1681
8.76k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1682
8.76k
  const __m128i weights1 = cvtepu8_epi16(weights);
1683
8.76k
  const __m128i weights2 = _mm_unpackhi_epi8(weights, zero);
1684
8.76k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1685
8.76k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1686
8.76k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1687
8.76k
  const __m128i scaled_bottom_left1 =
1688
8.76k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1689
8.76k
  const __m128i scaled_bottom_left2 =
1690
8.76k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1691
8.76k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1692
78.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1693
70.0k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1694
70.0k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1695
70.0k
    const __m128i scaled_bottom_left_y =
1696
70.0k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1697
70.0k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1698
70.0k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1699
70.0k
                                   round);
1700
70.0k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1701
70.0k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1702
70.0k
                                   round);
1703
70.0k
    dst += stride;
1704
70.0k
  }
1705
78.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1706
70.0k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1707
70.0k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1708
70.0k
    const __m128i scaled_bottom_left_y =
1709
70.0k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1710
70.0k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1711
70.0k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1712
70.0k
                                   round);
1713
70.0k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1714
70.0k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1715
70.0k
                                   round);
1716
70.0k
    dst += stride;
1717
70.0k
  }
1718
8.76k
}
1719
1720
void aom_smooth_v_predictor_32x32_ssse3(
1721
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1722
    const uint8_t *LIBAOM_RESTRICT top_row,
1723
42.8k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1724
42.8k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1725
42.8k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1726
42.8k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1727
42.8k
  const __m128i zero = _mm_setzero_si128();
1728
42.8k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1729
42.8k
  const __m128i top_lo = LoadUnaligned16(top_row);
1730
42.8k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1731
42.8k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1732
42.8k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1733
42.8k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1734
42.8k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1735
42.8k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1736
42.8k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1737
42.8k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1738
42.8k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1739
42.8k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1740
42.8k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1741
42.8k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1742
42.8k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1743
42.8k
  const __m128i scaled_bottom_left1 =
1744
42.8k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1745
42.8k
  const __m128i scaled_bottom_left2 =
1746
42.8k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1747
42.8k
  const __m128i scaled_bottom_left3 =
1748
42.8k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1749
42.8k
  const __m128i scaled_bottom_left4 =
1750
42.8k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1751
42.8k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1752
385k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1753
343k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1754
343k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1755
343k
    const __m128i scaled_bottom_left_y =
1756
343k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1757
343k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1758
343k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1759
343k
                                   round);
1760
343k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1761
343k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1762
343k
                                   round);
1763
343k
    dst += stride;
1764
343k
  }
1765
385k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1766
343k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1767
343k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1768
343k
    const __m128i scaled_bottom_left_y =
1769
343k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1770
343k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1771
343k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1772
343k
                                   round);
1773
343k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1774
343k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1775
343k
                                   round);
1776
343k
    dst += stride;
1777
343k
  }
1778
385k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1779
343k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1780
343k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1781
343k
    const __m128i scaled_bottom_left_y =
1782
343k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1783
343k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1784
343k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1785
343k
                                   round);
1786
343k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1787
343k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1788
343k
                                   round);
1789
343k
    dst += stride;
1790
343k
  }
1791
385k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1792
343k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1793
343k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1794
343k
    const __m128i scaled_bottom_left_y =
1795
343k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1796
343k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1797
343k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1798
343k
                                   round);
1799
343k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1800
343k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1801
343k
                                   round);
1802
343k
    dst += stride;
1803
343k
  }
1804
42.8k
}
1805
1806
void aom_smooth_v_predictor_32x64_ssse3(
1807
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1808
    const uint8_t *LIBAOM_RESTRICT top_row,
1809
672
    const uint8_t *LIBAOM_RESTRICT left_column) {
1810
672
  const __m128i zero = _mm_setzero_si128();
1811
672
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
1812
672
  const __m128i top_lo = LoadUnaligned16(top_row);
1813
672
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1814
672
  const __m128i top1 = cvtepu8_epi16(top_lo);
1815
672
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1816
672
  const __m128i top3 = cvtepu8_epi16(top_hi);
1817
672
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1818
672
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1819
672
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1820
672
  const uint8_t *weights_base_ptr = smooth_weights + 60;
1821
3.36k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
1822
2.68k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
1823
2.68k
    const __m128i weights_lo = cvtepu8_epi16(weights);
1824
2.68k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1825
2.68k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1826
2.68k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1827
2.68k
    const __m128i scaled_bottom_left_lo =
1828
2.68k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1829
2.68k
    const __m128i scaled_bottom_left_hi =
1830
2.68k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1831
1832
24.1k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1833
21.5k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1834
21.5k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1835
21.5k
      const __m128i scaled_bottom_left_y =
1836
21.5k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1837
21.5k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1838
21.5k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1839
21.5k
                                     round);
1840
21.5k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1841
21.5k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1842
21.5k
                                     round);
1843
21.5k
      dst += stride;
1844
21.5k
    }
1845
24.1k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1846
21.5k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1847
21.5k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1848
21.5k
      const __m128i scaled_bottom_left_y =
1849
21.5k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1850
21.5k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1851
21.5k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1852
21.5k
                                     round);
1853
21.5k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1854
21.5k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1855
21.5k
                                     round);
1856
21.5k
      dst += stride;
1857
21.5k
    }
1858
2.68k
  }
1859
672
}
1860
1861
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1862
void aom_smooth_v_predictor_64x16_ssse3(
1863
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1864
    const uint8_t *LIBAOM_RESTRICT top_row,
1865
12.8k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1866
12.8k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1867
12.8k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1868
12.8k
  const __m128i zero = _mm_setzero_si128();
1869
12.8k
  const __m128i top_lolo = LoadUnaligned16(top_row);
1870
12.8k
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
1871
12.8k
  const __m128i top1 = cvtepu8_epi16(top_lolo);
1872
12.8k
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
1873
12.8k
  const __m128i top3 = cvtepu8_epi16(top_lohi);
1874
12.8k
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
1875
1876
12.8k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1877
12.8k
  const __m128i weights1 = cvtepu8_epi16(weights);
1878
12.8k
  const __m128i weights2 = _mm_unpackhi_epi8(weights, zero);
1879
12.8k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1880
12.8k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1881
12.8k
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
1882
12.8k
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
1883
12.8k
  const __m128i top5 = cvtepu8_epi16(top_hilo);
1884
12.8k
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
1885
12.8k
  const __m128i top7 = cvtepu8_epi16(top_hihi);
1886
12.8k
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
1887
12.8k
  const __m128i scaled_bottom_left1 =
1888
12.8k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1889
12.8k
  const __m128i scaled_bottom_left2 =
1890
12.8k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1891
12.8k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1892
115k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1893
103k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1894
103k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1895
103k
    const __m128i scaled_bottom_left_y =
1896
103k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1897
103k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1898
103k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1899
103k
                                   round);
1900
103k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1901
103k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1902
103k
                                   round);
1903
103k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
1904
103k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1905
103k
                                   round);
1906
103k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
1907
103k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1908
103k
                                   round);
1909
103k
    dst += stride;
1910
103k
  }
1911
115k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1912
103k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1913
103k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1914
103k
    const __m128i scaled_bottom_left_y =
1915
103k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1916
103k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1917
103k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1918
103k
                                   round);
1919
103k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1920
103k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1921
103k
                                   round);
1922
103k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
1923
103k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1924
103k
                                   round);
1925
103k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
1926
103k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1927
103k
                                   round);
1928
103k
    dst += stride;
1929
103k
  }
1930
12.8k
}
1931
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1932
1933
void aom_smooth_v_predictor_64x32_ssse3(
1934
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1935
    const uint8_t *LIBAOM_RESTRICT top_row,
1936
2.18k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1937
2.18k
  const __m128i zero = _mm_setzero_si128();
1938
2.18k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1939
2.18k
  const __m128i top_lolo = LoadUnaligned16(top_row);
1940
2.18k
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
1941
2.18k
  const __m128i top1 = cvtepu8_epi16(top_lolo);
1942
2.18k
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
1943
2.18k
  const __m128i top3 = cvtepu8_epi16(top_lohi);
1944
2.18k
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
1945
2.18k
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
1946
2.18k
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
1947
2.18k
  const __m128i top5 = cvtepu8_epi16(top_hilo);
1948
2.18k
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
1949
2.18k
  const __m128i top7 = cvtepu8_epi16(top_hihi);
1950
2.18k
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
1951
2.18k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1952
2.18k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1953
2.18k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1954
2.18k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1955
2.18k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1956
2.18k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1957
2.18k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1958
2.18k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1959
2.18k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1960
2.18k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1961
2.18k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1962
2.18k
  const __m128i scaled_bottom_left1 =
1963
2.18k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1964
2.18k
  const __m128i scaled_bottom_left2 =
1965
2.18k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1966
2.18k
  const __m128i scaled_bottom_left3 =
1967
2.18k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1968
2.18k
  const __m128i scaled_bottom_left4 =
1969
2.18k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1970
2.18k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1971
1972
19.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1973
17.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1974
17.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1975
17.4k
    const __m128i scaled_bottom_left_y =
1976
17.4k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1977
17.4k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1978
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1979
17.4k
                                   round);
1980
17.4k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1981
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1982
17.4k
                                   round);
1983
17.4k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
1984
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1985
17.4k
                                   round);
1986
17.4k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
1987
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1988
17.4k
                                   round);
1989
17.4k
    dst += stride;
1990
17.4k
  }
1991
19.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1992
17.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1993
17.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1994
17.4k
    const __m128i scaled_bottom_left_y =
1995
17.4k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1996
17.4k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1997
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1998
17.4k
                                   round);
1999
17.4k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2000
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2001
17.4k
                                   round);
2002
17.4k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2003
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2004
17.4k
                                   round);
2005
17.4k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2006
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2007
17.4k
                                   round);
2008
17.4k
    dst += stride;
2009
17.4k
  }
2010
19.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2011
17.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2012
17.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
2013
17.4k
    const __m128i scaled_bottom_left_y =
2014
17.4k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
2015
17.4k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2016
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2017
17.4k
                                   round);
2018
17.4k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2019
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2020
17.4k
                                   round);
2021
17.4k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2022
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2023
17.4k
                                   round);
2024
17.4k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2025
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2026
17.4k
                                   round);
2027
17.4k
    dst += stride;
2028
17.4k
  }
2029
19.6k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2030
17.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2031
17.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
2032
17.4k
    const __m128i scaled_bottom_left_y =
2033
17.4k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
2034
17.4k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2035
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2036
17.4k
                                   round);
2037
17.4k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2038
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2039
17.4k
                                   round);
2040
17.4k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2041
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2042
17.4k
                                   round);
2043
17.4k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2044
17.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
2045
17.4k
                                   round);
2046
17.4k
    dst += stride;
2047
17.4k
  }
2048
2.18k
}
2049
2050
void aom_smooth_v_predictor_64x64_ssse3(
2051
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2052
    const uint8_t *LIBAOM_RESTRICT top_row,
2053
5.54k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2054
5.54k
  const __m128i zero = _mm_setzero_si128();
2055
5.54k
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
2056
5.54k
  const __m128i top_lolo = LoadUnaligned16(top_row);
2057
5.54k
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
2058
5.54k
  const __m128i top1 = cvtepu8_epi16(top_lolo);
2059
5.54k
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
2060
5.54k
  const __m128i top3 = cvtepu8_epi16(top_lohi);
2061
5.54k
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
2062
5.54k
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
2063
5.54k
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
2064
5.54k
  const __m128i top5 = cvtepu8_epi16(top_hilo);
2065
5.54k
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
2066
5.54k
  const __m128i top7 = cvtepu8_epi16(top_hihi);
2067
5.54k
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
2068
5.54k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2069
5.54k
  const __m128i round = _mm_set1_epi16(128);
2070
5.54k
  const uint8_t *weights_base_ptr = smooth_weights + 60;
2071
27.7k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
2072
22.1k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
2073
22.1k
    const __m128i weights_lo = cvtepu8_epi16(weights);
2074
22.1k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
2075
22.1k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
2076
22.1k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
2077
22.1k
    const __m128i scaled_bottom_left_lo =
2078
22.1k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
2079
22.1k
    const __m128i scaled_bottom_left_hi =
2080
22.1k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
2081
199k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2082
177k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2083
177k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
2084
177k
      const __m128i scaled_bottom_left_y =
2085
177k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
2086
177k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2087
177k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2088
177k
                                     round);
2089
177k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2090
177k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2091
177k
                                     round);
2092
177k
      write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2093
177k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2094
177k
                                     round);
2095
177k
      write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2096
177k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2097
177k
                                     round);
2098
177k
      dst += stride;
2099
177k
    }
2100
199k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2101
177k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2102
177k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
2103
177k
      const __m128i scaled_bottom_left_y =
2104
177k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
2105
177k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2106
177k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2107
177k
                                     round);
2108
177k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2109
177k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2110
177k
                                     round);
2111
177k
      write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2112
177k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2113
177k
                                     round);
2114
177k
      write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2115
177k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2116
177k
                                     round);
2117
177k
      dst += stride;
2118
177k
    }
2119
22.1k
  }
2120
5.54k
}
2121
2122
// -----------------------------------------------------------------------------
2123
// SMOOTH_H_PRED
2124
static AOM_FORCE_INLINE void write_smooth_horizontal_sum4(
2125
    uint8_t *LIBAOM_RESTRICT dst, const __m128i *left_y, const __m128i *weights,
2126
605k
    const __m128i *scaled_top_right, const __m128i *round) {
2127
605k
  const __m128i weighted_left_y = _mm_mullo_epi16(*left_y, *weights);
2128
605k
  const __m128i pred_sum = _mm_add_epi32(*scaled_top_right, weighted_left_y);
2129
  // Equivalent to RightShiftWithRounding(pred[x][y], 8).
2130
605k
  const __m128i pred = _mm_srli_epi32(_mm_add_epi32(pred_sum, *round), 8);
2131
605k
  const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400);
2132
605k
  Store4(dst, _mm_shuffle_epi8(pred, cvtepi32_epi8));
2133
605k
}
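In scalar terms, write_smooth_horizontal_sum4 computes, per column x of a row, w[x] * left_y + (256 - w[x]) * top_right, rounded and shifted down by SMOOTH_WEIGHT_LOG2_SCALE (= 8, matching the >> 8 above). The (256 - w[x]) * top_right term is row-invariant, which is why every caller precomputes it once as scaled_top_right. Note also that _mm_mullo_epi16 is safe even on the 32-bit-widened lanes: both factors fit in each dword's low 16 bits and every product stays below 65536, so nothing carries into the upper half. A hypothetical scalar reference for one 4-wide row:

#include <stdint.h>

static void smooth_h_row4_c(uint8_t *dst, const uint8_t *w /* 4 weights */,
                            uint8_t left_y, uint8_t top_right) {
  for (int x = 0; x < 4; ++x) {
    const uint32_t pred = (uint32_t)w[x] * left_y +
                          (uint32_t)(256 - w[x]) * top_right;
    dst[x] = (uint8_t)((pred + 128) >> 8);  // RightShiftWithRounding(pred, 8)
  }
}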
2134
2135
void aom_smooth_h_predictor_4x4_ssse3(
2136
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2137
    const uint8_t *LIBAOM_RESTRICT top_row,
2138
49.9k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2139
49.9k
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
2140
49.9k
  const __m128i left = cvtepu8_epi32(Load4(left_column));
2141
49.9k
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
2142
49.9k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2143
49.9k
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
2144
49.9k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2145
49.9k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2146
49.9k
  __m128i left_y = _mm_shuffle_epi32(left, 0);
2147
49.9k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2148
49.9k
                               &round);
2149
49.9k
  dst += stride;
2150
49.9k
  left_y = _mm_shuffle_epi32(left, 0x55);
2151
49.9k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2152
49.9k
                               &round);
2153
49.9k
  dst += stride;
2154
49.9k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2155
49.9k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2156
49.9k
                               &round);
2157
49.9k
  dst += stride;
2158
49.9k
  left_y = _mm_shuffle_epi32(left, 0xff);
2159
49.9k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2160
49.9k
                               &round);
2161
49.9k
}
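The four _mm_shuffle_epi32 immediates above (0, 0x55, 0xaa, 0xff) broadcast dwords 0 through 3 of left, i.e. the four left-column pixels after the cvtepu8_epi32 widening. A standalone check (hypothetical, not libaom code):

#include <stdio.h>
#include <emmintrin.h>

int main(void) {
  const __m128i v = _mm_setr_epi32(100, 101, 102, 103);
  // 0x55 = 0b01010101 selects dword 1 in all four positions; 0xaa selects
  // dword 2; 0xff selects dword 3.
  printf("%d %d %d %d\n",
         _mm_cvtsi128_si32(_mm_shuffle_epi32(v, 0x00)),
         _mm_cvtsi128_si32(_mm_shuffle_epi32(v, 0x55)),
         _mm_cvtsi128_si32(_mm_shuffle_epi32(v, 0xaa)),
         _mm_cvtsi128_si32(_mm_shuffle_epi32(v, 0xff)));
  return 0;  // prints: 100 101 102 103
}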
2162
2163
void aom_smooth_h_predictor_4x8_ssse3(
2164
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2165
    const uint8_t *LIBAOM_RESTRICT top_row,
2166
18.3k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2167
18.3k
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
2168
18.3k
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
2169
18.3k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2170
18.3k
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
2171
18.3k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2172
18.3k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2173
18.3k
  __m128i left = cvtepu8_epi32(Load4(left_column));
2174
18.3k
  __m128i left_y = _mm_shuffle_epi32(left, 0);
2175
18.3k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2176
18.3k
                               &round);
2177
18.3k
  dst += stride;
2178
18.3k
  left_y = _mm_shuffle_epi32(left, 0x55);
2179
18.3k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2180
18.3k
                               &round);
2181
18.3k
  dst += stride;
2182
18.3k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2183
18.3k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2184
18.3k
                               &round);
2185
18.3k
  dst += stride;
2186
18.3k
  left_y = _mm_shuffle_epi32(left, 0xff);
2187
18.3k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2188
18.3k
                               &round);
2189
18.3k
  dst += stride;
2190
2191
18.3k
  left = cvtepu8_epi32(Load4(left_column + 4));
2192
18.3k
  left_y = _mm_shuffle_epi32(left, 0);
2193
18.3k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2194
18.3k
                               &round);
2195
18.3k
  dst += stride;
2196
18.3k
  left_y = _mm_shuffle_epi32(left, 0x55);
2197
18.3k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2198
18.3k
                               &round);
2199
18.3k
  dst += stride;
2200
18.3k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2201
18.3k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2202
18.3k
                               &round);
2203
18.3k
  dst += stride;
2204
18.3k
  left_y = _mm_shuffle_epi32(left, 0xff);
2205
18.3k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2206
18.3k
                               &round);
2207
18.3k
}
2208
2209
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2210
void aom_smooth_h_predictor_4x16_ssse3(
2211
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2212
    const uint8_t *LIBAOM_RESTRICT top_row,
2213
16.1k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2214
16.1k
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
2215
16.1k
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
2216
16.1k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2217
16.1k
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
2218
16.1k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2219
16.1k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2220
16.1k
  __m128i left = cvtepu8_epi32(Load4(left_column));
2221
16.1k
  __m128i left_y = _mm_shuffle_epi32(left, 0);
2222
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2223
16.1k
                               &round);
2224
16.1k
  dst += stride;
2225
16.1k
  left_y = _mm_shuffle_epi32(left, 0x55);
2226
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2227
16.1k
                               &round);
2228
16.1k
  dst += stride;
2229
16.1k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2230
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2231
16.1k
                               &round);
2232
16.1k
  dst += stride;
2233
16.1k
  left_y = _mm_shuffle_epi32(left, 0xff);
2234
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2235
16.1k
                               &round);
2236
16.1k
  dst += stride;
2237
2238
16.1k
  left = cvtepu8_epi32(Load4(left_column + 4));
2239
16.1k
  left_y = _mm_shuffle_epi32(left, 0);
2240
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2241
16.1k
                               &round);
2242
16.1k
  dst += stride;
2243
16.1k
  left_y = _mm_shuffle_epi32(left, 0x55);
2244
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2245
16.1k
                               &round);
2246
16.1k
  dst += stride;
2247
16.1k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2248
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2249
16.1k
                               &round);
2250
16.1k
  dst += stride;
2251
16.1k
  left_y = _mm_shuffle_epi32(left, 0xff);
2252
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2253
16.1k
                               &round);
2254
16.1k
  dst += stride;
2255
2256
16.1k
  left = cvtepu8_epi32(Load4(left_column + 8));
2257
16.1k
  left_y = _mm_shuffle_epi32(left, 0);
2258
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2259
16.1k
                               &round);
2260
16.1k
  dst += stride;
2261
16.1k
  left_y = _mm_shuffle_epi32(left, 0x55);
2262
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2263
16.1k
                               &round);
2264
16.1k
  dst += stride;
2265
16.1k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2266
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2267
16.1k
                               &round);
2268
16.1k
  dst += stride;
2269
16.1k
  left_y = _mm_shuffle_epi32(left, 0xff);
2270
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2271
16.1k
                               &round);
2272
16.1k
  dst += stride;
2273
2274
16.1k
  left = cvtepu8_epi32(Load4(left_column + 12));
2275
16.1k
  left_y = _mm_shuffle_epi32(left, 0);
2276
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2277
16.1k
                               &round);
2278
16.1k
  dst += stride;
2279
16.1k
  left_y = _mm_shuffle_epi32(left, 0x55);
2280
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2281
16.1k
                               &round);
2282
16.1k
  dst += stride;
2283
16.1k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2284
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2285
16.1k
                               &round);
2286
16.1k
  dst += stride;
2287
16.1k
  left_y = _mm_shuffle_epi32(left, 0xff);
2288
16.1k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2289
16.1k
                               &round);
2290
16.1k
}
2291
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2292
2293
// In the write_smooth_directional_sum helpers, |pixels| is the repeated left
2294
// value for the row under SMOOTH_H; under SMOOTH_V, |pixels| is a segment of
2295
// the top row (or the whole top row) and |weights| is repeated.
2296
void aom_smooth_h_predictor_8x4_ssse3(
2297
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2298
    const uint8_t *LIBAOM_RESTRICT top_row,
2299
30.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2300
30.5k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2301
30.5k
  const __m128i left = cvtepu8_epi16(Load4(left_column));
2302
30.5k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2303
30.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2304
30.5k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2305
30.5k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2306
30.5k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2307
30.5k
  __m128i y_select = _mm_set1_epi32(0x01000100);
2308
30.5k
  __m128i left_y = _mm_shuffle_epi8(left, y_select);
2309
30.5k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2310
30.5k
                                &round);
2311
30.5k
  dst += stride;
2312
30.5k
  y_select = _mm_set1_epi32(0x03020302);
2313
30.5k
  left_y = _mm_shuffle_epi8(left, y_select);
2314
30.5k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2315
30.5k
                                &round);
2316
30.5k
  dst += stride;
2317
30.5k
  y_select = _mm_set1_epi32(0x05040504);
2318
30.5k
  left_y = _mm_shuffle_epi8(left, y_select);
2319
30.5k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2320
30.5k
                                &round);
2321
30.5k
  dst += stride;
2322
30.5k
  y_select = _mm_set1_epi32(0x07060706);
2323
30.5k
  left_y = _mm_shuffle_epi8(left, y_select);
2324
30.5k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2325
30.5k
                                &round);
2326
30.5k
}
2327
2328
void aom_smooth_h_predictor_8x8_ssse3(
2329
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2330
    const uint8_t *LIBAOM_RESTRICT top_row,
2331
46.3k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2332
46.3k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2333
46.3k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2334
46.3k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2335
46.3k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2336
46.3k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2337
46.3k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2338
46.3k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2339
416k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2340
370k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2341
370k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2342
370k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2343
370k
                                  &round);
2344
370k
    dst += stride;
2345
370k
  }
2346
46.3k
}
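One bound worth recording for the 8-wide and wider paths, which stay in 16-bit lanes: the per-pixel numerator w*left + (256 - w)*top_right + 128 is at most 256*255 + 128 = 65408, so it always fits an unsigned 16-bit lane; the 4-wide helper earlier works in 32-bit lanes instead. A trivial standalone check of that bound (an assumption-documenting sketch, not libaom code):

#include <assert.h>
#include <stdint.h>

int main(void) {
  // With w, left and top_right all in [0, 255], the two weighted terms sum
  // to at most 256 * 255 = 65280; adding the rounding bias gives 65408.
  const uint32_t worst = 256u * 255u + 128u;
  assert(worst < 65536u);  // fits an unsigned 16-bit lane
  return 0;
}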
2347
2348
void aom_smooth_h_predictor_8x16_ssse3(
2349
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2350
    const uint8_t *LIBAOM_RESTRICT top_row,
2351
15.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2352
15.0k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2353
15.0k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2354
15.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2355
15.0k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2356
15.0k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2357
15.0k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2358
15.0k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2359
135k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2360
120k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2361
120k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2362
120k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2363
120k
                                  &round);
2364
120k
    dst += stride;
2365
120k
  }
2366
15.0k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2367
135k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2368
120k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2369
120k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2370
120k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2371
120k
                                  &round);
2372
120k
    dst += stride;
2373
120k
  }
2374
15.0k
}
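Because the pshufb broadcast can only address the eight 16-bit left values currently in the register, each LoadLo8 of the left column feeds exactly eight rows; the taller kernels repeat the load-then-loop pair (aom_smooth_h_predictor_8x32_ssse3 below does it four times, and the x64 kernels further down roll it into a left_offset loop). A hypothetical scalar equivalent of this structure, assuming scale = 256 and a height that is a multiple of 8:

#include <stddef.h>
#include <stdint.h>

static void smooth_h_wx8k_c(uint8_t *dst, ptrdiff_t stride, int width,
                            int height, const uint8_t *w, uint8_t top_right,
                            const uint8_t *left_column) {
  for (int y0 = 0; y0 < height; y0 += 8) {  // one LoadLo8-worth of left pixels
    for (int y = y0; y < y0 + 8; ++y) {     // eight rows per load
      for (int x = 0; x < width; ++x) {
        const uint32_t p = (uint32_t)w[x] * left_column[y] +
                           (uint32_t)(256 - w[x]) * top_right;
        dst[x] = (uint8_t)((p + 128) >> 8);
      }
      dst += stride;
    }
  }
}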
2375
2376
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2377
void aom_smooth_h_predictor_8x32_ssse3(
2378
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2379
    const uint8_t *LIBAOM_RESTRICT top_row,
2380
5.42k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2381
5.42k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2382
5.42k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2383
5.42k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2384
5.42k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2385
5.42k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2386
5.42k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2387
5.42k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2388
48.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2389
43.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2390
43.4k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2391
43.4k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2392
43.4k
                                  &round);
2393
43.4k
    dst += stride;
2394
43.4k
  }
2395
5.42k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2396
48.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2397
43.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2398
43.4k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2399
43.4k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2400
43.4k
                                  &round);
2401
43.4k
    dst += stride;
2402
43.4k
  }
2403
5.42k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2404
48.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2405
43.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2406
43.4k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2407
43.4k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2408
43.4k
                                  &round);
2409
43.4k
    dst += stride;
2410
43.4k
  }
2411
5.42k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2412
48.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2413
43.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2414
43.4k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2415
43.4k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2416
43.4k
                                  &round);
2417
43.4k
    dst += stride;
2418
43.4k
  }
2419
5.42k
}
2420
2421
void aom_smooth_h_predictor_16x4_ssse3(
2422
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2423
    const uint8_t *LIBAOM_RESTRICT top_row,
2424
24.4k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2425
24.4k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2426
24.4k
  const __m128i left = cvtepu8_epi16(Load4(left_column));
2427
24.4k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2428
24.4k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2429
24.4k
  const __m128i weights1 = cvtepu8_epi16(weights);
2430
24.4k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2431
24.4k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2432
24.4k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2433
24.4k
  const __m128i scaled_top_right1 =
2434
24.4k
      _mm_mullo_epi16(inverted_weights1, top_right);
2435
24.4k
  const __m128i scaled_top_right2 =
2436
24.4k
      _mm_mullo_epi16(inverted_weights2, top_right);
2437
24.4k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2438
24.4k
  __m128i y_mask = _mm_set1_epi32(0x01000100);
2439
24.4k
  __m128i left_y = _mm_shuffle_epi8(left, y_mask);
2440
24.4k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2441
24.4k
                                 scaled_top_right1, scaled_top_right2, round);
2442
24.4k
  dst += stride;
2443
24.4k
  y_mask = _mm_set1_epi32(0x03020302);
2444
24.4k
  left_y = _mm_shuffle_epi8(left, y_mask);
2445
24.4k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2446
24.4k
                                 scaled_top_right1, scaled_top_right2, round);
2447
24.4k
  dst += stride;
2448
24.4k
  y_mask = _mm_set1_epi32(0x05040504);
2449
24.4k
  left_y = _mm_shuffle_epi8(left, y_mask);
2450
24.4k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2451
24.4k
                                 scaled_top_right1, scaled_top_right2, round);
2452
24.4k
  dst += stride;
2453
24.4k
  y_mask = _mm_set1_epi32(0x07060706);
2454
24.4k
  left_y = _mm_shuffle_epi8(left, y_mask);
2455
24.4k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2456
24.4k
                                 scaled_top_right1, scaled_top_right2, round);
2457
24.4k
}
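write_smooth_directional_sum16 receives left_y twice because a 16-wide row is computed as two 8-lane halves that share the same left pixel but use different weight halves (weights1/weights2) and the matching scaled_top_right halves; per pixel the arithmetic is unchanged from the narrower kernels. A scalar sketch of one such call (hypothetical helper, assuming scale = 256):

#include <stdint.h>

static void smooth_h_sum16_c(uint8_t *dst, uint8_t left_y,
                             const uint8_t *w /* 16 weights */,
                             uint8_t top_right) {
  for (int x = 0; x < 16; ++x) {  // lanes 0-7 use weights1, 8-15 weights2
    const uint32_t p = (uint32_t)w[x] * left_y +
                       (uint32_t)(256 - w[x]) * top_right;
    dst[x] = (uint8_t)((p + 128) >> 8);
  }
}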
2458
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2459
2460
void aom_smooth_h_predictor_16x8_ssse3(
2461
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2462
    const uint8_t *LIBAOM_RESTRICT top_row,
2463
21.8k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2464
21.8k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2465
21.8k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2466
21.8k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2467
21.8k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2468
21.8k
  const __m128i weights1 = cvtepu8_epi16(weights);
2469
21.8k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2470
21.8k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2471
21.8k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2472
21.8k
  const __m128i scaled_top_right1 =
2473
21.8k
      _mm_mullo_epi16(inverted_weights1, top_right);
2474
21.8k
  const __m128i scaled_top_right2 =
2475
21.8k
      _mm_mullo_epi16(inverted_weights2, top_right);
2476
21.8k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2477
196k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2478
175k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2479
175k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2480
175k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2481
175k
                                   scaled_top_right1, scaled_top_right2, round);
2482
175k
    dst += stride;
2483
175k
  }
2484
21.8k
}
2485
2486
void aom_smooth_h_predictor_16x16_ssse3(
2487
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2488
    const uint8_t *LIBAOM_RESTRICT top_row,
2489
36.9k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2490
36.9k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2491
36.9k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2492
36.9k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2493
36.9k
  const __m128i weights1 = cvtepu8_epi16(weights);
2494
36.9k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2495
36.9k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2496
36.9k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2497
36.9k
  const __m128i scaled_top_right1 =
2498
36.9k
      _mm_mullo_epi16(inverted_weights1, top_right);
2499
36.9k
  const __m128i scaled_top_right2 =
2500
36.9k
      _mm_mullo_epi16(inverted_weights2, top_right);
2501
36.9k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2502
36.9k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2503
332k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2504
295k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2505
295k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2506
295k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2507
295k
                                   scaled_top_right1, scaled_top_right2, round);
2508
295k
    dst += stride;
2509
295k
  }
2510
36.9k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2511
332k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2512
295k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2513
295k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2514
295k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2515
295k
                                   scaled_top_right1, scaled_top_right2, round);
2516
295k
    dst += stride;
2517
295k
  }
2518
36.9k
}
2519
2520
void aom_smooth_h_predictor_16x32_ssse3(
2521
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2522
    const uint8_t *LIBAOM_RESTRICT top_row,
2523
12.7k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2524
12.7k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2525
12.7k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2526
12.7k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2527
12.7k
  const __m128i weights1 = cvtepu8_epi16(weights);
2528
12.7k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2529
12.7k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2530
12.7k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2531
12.7k
  const __m128i scaled_top_right1 =
2532
12.7k
      _mm_mullo_epi16(inverted_weights1, top_right);
2533
12.7k
  const __m128i scaled_top_right2 =
2534
12.7k
      _mm_mullo_epi16(inverted_weights2, top_right);
2535
12.7k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2536
12.7k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2537
114k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2538
102k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2539
102k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2540
102k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2541
102k
                                   scaled_top_right1, scaled_top_right2, round);
2542
102k
    dst += stride;
2543
102k
  }
2544
12.7k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2545
114k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2546
102k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2547
102k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2548
102k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2549
102k
                                   scaled_top_right1, scaled_top_right2, round);
2550
102k
    dst += stride;
2551
102k
  }
2552
12.7k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2553
114k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2554
102k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2555
102k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2556
102k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2557
102k
                                   scaled_top_right1, scaled_top_right2, round);
2558
102k
    dst += stride;
2559
102k
  }
2560
12.7k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2561
114k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2562
102k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2563
102k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2564
102k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2565
102k
                                   scaled_top_right1, scaled_top_right2, round);
2566
102k
    dst += stride;
2567
102k
  }
2568
12.7k
}
2569
2570
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2571
void aom_smooth_h_predictor_16x64_ssse3(
2572
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2573
    const uint8_t *LIBAOM_RESTRICT top_row,
2574
3.43k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2575
3.43k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2576
3.43k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2577
3.43k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2578
3.43k
  const __m128i weights1 = cvtepu8_epi16(weights);
2579
3.43k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2580
3.43k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2581
3.43k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2582
3.43k
  const __m128i scaled_top_right1 =
2583
3.43k
      _mm_mullo_epi16(inverted_weights1, top_right);
2584
3.43k
  const __m128i scaled_top_right2 =
2585
3.43k
      _mm_mullo_epi16(inverted_weights2, top_right);
2586
3.43k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2587
30.8k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
2588
27.4k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
2589
247k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2590
219k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2591
219k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2592
219k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2593
219k
                                     scaled_top_right1, scaled_top_right2,
2594
219k
                                     round);
2595
219k
      dst += stride;
2596
219k
    }
2597
27.4k
  }
2598
3.43k
}
2599
2600
void aom_smooth_h_predictor_32x8_ssse3(
2601
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2602
    const uint8_t *LIBAOM_RESTRICT top_row,
2603
17.7k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2604
17.7k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2605
17.7k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2606
17.7k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2607
17.7k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2608
17.7k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2609
17.7k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2610
17.7k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2611
17.7k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2612
17.7k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2613
17.7k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2614
17.7k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2615
17.7k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2616
17.7k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2617
17.7k
  const __m128i scaled_top_right1 =
2618
17.7k
      _mm_mullo_epi16(inverted_weights1, top_right);
2619
17.7k
  const __m128i scaled_top_right2 =
2620
17.7k
      _mm_mullo_epi16(inverted_weights2, top_right);
2621
17.7k
  const __m128i scaled_top_right3 =
2622
17.7k
      _mm_mullo_epi16(inverted_weights3, top_right);
2623
17.7k
  const __m128i scaled_top_right4 =
2624
17.7k
      _mm_mullo_epi16(inverted_weights4, top_right);
2625
17.7k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2626
159k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2627
141k
    __m128i y_select = _mm_set1_epi32(y_mask);
2628
141k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2629
141k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2630
141k
                                   scaled_top_right1, scaled_top_right2, round);
2631
141k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2632
141k
                                   scaled_top_right3, scaled_top_right4, round);
2633
141k
    dst += stride;
2634
141k
  }
2635
17.7k
}
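The smooth_weights offsets used in this file (0, 4 and 12 for widths 4, 8 and 16 earlier; 28 and 44, that is 28 + 16, here; 60 onward in the 64-wide kernels below) imply that the per-width weight rows are packed back to back, with the row for dimension d starting at index d - 4. A hypothetical lookup helper capturing that observation:

#include <stdint.h>

// Assumes the packed layout implied by the offsets above; d in {4, 8, 16,
// 32, 64}. Offsets 0, 4, 12, 28, 60 are the cumulative sums of 4, 8, 16, 32.
static const uint8_t *smooth_weights_row(const uint8_t *smooth_weights,
                                         int d) {
  return smooth_weights + d - 4;
}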
2636
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2637
2638
void aom_smooth_h_predictor_32x16_ssse3(
2639
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2640
    const uint8_t *LIBAOM_RESTRICT top_row,
2641
9.97k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2642
9.97k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2643
9.97k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2644
9.97k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2645
9.97k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2646
9.97k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2647
9.97k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2648
9.97k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2649
9.97k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2650
9.97k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2651
9.97k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2652
9.97k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2653
9.97k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2654
9.97k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2655
9.97k
  const __m128i scaled_top_right1 =
2656
9.97k
      _mm_mullo_epi16(inverted_weights1, top_right);
2657
9.97k
  const __m128i scaled_top_right2 =
2658
9.97k
      _mm_mullo_epi16(inverted_weights2, top_right);
2659
9.97k
  const __m128i scaled_top_right3 =
2660
9.97k
      _mm_mullo_epi16(inverted_weights3, top_right);
2661
9.97k
  const __m128i scaled_top_right4 =
2662
9.97k
      _mm_mullo_epi16(inverted_weights4, top_right);
2663
9.97k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2664
89.7k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2665
79.8k
    __m128i y_select = _mm_set1_epi32(y_mask);
2666
79.8k
    __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2667
79.8k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2668
79.8k
                                   scaled_top_right1, scaled_top_right2, round);
2669
79.8k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2670
79.8k
                                   scaled_top_right3, scaled_top_right4, round);
2671
79.8k
    dst += stride;
2672
79.8k
  }
2673
9.97k
  const __m128i left2 =
2674
9.97k
      cvtepu8_epi16(LoadLo8(left_column + 8));
2675
89.7k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2676
79.8k
    __m128i y_select = _mm_set1_epi32(y_mask);
2677
79.8k
    __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2678
79.8k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2679
79.8k
                                   scaled_top_right1, scaled_top_right2, round);
2680
79.8k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2681
79.8k
                                   scaled_top_right3, scaled_top_right4, round);
2682
79.8k
    dst += stride;
2683
79.8k
  }
2684
9.97k
}
2685
2686
void aom_smooth_h_predictor_32x32_ssse3(
2687
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2688
    const uint8_t *LIBAOM_RESTRICT top_row,
2689
41.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2690
41.5k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2691
41.5k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2692
41.5k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2693
41.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2694
41.5k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2695
41.5k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2696
41.5k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2697
41.5k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2698
41.5k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2699
41.5k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2700
41.5k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2701
41.5k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2702
41.5k
  const __m128i scaled_top_right1 =
2703
41.5k
      _mm_mullo_epi16(inverted_weights1, top_right);
2704
41.5k
  const __m128i scaled_top_right2 =
2705
41.5k
      _mm_mullo_epi16(inverted_weights2, top_right);
2706
41.5k
  const __m128i scaled_top_right3 =
2707
41.5k
      _mm_mullo_epi16(inverted_weights3, top_right);
2708
41.5k
  const __m128i scaled_top_right4 =
2709
41.5k
      _mm_mullo_epi16(inverted_weights4, top_right);
2710
41.5k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2711
41.5k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2712
374k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2713
332k
    __m128i y_select = _mm_set1_epi32(y_mask);
2714
332k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2715
332k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2716
332k
                                   scaled_top_right1, scaled_top_right2, round);
2717
332k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2718
332k
                                   scaled_top_right3, scaled_top_right4, round);
2719
332k
    dst += stride;
2720
332k
  }
2721
41.5k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2722
374k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2723
332k
    __m128i y_select = _mm_set1_epi32(y_mask);
2724
332k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2725
332k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2726
332k
                                   scaled_top_right1, scaled_top_right2, round);
2727
332k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2728
332k
                                   scaled_top_right3, scaled_top_right4, round);
2729
332k
    dst += stride;
2730
332k
  }
2731
41.5k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2732
374k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2733
332k
    __m128i y_select = _mm_set1_epi32(y_mask);
2734
332k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2735
332k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2736
332k
                                   scaled_top_right1, scaled_top_right2, round);
2737
332k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2738
332k
                                   scaled_top_right3, scaled_top_right4, round);
2739
332k
    dst += stride;
2740
332k
  }
2741
41.5k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2742
374k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2743
332k
    __m128i y_select = _mm_set1_epi32(y_mask);
2744
332k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2745
332k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2746
332k
                                   scaled_top_right1, scaled_top_right2, round);
2747
332k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2748
332k
                                   scaled_top_right3, scaled_top_right4, round);
2749
332k
    dst += stride;
2750
332k
  }
2751
41.5k
}
2752
2753
void aom_smooth_h_predictor_32x64_ssse3(
2754
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2755
    const uint8_t *LIBAOM_RESTRICT top_row,
2756
877
    const uint8_t *LIBAOM_RESTRICT left_column) {
2757
877
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2758
877
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2759
877
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2760
877
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2761
877
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2762
877
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2763
877
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2764
877
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2765
877
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2766
877
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2767
877
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2768
877
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2769
877
  const __m128i scaled_top_right1 =
2770
877
      _mm_mullo_epi16(inverted_weights1, top_right);
2771
877
  const __m128i scaled_top_right2 =
2772
877
      _mm_mullo_epi16(inverted_weights2, top_right);
2773
877
  const __m128i scaled_top_right3 =
2774
877
      _mm_mullo_epi16(inverted_weights3, top_right);
2775
877
  const __m128i scaled_top_right4 =
2776
877
      _mm_mullo_epi16(inverted_weights4, top_right);
2777
877
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2778
7.89k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
2779
7.01k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
2780
63.1k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2781
56.1k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2782
56.1k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2783
56.1k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2784
56.1k
                                     scaled_top_right1, scaled_top_right2,
2785
56.1k
                                     round);
2786
56.1k
      write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3,
2787
56.1k
                                     weights4, scaled_top_right3,
2788
56.1k
                                     scaled_top_right4, round);
2789
56.1k
      dst += stride;
2790
56.1k
    }
2791
7.01k
  }
2792
877
}
2793
2794
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2795
void aom_smooth_h_predictor_64x16_ssse3(
2796
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2797
    const uint8_t *LIBAOM_RESTRICT top_row,
2798
5.65k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2799
5.65k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2800
5.65k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2801
5.65k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2802
5.65k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2803
5.65k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2804
5.65k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2805
5.65k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2806
5.65k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2807
5.65k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2808
5.65k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2809
5.65k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2810
5.65k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2811
5.65k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2812
5.65k
  const __m128i scaled_top_right1 =
2813
5.65k
      _mm_mullo_epi16(inverted_weights1, top_right);
2814
5.65k
  const __m128i scaled_top_right2 =
2815
5.65k
      _mm_mullo_epi16(inverted_weights2, top_right);
2816
5.65k
  const __m128i scaled_top_right3 =
2817
5.65k
      _mm_mullo_epi16(inverted_weights3, top_right);
2818
5.65k
  const __m128i scaled_top_right4 =
2819
5.65k
      _mm_mullo_epi16(inverted_weights4, top_right);
2820
5.65k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2821
5.65k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2822
5.65k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2823
5.65k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2824
5.65k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2825
5.65k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
2826
5.65k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
2827
5.65k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
2828
5.65k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
2829
5.65k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
2830
5.65k
  const __m128i scaled_top_right5 =
2831
5.65k
      _mm_mullo_epi16(inverted_weights5, top_right);
2832
5.65k
  const __m128i scaled_top_right6 =
2833
5.65k
      _mm_mullo_epi16(inverted_weights6, top_right);
2834
5.65k
  const __m128i scaled_top_right7 =
2835
5.65k
      _mm_mullo_epi16(inverted_weights7, top_right);
2836
5.65k
  const __m128i scaled_top_right8 =
2837
5.65k
      _mm_mullo_epi16(inverted_weights8, top_right);
2838
5.65k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2839
50.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2840
45.2k
    __m128i y_select = _mm_set1_epi32(y_mask);
2841
45.2k
    __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2842
45.2k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2843
45.2k
                                   scaled_top_right1, scaled_top_right2, round);
2844
45.2k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2845
45.2k
                                   scaled_top_right3, scaled_top_right4, round);
2846
45.2k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2847
45.2k
                                   scaled_top_right5, scaled_top_right6, round);
2848
45.2k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2849
45.2k
                                   scaled_top_right7, scaled_top_right8, round);
2850
45.2k
    dst += stride;
2851
45.2k
  }
2852
5.65k
  const __m128i left2 = cvtepu8_epi16(LoadLo8(left_column + 8));
2853
50.8k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2854
45.2k
    __m128i y_select = _mm_set1_epi32(y_mask);
2855
45.2k
    __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2856
45.2k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2857
45.2k
                                   scaled_top_right1, scaled_top_right2, round);
2858
45.2k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2859
45.2k
                                   scaled_top_right3, scaled_top_right4, round);
2860
45.2k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2861
45.2k
                                   scaled_top_right5, scaled_top_right6, round);
2862
45.2k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2863
45.2k
                                   scaled_top_right7, scaled_top_right8, round);
2864
45.2k
    dst += stride;
2865
45.2k
  }
2866
5.65k
}
2867
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2868
2869
void aom_smooth_h_predictor_64x32_ssse3(
2870
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2871
    const uint8_t *LIBAOM_RESTRICT top_row,
2872
1.38k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2873
1.38k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2874
1.38k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2875
1.38k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2876
1.38k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2877
1.38k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2878
1.38k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2879
1.38k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2880
1.38k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2881
1.38k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2882
1.38k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2883
1.38k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2884
1.38k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2885
1.38k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2886
1.38k
  const __m128i scaled_top_right1 =
2887
1.38k
      _mm_mullo_epi16(inverted_weights1, top_right);
2888
1.38k
  const __m128i scaled_top_right2 =
2889
1.38k
      _mm_mullo_epi16(inverted_weights2, top_right);
2890
1.38k
  const __m128i scaled_top_right3 =
2891
1.38k
      _mm_mullo_epi16(inverted_weights3, top_right);
2892
1.38k
  const __m128i scaled_top_right4 =
2893
1.38k
      _mm_mullo_epi16(inverted_weights4, top_right);
2894
1.38k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2895
1.38k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2896
1.38k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2897
1.38k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2898
1.38k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2899
1.38k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
2900
1.38k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
2901
1.38k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
2902
1.38k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
2903
1.38k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
2904
1.38k
  const __m128i scaled_top_right5 =
2905
1.38k
      _mm_mullo_epi16(inverted_weights5, top_right);
2906
1.38k
  const __m128i scaled_top_right6 =
2907
1.38k
      _mm_mullo_epi16(inverted_weights6, top_right);
2908
1.38k
  const __m128i scaled_top_right7 =
2909
1.38k
      _mm_mullo_epi16(inverted_weights7, top_right);
2910
1.38k
  const __m128i scaled_top_right8 =
2911
1.38k
      _mm_mullo_epi16(inverted_weights8, top_right);
2912
1.38k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2913
12.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2914
11.0k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2915
11.0k
    const __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2916
11.0k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2917
11.0k
                                   scaled_top_right1, scaled_top_right2, round);
2918
11.0k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2919
11.0k
                                   scaled_top_right3, scaled_top_right4, round);
2920
11.0k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2921
11.0k
                                   scaled_top_right5, scaled_top_right6, round);
2922
11.0k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2923
11.0k
                                   scaled_top_right7, scaled_top_right8, round);
2924
11.0k
    dst += stride;
2925
11.0k
  }
2926
1.38k
  const __m128i left2 = cvtepu8_epi16(LoadLo8(left_column + 8));
2927
12.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2928
11.0k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2929
11.0k
    const __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2930
11.0k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2931
11.0k
                                   scaled_top_right1, scaled_top_right2, round);
2932
11.0k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2933
11.0k
                                   scaled_top_right3, scaled_top_right4, round);
2934
11.0k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2935
11.0k
                                   scaled_top_right5, scaled_top_right6, round);
2936
11.0k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2937
11.0k
                                   scaled_top_right7, scaled_top_right8, round);
2938
11.0k
    dst += stride;
2939
11.0k
  }
2940
1.38k
  const __m128i left3 = cvtepu8_epi16(LoadLo8(left_column + 16));
2941
12.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2942
11.0k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2943
11.0k
    const __m128i left_y = _mm_shuffle_epi8(left3, y_select);
2944
11.0k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2945
11.0k
                                   scaled_top_right1, scaled_top_right2, round);
2946
11.0k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2947
11.0k
                                   scaled_top_right3, scaled_top_right4, round);
2948
11.0k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2949
11.0k
                                   scaled_top_right5, scaled_top_right6, round);
2950
11.0k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2951
11.0k
                                   scaled_top_right7, scaled_top_right8, round);
2952
11.0k
    dst += stride;
2953
11.0k
  }
2954
1.38k
  const __m128i left4 = cvtepu8_epi16(LoadLo8(left_column + 24));
2955
12.4k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2956
11.0k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2957
11.0k
    const __m128i left_y = _mm_shuffle_epi8(left4, y_select);
2958
11.0k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2959
11.0k
                                   scaled_top_right1, scaled_top_right2, round);
2960
11.0k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2961
11.0k
                                   scaled_top_right3, scaled_top_right4, round);
2962
11.0k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2963
11.0k
                                   scaled_top_right5, scaled_top_right6, round);
2964
11.0k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2965
11.0k
                                   scaled_top_right7, scaled_top_right8, round);
2966
11.0k
    dst += stride;
2967
11.0k
  }
2968
1.38k
}
2969
2970
void aom_smooth_h_predictor_64x64_ssse3(
2971
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2972
    const uint8_t *LIBAOM_RESTRICT top_row,
2973
4.50k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2974
4.50k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2975
4.50k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2976
4.50k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2977
4.50k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2978
4.50k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2979
4.50k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2980
4.50k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2981
4.50k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2982
4.50k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2983
4.50k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2984
4.50k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2985
4.50k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2986
4.50k
  const __m128i scaled_top_right1 =
2987
4.50k
      _mm_mullo_epi16(inverted_weights1, top_right);
2988
4.50k
  const __m128i scaled_top_right2 =
2989
4.50k
      _mm_mullo_epi16(inverted_weights2, top_right);
2990
4.50k
  const __m128i scaled_top_right3 =
2991
4.50k
      _mm_mullo_epi16(inverted_weights3, top_right);
2992
4.50k
  const __m128i scaled_top_right4 =
2993
4.50k
      _mm_mullo_epi16(inverted_weights4, top_right);
2994
4.50k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2995
4.50k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2996
4.50k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2997
4.50k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2998
4.50k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2999
4.50k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
3000
4.50k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
3001
4.50k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
3002
4.50k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
3003
4.50k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
3004
4.50k
  const __m128i scaled_top_right5 =
3005
4.50k
      _mm_mullo_epi16(inverted_weights5, top_right);
3006
4.50k
  const __m128i scaled_top_right6 =
3007
4.50k
      _mm_mullo_epi16(inverted_weights6, top_right);
3008
4.50k
  const __m128i scaled_top_right7 =
3009
4.50k
      _mm_mullo_epi16(inverted_weights7, top_right);
3010
4.50k
  const __m128i scaled_top_right8 =
3011
4.50k
      _mm_mullo_epi16(inverted_weights8, top_right);
3012
4.50k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
3013
40.5k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
3014
36.0k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
3015
324k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
3016
288k
      const __m128i y_select = _mm_set1_epi32(y_mask);
3017
288k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
3018
288k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
3019
288k
                                     scaled_top_right1, scaled_top_right2,
3020
288k
                                     round);
3021
288k
      write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3,
3022
288k
                                     weights4, scaled_top_right3,
3023
288k
                                     scaled_top_right4, round);
3024
288k
      write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5,
3025
288k
                                     weights6, scaled_top_right5,
3026
288k
                                     scaled_top_right6, round);
3027
288k
      write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7,
3028
288k
                                     weights8, scaled_top_right7,
3029
288k
                                     scaled_top_right8, round);
3030
288k
      dst += stride;
3031
288k
    }
3032
36.0k
  }
3033
4.50k
}
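To close out the SMOOTH_H kernels, a minimal usage sketch (hypothetical test program; the prototype is normally supplied by aom_dsp_rtcd.h). SMOOTH_H reads only the left column and top_row[width - 1], so no top-left pixel is needed:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

void aom_smooth_h_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *top_row,
                                      const uint8_t *left_column);

int main(void) {
  const uint8_t top[4] = { 10, 20, 30, 40 };   // only top[3] (= 40) is read
  const uint8_t left[4] = { 50, 60, 70, 80 };
  uint8_t dst[4 * 4];
  aom_smooth_h_predictor_4x4_ssse3(dst, 4, top, left);
  // Each pixel should equal (w[x]*left[y] + (256 - w[x])*40 + 128) >> 8,
  // with w[] the 4-wide row of smooth_weights.
  for (int y = 0; y < 4; ++y)
    printf("%3d %3d %3d %3d\n", dst[y * 4 + 0], dst[y * 4 + 1],
           dst[y * 4 + 2], dst[y * 4 + 3]);
  return 0;
}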