Coverage Report

Created: 2026-03-31 06:59

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/aom/aom_dsp/x86/intrapred_ssse3.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <tmmintrin.h>
13
14
#include "config/aom_dsp_rtcd.h"
15
16
#include "aom_dsp/intrapred_common.h"
17
18
// -----------------------------------------------------------------------------
19
// PAETH_PRED
20
21
// Return 8 16-bit pixels in one row
22
static inline __m128i paeth_8x1_pred(const __m128i *left, const __m128i *top,
23
7.30M
                                     const __m128i *topleft) {
24
7.30M
  const __m128i base = _mm_sub_epi16(_mm_add_epi16(*top, *left), *topleft);
25
26
7.30M
  __m128i pl = _mm_abs_epi16(_mm_sub_epi16(base, *left));
27
7.30M
  __m128i pt = _mm_abs_epi16(_mm_sub_epi16(base, *top));
28
7.30M
  __m128i ptl = _mm_abs_epi16(_mm_sub_epi16(base, *topleft));
29
30
7.30M
  __m128i mask1 = _mm_cmpgt_epi16(pl, pt);
31
7.30M
  mask1 = _mm_or_si128(mask1, _mm_cmpgt_epi16(pl, ptl));
32
7.30M
  __m128i mask2 = _mm_cmpgt_epi16(pt, ptl);
33
34
7.30M
  pl = _mm_andnot_si128(mask1, *left);
35
36
7.30M
  ptl = _mm_and_si128(mask2, *topleft);
37
7.30M
  pt = _mm_andnot_si128(mask2, *top);
38
7.30M
  pt = _mm_or_si128(pt, ptl);
39
7.30M
  pt = _mm_and_si128(mask1, pt);
40
41
7.30M
  return _mm_or_si128(pl, pt);
42
7.30M
}
43
44
void aom_paeth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
45
190k
                                   const uint8_t *above, const uint8_t *left) {
46
190k
  __m128i l = _mm_loadl_epi64((const __m128i *)left);
47
190k
  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
48
190k
  const __m128i zero = _mm_setzero_si128();
49
190k
  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
50
190k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
51
190k
  __m128i rep = _mm_set1_epi16((short)0x8000);
52
190k
  const __m128i one = _mm_set1_epi16(1);
53
54
190k
  int i;
55
950k
  for (i = 0; i < 4; ++i) {
56
760k
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
57
760k
    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
58
59
760k
    *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
60
760k
    dst += stride;
61
760k
    rep = _mm_add_epi16(rep, one);
62
760k
  }
63
190k
}
64
65
void aom_paeth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
66
35.7k
                                   const uint8_t *above, const uint8_t *left) {
67
35.7k
  __m128i l = _mm_loadl_epi64((const __m128i *)left);
68
35.7k
  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
69
35.7k
  const __m128i zero = _mm_setzero_si128();
70
35.7k
  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
71
35.7k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
72
35.7k
  __m128i rep = _mm_set1_epi16((short)0x8000);
73
35.7k
  const __m128i one = _mm_set1_epi16(1);
74
75
35.7k
  int i;
76
321k
  for (i = 0; i < 8; ++i) {
77
285k
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
78
285k
    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
79
80
285k
    *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
81
285k
    dst += stride;
82
285k
    rep = _mm_add_epi16(rep, one);
83
285k
  }
84
35.7k
}
85
86
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
87
void aom_paeth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride,
88
198k
                                    const uint8_t *above, const uint8_t *left) {
89
198k
  __m128i l = _mm_load_si128((const __m128i *)left);
90
198k
  const __m128i t = _mm_cvtsi32_si128(((const int *)above)[0]);
91
198k
  const __m128i zero = _mm_setzero_si128();
92
198k
  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
93
198k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
94
198k
  __m128i rep = _mm_set1_epi16((short)0x8000);
95
198k
  const __m128i one = _mm_set1_epi16(1);
96
97
3.37M
  for (int i = 0; i < 16; ++i) {
98
3.17M
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
99
3.17M
    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
100
101
3.17M
    *(int *)dst = _mm_cvtsi128_si32(_mm_packus_epi16(row, row));
102
3.17M
    dst += stride;
103
3.17M
    rep = _mm_add_epi16(rep, one);
104
3.17M
  }
105
198k
}
106
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
107
108
void aom_paeth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride,
109
46.1k
                                   const uint8_t *above, const uint8_t *left) {
110
46.1k
  __m128i l = _mm_loadl_epi64((const __m128i *)left);
111
46.1k
  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
112
46.1k
  const __m128i zero = _mm_setzero_si128();
113
46.1k
  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
114
46.1k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
115
46.1k
  __m128i rep = _mm_set1_epi16((short)0x8000);
116
46.1k
  const __m128i one = _mm_set1_epi16(1);
117
118
46.1k
  int i;
119
230k
  for (i = 0; i < 4; ++i) {
120
184k
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
121
184k
    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
122
123
184k
    _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
124
184k
    dst += stride;
125
184k
    rep = _mm_add_epi16(rep, one);
126
184k
  }
127
46.1k
}
128
129
void aom_paeth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride,
130
106k
                                   const uint8_t *above, const uint8_t *left) {
131
106k
  __m128i l = _mm_loadl_epi64((const __m128i *)left);
132
106k
  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
133
106k
  const __m128i zero = _mm_setzero_si128();
134
106k
  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
135
106k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
136
106k
  __m128i rep = _mm_set1_epi16((short)0x8000);
137
106k
  const __m128i one = _mm_set1_epi16(1);
138
139
106k
  int i;
140
959k
  for (i = 0; i < 8; ++i) {
141
853k
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
142
853k
    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
143
144
853k
    _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
145
853k
    dst += stride;
146
853k
    rep = _mm_add_epi16(rep, one);
147
853k
  }
148
106k
}
149
150
void aom_paeth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride,
151
29.0k
                                    const uint8_t *above, const uint8_t *left) {
152
29.0k
  __m128i l = _mm_load_si128((const __m128i *)left);
153
29.0k
  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
154
29.0k
  const __m128i zero = _mm_setzero_si128();
155
29.0k
  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
156
29.0k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
157
29.0k
  __m128i rep = _mm_set1_epi16((short)0x8000);
158
29.0k
  const __m128i one = _mm_set1_epi16(1);
159
160
29.0k
  int i;
161
493k
  for (i = 0; i < 16; ++i) {
162
464k
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
163
464k
    const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
164
165
464k
    _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
166
464k
    dst += stride;
167
464k
    rep = _mm_add_epi16(rep, one);
168
464k
  }
169
29.0k
}
170
171
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
172
void aom_paeth_predictor_8x32_ssse3(uint8_t *dst, ptrdiff_t stride,
173
22.1k
                                    const uint8_t *above, const uint8_t *left) {
174
22.1k
  const __m128i t = _mm_loadl_epi64((const __m128i *)above);
175
22.1k
  const __m128i zero = _mm_setzero_si128();
176
22.1k
  const __m128i t16 = _mm_unpacklo_epi8(t, zero);
177
22.1k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
178
22.1k
  const __m128i one = _mm_set1_epi16(1);
179
180
66.5k
  for (int j = 0; j < 2; ++j) {
181
44.3k
    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
182
44.3k
    __m128i rep = _mm_set1_epi16((short)0x8000);
183
754k
    for (int i = 0; i < 16; ++i) {
184
709k
      const __m128i l16 = _mm_shuffle_epi8(l, rep);
185
709k
      const __m128i row = paeth_8x1_pred(&l16, &t16, &tl16);
186
187
709k
      _mm_storel_epi64((__m128i *)dst, _mm_packus_epi16(row, row));
188
709k
      dst += stride;
189
709k
      rep = _mm_add_epi16(rep, one);
190
709k
    }
191
44.3k
  }
192
22.1k
}
193
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
194
195
// Return 16 8-bit pixels in one row
196
static inline __m128i paeth_16x1_pred(const __m128i *left, const __m128i *top0,
197
                                      const __m128i *top1,
198
437k
                                      const __m128i *topleft) {
199
437k
  const __m128i p0 = paeth_8x1_pred(left, top0, topleft);
200
437k
  const __m128i p1 = paeth_8x1_pred(left, top1, topleft);
201
437k
  return _mm_packus_epi16(p0, p1);
202
437k
}
203
204
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
205
void aom_paeth_predictor_16x4_ssse3(uint8_t *dst, ptrdiff_t stride,
206
46.5k
                                    const uint8_t *above, const uint8_t *left) {
207
46.5k
  __m128i l = _mm_cvtsi32_si128(((const int *)left)[0]);
208
46.5k
  const __m128i t = _mm_load_si128((const __m128i *)above);
209
46.5k
  const __m128i zero = _mm_setzero_si128();
210
46.5k
  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
211
46.5k
  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
212
46.5k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
213
46.5k
  __m128i rep = _mm_set1_epi16((short)0x8000);
214
46.5k
  const __m128i one = _mm_set1_epi16(1);
215
216
232k
  for (int i = 0; i < 4; ++i) {
217
186k
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
218
186k
    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
219
220
186k
    _mm_store_si128((__m128i *)dst, row);
221
186k
    dst += stride;
222
186k
    rep = _mm_add_epi16(rep, one);
223
186k
  }
224
46.5k
}
225
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
226
227
void aom_paeth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride,
228
0
                                    const uint8_t *above, const uint8_t *left) {
229
0
  __m128i l = _mm_loadl_epi64((const __m128i *)left);
230
0
  const __m128i t = _mm_load_si128((const __m128i *)above);
231
0
  const __m128i zero = _mm_setzero_si128();
232
0
  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
233
0
  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
234
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
235
0
  __m128i rep = _mm_set1_epi16((short)0x8000);
236
0
  const __m128i one = _mm_set1_epi16(1);
237
238
0
  int i;
239
0
  for (i = 0; i < 8; ++i) {
240
0
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
241
0
    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
242
243
0
    _mm_store_si128((__m128i *)dst, row);
244
0
    dst += stride;
245
0
    rep = _mm_add_epi16(rep, one);
246
0
  }
247
0
}
248
249
void aom_paeth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride,
250
                                     const uint8_t *above,
251
0
                                     const uint8_t *left) {
252
0
  __m128i l = _mm_load_si128((const __m128i *)left);
253
0
  const __m128i t = _mm_load_si128((const __m128i *)above);
254
0
  const __m128i zero = _mm_setzero_si128();
255
0
  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
256
0
  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
257
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
258
0
  __m128i rep = _mm_set1_epi16((short)0x8000);
259
0
  const __m128i one = _mm_set1_epi16(1);
260
261
0
  int i;
262
0
  for (i = 0; i < 16; ++i) {
263
0
    const __m128i l16 = _mm_shuffle_epi8(l, rep);
264
0
    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
265
266
0
    _mm_store_si128((__m128i *)dst, row);
267
0
    dst += stride;
268
0
    rep = _mm_add_epi16(rep, one);
269
0
  }
270
0
}
271
272
void aom_paeth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride,
273
                                     const uint8_t *above,
274
0
                                     const uint8_t *left) {
275
0
  __m128i l = _mm_load_si128((const __m128i *)left);
276
0
  const __m128i t = _mm_load_si128((const __m128i *)above);
277
0
  const __m128i zero = _mm_setzero_si128();
278
0
  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
279
0
  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
280
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
281
0
  __m128i rep = _mm_set1_epi16((short)0x8000);
282
0
  const __m128i one = _mm_set1_epi16(1);
283
0
  __m128i l16;
284
285
0
  int i;
286
0
  for (i = 0; i < 16; ++i) {
287
0
    l16 = _mm_shuffle_epi8(l, rep);
288
0
    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
289
290
0
    _mm_store_si128((__m128i *)dst, row);
291
0
    dst += stride;
292
0
    rep = _mm_add_epi16(rep, one);
293
0
  }
294
295
0
  l = _mm_load_si128((const __m128i *)(left + 16));
296
0
  rep = _mm_set1_epi16((short)0x8000);
297
0
  for (i = 0; i < 16; ++i) {
298
0
    l16 = _mm_shuffle_epi8(l, rep);
299
0
    const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
300
301
0
    _mm_store_si128((__m128i *)dst, row);
302
0
    dst += stride;
303
0
    rep = _mm_add_epi16(rep, one);
304
0
  }
305
0
}
306
307
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
308
void aom_paeth_predictor_16x64_ssse3(uint8_t *dst, ptrdiff_t stride,
309
                                     const uint8_t *above,
310
0
                                     const uint8_t *left) {
311
0
  const __m128i t = _mm_load_si128((const __m128i *)above);
312
0
  const __m128i zero = _mm_setzero_si128();
313
0
  const __m128i top0 = _mm_unpacklo_epi8(t, zero);
314
0
  const __m128i top1 = _mm_unpackhi_epi8(t, zero);
315
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
316
0
  const __m128i one = _mm_set1_epi16(1);
317
318
0
  for (int j = 0; j < 4; ++j) {
319
0
    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
320
0
    __m128i rep = _mm_set1_epi16((short)0x8000);
321
0
    for (int i = 0; i < 16; ++i) {
322
0
      const __m128i l16 = _mm_shuffle_epi8(l, rep);
323
0
      const __m128i row = paeth_16x1_pred(&l16, &top0, &top1, &tl16);
324
0
      _mm_store_si128((__m128i *)dst, row);
325
0
      dst += stride;
326
0
      rep = _mm_add_epi16(rep, one);
327
0
    }
328
0
  }
329
0
}
330
331
void aom_paeth_predictor_32x8_ssse3(uint8_t *dst, ptrdiff_t stride,
332
15.7k
                                    const uint8_t *above, const uint8_t *left) {
333
15.7k
  const __m128i a = _mm_load_si128((const __m128i *)above);
334
15.7k
  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
335
15.7k
  const __m128i zero = _mm_setzero_si128();
336
15.7k
  const __m128i al = _mm_unpacklo_epi8(a, zero);
337
15.7k
  const __m128i ah = _mm_unpackhi_epi8(a, zero);
338
15.7k
  const __m128i bl = _mm_unpacklo_epi8(b, zero);
339
15.7k
  const __m128i bh = _mm_unpackhi_epi8(b, zero);
340
341
15.7k
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
342
15.7k
  __m128i rep = _mm_set1_epi16((short)0x8000);
343
15.7k
  const __m128i one = _mm_set1_epi16(1);
344
15.7k
  const __m128i l = _mm_loadl_epi64((const __m128i *)left);
345
15.7k
  __m128i l16;
346
347
141k
  for (int i = 0; i < 8; ++i) {
348
125k
    l16 = _mm_shuffle_epi8(l, rep);
349
125k
    const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
350
125k
    const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
351
352
125k
    _mm_store_si128((__m128i *)dst, r32l);
353
125k
    _mm_store_si128((__m128i *)(dst + 16), r32h);
354
125k
    dst += stride;
355
125k
    rep = _mm_add_epi16(rep, one);
356
125k
  }
357
15.7k
}
358
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
359
360
void aom_paeth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride,
361
                                     const uint8_t *above,
362
0
                                     const uint8_t *left) {
363
0
  const __m128i a = _mm_load_si128((const __m128i *)above);
364
0
  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
365
0
  const __m128i zero = _mm_setzero_si128();
366
0
  const __m128i al = _mm_unpacklo_epi8(a, zero);
367
0
  const __m128i ah = _mm_unpackhi_epi8(a, zero);
368
0
  const __m128i bl = _mm_unpacklo_epi8(b, zero);
369
0
  const __m128i bh = _mm_unpackhi_epi8(b, zero);
370
371
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
372
0
  __m128i rep = _mm_set1_epi16((short)0x8000);
373
0
  const __m128i one = _mm_set1_epi16(1);
374
0
  __m128i l = _mm_load_si128((const __m128i *)left);
375
0
  __m128i l16;
376
377
0
  int i;
378
0
  for (i = 0; i < 16; ++i) {
379
0
    l16 = _mm_shuffle_epi8(l, rep);
380
0
    const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
381
0
    const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
382
383
0
    _mm_store_si128((__m128i *)dst, r32l);
384
0
    _mm_store_si128((__m128i *)(dst + 16), r32h);
385
0
    dst += stride;
386
0
    rep = _mm_add_epi16(rep, one);
387
0
  }
388
0
}
389
390
void aom_paeth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride,
391
                                     const uint8_t *above,
392
0
                                     const uint8_t *left) {
393
0
  const __m128i a = _mm_load_si128((const __m128i *)above);
394
0
  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
395
0
  const __m128i zero = _mm_setzero_si128();
396
0
  const __m128i al = _mm_unpacklo_epi8(a, zero);
397
0
  const __m128i ah = _mm_unpackhi_epi8(a, zero);
398
0
  const __m128i bl = _mm_unpacklo_epi8(b, zero);
399
0
  const __m128i bh = _mm_unpackhi_epi8(b, zero);
400
401
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
402
0
  __m128i rep = _mm_set1_epi16((short)0x8000);
403
0
  const __m128i one = _mm_set1_epi16(1);
404
0
  __m128i l = _mm_load_si128((const __m128i *)left);
405
0
  __m128i l16;
406
407
0
  int i;
408
0
  for (i = 0; i < 16; ++i) {
409
0
    l16 = _mm_shuffle_epi8(l, rep);
410
0
    const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
411
0
    const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
412
413
0
    _mm_store_si128((__m128i *)dst, r32l);
414
0
    _mm_store_si128((__m128i *)(dst + 16), r32h);
415
0
    dst += stride;
416
0
    rep = _mm_add_epi16(rep, one);
417
0
  }
418
419
0
  rep = _mm_set1_epi16((short)0x8000);
420
0
  l = _mm_load_si128((const __m128i *)(left + 16));
421
0
  for (i = 0; i < 16; ++i) {
422
0
    l16 = _mm_shuffle_epi8(l, rep);
423
0
    const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
424
0
    const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
425
426
0
    _mm_store_si128((__m128i *)dst, r32l);
427
0
    _mm_store_si128((__m128i *)(dst + 16), r32h);
428
0
    dst += stride;
429
0
    rep = _mm_add_epi16(rep, one);
430
0
  }
431
0
}
432
433
void aom_paeth_predictor_32x64_ssse3(uint8_t *dst, ptrdiff_t stride,
434
                                     const uint8_t *above,
435
0
                                     const uint8_t *left) {
436
0
  const __m128i a = _mm_load_si128((const __m128i *)above);
437
0
  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
438
0
  const __m128i zero = _mm_setzero_si128();
439
0
  const __m128i al = _mm_unpacklo_epi8(a, zero);
440
0
  const __m128i ah = _mm_unpackhi_epi8(a, zero);
441
0
  const __m128i bl = _mm_unpacklo_epi8(b, zero);
442
0
  const __m128i bh = _mm_unpackhi_epi8(b, zero);
443
444
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
445
0
  const __m128i one = _mm_set1_epi16(1);
446
0
  __m128i l16;
447
448
0
  int i, j;
449
0
  for (j = 0; j < 4; ++j) {
450
0
    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
451
0
    __m128i rep = _mm_set1_epi16((short)0x8000);
452
0
    for (i = 0; i < 16; ++i) {
453
0
      l16 = _mm_shuffle_epi8(l, rep);
454
0
      const __m128i r32l = paeth_16x1_pred(&l16, &al, &ah, &tl16);
455
0
      const __m128i r32h = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
456
457
0
      _mm_store_si128((__m128i *)dst, r32l);
458
0
      _mm_store_si128((__m128i *)(dst + 16), r32h);
459
0
      dst += stride;
460
0
      rep = _mm_add_epi16(rep, one);
461
0
    }
462
0
  }
463
0
}
464
465
void aom_paeth_predictor_64x32_ssse3(uint8_t *dst, ptrdiff_t stride,
466
                                     const uint8_t *above,
467
0
                                     const uint8_t *left) {
468
0
  const __m128i a = _mm_load_si128((const __m128i *)above);
469
0
  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
470
0
  const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
471
0
  const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
472
0
  const __m128i zero = _mm_setzero_si128();
473
0
  const __m128i al = _mm_unpacklo_epi8(a, zero);
474
0
  const __m128i ah = _mm_unpackhi_epi8(a, zero);
475
0
  const __m128i bl = _mm_unpacklo_epi8(b, zero);
476
0
  const __m128i bh = _mm_unpackhi_epi8(b, zero);
477
0
  const __m128i cl = _mm_unpacklo_epi8(c, zero);
478
0
  const __m128i ch = _mm_unpackhi_epi8(c, zero);
479
0
  const __m128i dl = _mm_unpacklo_epi8(d, zero);
480
0
  const __m128i dh = _mm_unpackhi_epi8(d, zero);
481
482
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
483
0
  const __m128i one = _mm_set1_epi16(1);
484
0
  __m128i l16;
485
486
0
  int i, j;
487
0
  for (j = 0; j < 2; ++j) {
488
0
    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
489
0
    __m128i rep = _mm_set1_epi16((short)0x8000);
490
0
    for (i = 0; i < 16; ++i) {
491
0
      l16 = _mm_shuffle_epi8(l, rep);
492
0
      const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
493
0
      const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
494
0
      const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
495
0
      const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
496
497
0
      _mm_store_si128((__m128i *)dst, r0);
498
0
      _mm_store_si128((__m128i *)(dst + 16), r1);
499
0
      _mm_store_si128((__m128i *)(dst + 32), r2);
500
0
      _mm_store_si128((__m128i *)(dst + 48), r3);
501
0
      dst += stride;
502
0
      rep = _mm_add_epi16(rep, one);
503
0
    }
504
0
  }
505
0
}
506
507
void aom_paeth_predictor_64x64_ssse3(uint8_t *dst, ptrdiff_t stride,
508
                                     const uint8_t *above,
509
0
                                     const uint8_t *left) {
510
0
  const __m128i a = _mm_load_si128((const __m128i *)above);
511
0
  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
512
0
  const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
513
0
  const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
514
0
  const __m128i zero = _mm_setzero_si128();
515
0
  const __m128i al = _mm_unpacklo_epi8(a, zero);
516
0
  const __m128i ah = _mm_unpackhi_epi8(a, zero);
517
0
  const __m128i bl = _mm_unpacklo_epi8(b, zero);
518
0
  const __m128i bh = _mm_unpackhi_epi8(b, zero);
519
0
  const __m128i cl = _mm_unpacklo_epi8(c, zero);
520
0
  const __m128i ch = _mm_unpackhi_epi8(c, zero);
521
0
  const __m128i dl = _mm_unpacklo_epi8(d, zero);
522
0
  const __m128i dh = _mm_unpackhi_epi8(d, zero);
523
524
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
525
0
  const __m128i one = _mm_set1_epi16(1);
526
0
  __m128i l16;
527
528
0
  int i, j;
529
0
  for (j = 0; j < 4; ++j) {
530
0
    const __m128i l = _mm_load_si128((const __m128i *)(left + j * 16));
531
0
    __m128i rep = _mm_set1_epi16((short)0x8000);
532
0
    for (i = 0; i < 16; ++i) {
533
0
      l16 = _mm_shuffle_epi8(l, rep);
534
0
      const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
535
0
      const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
536
0
      const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
537
0
      const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
538
539
0
      _mm_store_si128((__m128i *)dst, r0);
540
0
      _mm_store_si128((__m128i *)(dst + 16), r1);
541
0
      _mm_store_si128((__m128i *)(dst + 32), r2);
542
0
      _mm_store_si128((__m128i *)(dst + 48), r3);
543
0
      dst += stride;
544
0
      rep = _mm_add_epi16(rep, one);
545
0
    }
546
0
  }
547
0
}
548
549
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
550
void aom_paeth_predictor_64x16_ssse3(uint8_t *dst, ptrdiff_t stride,
551
                                     const uint8_t *above,
552
0
                                     const uint8_t *left) {
553
0
  const __m128i a = _mm_load_si128((const __m128i *)above);
554
0
  const __m128i b = _mm_load_si128((const __m128i *)(above + 16));
555
0
  const __m128i c = _mm_load_si128((const __m128i *)(above + 32));
556
0
  const __m128i d = _mm_load_si128((const __m128i *)(above + 48));
557
0
  const __m128i zero = _mm_setzero_si128();
558
0
  const __m128i al = _mm_unpacklo_epi8(a, zero);
559
0
  const __m128i ah = _mm_unpackhi_epi8(a, zero);
560
0
  const __m128i bl = _mm_unpacklo_epi8(b, zero);
561
0
  const __m128i bh = _mm_unpackhi_epi8(b, zero);
562
0
  const __m128i cl = _mm_unpacklo_epi8(c, zero);
563
0
  const __m128i ch = _mm_unpackhi_epi8(c, zero);
564
0
  const __m128i dl = _mm_unpacklo_epi8(d, zero);
565
0
  const __m128i dh = _mm_unpackhi_epi8(d, zero);
566
567
0
  const __m128i tl16 = _mm_set1_epi16((int16_t)above[-1]);
568
0
  const __m128i one = _mm_set1_epi16(1);
569
0
  __m128i l16;
570
571
0
  int i;
572
0
  const __m128i l = _mm_load_si128((const __m128i *)left);
573
0
  __m128i rep = _mm_set1_epi16((short)0x8000);
574
0
  for (i = 0; i < 16; ++i) {
575
0
    l16 = _mm_shuffle_epi8(l, rep);
576
0
    const __m128i r0 = paeth_16x1_pred(&l16, &al, &ah, &tl16);
577
0
    const __m128i r1 = paeth_16x1_pred(&l16, &bl, &bh, &tl16);
578
0
    const __m128i r2 = paeth_16x1_pred(&l16, &cl, &ch, &tl16);
579
0
    const __m128i r3 = paeth_16x1_pred(&l16, &dl, &dh, &tl16);
580
581
0
    _mm_store_si128((__m128i *)dst, r0);
582
0
    _mm_store_si128((__m128i *)(dst + 16), r1);
583
0
    _mm_store_si128((__m128i *)(dst + 32), r2);
584
0
    _mm_store_si128((__m128i *)(dst + 48), r3);
585
0
    dst += stride;
586
0
    rep = _mm_add_epi16(rep, one);
587
0
  }
588
0
}
589
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
590
591
// -----------------------------------------------------------------------------
592
// SMOOTH_PRED
593
594
// pixels[0]: above and below_pred interleave vector
595
// pixels[1]: left vector
596
// pixels[2]: right_pred vector
597
static inline void load_pixel_w4(const uint8_t *above, const uint8_t *left,
598
156k
                                 int height, __m128i *pixels) {
599
156k
  __m128i d = _mm_cvtsi32_si128(((const int *)above)[0]);
600
156k
  if (height == 4)
601
94.3k
    pixels[1] = _mm_cvtsi32_si128(((const int *)left)[0]);
602
62.3k
  else if (height == 8)
603
40.2k
    pixels[1] = _mm_loadl_epi64(((const __m128i *)left));
604
22.1k
  else
605
22.1k
    pixels[1] = _mm_loadu_si128(((const __m128i *)left));
606
607
156k
  pixels[2] = _mm_set1_epi16((int16_t)above[3]);
608
609
156k
  const __m128i bp = _mm_set1_epi16((int16_t)left[height - 1]);
610
156k
  const __m128i zero = _mm_setzero_si128();
611
156k
  d = _mm_unpacklo_epi8(d, zero);
612
156k
  pixels[0] = _mm_unpacklo_epi16(d, bp);
613
156k
}
614
615
// weight_h[0]: weight_h vector
616
// weight_h[1]: scale - weight_h vector
617
// weight_h[2]: same as [0], second half for height = 16 only
618
// weight_h[3]: same as [1], second half for height = 16 only
619
// weight_w[0]: weights_w and scale - weights_w interleave vector
620
static inline void load_weight_w4(int height, __m128i *weight_h,
621
156k
                                  __m128i *weight_w) {
622
156k
  const __m128i zero = _mm_setzero_si128();
623
156k
  const __m128i d = _mm_set1_epi16((int16_t)(1 << SMOOTH_WEIGHT_LOG2_SCALE));
624
156k
  const __m128i t = _mm_cvtsi32_si128(((const int *)smooth_weights)[0]);
625
156k
  weight_h[0] = _mm_unpacklo_epi8(t, zero);
626
156k
  weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
627
156k
  weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
628
629
156k
  if (height == 8) {
630
40.2k
    const __m128i weight = _mm_loadl_epi64((const __m128i *)&smooth_weights[4]);
631
40.2k
    weight_h[0] = _mm_unpacklo_epi8(weight, zero);
632
40.2k
    weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
633
116k
  } else if (height == 16) {
634
22.1k
    const __m128i weight =
635
22.1k
        _mm_loadu_si128((const __m128i *)&smooth_weights[12]);
636
22.1k
    weight_h[0] = _mm_unpacklo_epi8(weight, zero);
637
22.1k
    weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
638
22.1k
    weight_h[2] = _mm_unpackhi_epi8(weight, zero);
639
22.1k
    weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
640
22.1k
  }
641
156k
}
642
643
static inline void smooth_pred_4xh(const __m128i *pixel, const __m128i *wh,
644
                                   const __m128i *ww, int h, uint8_t *dst,
645
178k
                                   ptrdiff_t stride, int second_half) {
646
178k
  const __m128i round = _mm_set1_epi32((1 << SMOOTH_WEIGHT_LOG2_SCALE));
647
178k
  const __m128i one = _mm_set1_epi16(1);
648
178k
  const __m128i inc = _mm_set1_epi16(0x202);
649
178k
  const __m128i gat = _mm_set1_epi32(0xc080400);
650
178k
  __m128i rep = second_half ? _mm_set1_epi16((short)0x8008)
651
178k
                            : _mm_set1_epi16((short)0x8000);
652
178k
  __m128i d = _mm_set1_epi16(0x100);
653
654
1.23M
  for (int i = 0; i < h; ++i) {
655
1.05M
    const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
656
1.05M
    const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
657
1.05M
    const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
658
1.05M
    __m128i s = _mm_madd_epi16(pixel[0], wh_sc);
659
660
1.05M
    __m128i b = _mm_shuffle_epi8(pixel[1], rep);
661
1.05M
    b = _mm_unpacklo_epi16(b, pixel[2]);
662
1.05M
    __m128i sum = _mm_madd_epi16(b, ww[0]);
663
664
1.05M
    sum = _mm_add_epi32(s, sum);
665
1.05M
    sum = _mm_add_epi32(sum, round);
666
1.05M
    sum = _mm_srai_epi32(sum, 1 + SMOOTH_WEIGHT_LOG2_SCALE);
667
668
1.05M
    sum = _mm_shuffle_epi8(sum, gat);
669
1.05M
    *(int *)dst = _mm_cvtsi128_si32(sum);
670
1.05M
    dst += stride;
671
672
1.05M
    rep = _mm_add_epi16(rep, one);
673
1.05M
    d = _mm_add_epi16(d, inc);
674
1.05M
  }
675
178k
}
676
677
void aom_smooth_predictor_4x4_ssse3(uint8_t *dst, ptrdiff_t stride,
678
94.3k
                                    const uint8_t *above, const uint8_t *left) {
679
94.3k
  __m128i pixels[3];
680
94.3k
  load_pixel_w4(above, left, 4, pixels);
681
682
94.3k
  __m128i wh[4], ww[2];
683
94.3k
  load_weight_w4(4, wh, ww);
684
685
94.3k
  smooth_pred_4xh(pixels, wh, ww, 4, dst, stride, 0);
686
94.3k
}
687
688
void aom_smooth_predictor_4x8_ssse3(uint8_t *dst, ptrdiff_t stride,
689
40.2k
                                    const uint8_t *above, const uint8_t *left) {
690
40.2k
  __m128i pixels[3];
691
40.2k
  load_pixel_w4(above, left, 8, pixels);
692
693
40.2k
  __m128i wh[4], ww[2];
694
40.2k
  load_weight_w4(8, wh, ww);
695
696
40.2k
  smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0);
697
40.2k
}
698
699
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
700
void aom_smooth_predictor_4x16_ssse3(uint8_t *dst, ptrdiff_t stride,
701
                                     const uint8_t *above,
702
22.1k
                                     const uint8_t *left) {
703
22.1k
  __m128i pixels[3];
704
22.1k
  load_pixel_w4(above, left, 16, pixels);
705
706
22.1k
  __m128i wh[4], ww[2];
707
22.1k
  load_weight_w4(16, wh, ww);
708
709
22.1k
  smooth_pred_4xh(pixels, wh, ww, 8, dst, stride, 0);
710
22.1k
  dst += stride << 3;
711
22.1k
  smooth_pred_4xh(pixels, &wh[2], ww, 8, dst, stride, 1);
712
22.1k
}
713
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
714
715
// pixels[0]: above and below_pred interleave vector, first half
716
// pixels[1]: above and below_pred interleave vector, second half
717
// pixels[2]: left vector
718
// pixels[3]: right_pred vector
719
// pixels[4]: above and below_pred interleave vector, first half
720
// pixels[5]: above and below_pred interleave vector, second half
721
// pixels[6]: left vector + 16
722
// pixels[7]: right_pred vector
723
// Gathers the edge pixels needed by the width-8 smooth predictor (see the
// pixels[] layout documented above). above/left must provide at least 8 and
// `height` readable pixels respectively; for height==32 the extra slots
// pixels[4..7] mirror the first half with the second 16 left pixels.
static inline void load_pixel_w8(const uint8_t *above, const uint8_t *left,
                                 int height, __m128i *pixels) {
  const __m128i zero = _mm_setzero_si128();
  // bp = bottom-left pixel, broadcast; it is blended with every top pixel.
  const __m128i bp = _mm_set1_epi16((int16_t)left[height - 1]);
  __m128i d = _mm_loadl_epi64((const __m128i *)above);
  d = _mm_unpacklo_epi8(d, zero);
  // Interleave widened top pixels with the bottom-left broadcast so a single
  // madd can form top*w + bottom_left*(scale-w).
  pixels[0] = _mm_unpacklo_epi16(d, bp);
  pixels[1] = _mm_unpackhi_epi16(d, bp);

  // Top-right pixel, broadcast for the horizontal blend.
  pixels[3] = _mm_set1_epi16((int16_t)above[7]);

  if (height == 4) {
    pixels[2] = _mm_cvtsi32_si128(((const int *)left)[0]);
  } else if (height == 8) {
    pixels[2] = _mm_loadl_epi64((const __m128i *)left);
  } else if (height == 16) {
    pixels[2] = _mm_load_si128((const __m128i *)left);
  } else {
    // height == 32: duplicate the top-edge vectors and load the second batch
    // of 16 left pixels for rows 16..31.
    pixels[2] = _mm_load_si128((const __m128i *)left);
    pixels[4] = pixels[0];
    pixels[5] = pixels[1];
    pixels[6] = _mm_load_si128((const __m128i *)(left + 16));
    pixels[7] = pixels[3];
  }
}
748
749
// weight_h[0]: weight_h vector
750
// weight_h[1]: scale - weight_h vector
751
// weight_h[2]: same as [0], offset 8
752
// weight_h[3]: same as [1], offset 8
753
// weight_h[4]: same as [0], offset 16
754
// weight_h[5]: same as [1], offset 16
755
// weight_h[6]: same as [0], offset 24
756
// weight_h[7]: same as [1], offset 24
757
// weight_w[0]: weights_w and scale - weights_w interleave vector, first half
758
// weight_w[1]: weights_w and scale - weights_w interleave vector, second half
759
// Expands the 8-bit smooth_weights table entries for a width-8 block into
// 16-bit vectors (see the weight_h[]/weight_w[] layout documented above).
// Vertical weights depend on `height`; horizontal weights are always the
// 8 entries for width 8.
static inline void load_weight_w8(int height, __m128i *weight_h,
                                  __m128i *weight_w) {
  const __m128i zero = _mm_setzero_si128();
  // smooth_weights is indexed by (block size - 4); offset 4 holds the
  // 8-entry table used for both width 8 and height >= 8.
  const int we_offset = height < 8 ? 0 : 4;
  __m128i we = _mm_loadu_si128((const __m128i *)&smooth_weights[we_offset]);
  weight_h[0] = _mm_unpacklo_epi8(we, zero);
  // d = scale (256); the complementary weight is scale - w.
  const __m128i d = _mm_set1_epi16((int16_t)(1 << SMOOTH_WEIGHT_LOG2_SCALE));
  weight_h[1] = _mm_sub_epi16(d, weight_h[0]);

  if (height == 4) {
    // Width-8 weights start 4 bytes in when the vertical table is the
    // 4-entry one; shift them down before widening.
    we = _mm_srli_si128(we, 4);
    __m128i tmp1 = _mm_unpacklo_epi8(we, zero);
    __m128i tmp2 = _mm_sub_epi16(d, tmp1);
    weight_w[0] = _mm_unpacklo_epi16(tmp1, tmp2);
    weight_w[1] = _mm_unpackhi_epi16(tmp1, tmp2);
  } else {
    weight_w[0] = _mm_unpacklo_epi16(weight_h[0], weight_h[1]);
    weight_w[1] = _mm_unpackhi_epi16(weight_h[0], weight_h[1]);
  }

  if (height == 16) {
    // Replace the vertical weights with the 16-entry table (offset 12).
    we = _mm_loadu_si128((const __m128i *)&smooth_weights[12]);
    weight_h[0] = _mm_unpacklo_epi8(we, zero);
    weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
    weight_h[2] = _mm_unpackhi_epi8(we, zero);
    weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
  } else if (height == 32) {
    // 32-entry table (offset 28), split across four weight/complement pairs.
    const __m128i weight_lo =
        _mm_loadu_si128((const __m128i *)&smooth_weights[28]);
    weight_h[0] = _mm_unpacklo_epi8(weight_lo, zero);
    weight_h[1] = _mm_sub_epi16(d, weight_h[0]);
    weight_h[2] = _mm_unpackhi_epi8(weight_lo, zero);
    weight_h[3] = _mm_sub_epi16(d, weight_h[2]);
    const __m128i weight_hi =
        _mm_loadu_si128((const __m128i *)&smooth_weights[28 + 16]);
    weight_h[4] = _mm_unpacklo_epi8(weight_hi, zero);
    weight_h[5] = _mm_sub_epi16(d, weight_h[4]);
    weight_h[6] = _mm_unpackhi_epi8(weight_hi, zero);
    weight_h[7] = _mm_sub_epi16(d, weight_h[6]);
  }
}
800
801
// Produces h rows of an 8-wide smooth prediction. pixels/wh/ww come from
// load_pixel_w8/load_weight_w8. When second_half is set, the per-row byte
// selectors start at left-column index 8 so rows 8..15 of the left vector
// are used.
static inline void smooth_pred_8xh(const __m128i *pixels, const __m128i *wh,
                                   const __m128i *ww, int h, uint8_t *dst,
                                   ptrdiff_t stride, int second_half) {
  const __m128i round = _mm_set1_epi32((1 << SMOOTH_WEIGHT_LOG2_SCALE));
  const __m128i one = _mm_set1_epi16(1);
  // Each pshufb selector indexes a 16-bit lane (two bytes), so advancing one
  // row adds 0x0202 to every byte pair.
  const __m128i inc = _mm_set1_epi16(0x202);
  // Gathers the low byte of each 16-bit result into the first 8 bytes.
  const __m128i gat = _mm_set_epi32(0, 0, 0xe0c0a08, 0x6040200);

  // rep selects the current row's left pixel; 0x80 bytes zero the high half.
  __m128i rep = second_half ? _mm_set1_epi16((short)0x8008)
                            : _mm_set1_epi16((short)0x8000);
  // d selects the current row's vertical weight pair.
  __m128i d = _mm_set1_epi16(0x100);

  int i;
  for (i = 0; i < h; ++i) {
    // Vertical blend: top * w[y] + bottom_left * (scale - w[y]).
    const __m128i wg_wg = _mm_shuffle_epi8(wh[0], d);
    const __m128i sc_sc = _mm_shuffle_epi8(wh[1], d);
    const __m128i wh_sc = _mm_unpacklo_epi16(wg_wg, sc_sc);
    __m128i s0 = _mm_madd_epi16(pixels[0], wh_sc);
    __m128i s1 = _mm_madd_epi16(pixels[1], wh_sc);

    // Horizontal blend: left[y] * w[x] + top_right * (scale - w[x]).
    __m128i b = _mm_shuffle_epi8(pixels[2], rep);
    b = _mm_unpacklo_epi16(b, pixels[3]);
    __m128i sum0 = _mm_madd_epi16(b, ww[0]);
    __m128i sum1 = _mm_madd_epi16(b, ww[1]);

    // Average the two blends with rounding: >> (1 + log2(scale)).
    s0 = _mm_add_epi32(s0, sum0);
    s0 = _mm_add_epi32(s0, round);
    s0 = _mm_srai_epi32(s0, 1 + SMOOTH_WEIGHT_LOG2_SCALE);

    s1 = _mm_add_epi32(s1, sum1);
    s1 = _mm_add_epi32(s1, round);
    s1 = _mm_srai_epi32(s1, 1 + SMOOTH_WEIGHT_LOG2_SCALE);

    // Pack the eight 32-bit results down to bytes and store one row.
    sum0 = _mm_packus_epi16(s0, s1);
    sum0 = _mm_shuffle_epi8(sum0, gat);
    _mm_storel_epi64((__m128i *)dst, sum0);
    dst += stride;

    rep = _mm_add_epi16(rep, one);
    d = _mm_add_epi16(d, inc);
  }
}
843
844
// SMOOTH_PRED for an 8x4 block.
void aom_smooth_predictor_8x4_ssse3(uint8_t *dst, ptrdiff_t stride,
                                    const uint8_t *above, const uint8_t *left) {
  __m128i pixels[4];
  load_pixel_w8(above, left, 4, pixels);

  __m128i wh[4], ww[2];
  load_weight_w8(4, wh, ww);

  smooth_pred_8xh(pixels, wh, ww, 4, dst, stride, 0);
}
854
855
// SMOOTH_PRED for an 8x8 block.
void aom_smooth_predictor_8x8_ssse3(uint8_t *dst, ptrdiff_t stride,
                                    const uint8_t *above, const uint8_t *left) {
  __m128i pixels[4];
  load_pixel_w8(above, left, 8, pixels);

  __m128i wh[4], ww[2];
  load_weight_w8(8, wh, ww);

  smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0);
}
865
866
// SMOOTH_PRED for an 8x16 block: two 8-row passes, the second selecting the
// upper half of the left-column vector (second_half=1) with weights wh[2..3].
void aom_smooth_predictor_8x16_ssse3(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  __m128i pixels[4];
  load_pixel_w8(above, left, 16, pixels);

  __m128i wh[4], ww[2];
  load_weight_w8(16, wh, ww);

  smooth_pred_8xh(pixels, wh, ww, 8, dst, stride, 0);
  dst += stride << 3;
  smooth_pred_8xh(pixels, &wh[2], ww, 8, dst, stride, 1);
}
879
880
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
881
// SMOOTH_PRED for an 8x32 block: four 8-row passes. Rows 0..15 use
// pixels[0..3] (first 16 left pixels); rows 16..31 use pixels[4..7]
// (second 16 left pixels) with the corresponding weight pairs.
void aom_smooth_predictor_8x32_ssse3(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  __m128i pixels[8];
  load_pixel_w8(above, left, 32, pixels);

  __m128i wh[8], ww[2];
  load_weight_w8(32, wh, ww);

  smooth_pred_8xh(&pixels[0], wh, ww, 8, dst, stride, 0);
  dst += stride << 3;
  smooth_pred_8xh(&pixels[0], &wh[2], ww, 8, dst, stride, 1);
  dst += stride << 3;
  smooth_pred_8xh(&pixels[4], &wh[4], ww, 8, dst, stride, 0);
  dst += stride << 3;
  smooth_pred_8xh(&pixels[4], &wh[6], ww, 8, dst, stride, 1);
}
898
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
899
900
// TODO(slavarnway): Visual Studio only supports restrict when /std:c11
901
// (available in 2019+) or greater is specified; __restrict can be used in that
902
// case. This should be moved to rtcd and used consistently between the
903
// function declarations and definitions to avoid warnings in Visual Studio
904
// when defining LIBAOM_RESTRICT to restrict or __restrict.
905
#if defined(_MSC_VER)
906
#define LIBAOM_RESTRICT
907
#else
908
#define LIBAOM_RESTRICT restrict
909
#endif
910
911
273k
// Loads 4 bytes from |src| into the low 32 bits of an xmm register.
//
// memcpy into a temporary keeps the access alignment-safe and alias-safe;
// compilers fold the pair into a single movss/movd. The _mm_loadu_si32
// intrinsic would do the same but is not available on all supported
// compilers, so its use is banned here.
static AOM_FORCE_INLINE __m128i Load4(const void *src) {
  int low_bits;
  memcpy(&low_bits, src, sizeof(low_bits));
  return _mm_cvtsi32_si128(low_bits);
}
922
923
59.4M
// Loads 8 bytes from |a| (no alignment requirement) into the low half of an
// xmm register; the high half is zeroed.
static AOM_FORCE_INLINE __m128i LoadLo8(const void *a) {
  return _mm_loadl_epi64((const __m128i *)(a));
}
926
927
556k
// Loads 16 bytes from |a| with no alignment requirement.
static AOM_FORCE_INLINE __m128i LoadUnaligned16(const void *a) {
  return _mm_loadu_si128((const __m128i *)(a));
}
930
931
635k
// Stores the low 32 bits of |x| to |dst|. The memcpy through a temporary is
// alignment-safe and compiles to a single 4-byte store.
static AOM_FORCE_INLINE void Store4(void *dst, const __m128i x) {
  const int low_bits = _mm_cvtsi128_si32(x);
  memcpy(dst, &low_bits, sizeof(low_bits));
}
935
936
30.5M
// Stores the low 8 bytes of |v| to |a| (no alignment requirement).
static AOM_FORCE_INLINE void StoreLo8(void *a, const __m128i v) {
  _mm_storel_epi64((__m128i *)(a), v);
}
939
940
9.66M
// Stores all 16 bytes of |v| to |a| with no alignment requirement.
static AOM_FORCE_INLINE void StoreUnaligned16(void *a, const __m128i v) {
  _mm_storeu_si128((__m128i *)(a), v);
}
943
944
60.3M
// Zero-extends the low 8 bytes of |x| to eight 16-bit lanes (SSE2
// equivalent of the SSE4.1 pmovzxbw).
static AOM_FORCE_INLINE __m128i cvtepu8_epi16(__m128i x) {
  return _mm_unpacklo_epi8((x), _mm_setzero_si128());
}
947
948
153k
// Zero-extends the low 4 bytes of |x| to four 32-bit lanes (SSE2
// equivalent of the SSE4.1 pmovzxbd): widen bytes to words, then words to
// dwords.
static AOM_FORCE_INLINE __m128i cvtepu8_epi32(__m128i x) {
  const __m128i zero = _mm_setzero_si128();
  return _mm_unpacklo_epi16(_mm_unpacklo_epi8(x, zero), zero);
}
952
953
29.4M
// Zero-extends the low four 16-bit lanes of |x| to 32-bit lanes (SSE2
// equivalent of the SSE4.1 pmovzxwd).
static AOM_FORCE_INLINE __m128i cvtepu16_epi32(__m128i x) {
  return _mm_unpacklo_epi16((x), _mm_setzero_si128());
}
956
957
// Generic SMOOTH_PRED for blocks with width a multiple of 8. Each output
// pixel averages a vertical blend (top[x] vs. bottom-left) and a horizontal
// blend (left[y] vs. top-right) weighted by the smooth_weights tables.
// width and height must each be >= 8 here (narrower sizes have dedicated
// paths); width must be a multiple of 8 for the 8-pixel inner loop.
static void smooth_predictor_wxh(uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
                                 const uint8_t *LIBAOM_RESTRICT top_row,
                                 const uint8_t *LIBAOM_RESTRICT left_column,
                                 int width, int height) {
  // smooth_weights is indexed by (block dimension - 4).
  const uint8_t *const sm_weights_h = smooth_weights + height - 4;
  const uint8_t *const sm_weights_w = smooth_weights + width - 4;
  const __m128i zero = _mm_setzero_si128();
  const __m128i scale_value = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  const __m128i bottom_left = _mm_cvtsi32_si128(left_column[height - 1]);
  const __m128i top_right = _mm_set1_epi16(top_row[width - 1]);
  const __m128i round = _mm_set1_epi32(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  for (int y = 0; y < height; ++y) {
    const __m128i weights_y = _mm_cvtsi32_si128(sm_weights_h[y]);
    const __m128i left_y = _mm_cvtsi32_si128(left_column[y]);
    const __m128i scale_m_weights_y = _mm_sub_epi16(scale_value, weights_y);
    // bottom_left * (scale - w[y]): constant across the row.
    __m128i scaled_bottom_left =
        _mm_mullo_epi16(scale_m_weights_y, bottom_left);
    // (w[y], left[y]) pair broadcast to all four dword lanes for pmaddwd.
    const __m128i weight_left_y =
        _mm_shuffle_epi32(_mm_unpacklo_epi16(weights_y, left_y), 0);
    // Fold the rounding constant in once per row.
    scaled_bottom_left = _mm_add_epi32(scaled_bottom_left, round);
    scaled_bottom_left = _mm_shuffle_epi32(scaled_bottom_left, 0);
    for (int x = 0; x < width; x += 8) {
      const __m128i top_x = LoadLo8(top_row + x);
      const __m128i weights_x = LoadLo8(sm_weights_w + x);
      const __m128i top_weights_x = _mm_unpacklo_epi8(top_x, weights_x);
      const __m128i top_weights_x_lo = cvtepu8_epi16(top_weights_x);
      const __m128i top_weights_x_hi = _mm_unpackhi_epi8(top_weights_x, zero);

      // Here opposite weights and pixels are multiplied, where the order of
      // interleaving is indicated in the names.
      __m128i pred_lo = _mm_madd_epi16(top_weights_x_lo, weight_left_y);
      __m128i pred_hi = _mm_madd_epi16(top_weights_x_hi, weight_left_y);

      // |scaled_bottom_left| is always scaled by the same weight each row, so
      // we only derive |scaled_top_right| values here.
      const __m128i inverted_weights_x =
          _mm_sub_epi16(scale_value, cvtepu8_epi16(weights_x));
      const __m128i scaled_top_right =
          _mm_mullo_epi16(inverted_weights_x, top_right);
      const __m128i scaled_top_right_lo = cvtepu16_epi32(scaled_top_right);
      const __m128i scaled_top_right_hi =
          _mm_unpackhi_epi16(scaled_top_right, zero);
      pred_lo = _mm_add_epi32(pred_lo, scaled_bottom_left);
      pred_hi = _mm_add_epi32(pred_hi, scaled_bottom_left);
      pred_lo = _mm_add_epi32(pred_lo, scaled_top_right_lo);
      pred_hi = _mm_add_epi32(pred_hi, scaled_top_right_hi);

      // The round value for RightShiftWithRounding was added with
      // |scaled_bottom_left|.
      pred_lo = _mm_srli_epi32(pred_lo, (1 + SMOOTH_WEIGHT_LOG2_SCALE));
      pred_hi = _mm_srli_epi32(pred_hi, (1 + SMOOTH_WEIGHT_LOG2_SCALE));
      const __m128i pred = _mm_packus_epi16(pred_lo, pred_hi);
      StoreLo8(dst + x, _mm_packus_epi16(pred, pred));
    }
    dst += stride;
  }
}
1014
1015
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1016
// SMOOTH_PRED, 16x4.
void aom_smooth_predictor_16x4_ssse3(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 16, 4);
}
1021
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1022
1023
// SMOOTH_PRED, 16x8.
void aom_smooth_predictor_16x8_ssse3(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 16, 8);
}
1028
1029
// SMOOTH_PRED, 16x16.
void aom_smooth_predictor_16x16_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 16, 16);
}
1034
1035
// SMOOTH_PRED, 16x32.
void aom_smooth_predictor_16x32_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 16, 32);
}
1040
1041
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1042
// SMOOTH_PRED, 16x64.
void aom_smooth_predictor_16x64_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 16, 64);
}
1047
1048
// SMOOTH_PRED, 32x8.
void aom_smooth_predictor_32x8_ssse3(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 32, 8);
}
1053
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1054
1055
// SMOOTH_PRED, 32x16.
void aom_smooth_predictor_32x16_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 32, 16);
}
1060
1061
// SMOOTH_PRED, 32x32.
void aom_smooth_predictor_32x32_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 32, 32);
}
1066
1067
// SMOOTH_PRED, 32x64.
void aom_smooth_predictor_32x64_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 32, 64);
}
1072
1073
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1074
// SMOOTH_PRED, 64x16.
void aom_smooth_predictor_64x16_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 64, 16);
}
1079
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1080
1081
// SMOOTH_PRED, 64x32.
void aom_smooth_predictor_64x32_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 64, 32);
}
1086
1087
// SMOOTH_PRED, 64x64.
void aom_smooth_predictor_64x64_ssse3(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  smooth_predictor_wxh(dst, stride, above, left, 64, 64);
}
1092
1093
// -----------------------------------------------------------------------------
1094
// Smooth horizontal/vertical helper functions.
1095
1096
// For Horizontal, pixels1 and pixels2 are the same repeated value. For
1097
// Vertical, weights1 and weights2 are the same, and scaled_corner1 and
1098
// scaled_corner2 are the same.
1099
// Computes and stores one 16-pixel row of a smooth directional prediction:
// pred = (pixels * weights + scaled_corner + round) >> 8, per 16-bit lane,
// packed to bytes. Inputs are pre-widened to 16 bits by the caller.
static AOM_FORCE_INLINE void write_smooth_directional_sum16(
    uint8_t *LIBAOM_RESTRICT dst, const __m128i pixels1, const __m128i pixels2,
    const __m128i weights1, const __m128i weights2,
    const __m128i scaled_corner1, const __m128i scaled_corner2,
    const __m128i round) {
  const __m128i weighted_px1 = _mm_mullo_epi16(pixels1, weights1);
  const __m128i weighted_px2 = _mm_mullo_epi16(pixels2, weights2);
  const __m128i pred_sum1 = _mm_add_epi16(scaled_corner1, weighted_px1);
  const __m128i pred_sum2 = _mm_add_epi16(scaled_corner2, weighted_px2);
  // Equivalent to RightShiftWithRounding(pred[x][y], 8).
  const __m128i pred1 = _mm_srli_epi16(_mm_add_epi16(pred_sum1, round), 8);
  const __m128i pred2 = _mm_srli_epi16(_mm_add_epi16(pred_sum2, round), 8);
  StoreUnaligned16(dst, _mm_packus_epi16(pred1, pred2));
}
1113
1114
// Returns scaled_corner + pixels * weights, per 16-bit lane (the unrounded
// numerator of an 8-pixel smooth directional row).
static AOM_FORCE_INLINE __m128i smooth_directional_sum8(
    const __m128i pixels, const __m128i weights, const __m128i scaled_corner) {
  return _mm_add_epi16(scaled_corner, _mm_mullo_epi16(pixels, weights));
}
1119
1120
static AOM_FORCE_INLINE void write_smooth_directional_sum8(
1121
    uint8_t *LIBAOM_RESTRICT dst, const __m128i *pixels, const __m128i *weights,
1122
1.07M
    const __m128i *scaled_corner, const __m128i *round) {
1123
1.07M
  const __m128i pred_sum =
1124
1.07M
      smooth_directional_sum8(*pixels, *weights, *scaled_corner);
1125
  // Equivalent to RightShiftWithRounding(pred[x][y], 8).
1126
1.07M
  const __m128i pred = _mm_srli_epi16(_mm_add_epi16(pred_sum, *round), 8);
1127
1.07M
  StoreLo8(dst, _mm_packus_epi16(pred, pred));
1128
1.07M
}
1129
1130
// -----------------------------------------------------------------------------
1131
// SMOOTH_V_PRED
1132
1133
// Builds the pixel vector for a 4-wide SMOOTH_V block: the four top pixels
// interleaved with the broadcast bottom-left pixel, ready for pmaddwd
// against interleaved (w[y], 256-w[y]) weights.
static AOM_FORCE_INLINE void load_smooth_vertical_pixels4(
    const uint8_t *LIBAOM_RESTRICT above, const uint8_t *LIBAOM_RESTRICT left,
    const int height, __m128i *pixels) {
  __m128i top = Load4(above);
  const __m128i bottom_left = _mm_set1_epi16(left[height - 1]);
  top = cvtepu8_epi16(top);
  pixels[0] = _mm_unpacklo_epi16(top, bottom_left);
}
1141
1142
// |weight_array| alternates weight vectors from the table with their inverted
1143
// (256-w) counterparts. This is precomputed by the compiler when the weights
1144
// table is visible to this module. Removing this visibility can cut speed by up
1145
// to half in both 4xH and 8xH transforms.
1146
// Loads the per-row SMOOTH_V weights for a 4-wide block of the given height
// (4, 8, or 16) and their 256-w complements. height 4/8 fills weights[0..1];
// height 16 fills weights[0..3] (two 8-row halves).
static AOM_FORCE_INLINE void load_smooth_vertical_weights4(
    const uint8_t *LIBAOM_RESTRICT weight_array, const int height,
    __m128i *weights) {
  // Weights sum with their complement to the scale value (256).
  const __m128i inverter = _mm_set1_epi16(256);

  if (height == 4) {
    const __m128i weight = Load4(weight_array);
    weights[0] = cvtepu8_epi16(weight);
    weights[1] = _mm_sub_epi16(inverter, weights[0]);
  } else if (height == 8) {
    // Table offsets are (dimension - 4): 8-entry table starts at 4.
    const __m128i weight = LoadLo8(weight_array + 4);
    weights[0] = cvtepu8_epi16(weight);
    weights[1] = _mm_sub_epi16(inverter, weights[0]);
  } else {
    // height == 16: table starts at 12; split into low/high 8-row halves.
    const __m128i weight = LoadUnaligned16(weight_array + 12);
    const __m128i zero = _mm_setzero_si128();
    weights[0] = cvtepu8_epi16(weight);
    weights[1] = _mm_sub_epi16(inverter, weights[0]);
    weights[2] = _mm_unpackhi_epi8(weight, zero);
    weights[3] = _mm_sub_epi16(inverter, weights[2]);
  }
}
1168
1169
// Writes `height` rows of a 4-wide SMOOTH_V prediction. pixel[0] holds
// (top[x], bottom_left) pairs; weight[0]/weight[1] hold per-row weights and
// their 256-w complements, selected per row via pshufb.
static AOM_FORCE_INLINE void write_smooth_vertical4xh(
    const __m128i *pixel, const __m128i *weight, const int height,
    uint8_t *LIBAOM_RESTRICT dst, const ptrdiff_t stride) {
  const __m128i pred_round = _mm_set1_epi32(128);
  // Each row's selector targets the next 16-bit lane: +0x0202 per byte pair.
  const __m128i mask_increment = _mm_set1_epi16(0x0202);
  // Shuffle mask packing byte 0 of each dword into the low 4 bytes.
  const __m128i cvtepu8_epi32 = _mm_set1_epi32(0xC080400);
  __m128i y_select = _mm_set1_epi16(0x0100);

  for (int y = 0; y < height; ++y) {
    const __m128i weight_y = _mm_shuffle_epi8(weight[0], y_select);
    const __m128i inverted_weight_y = _mm_shuffle_epi8(weight[1], y_select);
    const __m128i alternate_weights =
        _mm_unpacklo_epi16(weight_y, inverted_weight_y);
    // Here the pixel vector is top_row[0], corner, top_row[1], corner, ...
    // The madd instruction yields four results of the form:
    // (top_row[x] * weight[y] + corner * inverted_weight[y])
    __m128i sum = _mm_madd_epi16(pixel[0], alternate_weights);
    sum = _mm_add_epi32(sum, pred_round);
    sum = _mm_srai_epi32(sum, 8);
    sum = _mm_shuffle_epi8(sum, cvtepu8_epi32);
    Store4(dst, sum);
    dst += stride;
    y_select = _mm_add_epi16(y_select, mask_increment);
  }
}
1194
1195
// SMOOTH_V_PRED, 4x4.
void aom_smooth_v_predictor_4x4_ssse3(
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
    const uint8_t *LIBAOM_RESTRICT top_row,
    const uint8_t *LIBAOM_RESTRICT left_column) {
  __m128i pixels;
  load_smooth_vertical_pixels4(top_row, left_column, 4, &pixels);

  __m128i weights[2];
  load_smooth_vertical_weights4(smooth_weights, 4, weights);

  write_smooth_vertical4xh(&pixels, weights, 4, dst, stride);
}
1207
1208
// SMOOTH_V_PRED, 4x8.
void aom_smooth_v_predictor_4x8_ssse3(
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
    const uint8_t *LIBAOM_RESTRICT top_row,
    const uint8_t *LIBAOM_RESTRICT left_column) {
  __m128i pixels;
  load_smooth_vertical_pixels4(top_row, left_column, 8, &pixels);

  __m128i weights[2];
  load_smooth_vertical_weights4(smooth_weights, 8, weights);

  write_smooth_vertical4xh(&pixels, weights, 8, dst, stride);
}
1220
1221
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1222
// SMOOTH_V_PRED, 4x16: two 8-row passes, the second using the upper-half
// weights (weights[2..3]).
void aom_smooth_v_predictor_4x16_ssse3(
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
    const uint8_t *LIBAOM_RESTRICT top_row,
    const uint8_t *LIBAOM_RESTRICT left_column) {
  __m128i pixels;
  load_smooth_vertical_pixels4(top_row, left_column, 16, &pixels);

  __m128i weights[4];
  load_smooth_vertical_weights4(smooth_weights, 16, weights);

  write_smooth_vertical4xh(&pixels, weights, 8, dst, stride);
  dst += stride << 3;
  write_smooth_vertical4xh(&pixels, &weights[2], 8, dst, stride);
}
1236
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1237
1238
void aom_smooth_v_predictor_8x4_ssse3(
1239
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1240
    const uint8_t *LIBAOM_RESTRICT top_row,
1241
11.2k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1242
11.2k
  const __m128i bottom_left = _mm_set1_epi16(left_column[3]);
1243
11.2k
  const __m128i weights = cvtepu8_epi16(Load4(smooth_weights));
1244
11.2k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1245
11.2k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1246
11.2k
  const __m128i scaled_bottom_left =
1247
11.2k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1248
11.2k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1249
11.2k
  __m128i y_select = _mm_set1_epi32(0x01000100);
1250
11.2k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1251
11.2k
  __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1252
11.2k
  __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1253
11.2k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1254
11.2k
                                &round);
1255
11.2k
  dst += stride;
1256
11.2k
  y_select = _mm_set1_epi32(0x03020302);
1257
11.2k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1258
11.2k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1259
11.2k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1260
11.2k
                                &round);
1261
11.2k
  dst += stride;
1262
11.2k
  y_select = _mm_set1_epi32(0x05040504);
1263
11.2k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1264
11.2k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1265
11.2k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1266
11.2k
                                &round);
1267
11.2k
  dst += stride;
1268
11.2k
  y_select = _mm_set1_epi32(0x07060706);
1269
11.2k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1270
11.2k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1271
11.2k
  write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1272
11.2k
                                &round);
1273
11.2k
}
1274
1275
// SMOOTH_V_PRED, 8x8. Row y is (top * w[y] + bottom_left * (256 - w[y]) +
// 128) >> 8; the y_mask loop selects successive 16-bit weight lanes for
// rows 0..7.
void aom_smooth_v_predictor_8x8_ssse3(
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
    const uint8_t *LIBAOM_RESTRICT top_row,
    const uint8_t *LIBAOM_RESTRICT left_column) {
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
  // bottom_left * (256 - w[y]) precomputed for all 8 rows.
  const __m128i scaled_bottom_left =
      _mm_mullo_epi16(inverted_weights, bottom_left);
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    const __m128i y_select = _mm_set1_epi32(y_mask);
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
    const __m128i scaled_bottom_left_y =
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
                                  &round);
    dst += stride;
  }
}
1297
1298
// SMOOTH_V_PRED, 8x16: 16-entry weight table split into two 8-row halves,
// each processed with the same y_mask lane-selection loop as the 8x8 case.
void aom_smooth_v_predictor_8x16_ssse3(
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
    const uint8_t *LIBAOM_RESTRICT top_row,
    const uint8_t *LIBAOM_RESTRICT left_column) {
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);

  const __m128i weights1 = cvtepu8_epi16(weights);
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
  const __m128i scaled_bottom_left1 =
      _mm_mullo_epi16(inverted_weights1, bottom_left);
  const __m128i scaled_bottom_left2 =
      _mm_mullo_epi16(inverted_weights2, bottom_left);
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
  // Rows 0..7.
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    const __m128i y_select = _mm_set1_epi32(y_mask);
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
    const __m128i scaled_bottom_left_y =
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
                                  &round);
    dst += stride;
  }
  // Rows 8..15.
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    const __m128i y_select = _mm_set1_epi32(y_mask);
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
    const __m128i scaled_bottom_left_y =
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
                                  &round);
    dst += stride;
  }
}
1335
1336
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1337
void aom_smooth_v_predictor_8x32_ssse3(
1338
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1339
    const uint8_t *LIBAOM_RESTRICT top_row,
1340
2.17k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1341
2.17k
  const __m128i zero = _mm_setzero_si128();
1342
2.17k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1343
2.17k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1344
2.17k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1345
2.17k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1346
2.17k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1347
2.17k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1348
2.17k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1349
2.17k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1350
2.17k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1351
2.17k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1352
2.17k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1353
2.17k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1354
2.17k
  const __m128i scaled_bottom_left1 =
1355
2.17k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1356
2.17k
  const __m128i scaled_bottom_left2 =
1357
2.17k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1358
2.17k
  const __m128i scaled_bottom_left3 =
1359
2.17k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1360
2.17k
  const __m128i scaled_bottom_left4 =
1361
2.17k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1362
2.17k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1363
2.17k
  const __m128i top = cvtepu8_epi16(LoadLo8(top_row));
1364
19.5k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1365
17.3k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1366
17.3k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1367
17.3k
    const __m128i scaled_bottom_left_y =
1368
17.3k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1369
17.3k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1370
17.3k
                                  &round);
1371
17.3k
    dst += stride;
1372
17.3k
  }
1373
19.5k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1374
17.3k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1375
17.3k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1376
17.3k
    const __m128i scaled_bottom_left_y =
1377
17.3k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1378
17.3k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1379
17.3k
                                  &round);
1380
17.3k
    dst += stride;
1381
17.3k
  }
1382
19.5k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1383
17.3k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1384
17.3k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1385
17.3k
    const __m128i scaled_bottom_left_y =
1386
17.3k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1387
17.3k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1388
17.3k
                                  &round);
1389
17.3k
    dst += stride;
1390
17.3k
  }
1391
19.5k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1392
17.3k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1393
17.3k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1394
17.3k
    const __m128i scaled_bottom_left_y =
1395
17.3k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1396
17.3k
    write_smooth_directional_sum8(dst, &top, &weights_y, &scaled_bottom_left_y,
1397
17.3k
                                  &round);
1398
17.3k
    dst += stride;
1399
17.3k
  }
1400
2.17k
}
1401
1402
void aom_smooth_v_predictor_16x4_ssse3(
1403
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1404
    const uint8_t *LIBAOM_RESTRICT top_row,
1405
10.2k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1406
10.2k
  const __m128i bottom_left = _mm_set1_epi16(left_column[3]);
1407
10.2k
  const __m128i weights = cvtepu8_epi16(Load4(smooth_weights));
1408
10.2k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1409
10.2k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1410
10.2k
  const __m128i scaled_bottom_left =
1411
10.2k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1412
10.2k
  const __m128i round = _mm_set1_epi16(128);
1413
10.2k
  const __m128i top = LoadUnaligned16(top_row);
1414
10.2k
  const __m128i top_lo = cvtepu8_epi16(top);
1415
10.2k
  const __m128i top_hi = cvtepu8_epi16(_mm_srli_si128(top, 8));
1416
1417
10.2k
  __m128i y_select = _mm_set1_epi32(0x01000100);
1418
10.2k
  __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1419
10.2k
  __m128i scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1420
10.2k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1421
10.2k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1422
10.2k
                                 round);
1423
10.2k
  dst += stride;
1424
10.2k
  y_select = _mm_set1_epi32(0x03020302);
1425
10.2k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1426
10.2k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1427
10.2k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1428
10.2k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1429
10.2k
                                 round);
1430
10.2k
  dst += stride;
1431
10.2k
  y_select = _mm_set1_epi32(0x05040504);
1432
10.2k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1433
10.2k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1434
10.2k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1435
10.2k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1436
10.2k
                                 round);
1437
10.2k
  dst += stride;
1438
10.2k
  y_select = _mm_set1_epi32(0x07060706);
1439
10.2k
  weights_y = _mm_shuffle_epi8(weights, y_select);
1440
10.2k
  scaled_bottom_left_y = _mm_shuffle_epi8(scaled_bottom_left, y_select);
1441
10.2k
  write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1442
10.2k
                                 scaled_bottom_left_y, scaled_bottom_left_y,
1443
10.2k
                                 round);
1444
10.2k
}
1445
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1446
1447
void aom_smooth_v_predictor_16x8_ssse3(
1448
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1449
    const uint8_t *LIBAOM_RESTRICT top_row,
1450
6.99k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1451
6.99k
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
1452
6.99k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
1453
6.99k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1454
6.99k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1455
6.99k
  const __m128i scaled_bottom_left =
1456
6.99k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1457
6.99k
  const __m128i round = _mm_set1_epi16(128);
1458
6.99k
  const __m128i top = LoadUnaligned16(top_row);
1459
6.99k
  const __m128i top_lo = cvtepu8_epi16(top);
1460
6.99k
  const __m128i top_hi = cvtepu8_epi16(_mm_srli_si128(top, 8));
1461
62.9k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1462
55.9k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1463
55.9k
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1464
55.9k
    const __m128i scaled_bottom_left_y =
1465
55.9k
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
1466
55.9k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1467
55.9k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1468
55.9k
                                   round);
1469
55.9k
    dst += stride;
1470
55.9k
  }
1471
6.99k
}
1472
1473
void aom_smooth_v_predictor_16x16_ssse3(
1474
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1475
    const uint8_t *LIBAOM_RESTRICT top_row,
1476
22.9k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1477
22.9k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1478
22.9k
  const __m128i zero = _mm_setzero_si128();
1479
22.9k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1480
22.9k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1481
22.9k
  const __m128i weights_lo = cvtepu8_epi16(weights);
1482
22.9k
  const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1483
22.9k
  const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1484
22.9k
  const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1485
22.9k
  const __m128i scaled_bottom_left_lo =
1486
22.9k
      _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1487
22.9k
  const __m128i scaled_bottom_left_hi =
1488
22.9k
      _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1489
22.9k
  const __m128i round = _mm_set1_epi16(128);
1490
1491
22.9k
  const __m128i top = LoadUnaligned16(top_row);
1492
22.9k
  const __m128i top_lo = cvtepu8_epi16(top);
1493
22.9k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1494
206k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1495
183k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1496
183k
    const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1497
183k
    const __m128i scaled_bottom_left_y =
1498
183k
        _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1499
183k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1500
183k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1501
183k
                                   round);
1502
183k
    dst += stride;
1503
183k
  }
1504
206k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1505
183k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1506
183k
    const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1507
183k
    const __m128i scaled_bottom_left_y =
1508
183k
        _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1509
183k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1510
183k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1511
183k
                                   round);
1512
183k
    dst += stride;
1513
183k
  }
1514
22.9k
}
1515
1516
void aom_smooth_v_predictor_16x32_ssse3(
1517
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1518
    const uint8_t *LIBAOM_RESTRICT top_row,
1519
6.56k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1520
6.56k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1521
6.56k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1522
6.56k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1523
6.56k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1524
6.56k
  const __m128i zero = _mm_setzero_si128();
1525
6.56k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1526
6.56k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1527
6.56k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1528
6.56k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1529
6.56k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1530
6.56k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1531
6.56k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1532
6.56k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1533
6.56k
  const __m128i scaled_bottom_left1 =
1534
6.56k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1535
6.56k
  const __m128i scaled_bottom_left2 =
1536
6.56k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1537
6.56k
  const __m128i scaled_bottom_left3 =
1538
6.56k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1539
6.56k
  const __m128i scaled_bottom_left4 =
1540
6.56k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1541
6.56k
  const __m128i round = _mm_set1_epi16(128);
1542
1543
6.56k
  const __m128i top = LoadUnaligned16(top_row);
1544
6.56k
  const __m128i top_lo = cvtepu8_epi16(top);
1545
6.56k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1546
59.0k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1547
52.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1548
52.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1549
52.4k
    const __m128i scaled_bottom_left_y =
1550
52.4k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1551
52.4k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1552
52.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1553
52.4k
                                   round);
1554
52.4k
    dst += stride;
1555
52.4k
  }
1556
59.0k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1557
52.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1558
52.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1559
52.4k
    const __m128i scaled_bottom_left_y =
1560
52.4k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1561
52.4k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1562
52.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1563
52.4k
                                   round);
1564
52.4k
    dst += stride;
1565
52.4k
  }
1566
59.0k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1567
52.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1568
52.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1569
52.4k
    const __m128i scaled_bottom_left_y =
1570
52.4k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1571
52.4k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1572
52.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1573
52.4k
                                   round);
1574
52.4k
    dst += stride;
1575
52.4k
  }
1576
59.0k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1577
52.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1578
52.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1579
52.4k
    const __m128i scaled_bottom_left_y =
1580
52.4k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1581
52.4k
    write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1582
52.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1583
52.4k
                                   round);
1584
52.4k
    dst += stride;
1585
52.4k
  }
1586
6.56k
}
1587
1588
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1589
void aom_smooth_v_predictor_16x64_ssse3(
1590
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1591
    const uint8_t *LIBAOM_RESTRICT top_row,
1592
1.34k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1593
1.34k
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
1594
1.34k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1595
1.34k
  const __m128i round = _mm_set1_epi16(128);
1596
1.34k
  const __m128i zero = _mm_setzero_si128();
1597
1.34k
  const __m128i top = LoadUnaligned16(top_row);
1598
1.34k
  const __m128i top_lo = cvtepu8_epi16(top);
1599
1.34k
  const __m128i top_hi = _mm_unpackhi_epi8(top, zero);
1600
1.34k
  const uint8_t *weights_base_ptr = smooth_weights + 60;
1601
6.73k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
1602
5.38k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
1603
5.38k
    const __m128i weights_lo = cvtepu8_epi16(weights);
1604
5.38k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1605
5.38k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1606
5.38k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1607
5.38k
    const __m128i scaled_bottom_left_lo =
1608
5.38k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1609
5.38k
    const __m128i scaled_bottom_left_hi =
1610
5.38k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1611
1612
48.4k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1613
43.0k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1614
43.0k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1615
43.0k
      const __m128i scaled_bottom_left_y =
1616
43.0k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1617
43.0k
      write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1618
43.0k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1619
43.0k
                                     round);
1620
43.0k
      dst += stride;
1621
43.0k
    }
1622
48.4k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1623
43.0k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1624
43.0k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1625
43.0k
      const __m128i scaled_bottom_left_y =
1626
43.0k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1627
43.0k
      write_smooth_directional_sum16(dst, top_lo, top_hi, weights_y, weights_y,
1628
43.0k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1629
43.0k
                                     round);
1630
43.0k
      dst += stride;
1631
43.0k
    }
1632
5.38k
  }
1633
1.34k
}
1634
1635
void aom_smooth_v_predictor_32x8_ssse3(
1636
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1637
    const uint8_t *LIBAOM_RESTRICT top_row,
1638
11.4k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1639
11.4k
  const __m128i zero = _mm_setzero_si128();
1640
11.4k
  const __m128i bottom_left = _mm_set1_epi16(left_column[7]);
1641
11.4k
  const __m128i top_lo = LoadUnaligned16(top_row);
1642
11.4k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1643
11.4k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1644
11.4k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1645
11.4k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1646
11.4k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1647
11.4k
  __m128i scale = _mm_set1_epi16(256);
1648
11.4k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
1649
11.4k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
1650
11.4k
  const __m128i scaled_bottom_left =
1651
11.4k
      _mm_mullo_epi16(inverted_weights, bottom_left);
1652
11.4k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1653
102k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1654
91.4k
    __m128i y_select = _mm_set1_epi32(y_mask);
1655
91.4k
    const __m128i weights_y = _mm_shuffle_epi8(weights, y_select);
1656
91.4k
    const __m128i scaled_bottom_left_y =
1657
91.4k
        _mm_shuffle_epi8(scaled_bottom_left, y_select);
1658
91.4k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1659
91.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1660
91.4k
                                   round);
1661
91.4k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1662
91.4k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1663
91.4k
                                   round);
1664
91.4k
    dst += stride;
1665
91.4k
  }
1666
11.4k
}
1667
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1668
1669
void aom_smooth_v_predictor_32x16_ssse3(
1670
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1671
    const uint8_t *LIBAOM_RESTRICT top_row,
1672
5.23k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1673
5.23k
  const __m128i zero = _mm_setzero_si128();
1674
5.23k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1675
5.23k
  const __m128i top_lo = LoadUnaligned16(top_row);
1676
5.23k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1677
5.23k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1678
5.23k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1679
5.23k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1680
5.23k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1681
5.23k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1682
5.23k
  const __m128i weights1 = cvtepu8_epi16(weights);
1683
5.23k
  const __m128i weights2 = _mm_unpackhi_epi8(weights, zero);
1684
5.23k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1685
5.23k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1686
5.23k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1687
5.23k
  const __m128i scaled_bottom_left1 =
1688
5.23k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1689
5.23k
  const __m128i scaled_bottom_left2 =
1690
5.23k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1691
5.23k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1692
47.0k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1693
41.8k
    __m128i y_select = _mm_set1_epi32(y_mask);
1694
41.8k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1695
41.8k
    const __m128i scaled_bottom_left_y =
1696
41.8k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1697
41.8k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1698
41.8k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1699
41.8k
                                   round);
1700
41.8k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1701
41.8k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1702
41.8k
                                   round);
1703
41.8k
    dst += stride;
1704
41.8k
  }
1705
47.0k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1706
41.8k
    __m128i y_select = _mm_set1_epi32(y_mask);
1707
41.8k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1708
41.8k
    const __m128i scaled_bottom_left_y =
1709
41.8k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1710
41.8k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1711
41.8k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1712
41.8k
                                   round);
1713
41.8k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1714
41.8k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1715
41.8k
                                   round);
1716
41.8k
    dst += stride;
1717
41.8k
  }
1718
5.23k
}
1719
1720
void aom_smooth_v_predictor_32x32_ssse3(
1721
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1722
    const uint8_t *LIBAOM_RESTRICT top_row,
1723
33.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1724
33.0k
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
1725
33.0k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
1726
33.0k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
1727
33.0k
  const __m128i zero = _mm_setzero_si128();
1728
33.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1729
33.0k
  const __m128i top_lo = LoadUnaligned16(top_row);
1730
33.0k
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1731
33.0k
  const __m128i top1 = cvtepu8_epi16(top_lo);
1732
33.0k
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1733
33.0k
  const __m128i top3 = cvtepu8_epi16(top_hi);
1734
33.0k
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1735
33.0k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
1736
33.0k
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
1737
33.0k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
1738
33.0k
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
1739
33.0k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1740
33.0k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1741
33.0k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
1742
33.0k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
1743
33.0k
  const __m128i scaled_bottom_left1 =
1744
33.0k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1745
33.0k
  const __m128i scaled_bottom_left2 =
1746
33.0k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1747
33.0k
  const __m128i scaled_bottom_left3 =
1748
33.0k
      _mm_mullo_epi16(inverted_weights3, bottom_left);
1749
33.0k
  const __m128i scaled_bottom_left4 =
1750
33.0k
      _mm_mullo_epi16(inverted_weights4, bottom_left);
1751
33.0k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1752
297k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1753
264k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1754
264k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1755
264k
    const __m128i scaled_bottom_left_y =
1756
264k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1757
264k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1758
264k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1759
264k
                                   round);
1760
264k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1761
264k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1762
264k
                                   round);
1763
264k
    dst += stride;
1764
264k
  }
1765
297k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1766
264k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1767
264k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1768
264k
    const __m128i scaled_bottom_left_y =
1769
264k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1770
264k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1771
264k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1772
264k
                                   round);
1773
264k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1774
264k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1775
264k
                                   round);
1776
264k
    dst += stride;
1777
264k
  }
1778
297k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1779
264k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1780
264k
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
1781
264k
    const __m128i scaled_bottom_left_y =
1782
264k
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
1783
264k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1784
264k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1785
264k
                                   round);
1786
264k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1787
264k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1788
264k
                                   round);
1789
264k
    dst += stride;
1790
264k
  }
1791
297k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1792
264k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1793
264k
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
1794
264k
    const __m128i scaled_bottom_left_y =
1795
264k
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
1796
264k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1797
264k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1798
264k
                                   round);
1799
264k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1800
264k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1801
264k
                                   round);
1802
264k
    dst += stride;
1803
264k
  }
1804
33.0k
}
1805
1806
void aom_smooth_v_predictor_32x64_ssse3(
1807
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1808
    const uint8_t *LIBAOM_RESTRICT top_row,
1809
408
    const uint8_t *LIBAOM_RESTRICT left_column) {
1810
408
  const __m128i zero = _mm_setzero_si128();
1811
408
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
1812
408
  const __m128i top_lo = LoadUnaligned16(top_row);
1813
408
  const __m128i top_hi = LoadUnaligned16(top_row + 16);
1814
408
  const __m128i top1 = cvtepu8_epi16(top_lo);
1815
408
  const __m128i top2 = _mm_unpackhi_epi8(top_lo, zero);
1816
408
  const __m128i top3 = cvtepu8_epi16(top_hi);
1817
408
  const __m128i top4 = _mm_unpackhi_epi8(top_hi, zero);
1818
408
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1819
408
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1820
408
  const uint8_t *weights_base_ptr = smooth_weights + 60;
1821
2.04k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
1822
1.63k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
1823
1.63k
    const __m128i weights_lo = cvtepu8_epi16(weights);
1824
1.63k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
1825
1.63k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
1826
1.63k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
1827
1.63k
    const __m128i scaled_bottom_left_lo =
1828
1.63k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
1829
1.63k
    const __m128i scaled_bottom_left_hi =
1830
1.63k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
1831
1832
14.6k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1833
13.0k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1834
13.0k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
1835
13.0k
      const __m128i scaled_bottom_left_y =
1836
13.0k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
1837
13.0k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1838
13.0k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1839
13.0k
                                     round);
1840
13.0k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1841
13.0k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1842
13.0k
                                     round);
1843
13.0k
      dst += stride;
1844
13.0k
    }
1845
14.6k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1846
13.0k
      const __m128i y_select = _mm_set1_epi32(y_mask);
1847
13.0k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
1848
13.0k
      const __m128i scaled_bottom_left_y =
1849
13.0k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
1850
13.0k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1851
13.0k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1852
13.0k
                                     round);
1853
13.0k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1854
13.0k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
1855
13.0k
                                     round);
1856
13.0k
      dst += stride;
1857
13.0k
    }
1858
1.63k
  }
1859
408
}
1860
1861
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1862
void aom_smooth_v_predictor_64x16_ssse3(
1863
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
1864
    const uint8_t *LIBAOM_RESTRICT top_row,
1865
8.39k
    const uint8_t *LIBAOM_RESTRICT left_column) {
1866
8.39k
  const __m128i bottom_left = _mm_set1_epi16(left_column[15]);
1867
8.39k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
1868
8.39k
  const __m128i zero = _mm_setzero_si128();
1869
8.39k
  const __m128i top_lolo = LoadUnaligned16(top_row);
1870
8.39k
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
1871
8.39k
  const __m128i top1 = cvtepu8_epi16(top_lolo);
1872
8.39k
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
1873
8.39k
  const __m128i top3 = cvtepu8_epi16(top_lohi);
1874
8.39k
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
1875
1876
8.39k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
1877
8.39k
  const __m128i weights1 = cvtepu8_epi16(weights);
1878
8.39k
  const __m128i weights2 = _mm_unpackhi_epi8(weights, zero);
1879
8.39k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
1880
8.39k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
1881
8.39k
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
1882
8.39k
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
1883
8.39k
  const __m128i top5 = cvtepu8_epi16(top_hilo);
1884
8.39k
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
1885
8.39k
  const __m128i top7 = cvtepu8_epi16(top_hihi);
1886
8.39k
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
1887
8.39k
  const __m128i scaled_bottom_left1 =
1888
8.39k
      _mm_mullo_epi16(inverted_weights1, bottom_left);
1889
8.39k
  const __m128i scaled_bottom_left2 =
1890
8.39k
      _mm_mullo_epi16(inverted_weights2, bottom_left);
1891
8.39k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
1892
75.5k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1893
67.1k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1894
67.1k
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
1895
67.1k
    const __m128i scaled_bottom_left_y =
1896
67.1k
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
1897
67.1k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1898
67.1k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1899
67.1k
                                   round);
1900
67.1k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1901
67.1k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1902
67.1k
                                   round);
1903
67.1k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
1904
67.1k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1905
67.1k
                                   round);
1906
67.1k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
1907
67.1k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1908
67.1k
                                   round);
1909
67.1k
    dst += stride;
1910
67.1k
  }
1911
75.5k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
1912
67.1k
    const __m128i y_select = _mm_set1_epi32(y_mask);
1913
67.1k
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
1914
67.1k
    const __m128i scaled_bottom_left_y =
1915
67.1k
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
1916
67.1k
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
1917
67.1k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1918
67.1k
                                   round);
1919
67.1k
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
1920
67.1k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1921
67.1k
                                   round);
1922
67.1k
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
1923
67.1k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1924
67.1k
                                   round);
1925
67.1k
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
1926
67.1k
                                   scaled_bottom_left_y, scaled_bottom_left_y,
1927
67.1k
                                   round);
1928
67.1k
    dst += stride;
1929
67.1k
  }
1930
8.39k
}
1931
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1932
1933
// SMOOTH_V 64x32: each output row blends the 64 top-row pixels with the
// bottom-left pixel (left_column[31]); the blend weight depends only on the
// row and is taken from smooth_weights.
void aom_smooth_v_predictor_64x32_ssse3(
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
    const uint8_t *LIBAOM_RESTRICT top_row,
    const uint8_t *LIBAOM_RESTRICT left_column) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i bottom_left = _mm_set1_epi16(left_column[31]);
  // Load the 64 top pixels and widen to eight vectors of 16-bit lanes.
  const __m128i top_lolo = LoadUnaligned16(top_row);
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
  const __m128i top1 = cvtepu8_epi16(top_lolo);
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
  const __m128i top3 = cvtepu8_epi16(top_lohi);
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
  const __m128i top5 = cvtepu8_epi16(top_hilo);
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
  const __m128i top7 = cvtepu8_epi16(top_hihi);
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
  // The 32 per-row weights, widened to four vectors of eight 16-bit lanes.
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
  const __m128i weights2 = _mm_unpackhi_epi8(weights_lo, zero);
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
  const __m128i weights4 = _mm_unpackhi_epi8(weights_hi, zero);
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
  // (scale - weight) * bottom_left is invariant per row; precompute it.
  const __m128i scaled_bottom_left1 =
      _mm_mullo_epi16(inverted_weights1, bottom_left);
  const __m128i scaled_bottom_left2 =
      _mm_mullo_epi16(inverted_weights2, bottom_left);
  const __m128i scaled_bottom_left3 =
      _mm_mullo_epi16(inverted_weights3, bottom_left);
  const __m128i scaled_bottom_left4 =
      _mm_mullo_epi16(inverted_weights4, bottom_left);
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));

  // Rows 0-7: y_mask replicates one 16-bit lane index so pshufb broadcasts
  // that row's weight and scaled bottom-left across the register.
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    const __m128i y_select = _mm_set1_epi32(y_mask);
    const __m128i weights_y = _mm_shuffle_epi8(weights1, y_select);
    const __m128i scaled_bottom_left_y =
        _mm_shuffle_epi8(scaled_bottom_left1, y_select);
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    dst += stride;
  }
  // Rows 8-15.
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    const __m128i y_select = _mm_set1_epi32(y_mask);
    const __m128i weights_y = _mm_shuffle_epi8(weights2, y_select);
    const __m128i scaled_bottom_left_y =
        _mm_shuffle_epi8(scaled_bottom_left2, y_select);
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    dst += stride;
  }
  // Rows 16-23.
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    const __m128i y_select = _mm_set1_epi32(y_mask);
    const __m128i weights_y = _mm_shuffle_epi8(weights3, y_select);
    const __m128i scaled_bottom_left_y =
        _mm_shuffle_epi8(scaled_bottom_left3, y_select);
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    dst += stride;
  }
  // Rows 24-31.
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
    const __m128i y_select = _mm_set1_epi32(y_mask);
    const __m128i weights_y = _mm_shuffle_epi8(weights4, y_select);
    const __m128i scaled_bottom_left_y =
        _mm_shuffle_epi8(scaled_bottom_left4, y_select);
    write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
                                   scaled_bottom_left_y, scaled_bottom_left_y,
                                   round);
    dst += stride;
  }
}
2049
2050
void aom_smooth_v_predictor_64x64_ssse3(
2051
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2052
    const uint8_t *LIBAOM_RESTRICT top_row,
2053
4.09k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2054
4.09k
  const __m128i zero = _mm_setzero_si128();
2055
4.09k
  const __m128i bottom_left = _mm_set1_epi16(left_column[63]);
2056
4.09k
  const __m128i top_lolo = LoadUnaligned16(top_row);
2057
4.09k
  const __m128i top_lohi = LoadUnaligned16(top_row + 16);
2058
4.09k
  const __m128i top1 = cvtepu8_epi16(top_lolo);
2059
4.09k
  const __m128i top2 = _mm_unpackhi_epi8(top_lolo, zero);
2060
4.09k
  const __m128i top3 = cvtepu8_epi16(top_lohi);
2061
4.09k
  const __m128i top4 = _mm_unpackhi_epi8(top_lohi, zero);
2062
4.09k
  const __m128i top_hilo = LoadUnaligned16(top_row + 32);
2063
4.09k
  const __m128i top_hihi = LoadUnaligned16(top_row + 48);
2064
4.09k
  const __m128i top5 = cvtepu8_epi16(top_hilo);
2065
4.09k
  const __m128i top6 = _mm_unpackhi_epi8(top_hilo, zero);
2066
4.09k
  const __m128i top7 = cvtepu8_epi16(top_hihi);
2067
4.09k
  const __m128i top8 = _mm_unpackhi_epi8(top_hihi, zero);
2068
4.09k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2069
4.09k
  const __m128i round = _mm_set1_epi16(128);
2070
4.09k
  const uint8_t *weights_base_ptr = smooth_weights + 60;
2071
20.4k
  for (int left_offset = 0; left_offset < 64; left_offset += 16) {
2072
16.3k
    const __m128i weights = LoadUnaligned16(weights_base_ptr + left_offset);
2073
16.3k
    const __m128i weights_lo = cvtepu8_epi16(weights);
2074
16.3k
    const __m128i weights_hi = _mm_unpackhi_epi8(weights, zero);
2075
16.3k
    const __m128i inverted_weights_lo = _mm_sub_epi16(scale, weights_lo);
2076
16.3k
    const __m128i inverted_weights_hi = _mm_sub_epi16(scale, weights_hi);
2077
16.3k
    const __m128i scaled_bottom_left_lo =
2078
16.3k
        _mm_mullo_epi16(inverted_weights_lo, bottom_left);
2079
16.3k
    const __m128i scaled_bottom_left_hi =
2080
16.3k
        _mm_mullo_epi16(inverted_weights_hi, bottom_left);
2081
147k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2082
131k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2083
131k
      const __m128i weights_y = _mm_shuffle_epi8(weights_lo, y_select);
2084
131k
      const __m128i scaled_bottom_left_y =
2085
131k
          _mm_shuffle_epi8(scaled_bottom_left_lo, y_select);
2086
131k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2087
131k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2088
131k
                                     round);
2089
131k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2090
131k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2091
131k
                                     round);
2092
131k
      write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2093
131k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2094
131k
                                     round);
2095
131k
      write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2096
131k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2097
131k
                                     round);
2098
131k
      dst += stride;
2099
131k
    }
2100
147k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2101
131k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2102
131k
      const __m128i weights_y = _mm_shuffle_epi8(weights_hi, y_select);
2103
131k
      const __m128i scaled_bottom_left_y =
2104
131k
          _mm_shuffle_epi8(scaled_bottom_left_hi, y_select);
2105
131k
      write_smooth_directional_sum16(dst, top1, top2, weights_y, weights_y,
2106
131k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2107
131k
                                     round);
2108
131k
      write_smooth_directional_sum16(dst + 16, top3, top4, weights_y, weights_y,
2109
131k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2110
131k
                                     round);
2111
131k
      write_smooth_directional_sum16(dst + 32, top5, top6, weights_y, weights_y,
2112
131k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2113
131k
                                     round);
2114
131k
      write_smooth_directional_sum16(dst + 48, top7, top8, weights_y, weights_y,
2115
131k
                                     scaled_bottom_left_y, scaled_bottom_left_y,
2116
131k
                                     round);
2117
131k
      dst += stride;
2118
131k
    }
2119
16.3k
  }
2120
4.09k
}
2121
2122
// -----------------------------------------------------------------------------
2123
// SMOOTH_H_PRED
2124
// Blends four horizontally-predicted pixels of one row and stores them as
// four bytes.  Each 32-bit lane carries one pixel's arithmetic:
//   pred = (weight * left + (scale - weight) * top_right + round) >> 8.
// |left_y| is the row's left pixel broadcast to all four 32-bit lanes and
// |weights| holds the per-column weights; both come from cvtepu8_epi32 style
// widening in the callers, so the upper 16 bits of every lane are zero and
// _mm_mullo_epi16 leaves the full product in each 32-bit lane.
// |scaled_top_right| is (scale - weight) * top_right, precomputed by callers.
static AOM_FORCE_INLINE void write_smooth_horizontal_sum4(
    uint8_t *LIBAOM_RESTRICT dst, const __m128i *left_y, const __m128i *weights,
    const __m128i *scaled_top_right, const __m128i *round) {
  const __m128i weighted_left_y = _mm_mullo_epi16(*left_y, *weights);
  const __m128i pred_sum = _mm_add_epi32(*scaled_top_right, weighted_left_y);
  // Equivalent to RightShiftWithRounding(pred[x][y], 8).
  const __m128i pred = _mm_srli_epi32(_mm_add_epi32(pred_sum, *round), 8);
  // Gather the low byte of each 32-bit lane into the low dword and store it.
  const __m128i cvtepi32_epi8 = _mm_set1_epi32(0x0C080400);
  Store4(dst, _mm_shuffle_epi8(pred, cvtepi32_epi8));
}
2134
2135
void aom_smooth_h_predictor_4x4_ssse3(
2136
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2137
    const uint8_t *LIBAOM_RESTRICT top_row,
2138
37.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2139
37.0k
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
2140
37.0k
  const __m128i left = cvtepu8_epi32(Load4(left_column));
2141
37.0k
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
2142
37.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2143
37.0k
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
2144
37.0k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2145
37.0k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2146
37.0k
  __m128i left_y = _mm_shuffle_epi32(left, 0);
2147
37.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2148
37.0k
                               &round);
2149
37.0k
  dst += stride;
2150
37.0k
  left_y = _mm_shuffle_epi32(left, 0x55);
2151
37.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2152
37.0k
                               &round);
2153
37.0k
  dst += stride;
2154
37.0k
  left_y = _mm_shuffle_epi32(left, 0xaa);
2155
37.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2156
37.0k
                               &round);
2157
37.0k
  dst += stride;
2158
37.0k
  left_y = _mm_shuffle_epi32(left, 0xff);
2159
37.0k
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
2160
37.0k
                               &round);
2161
37.0k
}
2162
2163
// SMOOTH_H 4x8: each output pixel blends the row's left pixel with the
// top-right pixel (top_row[3]) using the per-column smooth weights.  Rows
// are handled in two groups of four left pixels.
void aom_smooth_h_predictor_4x8_ssse3(
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
    const uint8_t *LIBAOM_RESTRICT top_row,
    const uint8_t *LIBAOM_RESTRICT left_column) {
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
  // (scale - weight) * top_right is row-invariant; precompute it.
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
  // Rows 0-3: broadcast each 32-bit lane of |left| in turn.
  __m128i left = cvtepu8_epi32(Load4(left_column));
  __m128i left_y = _mm_shuffle_epi32(left, 0);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0x55);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xaa);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xff);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;

  // Rows 4-7.
  left = cvtepu8_epi32(Load4(left_column + 4));
  left_y = _mm_shuffle_epi32(left, 0);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0x55);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xaa);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xff);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
}
2208
2209
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2210
// SMOOTH_H 4x16: each output pixel blends the row's left pixel with the
// top-right pixel (top_row[3]) using the per-column smooth weights.  Rows
// are handled in four groups of four left pixels.
void aom_smooth_h_predictor_4x16_ssse3(
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
    const uint8_t *LIBAOM_RESTRICT top_row,
    const uint8_t *LIBAOM_RESTRICT left_column) {
  const __m128i top_right = _mm_set1_epi32(top_row[3]);
  const __m128i weights = cvtepu8_epi32(Load4(smooth_weights));
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
  const __m128i inverted_weights = _mm_sub_epi32(scale, weights);
  // (scale - weight) * top_right is row-invariant; precompute it.
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
  // Rows 0-3: broadcast each 32-bit lane of |left| in turn.
  __m128i left = cvtepu8_epi32(Load4(left_column));
  __m128i left_y = _mm_shuffle_epi32(left, 0);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0x55);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xaa);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xff);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;

  // Rows 4-7.
  left = cvtepu8_epi32(Load4(left_column + 4));
  left_y = _mm_shuffle_epi32(left, 0);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0x55);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xaa);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xff);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;

  // Rows 8-11.
  left = cvtepu8_epi32(Load4(left_column + 8));
  left_y = _mm_shuffle_epi32(left, 0);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0x55);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xaa);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xff);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;

  // Rows 12-15.
  left = cvtepu8_epi32(Load4(left_column + 12));
  left_y = _mm_shuffle_epi32(left, 0);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0x55);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xaa);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
  dst += stride;
  left_y = _mm_shuffle_epi32(left, 0xff);
  write_smooth_horizontal_sum4(dst, &left_y, &weights, &scaled_top_right,
                               &round);
}
2291
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2292
2293
// For SMOOTH_H, |pixels| is the repeated left value for the row. For SMOOTH_V,
2294
// |pixels| is a segment of the top row or the whole top row, and |weights| is
2295
// repeated.
2296
void aom_smooth_h_predictor_8x4_ssse3(
2297
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2298
    const uint8_t *LIBAOM_RESTRICT top_row,
2299
19.3k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2300
19.3k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2301
19.3k
  const __m128i left = cvtepu8_epi16(Load4(left_column));
2302
19.3k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2303
19.3k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2304
19.3k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2305
19.3k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2306
19.3k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2307
19.3k
  __m128i y_select = _mm_set1_epi32(0x01000100);
2308
19.3k
  __m128i left_y = _mm_shuffle_epi8(left, y_select);
2309
19.3k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2310
19.3k
                                &round);
2311
19.3k
  dst += stride;
2312
19.3k
  y_select = _mm_set1_epi32(0x03020302);
2313
19.3k
  left_y = _mm_shuffle_epi8(left, y_select);
2314
19.3k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2315
19.3k
                                &round);
2316
19.3k
  dst += stride;
2317
19.3k
  y_select = _mm_set1_epi32(0x05040504);
2318
19.3k
  left_y = _mm_shuffle_epi8(left, y_select);
2319
19.3k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2320
19.3k
                                &round);
2321
19.3k
  dst += stride;
2322
19.3k
  y_select = _mm_set1_epi32(0x07060706);
2323
19.3k
  left_y = _mm_shuffle_epi8(left, y_select);
2324
19.3k
  write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2325
19.3k
                                &round);
2326
19.3k
}
2327
2328
void aom_smooth_h_predictor_8x8_ssse3(
2329
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2330
    const uint8_t *LIBAOM_RESTRICT top_row,
2331
27.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2332
27.0k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2333
27.0k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2334
27.0k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2335
27.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2336
27.0k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2337
27.0k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2338
27.0k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2339
243k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2340
216k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2341
216k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2342
216k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2343
216k
                                  &round);
2344
216k
    dst += stride;
2345
216k
  }
2346
27.0k
}
2347
2348
void aom_smooth_h_predictor_8x16_ssse3(
2349
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2350
    const uint8_t *LIBAOM_RESTRICT top_row,
2351
11.4k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2352
11.4k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2353
11.4k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2354
11.4k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2355
11.4k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2356
11.4k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2357
11.4k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2358
11.4k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2359
102k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2360
91.3k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2361
91.3k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2362
91.3k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2363
91.3k
                                  &round);
2364
91.3k
    dst += stride;
2365
91.3k
  }
2366
11.4k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2367
102k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2368
91.3k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2369
91.3k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2370
91.3k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2371
91.3k
                                  &round);
2372
91.3k
    dst += stride;
2373
91.3k
  }
2374
11.4k
}
2375
2376
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2377
void aom_smooth_h_predictor_8x32_ssse3(
2378
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2379
    const uint8_t *LIBAOM_RESTRICT top_row,
2380
4.21k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2381
4.21k
  const __m128i top_right = _mm_set1_epi16(top_row[7]);
2382
4.21k
  const __m128i weights = cvtepu8_epi16(LoadLo8(smooth_weights + 4));
2383
4.21k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2384
4.21k
  const __m128i inverted_weights = _mm_sub_epi16(scale, weights);
2385
4.21k
  const __m128i scaled_top_right = _mm_mullo_epi16(inverted_weights, top_right);
2386
4.21k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2387
4.21k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2388
37.9k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2389
33.7k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2390
33.7k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2391
33.7k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2392
33.7k
                                  &round);
2393
33.7k
    dst += stride;
2394
33.7k
  }
2395
4.21k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2396
37.9k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2397
33.7k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2398
33.7k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2399
33.7k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2400
33.7k
                                  &round);
2401
33.7k
    dst += stride;
2402
33.7k
  }
2403
4.21k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2404
37.9k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2405
33.7k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2406
33.7k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2407
33.7k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2408
33.7k
                                  &round);
2409
33.7k
    dst += stride;
2410
33.7k
  }
2411
4.21k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2412
37.9k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2413
33.7k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2414
33.7k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2415
33.7k
    write_smooth_directional_sum8(dst, &left_y, &weights, &scaled_top_right,
2416
33.7k
                                  &round);
2417
33.7k
    dst += stride;
2418
33.7k
  }
2419
4.21k
}
2420
2421
void aom_smooth_h_predictor_16x4_ssse3(
2422
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2423
    const uint8_t *LIBAOM_RESTRICT top_row,
2424
16.1k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2425
16.1k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2426
16.1k
  const __m128i left = cvtepu8_epi16(Load4(left_column));
2427
16.1k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2428
16.1k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2429
16.1k
  const __m128i weights1 = cvtepu8_epi16(weights);
2430
16.1k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2431
16.1k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2432
16.1k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2433
16.1k
  const __m128i scaled_top_right1 =
2434
16.1k
      _mm_mullo_epi16(inverted_weights1, top_right);
2435
16.1k
  const __m128i scaled_top_right2 =
2436
16.1k
      _mm_mullo_epi16(inverted_weights2, top_right);
2437
16.1k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2438
16.1k
  __m128i y_mask = _mm_set1_epi32(0x01000100);
2439
16.1k
  __m128i left_y = _mm_shuffle_epi8(left, y_mask);
2440
16.1k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2441
16.1k
                                 scaled_top_right1, scaled_top_right2, round);
2442
16.1k
  dst += stride;
2443
16.1k
  y_mask = _mm_set1_epi32(0x03020302);
2444
16.1k
  left_y = _mm_shuffle_epi8(left, y_mask);
2445
16.1k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2446
16.1k
                                 scaled_top_right1, scaled_top_right2, round);
2447
16.1k
  dst += stride;
2448
16.1k
  y_mask = _mm_set1_epi32(0x05040504);
2449
16.1k
  left_y = _mm_shuffle_epi8(left, y_mask);
2450
16.1k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2451
16.1k
                                 scaled_top_right1, scaled_top_right2, round);
2452
16.1k
  dst += stride;
2453
16.1k
  y_mask = _mm_set1_epi32(0x07060706);
2454
16.1k
  left_y = _mm_shuffle_epi8(left, y_mask);
2455
16.1k
  write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2456
16.1k
                                 scaled_top_right1, scaled_top_right2, round);
2457
16.1k
}
2458
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2459
2460
void aom_smooth_h_predictor_16x8_ssse3(
2461
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2462
    const uint8_t *LIBAOM_RESTRICT top_row,
2463
13.0k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2464
13.0k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2465
13.0k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2466
13.0k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2467
13.0k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2468
13.0k
  const __m128i weights1 = cvtepu8_epi16(weights);
2469
13.0k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2470
13.0k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2471
13.0k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2472
13.0k
  const __m128i scaled_top_right1 =
2473
13.0k
      _mm_mullo_epi16(inverted_weights1, top_right);
2474
13.0k
  const __m128i scaled_top_right2 =
2475
13.0k
      _mm_mullo_epi16(inverted_weights2, top_right);
2476
13.0k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2477
117k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2478
104k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2479
104k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2480
104k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2481
104k
                                   scaled_top_right1, scaled_top_right2, round);
2482
104k
    dst += stride;
2483
104k
  }
2484
13.0k
}
2485
2486
void aom_smooth_h_predictor_16x16_ssse3(
2487
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2488
    const uint8_t *LIBAOM_RESTRICT top_row,
2489
25.5k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2490
25.5k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2491
25.5k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2492
25.5k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2493
25.5k
  const __m128i weights1 = cvtepu8_epi16(weights);
2494
25.5k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2495
25.5k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2496
25.5k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2497
25.5k
  const __m128i scaled_top_right1 =
2498
25.5k
      _mm_mullo_epi16(inverted_weights1, top_right);
2499
25.5k
  const __m128i scaled_top_right2 =
2500
25.5k
      _mm_mullo_epi16(inverted_weights2, top_right);
2501
25.5k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2502
25.5k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2503
229k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2504
204k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2505
204k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2506
204k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2507
204k
                                   scaled_top_right1, scaled_top_right2, round);
2508
204k
    dst += stride;
2509
204k
  }
2510
25.5k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2511
229k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2512
204k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2513
204k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2514
204k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2515
204k
                                   scaled_top_right1, scaled_top_right2, round);
2516
204k
    dst += stride;
2517
204k
  }
2518
25.5k
}
2519
2520
void aom_smooth_h_predictor_16x32_ssse3(
2521
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2522
    const uint8_t *LIBAOM_RESTRICT top_row,
2523
8.93k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2524
8.93k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2525
8.93k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2526
8.93k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2527
8.93k
  const __m128i weights1 = cvtepu8_epi16(weights);
2528
8.93k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2529
8.93k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2530
8.93k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2531
8.93k
  const __m128i scaled_top_right1 =
2532
8.93k
      _mm_mullo_epi16(inverted_weights1, top_right);
2533
8.93k
  const __m128i scaled_top_right2 =
2534
8.93k
      _mm_mullo_epi16(inverted_weights2, top_right);
2535
8.93k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2536
8.93k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2537
80.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2538
71.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2539
71.4k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2540
71.4k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2541
71.4k
                                   scaled_top_right1, scaled_top_right2, round);
2542
71.4k
    dst += stride;
2543
71.4k
  }
2544
8.93k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2545
80.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2546
71.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2547
71.4k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2548
71.4k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2549
71.4k
                                   scaled_top_right1, scaled_top_right2, round);
2550
71.4k
    dst += stride;
2551
71.4k
  }
2552
8.93k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2553
80.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2554
71.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2555
71.4k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2556
71.4k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2557
71.4k
                                   scaled_top_right1, scaled_top_right2, round);
2558
71.4k
    dst += stride;
2559
71.4k
  }
2560
8.93k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2561
80.3k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2562
71.4k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2563
71.4k
    const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2564
71.4k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2565
71.4k
                                   scaled_top_right1, scaled_top_right2, round);
2566
71.4k
    dst += stride;
2567
71.4k
  }
2568
8.93k
}
2569
2570
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2571
void aom_smooth_h_predictor_16x64_ssse3(
2572
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2573
    const uint8_t *LIBAOM_RESTRICT top_row,
2574
2.63k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2575
2.63k
  const __m128i top_right = _mm_set1_epi16(top_row[15]);
2576
2.63k
  const __m128i weights = LoadUnaligned16(smooth_weights + 12);
2577
2.63k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2578
2.63k
  const __m128i weights1 = cvtepu8_epi16(weights);
2579
2.63k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights, 8));
2580
2.63k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2581
2.63k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2582
2.63k
  const __m128i scaled_top_right1 =
2583
2.63k
      _mm_mullo_epi16(inverted_weights1, top_right);
2584
2.63k
  const __m128i scaled_top_right2 =
2585
2.63k
      _mm_mullo_epi16(inverted_weights2, top_right);
2586
2.63k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2587
23.7k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
2588
21.1k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
2589
189k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2590
168k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2591
168k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2592
168k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2593
168k
                                     scaled_top_right1, scaled_top_right2,
2594
168k
                                     round);
2595
168k
      dst += stride;
2596
168k
    }
2597
21.1k
  }
2598
2.63k
}
2599
2600
void aom_smooth_h_predictor_32x8_ssse3(
2601
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2602
    const uint8_t *LIBAOM_RESTRICT top_row,
2603
12.1k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2604
12.1k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2605
12.1k
  const __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2606
12.1k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2607
12.1k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2608
12.1k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2609
12.1k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2610
12.1k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2611
12.1k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2612
12.1k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2613
12.1k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2614
12.1k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2615
12.1k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2616
12.1k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2617
12.1k
  const __m128i scaled_top_right1 =
2618
12.1k
      _mm_mullo_epi16(inverted_weights1, top_right);
2619
12.1k
  const __m128i scaled_top_right2 =
2620
12.1k
      _mm_mullo_epi16(inverted_weights2, top_right);
2621
12.1k
  const __m128i scaled_top_right3 =
2622
12.1k
      _mm_mullo_epi16(inverted_weights3, top_right);
2623
12.1k
  const __m128i scaled_top_right4 =
2624
12.1k
      _mm_mullo_epi16(inverted_weights4, top_right);
2625
12.1k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2626
108k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2627
96.8k
    __m128i y_select = _mm_set1_epi32(y_mask);
2628
96.8k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2629
96.8k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2630
96.8k
                                   scaled_top_right1, scaled_top_right2, round);
2631
96.8k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2632
96.8k
                                   scaled_top_right3, scaled_top_right4, round);
2633
96.8k
    dst += stride;
2634
96.8k
  }
2635
12.1k
}
2636
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2637
2638
void aom_smooth_h_predictor_32x16_ssse3(
2639
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2640
    const uint8_t *LIBAOM_RESTRICT top_row,
2641
6.58k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2642
6.58k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2643
6.58k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2644
6.58k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2645
6.58k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2646
6.58k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2647
6.58k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2648
6.58k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2649
6.58k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2650
6.58k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2651
6.58k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2652
6.58k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2653
6.58k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2654
6.58k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2655
6.58k
  const __m128i scaled_top_right1 =
2656
6.58k
      _mm_mullo_epi16(inverted_weights1, top_right);
2657
6.58k
  const __m128i scaled_top_right2 =
2658
6.58k
      _mm_mullo_epi16(inverted_weights2, top_right);
2659
6.58k
  const __m128i scaled_top_right3 =
2660
6.58k
      _mm_mullo_epi16(inverted_weights3, top_right);
2661
6.58k
  const __m128i scaled_top_right4 =
2662
6.58k
      _mm_mullo_epi16(inverted_weights4, top_right);
2663
6.58k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2664
59.2k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2665
52.6k
    __m128i y_select = _mm_set1_epi32(y_mask);
2666
52.6k
    __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2667
52.6k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2668
52.6k
                                   scaled_top_right1, scaled_top_right2, round);
2669
52.6k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2670
52.6k
                                   scaled_top_right3, scaled_top_right4, round);
2671
52.6k
    dst += stride;
2672
52.6k
  }
2673
6.58k
  const __m128i left2 =
2674
6.58k
      cvtepu8_epi16(LoadLo8((const uint8_t *)left_column + 8));
2675
59.2k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2676
52.6k
    __m128i y_select = _mm_set1_epi32(y_mask);
2677
52.6k
    __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2678
52.6k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2679
52.6k
                                   scaled_top_right1, scaled_top_right2, round);
2680
52.6k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2681
52.6k
                                   scaled_top_right3, scaled_top_right4, round);
2682
52.6k
    dst += stride;
2683
52.6k
  }
2684
6.58k
}
2685
2686
void aom_smooth_h_predictor_32x32_ssse3(
2687
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2688
    const uint8_t *LIBAOM_RESTRICT top_row,
2689
28.1k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2690
28.1k
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2691
28.1k
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2692
28.1k
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2693
28.1k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2694
28.1k
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2695
28.1k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2696
28.1k
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2697
28.1k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2698
28.1k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2699
28.1k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2700
28.1k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2701
28.1k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2702
28.1k
  const __m128i scaled_top_right1 =
2703
28.1k
      _mm_mullo_epi16(inverted_weights1, top_right);
2704
28.1k
  const __m128i scaled_top_right2 =
2705
28.1k
      _mm_mullo_epi16(inverted_weights2, top_right);
2706
28.1k
  const __m128i scaled_top_right3 =
2707
28.1k
      _mm_mullo_epi16(inverted_weights3, top_right);
2708
28.1k
  const __m128i scaled_top_right4 =
2709
28.1k
      _mm_mullo_epi16(inverted_weights4, top_right);
2710
28.1k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2711
28.1k
  __m128i left = cvtepu8_epi16(LoadLo8(left_column));
2712
252k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2713
224k
    __m128i y_select = _mm_set1_epi32(y_mask);
2714
224k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2715
224k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2716
224k
                                   scaled_top_right1, scaled_top_right2, round);
2717
224k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2718
224k
                                   scaled_top_right3, scaled_top_right4, round);
2719
224k
    dst += stride;
2720
224k
  }
2721
28.1k
  left = cvtepu8_epi16(LoadLo8(left_column + 8));
2722
252k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2723
224k
    __m128i y_select = _mm_set1_epi32(y_mask);
2724
224k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2725
224k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2726
224k
                                   scaled_top_right1, scaled_top_right2, round);
2727
224k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2728
224k
                                   scaled_top_right3, scaled_top_right4, round);
2729
224k
    dst += stride;
2730
224k
  }
2731
28.1k
  left = cvtepu8_epi16(LoadLo8(left_column + 16));
2732
252k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2733
224k
    __m128i y_select = _mm_set1_epi32(y_mask);
2734
224k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2735
224k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2736
224k
                                   scaled_top_right1, scaled_top_right2, round);
2737
224k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2738
224k
                                   scaled_top_right3, scaled_top_right4, round);
2739
224k
    dst += stride;
2740
224k
  }
2741
28.1k
  left = cvtepu8_epi16(LoadLo8(left_column + 24));
2742
252k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2743
224k
    __m128i y_select = _mm_set1_epi32(y_mask);
2744
224k
    __m128i left_y = _mm_shuffle_epi8(left, y_select);
2745
224k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2746
224k
                                   scaled_top_right1, scaled_top_right2, round);
2747
224k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2748
224k
                                   scaled_top_right3, scaled_top_right4, round);
2749
224k
    dst += stride;
2750
224k
  }
2751
28.1k
}
2752
2753
void aom_smooth_h_predictor_32x64_ssse3(
2754
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2755
    const uint8_t *LIBAOM_RESTRICT top_row,
2756
558
    const uint8_t *LIBAOM_RESTRICT left_column) {
2757
558
  const __m128i top_right = _mm_set1_epi16(top_row[31]);
2758
558
  const __m128i weights_lo = LoadUnaligned16(smooth_weights + 28);
2759
558
  const __m128i weights_hi = LoadUnaligned16(smooth_weights + 44);
2760
558
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2761
558
  const __m128i weights1 = cvtepu8_epi16(weights_lo);
2762
558
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lo, 8));
2763
558
  const __m128i weights3 = cvtepu8_epi16(weights_hi);
2764
558
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_hi, 8));
2765
558
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2766
558
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2767
558
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2768
558
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2769
558
  const __m128i scaled_top_right1 =
2770
558
      _mm_mullo_epi16(inverted_weights1, top_right);
2771
558
  const __m128i scaled_top_right2 =
2772
558
      _mm_mullo_epi16(inverted_weights2, top_right);
2773
558
  const __m128i scaled_top_right3 =
2774
558
      _mm_mullo_epi16(inverted_weights3, top_right);
2775
558
  const __m128i scaled_top_right4 =
2776
558
      _mm_mullo_epi16(inverted_weights4, top_right);
2777
558
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2778
5.02k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
2779
4.46k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
2780
40.1k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2781
35.7k
      const __m128i y_select = _mm_set1_epi32(y_mask);
2782
35.7k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
2783
35.7k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2784
35.7k
                                     scaled_top_right1, scaled_top_right2,
2785
35.7k
                                     round);
2786
35.7k
      write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3,
2787
35.7k
                                     weights4, scaled_top_right3,
2788
35.7k
                                     scaled_top_right4, round);
2789
35.7k
      dst += stride;
2790
35.7k
    }
2791
4.46k
  }
2792
558
}
2793
2794
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2795
void aom_smooth_h_predictor_64x16_ssse3(
2796
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2797
    const uint8_t *LIBAOM_RESTRICT top_row,
2798
3.55k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2799
3.55k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2800
3.55k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2801
3.55k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2802
3.55k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2803
3.55k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2804
3.55k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2805
3.55k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2806
3.55k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2807
3.55k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2808
3.55k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2809
3.55k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2810
3.55k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2811
3.55k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2812
3.55k
  const __m128i scaled_top_right1 =
2813
3.55k
      _mm_mullo_epi16(inverted_weights1, top_right);
2814
3.55k
  const __m128i scaled_top_right2 =
2815
3.55k
      _mm_mullo_epi16(inverted_weights2, top_right);
2816
3.55k
  const __m128i scaled_top_right3 =
2817
3.55k
      _mm_mullo_epi16(inverted_weights3, top_right);
2818
3.55k
  const __m128i scaled_top_right4 =
2819
3.55k
      _mm_mullo_epi16(inverted_weights4, top_right);
2820
3.55k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2821
3.55k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2822
3.55k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2823
3.55k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2824
3.55k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2825
3.55k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
2826
3.55k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
2827
3.55k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
2828
3.55k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
2829
3.55k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
2830
3.55k
  const __m128i scaled_top_right5 =
2831
3.55k
      _mm_mullo_epi16(inverted_weights5, top_right);
2832
3.55k
  const __m128i scaled_top_right6 =
2833
3.55k
      _mm_mullo_epi16(inverted_weights6, top_right);
2834
3.55k
  const __m128i scaled_top_right7 =
2835
3.55k
      _mm_mullo_epi16(inverted_weights7, top_right);
2836
3.55k
  const __m128i scaled_top_right8 =
2837
3.55k
      _mm_mullo_epi16(inverted_weights8, top_right);
2838
3.55k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2839
31.9k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2840
28.4k
    __m128i y_select = _mm_set1_epi32(y_mask);
2841
28.4k
    __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2842
28.4k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2843
28.4k
                                   scaled_top_right1, scaled_top_right2, round);
2844
28.4k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2845
28.4k
                                   scaled_top_right3, scaled_top_right4, round);
2846
28.4k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2847
28.4k
                                   scaled_top_right5, scaled_top_right6, round);
2848
28.4k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2849
28.4k
                                   scaled_top_right7, scaled_top_right8, round);
2850
28.4k
    dst += stride;
2851
28.4k
  }
2852
3.55k
  const __m128i left2 = cvtepu8_epi16(LoadLo8(left_column + 8));
2853
31.9k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2854
28.4k
    __m128i y_select = _mm_set1_epi32(y_mask);
2855
28.4k
    __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2856
28.4k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2857
28.4k
                                   scaled_top_right1, scaled_top_right2, round);
2858
28.4k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2859
28.4k
                                   scaled_top_right3, scaled_top_right4, round);
2860
28.4k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2861
28.4k
                                   scaled_top_right5, scaled_top_right6, round);
2862
28.4k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2863
28.4k
                                   scaled_top_right7, scaled_top_right8, round);
2864
28.4k
    dst += stride;
2865
28.4k
  }
2866
3.55k
}
2867
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
2868
2869
void aom_smooth_h_predictor_64x32_ssse3(
2870
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2871
    const uint8_t *LIBAOM_RESTRICT top_row,
2872
1.13k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2873
1.13k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2874
1.13k
  const __m128i left1 = cvtepu8_epi16(LoadLo8(left_column));
2875
1.13k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2876
1.13k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2877
1.13k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2878
1.13k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2879
1.13k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2880
1.13k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2881
1.13k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2882
1.13k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2883
1.13k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2884
1.13k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2885
1.13k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2886
1.13k
  const __m128i scaled_top_right1 =
2887
1.13k
      _mm_mullo_epi16(inverted_weights1, top_right);
2888
1.13k
  const __m128i scaled_top_right2 =
2889
1.13k
      _mm_mullo_epi16(inverted_weights2, top_right);
2890
1.13k
  const __m128i scaled_top_right3 =
2891
1.13k
      _mm_mullo_epi16(inverted_weights3, top_right);
2892
1.13k
  const __m128i scaled_top_right4 =
2893
1.13k
      _mm_mullo_epi16(inverted_weights4, top_right);
2894
1.13k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2895
1.13k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2896
1.13k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2897
1.13k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2898
1.13k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2899
1.13k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
2900
1.13k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
2901
1.13k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
2902
1.13k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
2903
1.13k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
2904
1.13k
  const __m128i scaled_top_right5 =
2905
1.13k
      _mm_mullo_epi16(inverted_weights5, top_right);
2906
1.13k
  const __m128i scaled_top_right6 =
2907
1.13k
      _mm_mullo_epi16(inverted_weights6, top_right);
2908
1.13k
  const __m128i scaled_top_right7 =
2909
1.13k
      _mm_mullo_epi16(inverted_weights7, top_right);
2910
1.13k
  const __m128i scaled_top_right8 =
2911
1.13k
      _mm_mullo_epi16(inverted_weights8, top_right);
2912
1.13k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
2913
10.2k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2914
9.07k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2915
9.07k
    const __m128i left_y = _mm_shuffle_epi8(left1, y_select);
2916
9.07k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2917
9.07k
                                   scaled_top_right1, scaled_top_right2, round);
2918
9.07k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2919
9.07k
                                   scaled_top_right3, scaled_top_right4, round);
2920
9.07k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2921
9.07k
                                   scaled_top_right5, scaled_top_right6, round);
2922
9.07k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2923
9.07k
                                   scaled_top_right7, scaled_top_right8, round);
2924
9.07k
    dst += stride;
2925
9.07k
  }
2926
1.13k
  const __m128i left2 = cvtepu8_epi16(LoadLo8(left_column + 8));
2927
10.2k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2928
9.07k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2929
9.07k
    const __m128i left_y = _mm_shuffle_epi8(left2, y_select);
2930
9.07k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2931
9.07k
                                   scaled_top_right1, scaled_top_right2, round);
2932
9.07k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2933
9.07k
                                   scaled_top_right3, scaled_top_right4, round);
2934
9.07k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2935
9.07k
                                   scaled_top_right5, scaled_top_right6, round);
2936
9.07k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2937
9.07k
                                   scaled_top_right7, scaled_top_right8, round);
2938
9.07k
    dst += stride;
2939
9.07k
  }
2940
1.13k
  const __m128i left3 = cvtepu8_epi16(LoadLo8(left_column + 16));
2941
10.2k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2942
9.07k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2943
9.07k
    const __m128i left_y = _mm_shuffle_epi8(left3, y_select);
2944
9.07k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2945
9.07k
                                   scaled_top_right1, scaled_top_right2, round);
2946
9.07k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2947
9.07k
                                   scaled_top_right3, scaled_top_right4, round);
2948
9.07k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2949
9.07k
                                   scaled_top_right5, scaled_top_right6, round);
2950
9.07k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2951
9.07k
                                   scaled_top_right7, scaled_top_right8, round);
2952
9.07k
    dst += stride;
2953
9.07k
  }
2954
1.13k
  const __m128i left4 = cvtepu8_epi16(LoadLo8(left_column + 24));
2955
10.2k
  for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
2956
9.07k
    const __m128i y_select = _mm_set1_epi32(y_mask);
2957
9.07k
    const __m128i left_y = _mm_shuffle_epi8(left4, y_select);
2958
9.07k
    write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
2959
9.07k
                                   scaled_top_right1, scaled_top_right2, round);
2960
9.07k
    write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3, weights4,
2961
9.07k
                                   scaled_top_right3, scaled_top_right4, round);
2962
9.07k
    write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5, weights6,
2963
9.07k
                                   scaled_top_right5, scaled_top_right6, round);
2964
9.07k
    write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7, weights8,
2965
9.07k
                                   scaled_top_right7, scaled_top_right8, round);
2966
9.07k
    dst += stride;
2967
9.07k
  }
2968
1.13k
}
2969
2970
void aom_smooth_h_predictor_64x64_ssse3(
2971
    uint8_t *LIBAOM_RESTRICT dst, ptrdiff_t stride,
2972
    const uint8_t *LIBAOM_RESTRICT top_row,
2973
3.63k
    const uint8_t *LIBAOM_RESTRICT left_column) {
2974
3.63k
  const __m128i top_right = _mm_set1_epi16(top_row[63]);
2975
3.63k
  const __m128i weights_lolo = LoadUnaligned16(smooth_weights + 60);
2976
3.63k
  const __m128i weights_lohi = LoadUnaligned16(smooth_weights + 76);
2977
3.63k
  const __m128i scale = _mm_set1_epi16(1 << SMOOTH_WEIGHT_LOG2_SCALE);
2978
3.63k
  const __m128i weights1 = cvtepu8_epi16(weights_lolo);
2979
3.63k
  const __m128i weights2 = cvtepu8_epi16(_mm_srli_si128(weights_lolo, 8));
2980
3.63k
  const __m128i weights3 = cvtepu8_epi16(weights_lohi);
2981
3.63k
  const __m128i weights4 = cvtepu8_epi16(_mm_srli_si128(weights_lohi, 8));
2982
3.63k
  const __m128i inverted_weights1 = _mm_sub_epi16(scale, weights1);
2983
3.63k
  const __m128i inverted_weights2 = _mm_sub_epi16(scale, weights2);
2984
3.63k
  const __m128i inverted_weights3 = _mm_sub_epi16(scale, weights3);
2985
3.63k
  const __m128i inverted_weights4 = _mm_sub_epi16(scale, weights4);
2986
3.63k
  const __m128i scaled_top_right1 =
2987
3.63k
      _mm_mullo_epi16(inverted_weights1, top_right);
2988
3.63k
  const __m128i scaled_top_right2 =
2989
3.63k
      _mm_mullo_epi16(inverted_weights2, top_right);
2990
3.63k
  const __m128i scaled_top_right3 =
2991
3.63k
      _mm_mullo_epi16(inverted_weights3, top_right);
2992
3.63k
  const __m128i scaled_top_right4 =
2993
3.63k
      _mm_mullo_epi16(inverted_weights4, top_right);
2994
3.63k
  const __m128i weights_hilo = LoadUnaligned16(smooth_weights + 92);
2995
3.63k
  const __m128i weights_hihi = LoadUnaligned16(smooth_weights + 108);
2996
3.63k
  const __m128i weights5 = cvtepu8_epi16(weights_hilo);
2997
3.63k
  const __m128i weights6 = cvtepu8_epi16(_mm_srli_si128(weights_hilo, 8));
2998
3.63k
  const __m128i weights7 = cvtepu8_epi16(weights_hihi);
2999
3.63k
  const __m128i weights8 = cvtepu8_epi16(_mm_srli_si128(weights_hihi, 8));
3000
3.63k
  const __m128i inverted_weights5 = _mm_sub_epi16(scale, weights5);
3001
3.63k
  const __m128i inverted_weights6 = _mm_sub_epi16(scale, weights6);
3002
3.63k
  const __m128i inverted_weights7 = _mm_sub_epi16(scale, weights7);
3003
3.63k
  const __m128i inverted_weights8 = _mm_sub_epi16(scale, weights8);
3004
3.63k
  const __m128i scaled_top_right5 =
3005
3.63k
      _mm_mullo_epi16(inverted_weights5, top_right);
3006
3.63k
  const __m128i scaled_top_right6 =
3007
3.63k
      _mm_mullo_epi16(inverted_weights6, top_right);
3008
3.63k
  const __m128i scaled_top_right7 =
3009
3.63k
      _mm_mullo_epi16(inverted_weights7, top_right);
3010
3.63k
  const __m128i scaled_top_right8 =
3011
3.63k
      _mm_mullo_epi16(inverted_weights8, top_right);
3012
3.63k
  const __m128i round = _mm_set1_epi16(1 << (SMOOTH_WEIGHT_LOG2_SCALE - 1));
3013
32.7k
  for (int left_offset = 0; left_offset < 64; left_offset += 8) {
3014
29.0k
    const __m128i left = cvtepu8_epi16(LoadLo8(left_column + left_offset));
3015
261k
    for (int y_mask = 0x01000100; y_mask < 0x0F0E0F0F; y_mask += 0x02020202) {
3016
232k
      const __m128i y_select = _mm_set1_epi32(y_mask);
3017
232k
      const __m128i left_y = _mm_shuffle_epi8(left, y_select);
3018
232k
      write_smooth_directional_sum16(dst, left_y, left_y, weights1, weights2,
3019
232k
                                     scaled_top_right1, scaled_top_right2,
3020
232k
                                     round);
3021
232k
      write_smooth_directional_sum16(dst + 16, left_y, left_y, weights3,
3022
232k
                                     weights4, scaled_top_right3,
3023
232k
                                     scaled_top_right4, round);
3024
232k
      write_smooth_directional_sum16(dst + 32, left_y, left_y, weights5,
3025
232k
                                     weights6, scaled_top_right5,
3026
232k
                                     scaled_top_right6, round);
3027
232k
      write_smooth_directional_sum16(dst + 48, left_y, left_y, weights7,
3028
232k
                                     weights8, scaled_top_right7,
3029
232k
                                     scaled_top_right8, round);
3030
232k
      dst += stride;
3031
232k
    }
3032
29.0k
  }
3033
3.63k
}