Coverage Report

Created: 2025-07-23 06:32

/src/aom/aom_dsp/x86/intrapred_avx2.c
Line
Count
Source
1
/*
2
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
14
#include "config/av1_rtcd.h"
15
#include "aom_dsp/x86/intrapred_x86.h"
16
#include "aom_dsp/x86/intrapred_utils.h"
17
#include "aom_dsp/x86/lpf_common_sse2.h"
18
19
316k
static inline __m256i dc_sum_64(const uint8_t *ref) {
20
316k
  const __m256i x0 = _mm256_loadu_si256((const __m256i *)ref);
21
316k
  const __m256i x1 = _mm256_loadu_si256((const __m256i *)(ref + 32));
22
316k
  const __m256i zero = _mm256_setzero_si256();
23
316k
  __m256i y0 = _mm256_sad_epu8(x0, zero);
24
316k
  __m256i y1 = _mm256_sad_epu8(x1, zero);
25
316k
  y0 = _mm256_add_epi64(y0, y1);
26
316k
  __m256i u0 = _mm256_permute2x128_si256(y0, y0, 1);
27
316k
  y0 = _mm256_add_epi64(u0, y0);
28
316k
  u0 = _mm256_unpackhi_epi64(y0, y0);
29
316k
  return _mm256_add_epi16(y0, u0);
30
316k
}
31
32
2.23M
static inline __m256i dc_sum_32(const uint8_t *ref) {
33
2.23M
  const __m256i x = _mm256_loadu_si256((const __m256i *)ref);
34
2.23M
  const __m256i zero = _mm256_setzero_si256();
35
2.23M
  __m256i y = _mm256_sad_epu8(x, zero);
36
2.23M
  __m256i u = _mm256_permute2x128_si256(y, y, 1);
37
2.23M
  y = _mm256_add_epi64(u, y);
38
2.23M
  u = _mm256_unpackhi_epi64(y, y);
39
2.23M
  return _mm256_add_epi16(y, u);
40
2.23M
}
41
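For reference, a minimal scalar sketch of what dc_sum_32() and dc_sum_64() compute: the SAD-against-zero plus the permute/unpack folds reduce the 32 or 64 reference bytes to one 16-bit sum held in the low lane of the result. The helper name below is illustrative, not part of this file.

// Scalar equivalent of dc_sum_32()/dc_sum_64(): sum n 8-bit reference
// pixels (n = 32 or 64). The AVX2 versions keep the sum in the low
// 16-bit lane of a __m256i instead of returning an int.
static int dc_sum_scalar(const uint8_t *ref, int n) {
  int sum = 0;
  for (int i = 0; i < n; ++i) sum += ref[i];
  return sum;
}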
42
static inline void row_store_32xh(const __m256i *r, int height, uint8_t *dst,
43
1.40M
                                  ptrdiff_t stride) {
44
44.2M
  for (int i = 0; i < height; ++i) {
45
42.8M
    _mm256_storeu_si256((__m256i *)dst, *r);
46
42.8M
    dst += stride;
47
42.8M
  }
48
1.40M
}
49
50
static inline void row_store_32x2xh(const __m256i *r0, const __m256i *r1,
51
                                    int height, uint8_t *dst,
52
4.33k
                                    ptrdiff_t stride) {
53
209k
  for (int i = 0; i < height; ++i) {
54
204k
    _mm256_storeu_si256((__m256i *)dst, *r0);
55
204k
    _mm256_storeu_si256((__m256i *)(dst + 32), *r1);
56
204k
    dst += stride;
57
204k
  }
58
4.33k
}
59
60
static inline void row_store_64xh(const __m256i *r, int height, uint8_t *dst,
61
212k
                                  ptrdiff_t stride) {
62
10.9M
  for (int i = 0; i < height; ++i) {
63
10.7M
    _mm256_storeu_si256((__m256i *)dst, *r);
64
10.7M
    _mm256_storeu_si256((__m256i *)(dst + 32), *r);
65
10.7M
    dst += stride;
66
10.7M
  }
67
212k
}
68
69
#if CONFIG_AV1_HIGHBITDEPTH
70
static DECLARE_ALIGNED(16, uint8_t, HighbdLoadMaskx[8][16]) = {
71
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
72
  { 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
73
  { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
74
  { 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
75
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7 },
76
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5 },
77
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3 },
78
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
79
};
80
81
static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx4[4][16]) = {
82
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
83
  { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 4, 5, 8, 9, 12, 13 },
84
  { 0, 1, 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 6, 7, 10, 11 },
85
  { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 0, 1, 8, 9 }
86
};
87
88
static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx[8][32]) = {
89
  { 0, 1, 4, 5, 8,  9,  12, 13, 16, 17, 20, 21, 24, 25, 28, 29,
90
    2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 },
91
  { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27,
92
    0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 },
93
  { 0, 1, 0, 1, 4, 5, 8,  9,  12, 13, 16, 17, 20, 21, 24, 25,
94
    0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27 },
95
  { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23,
96
    0, 1, 0, 1, 0, 1, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25 },
97
  { 0, 1, 0, 1, 0, 1, 0, 1, 8,  9,  12, 13, 16, 17, 20, 21,
98
    0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19, 22, 23 },
99
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19,
100
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17, 20, 21 },
101
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17,
102
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15, 18, 19 },
103
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15,
104
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 16, 17 }
105
};
106
107
static DECLARE_ALIGNED(32, uint16_t, HighbdBaseMask[17][16]) = {
108
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
109
  { 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
110
  { 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
111
  { 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
112
  { 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
113
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
114
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115
    0 },
116
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0,
117
    0, 0 },
118
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0,
119
    0, 0, 0, 0 },
120
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0,
121
    0, 0, 0, 0, 0, 0 },
122
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
123
    0xffff, 0, 0, 0, 0, 0, 0 },
124
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
125
    0xffff, 0xffff, 0, 0, 0, 0, 0 },
126
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
127
    0xffff, 0xffff, 0xffff, 0, 0, 0, 0 },
128
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
129
    0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0 },
130
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
131
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0 },
132
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
133
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 },
134
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
135
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }
136
};
137
138
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
139
56.4k
static inline void highbd_transpose16x4_8x8_sse2(__m128i *x, __m128i *d) {
140
56.4k
  __m128i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
141
142
56.4k
  r0 = _mm_unpacklo_epi16(x[0], x[1]);
143
56.4k
  r1 = _mm_unpacklo_epi16(x[2], x[3]);
144
56.4k
  r2 = _mm_unpacklo_epi16(x[4], x[5]);
145
56.4k
  r3 = _mm_unpacklo_epi16(x[6], x[7]);
146
147
56.4k
  r4 = _mm_unpacklo_epi16(x[8], x[9]);
148
56.4k
  r5 = _mm_unpacklo_epi16(x[10], x[11]);
149
56.4k
  r6 = _mm_unpacklo_epi16(x[12], x[13]);
150
56.4k
  r7 = _mm_unpacklo_epi16(x[14], x[15]);
151
152
56.4k
  r8 = _mm_unpacklo_epi32(r0, r1);
153
56.4k
  r9 = _mm_unpackhi_epi32(r0, r1);
154
56.4k
  r10 = _mm_unpacklo_epi32(r2, r3);
155
56.4k
  r11 = _mm_unpackhi_epi32(r2, r3);
156
157
56.4k
  r12 = _mm_unpacklo_epi32(r4, r5);
158
56.4k
  r13 = _mm_unpackhi_epi32(r4, r5);
159
56.4k
  r14 = _mm_unpacklo_epi32(r6, r7);
160
56.4k
  r15 = _mm_unpackhi_epi32(r6, r7);
161
162
56.4k
  r0 = _mm_unpacklo_epi64(r8, r9);
163
56.4k
  r1 = _mm_unpackhi_epi64(r8, r9);
164
56.4k
  r2 = _mm_unpacklo_epi64(r10, r11);
165
56.4k
  r3 = _mm_unpackhi_epi64(r10, r11);
166
167
56.4k
  r4 = _mm_unpacklo_epi64(r12, r13);
168
56.4k
  r5 = _mm_unpackhi_epi64(r12, r13);
169
56.4k
  r6 = _mm_unpacklo_epi64(r14, r15);
170
56.4k
  r7 = _mm_unpackhi_epi64(r14, r15);
171
172
56.4k
  d[0] = _mm_unpacklo_epi64(r0, r2);
173
56.4k
  d[1] = _mm_unpacklo_epi64(r4, r6);
174
56.4k
  d[2] = _mm_unpacklo_epi64(r1, r3);
175
56.4k
  d[3] = _mm_unpacklo_epi64(r5, r7);
176
177
56.4k
  d[4] = _mm_unpackhi_epi64(r0, r2);
178
56.4k
  d[5] = _mm_unpackhi_epi64(r4, r6);
179
56.4k
  d[6] = _mm_unpackhi_epi64(r1, r3);
180
56.4k
  d[7] = _mm_unpackhi_epi64(r5, r7);
181
56.4k
}
182
183
23.7k
static inline void highbd_transpose4x16_avx2(__m256i *x, __m256i *d) {
184
23.7k
  __m256i w0, w1, w2, w3, ww0, ww1;
185
186
23.7k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);  // 00 10 01 11 02 12 03 13
187
23.7k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);  // 20 30 21 31 22 32 23 33
188
23.7k
  w2 = _mm256_unpackhi_epi16(x[0], x[1]);  // 40 50 41 51 42 52 43 53
189
23.7k
  w3 = _mm256_unpackhi_epi16(x[2], x[3]);  // 60 70 61 71 62 72 63 73
190
191
23.7k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 00 10 20 30 01 11 21 31
192
23.7k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 40 50 60 70 41 51 61 71
193
194
23.7k
  d[0] = _mm256_unpacklo_epi64(ww0, ww1);  // 00 10 20 30 40 50 60 70
195
23.7k
  d[1] = _mm256_unpackhi_epi64(ww0, ww1);  // 01 11 21 31 41 51 61 71
196
197
23.7k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 02 12 22 32 03 13 23 33
198
23.7k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 42 52 62 72 43 53 63 73
199
200
23.7k
  d[2] = _mm256_unpacklo_epi64(ww0, ww1);  // 02 12 22 32 42 52 62 72
201
23.7k
  d[3] = _mm256_unpackhi_epi64(ww0, ww1);  // 03 13 23 33 43 53 63 73
202
23.7k
}
203
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
204
205
166k
static inline void highbd_transpose8x16_16x8_avx2(__m256i *x, __m256i *d) {
206
166k
  __m256i w0, w1, w2, w3, ww0, ww1;
207
208
166k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);  // 00 10 01 11 02 12 03 13
209
166k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);  // 20 30 21 31 22 32 23 33
210
166k
  w2 = _mm256_unpacklo_epi16(x[4], x[5]);  // 40 50 41 51 42 52 43 53
211
166k
  w3 = _mm256_unpacklo_epi16(x[6], x[7]);  // 60 70 61 71 62 72 63 73
212
213
166k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 00 10 20 30 01 11 21 31
214
166k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 40 50 60 70 41 51 61 71
215
216
166k
  d[0] = _mm256_unpacklo_epi64(ww0, ww1);  // 00 10 20 30 40 50 60 70
217
166k
  d[1] = _mm256_unpackhi_epi64(ww0, ww1);  // 01 11 21 31 41 51 61 71
218
219
166k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 02 12 22 32 03 13 23 33
220
166k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 42 52 62 72 43 53 63 73
221
222
166k
  d[2] = _mm256_unpacklo_epi64(ww0, ww1);  // 02 12 22 32 42 52 62 72
223
166k
  d[3] = _mm256_unpackhi_epi64(ww0, ww1);  // 03 13 23 33 43 53 63 73
224
225
166k
  w0 = _mm256_unpackhi_epi16(x[0], x[1]);  // 04 14 05 15 06 16 07 17
226
166k
  w1 = _mm256_unpackhi_epi16(x[2], x[3]);  // 24 34 25 35 26 36 27 37
227
166k
  w2 = _mm256_unpackhi_epi16(x[4], x[5]);  // 44 54 45 55 46 56 47 57
228
166k
  w3 = _mm256_unpackhi_epi16(x[6], x[7]);  // 64 74 65 75 66 76 67 77
229
230
166k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 04 14 24 34 05 15 25 35
231
166k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 44 54 64 74 45 55 65 75
232
233
166k
  d[4] = _mm256_unpacklo_epi64(ww0, ww1);  // 04 14 24 34 44 54 64 74
234
166k
  d[5] = _mm256_unpackhi_epi64(ww0, ww1);  // 05 15 25 35 45 55 65 75
235
236
166k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 06 16 26 36 07 17 27 37
237
166k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 46 56 66 76 47 57 67 77
238
239
166k
  d[6] = _mm256_unpacklo_epi64(ww0, ww1);  // 06 16 26 36 46 56 66 76
240
166k
  d[7] = _mm256_unpackhi_epi64(ww0, ww1);  // 07 17 27 37 47 57 67 77
241
166k
}
242
243
1.14M
static inline void highbd_transpose16x16_avx2(__m256i *x, __m256i *d) {
244
1.14M
  __m256i w0, w1, w2, w3, ww0, ww1;
245
1.14M
  __m256i dd[16];
246
1.14M
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);
247
1.14M
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);
248
1.14M
  w2 = _mm256_unpacklo_epi16(x[4], x[5]);
249
1.14M
  w3 = _mm256_unpacklo_epi16(x[6], x[7]);
250
251
1.14M
  ww0 = _mm256_unpacklo_epi32(w0, w1);  //
252
1.14M
  ww1 = _mm256_unpacklo_epi32(w2, w3);  //
253
254
1.14M
  dd[0] = _mm256_unpacklo_epi64(ww0, ww1);
255
1.14M
  dd[1] = _mm256_unpackhi_epi64(ww0, ww1);
256
257
1.14M
  ww0 = _mm256_unpackhi_epi32(w0, w1);  //
258
1.14M
  ww1 = _mm256_unpackhi_epi32(w2, w3);  //
259
260
1.14M
  dd[2] = _mm256_unpacklo_epi64(ww0, ww1);
261
1.14M
  dd[3] = _mm256_unpackhi_epi64(ww0, ww1);
262
263
1.14M
  w0 = _mm256_unpackhi_epi16(x[0], x[1]);
264
1.14M
  w1 = _mm256_unpackhi_epi16(x[2], x[3]);
265
1.14M
  w2 = _mm256_unpackhi_epi16(x[4], x[5]);
266
1.14M
  w3 = _mm256_unpackhi_epi16(x[6], x[7]);
267
268
1.14M
  ww0 = _mm256_unpacklo_epi32(w0, w1);  //
269
1.14M
  ww1 = _mm256_unpacklo_epi32(w2, w3);  //
270
271
1.14M
  dd[4] = _mm256_unpacklo_epi64(ww0, ww1);
272
1.14M
  dd[5] = _mm256_unpackhi_epi64(ww0, ww1);
273
274
1.14M
  ww0 = _mm256_unpackhi_epi32(w0, w1);  //
275
1.14M
  ww1 = _mm256_unpackhi_epi32(w2, w3);  //
276
277
1.14M
  dd[6] = _mm256_unpacklo_epi64(ww0, ww1);
278
1.14M
  dd[7] = _mm256_unpackhi_epi64(ww0, ww1);
279
280
1.14M
  w0 = _mm256_unpacklo_epi16(x[8], x[9]);
281
1.14M
  w1 = _mm256_unpacklo_epi16(x[10], x[11]);
282
1.14M
  w2 = _mm256_unpacklo_epi16(x[12], x[13]);
283
1.14M
  w3 = _mm256_unpacklo_epi16(x[14], x[15]);
284
285
1.14M
  ww0 = _mm256_unpacklo_epi32(w0, w1);
286
1.14M
  ww1 = _mm256_unpacklo_epi32(w2, w3);
287
288
1.14M
  dd[8] = _mm256_unpacklo_epi64(ww0, ww1);
289
1.14M
  dd[9] = _mm256_unpackhi_epi64(ww0, ww1);
290
291
1.14M
  ww0 = _mm256_unpackhi_epi32(w0, w1);
292
1.14M
  ww1 = _mm256_unpackhi_epi32(w2, w3);
293
294
1.14M
  dd[10] = _mm256_unpacklo_epi64(ww0, ww1);
295
1.14M
  dd[11] = _mm256_unpackhi_epi64(ww0, ww1);
296
297
1.14M
  w0 = _mm256_unpackhi_epi16(x[8], x[9]);
298
1.14M
  w1 = _mm256_unpackhi_epi16(x[10], x[11]);
299
1.14M
  w2 = _mm256_unpackhi_epi16(x[12], x[13]);
300
1.14M
  w3 = _mm256_unpackhi_epi16(x[14], x[15]);
301
302
1.14M
  ww0 = _mm256_unpacklo_epi32(w0, w1);
303
1.14M
  ww1 = _mm256_unpacklo_epi32(w2, w3);
304
305
1.14M
  dd[12] = _mm256_unpacklo_epi64(ww0, ww1);
306
1.14M
  dd[13] = _mm256_unpackhi_epi64(ww0, ww1);
307
308
1.14M
  ww0 = _mm256_unpackhi_epi32(w0, w1);
309
1.14M
  ww1 = _mm256_unpackhi_epi32(w2, w3);
310
311
1.14M
  dd[14] = _mm256_unpacklo_epi64(ww0, ww1);
312
1.14M
  dd[15] = _mm256_unpackhi_epi64(ww0, ww1);
313
314
10.3M
  for (int i = 0; i < 8; i++) {
315
9.18M
    d[i] = _mm256_insertf128_si256(dd[i], _mm256_castsi256_si128(dd[i + 8]), 1);
316
9.18M
    d[i + 8] = _mm256_insertf128_si256(dd[i + 8],
317
9.18M
                                       _mm256_extracti128_si256(dd[i], 1), 0);
318
9.18M
  }
319
1.14M
}
320
#endif  // CONFIG_AV1_HIGHBITDEPTH
321
322
void aom_dc_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
323
1.02M
                                 const uint8_t *above, const uint8_t *left) {
324
1.02M
  const __m256i sum_above = dc_sum_32(above);
325
1.02M
  __m256i sum_left = dc_sum_32(left);
326
1.02M
  sum_left = _mm256_add_epi16(sum_left, sum_above);
327
1.02M
  const __m256i thirtytwo = _mm256_set1_epi16(32);
328
1.02M
  sum_left = _mm256_add_epi16(sum_left, thirtytwo);
329
1.02M
  sum_left = _mm256_srai_epi16(sum_left, 6);
330
1.02M
  const __m256i zero = _mm256_setzero_si256();
331
1.02M
  __m256i row = _mm256_shuffle_epi8(sum_left, zero);
332
1.02M
  row_store_32xh(&row, 32, dst, stride);
333
1.02M
}
334
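A scalar model of the rounding used by aom_dc_predictor_32x32_avx2() above: the 32 above and 32 left pixels are summed, 32 is added for rounding, the total is shifted right by 6 (divide by 64), and the result fills the block. Sketch only; the name is illustrative.

// Scalar model of the 32x32 DC predictor.
static void dc_32x32_scalar(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *above, const uint8_t *left) {
  int sum = 0;
  for (int i = 0; i < 32; ++i) sum += above[i] + left[i];
  const uint8_t dc = (uint8_t)((sum + 32) >> 6);
  for (int r = 0; r < 32; ++r, dst += stride)
    for (int c = 0; c < 32; ++c) dst[c] = dc;
}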
335
void aom_dc_top_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
336
                                     const uint8_t *above,
337
60.3k
                                     const uint8_t *left) {
338
60.3k
  __m256i sum = dc_sum_32(above);
339
60.3k
  (void)left;
340
341
60.3k
  const __m256i sixteen = _mm256_set1_epi16(16);
342
60.3k
  sum = _mm256_add_epi16(sum, sixteen);
343
60.3k
  sum = _mm256_srai_epi16(sum, 5);
344
60.3k
  const __m256i zero = _mm256_setzero_si256();
345
60.3k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
346
60.3k
  row_store_32xh(&row, 32, dst, stride);
347
60.3k
}
348
349
void aom_dc_left_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
350
                                      const uint8_t *above,
351
107k
                                      const uint8_t *left) {
352
107k
  __m256i sum = dc_sum_32(left);
353
107k
  (void)above;
354
355
107k
  const __m256i sixteen = _mm256_set1_epi16(16);
356
107k
  sum = _mm256_add_epi16(sum, sixteen);
357
107k
  sum = _mm256_srai_epi16(sum, 5);
358
107k
  const __m256i zero = _mm256_setzero_si256();
359
107k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
360
107k
  row_store_32xh(&row, 32, dst, stride);
361
107k
}
362
363
void aom_dc_128_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
364
                                     const uint8_t *above,
365
20.5k
                                     const uint8_t *left) {
366
20.5k
  (void)above;
367
20.5k
  (void)left;
368
20.5k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
369
20.5k
  row_store_32xh(&row, 32, dst, stride);
370
20.5k
}
371
372
void aom_v_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
373
30.0k
                                const uint8_t *above, const uint8_t *left) {
374
30.0k
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
375
30.0k
  (void)left;
376
30.0k
  row_store_32xh(&row, 32, dst, stride);
377
30.0k
}
378
379
// There are 32 rows together. This function does lines
380
// 0,1,2,3 and 16,17,18,19. The next call does lines
381
// 4,5,6,7 and 20,21,22,23, so four calls finish
382
// all 32 rows.
383
static inline void h_predictor_32x8line(const __m256i *row, uint8_t *dst,
384
514k
                                        ptrdiff_t stride) {
385
514k
  __m256i t[4];
386
514k
  __m256i m = _mm256_setzero_si256();
387
514k
  const __m256i inc = _mm256_set1_epi8(4);
388
514k
  int i;
389
390
2.57M
  for (i = 0; i < 4; i++) {
391
2.05M
    t[i] = _mm256_shuffle_epi8(*row, m);
392
2.05M
    __m256i r0 = _mm256_permute2x128_si256(t[i], t[i], 0);
393
2.05M
    __m256i r1 = _mm256_permute2x128_si256(t[i], t[i], 0x11);
394
2.05M
    _mm256_storeu_si256((__m256i *)dst, r0);
395
2.05M
    _mm256_storeu_si256((__m256i *)(dst + (stride << 4)), r1);
396
2.05M
    dst += stride;
397
2.05M
    m = _mm256_add_epi8(m, inc);
398
2.05M
  }
399
514k
}
400
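The interleaving described in the comment above is a register-reuse trick; functionally the horizontal predictor just replicates each left-edge pixel across its row. A minimal scalar sketch (illustrative name, not part of this file):

// Scalar model of the H predictor: row r of the block is left[r]
// repeated across the full width.
static void h_pred_scalar(uint8_t *dst, ptrdiff_t stride, int width,
                          int height, const uint8_t *left) {
  for (int r = 0; r < height; ++r, dst += stride)
    for (int c = 0; c < width; ++c) dst[c] = left[r];
}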
401
void aom_h_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
402
128k
                                const uint8_t *above, const uint8_t *left) {
403
128k
  (void)above;
404
128k
  const __m256i left_col = _mm256_loadu_si256((__m256i const *)left);
405
406
128k
  __m256i u = _mm256_unpacklo_epi8(left_col, left_col);
407
408
128k
  __m256i v = _mm256_unpacklo_epi8(u, u);
409
128k
  h_predictor_32x8line(&v, dst, stride);
410
128k
  dst += stride << 2;
411
412
128k
  v = _mm256_unpackhi_epi8(u, u);
413
128k
  h_predictor_32x8line(&v, dst, stride);
414
128k
  dst += stride << 2;
415
416
128k
  u = _mm256_unpackhi_epi8(left_col, left_col);
417
418
128k
  v = _mm256_unpacklo_epi8(u, u);
419
128k
  h_predictor_32x8line(&v, dst, stride);
420
128k
  dst += stride << 2;
421
422
128k
  v = _mm256_unpackhi_epi8(u, u);
423
128k
  h_predictor_32x8line(&v, dst, stride);
424
128k
}
425
426
// -----------------------------------------------------------------------------
427
// Rectangle
428
void aom_dc_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
429
134k
                                 const uint8_t *above, const uint8_t *left) {
430
134k
  const __m128i top_sum = dc_sum_32_sse2(above);
431
134k
  __m128i left_sum = dc_sum_16_sse2(left);
432
134k
  left_sum = _mm_add_epi16(top_sum, left_sum);
433
134k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(left_sum);
434
134k
  sum += 24;
435
134k
  sum /= 48;
436
134k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
437
134k
  row_store_32xh(&row, 16, dst, stride);
438
134k
}
439
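For rectangular blocks the reference-pixel count (width + height) is not a power of two, so the rounded average above cannot be a shift: half of the divisor is added before an integer division. A worked example for the 32x16 case:

// 32 above + 16 left = 48 reference pixels, so:
//   dc = (sum_above + sum_left + 24) / 48;
// e.g. if every reference pixel is 128, the sum is 6144 and
// (6144 + 24) / 48 = 128, as expected.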
440
void aom_dc_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
441
7.18k
                                 const uint8_t *above, const uint8_t *left) {
442
7.18k
  const __m256i sum_above = dc_sum_32(above);
443
7.18k
  __m256i sum_left = dc_sum_64(left);
444
7.18k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
445
7.18k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
446
7.18k
  sum += 48;
447
7.18k
  sum /= 96;
448
7.18k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
449
7.18k
  row_store_32xh(&row, 64, dst, stride);
450
7.18k
}
451
452
void aom_dc_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
453
106k
                                 const uint8_t *above, const uint8_t *left) {
454
106k
  const __m256i sum_above = dc_sum_64(above);
455
106k
  __m256i sum_left = dc_sum_64(left);
456
106k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
457
106k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
458
106k
  sum += 64;
459
106k
  sum /= 128;
460
106k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
461
106k
  row_store_64xh(&row, 64, dst, stride);
462
106k
}
463
464
void aom_dc_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
465
13.0k
                                 const uint8_t *above, const uint8_t *left) {
466
13.0k
  const __m256i sum_above = dc_sum_64(above);
467
13.0k
  __m256i sum_left = dc_sum_32(left);
468
13.0k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
469
13.0k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
470
13.0k
  sum += 48;
471
13.0k
  sum /= 96;
472
13.0k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
473
13.0k
  row_store_64xh(&row, 32, dst, stride);
474
13.0k
}
475
476
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
477
void aom_dc_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
478
47.2k
                                 const uint8_t *above, const uint8_t *left) {
479
47.2k
  const __m256i sum_above = dc_sum_64(above);
480
47.2k
  __m256i sum_left = _mm256_castsi128_si256(dc_sum_16_sse2(left));
481
47.2k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
482
47.2k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
483
47.2k
  sum += 40;
484
47.2k
  sum /= 80;
485
47.2k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
486
47.2k
  row_store_64xh(&row, 16, dst, stride);
487
47.2k
}
488
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
489
490
void aom_dc_top_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
491
                                     const uint8_t *above,
492
3.46k
                                     const uint8_t *left) {
493
3.46k
  __m256i sum = dc_sum_32(above);
494
3.46k
  (void)left;
495
496
3.46k
  const __m256i sixteen = _mm256_set1_epi16(16);
497
3.46k
  sum = _mm256_add_epi16(sum, sixteen);
498
3.46k
  sum = _mm256_srai_epi16(sum, 5);
499
3.46k
  const __m256i zero = _mm256_setzero_si256();
500
3.46k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
501
3.46k
  row_store_32xh(&row, 16, dst, stride);
502
3.46k
}
503
504
void aom_dc_top_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
505
                                     const uint8_t *above,
506
912
                                     const uint8_t *left) {
507
912
  __m256i sum = dc_sum_32(above);
508
912
  (void)left;
509
510
912
  const __m256i sixteen = _mm256_set1_epi16(16);
511
912
  sum = _mm256_add_epi16(sum, sixteen);
512
912
  sum = _mm256_srai_epi16(sum, 5);
513
912
  const __m256i zero = _mm256_setzero_si256();
514
912
  __m256i row = _mm256_shuffle_epi8(sum, zero);
515
912
  row_store_32xh(&row, 64, dst, stride);
516
912
}
517
518
void aom_dc_top_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
519
                                     const uint8_t *above,
520
13.7k
                                     const uint8_t *left) {
521
13.7k
  __m256i sum = dc_sum_64(above);
522
13.7k
  (void)left;
523
524
13.7k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
525
13.7k
  sum = _mm256_add_epi16(sum, thirtytwo);
526
13.7k
  sum = _mm256_srai_epi16(sum, 6);
527
13.7k
  const __m256i zero = _mm256_setzero_si256();
528
13.7k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
529
13.7k
  row_store_64xh(&row, 64, dst, stride);
530
13.7k
}
531
532
void aom_dc_top_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
533
                                     const uint8_t *above,
534
389
                                     const uint8_t *left) {
535
389
  __m256i sum = dc_sum_64(above);
536
389
  (void)left;
537
538
389
  const __m256i thirtytwo = _mm256_set1_epi16(32);
539
389
  sum = _mm256_add_epi16(sum, thirtytwo);
540
389
  sum = _mm256_srai_epi16(sum, 6);
541
389
  const __m256i zero = _mm256_setzero_si256();
542
389
  __m256i row = _mm256_shuffle_epi8(sum, zero);
543
389
  row_store_64xh(&row, 32, dst, stride);
544
389
}
545
546
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
547
void aom_dc_top_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
548
                                     const uint8_t *above,
549
1.84k
                                     const uint8_t *left) {
550
1.84k
  __m256i sum = dc_sum_64(above);
551
1.84k
  (void)left;
552
553
1.84k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
554
1.84k
  sum = _mm256_add_epi16(sum, thirtytwo);
555
1.84k
  sum = _mm256_srai_epi16(sum, 6);
556
1.84k
  const __m256i zero = _mm256_setzero_si256();
557
1.84k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
558
1.84k
  row_store_64xh(&row, 16, dst, stride);
559
1.84k
}
560
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
561
562
void aom_dc_left_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
563
                                      const uint8_t *above,
564
3.75k
                                      const uint8_t *left) {
565
3.75k
  __m128i sum = dc_sum_16_sse2(left);
566
3.75k
  (void)above;
567
568
3.75k
  const __m128i eight = _mm_set1_epi16(8);
569
3.75k
  sum = _mm_add_epi16(sum, eight);
570
3.75k
  sum = _mm_srai_epi16(sum, 4);
571
3.75k
  const __m128i zero = _mm_setzero_si128();
572
3.75k
  const __m128i r = _mm_shuffle_epi8(sum, zero);
573
3.75k
  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);
574
3.75k
  row_store_32xh(&row, 16, dst, stride);
575
3.75k
}
576
577
void aom_dc_left_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
578
                                      const uint8_t *above,
579
1.24k
                                      const uint8_t *left) {
580
1.24k
  __m256i sum = dc_sum_64(left);
581
1.24k
  (void)above;
582
583
1.24k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
584
1.24k
  sum = _mm256_add_epi16(sum, thirtytwo);
585
1.24k
  sum = _mm256_srai_epi16(sum, 6);
586
1.24k
  const __m256i zero = _mm256_setzero_si256();
587
1.24k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
588
1.24k
  row_store_32xh(&row, 64, dst, stride);
589
1.24k
}
590
591
void aom_dc_left_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
592
                                      const uint8_t *above,
593
18.9k
                                      const uint8_t *left) {
594
18.9k
  __m256i sum = dc_sum_64(left);
595
18.9k
  (void)above;
596
597
18.9k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
598
18.9k
  sum = _mm256_add_epi16(sum, thirtytwo);
599
18.9k
  sum = _mm256_srai_epi16(sum, 6);
600
18.9k
  const __m256i zero = _mm256_setzero_si256();
601
18.9k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
602
18.9k
  row_store_64xh(&row, 64, dst, stride);
603
18.9k
}
604
605
void aom_dc_left_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
606
                                      const uint8_t *above,
607
769
                                      const uint8_t *left) {
608
769
  __m256i sum = dc_sum_32(left);
609
769
  (void)above;
610
611
769
  const __m256i sixteen = _mm256_set1_epi16(16);
612
769
  sum = _mm256_add_epi16(sum, sixteen);
613
769
  sum = _mm256_srai_epi16(sum, 5);
614
769
  const __m256i zero = _mm256_setzero_si256();
615
769
  __m256i row = _mm256_shuffle_epi8(sum, zero);
616
769
  row_store_64xh(&row, 32, dst, stride);
617
769
}
618
619
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
620
void aom_dc_left_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
621
                                      const uint8_t *above,
622
256
                                      const uint8_t *left) {
623
256
  __m128i sum = dc_sum_16_sse2(left);
624
256
  (void)above;
625
626
256
  const __m128i eight = _mm_set1_epi16(8);
627
256
  sum = _mm_add_epi16(sum, eight);
628
256
  sum = _mm_srai_epi16(sum, 4);
629
256
  const __m128i zero = _mm_setzero_si128();
630
256
  const __m128i r = _mm_shuffle_epi8(sum, zero);
631
256
  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);
632
256
  row_store_64xh(&row, 16, dst, stride);
633
256
}
634
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
635
636
void aom_dc_128_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
637
                                     const uint8_t *above,
638
4.84k
                                     const uint8_t *left) {
639
4.84k
  (void)above;
640
4.84k
  (void)left;
641
4.84k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
642
4.84k
  row_store_32xh(&row, 16, dst, stride);
643
4.84k
}
644
645
void aom_dc_128_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
646
                                     const uint8_t *above,
647
1.15k
                                     const uint8_t *left) {
648
1.15k
  (void)above;
649
1.15k
  (void)left;
650
1.15k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
651
1.15k
  row_store_32xh(&row, 64, dst, stride);
652
1.15k
}
653
654
void aom_dc_128_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
655
                                     const uint8_t *above,
656
7.88k
                                     const uint8_t *left) {
657
7.88k
  (void)above;
658
7.88k
  (void)left;
659
7.88k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
660
7.88k
  row_store_64xh(&row, 64, dst, stride);
661
7.88k
}
662
663
void aom_dc_128_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
664
                                     const uint8_t *above,
665
2.09k
                                     const uint8_t *left) {
666
2.09k
  (void)above;
667
2.09k
  (void)left;
668
2.09k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
669
2.09k
  row_store_64xh(&row, 32, dst, stride);
670
2.09k
}
671
672
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
673
void aom_dc_128_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
674
                                     const uint8_t *above,
675
352
                                     const uint8_t *left) {
676
352
  (void)above;
677
352
  (void)left;
678
352
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
679
352
  row_store_64xh(&row, 16, dst, stride);
680
352
}
681
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
682
683
void aom_v_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
684
8.90k
                                const uint8_t *above, const uint8_t *left) {
685
8.90k
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
686
8.90k
  (void)left;
687
8.90k
  row_store_32xh(&row, 16, dst, stride);
688
8.90k
}
689
690
void aom_v_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
691
568
                                const uint8_t *above, const uint8_t *left) {
692
568
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
693
568
  (void)left;
694
568
  row_store_32xh(&row, 64, dst, stride);
695
568
}
696
697
void aom_v_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
698
2.64k
                                const uint8_t *above, const uint8_t *left) {
699
2.64k
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
700
2.64k
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
701
2.64k
  (void)left;
702
2.64k
  row_store_32x2xh(&row0, &row1, 64, dst, stride);
703
2.64k
}
704
705
void aom_v_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
706
536
                                const uint8_t *above, const uint8_t *left) {
707
536
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
708
536
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
709
536
  (void)left;
710
536
  row_store_32x2xh(&row0, &row1, 32, dst, stride);
711
536
}
712
713
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
714
void aom_v_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
715
1.15k
                                const uint8_t *above, const uint8_t *left) {
716
1.15k
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
717
1.15k
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
718
1.15k
  (void)left;
719
1.15k
  row_store_32x2xh(&row0, &row1, 16, dst, stride);
720
1.15k
}
721
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
722
723
// -----------------------------------------------------------------------------
724
// PAETH_PRED
725
726
// Return 16 16-bit pixels in one row (__m256i)
727
static inline __m256i paeth_pred(const __m256i *left, const __m256i *top,
728
80.6M
                                 const __m256i *topleft) {
729
80.6M
  const __m256i base =
730
80.6M
      _mm256_sub_epi16(_mm256_add_epi16(*top, *left), *topleft);
731
732
80.6M
  __m256i pl = _mm256_abs_epi16(_mm256_sub_epi16(base, *left));
733
80.6M
  __m256i pt = _mm256_abs_epi16(_mm256_sub_epi16(base, *top));
734
80.6M
  __m256i ptl = _mm256_abs_epi16(_mm256_sub_epi16(base, *topleft));
735
736
80.6M
  __m256i mask1 = _mm256_cmpgt_epi16(pl, pt);
737
80.6M
  mask1 = _mm256_or_si256(mask1, _mm256_cmpgt_epi16(pl, ptl));
738
80.6M
  __m256i mask2 = _mm256_cmpgt_epi16(pt, ptl);
739
740
80.6M
  pl = _mm256_andnot_si256(mask1, *left);
741
742
80.6M
  ptl = _mm256_and_si256(mask2, *topleft);
743
80.6M
  pt = _mm256_andnot_si256(mask2, *top);
744
80.6M
  pt = _mm256_or_si256(pt, ptl);
745
80.6M
  pt = _mm256_and_si256(mask1, pt);
746
747
80.6M
  return _mm256_or_si256(pt, pl);
748
80.6M
}
749
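paeth_pred() above is the vector form of the per-pixel Paeth rule; reading the masks, left is chosen when it is nearest to base (ties included), then top, then top-left. A scalar sketch (abs() from <stdlib.h>; name illustrative):

// Scalar Paeth predictor for a single pixel.
static uint8_t paeth_scalar(int left, int top, int topleft) {
  const int base = left + top - topleft;
  const int pl = abs(base - left);
  const int pt = abs(base - top);
  const int ptl = abs(base - topleft);
  if (pl <= pt && pl <= ptl) return (uint8_t)left;
  return (uint8_t)((pt <= ptl) ? top : topleft);
}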
750
// Return 16 8-bit pixels in one row (__m128i)
751
static inline __m128i paeth_16x1_pred(const __m256i *left, const __m256i *top,
752
79.7M
                                      const __m256i *topleft) {
753
79.7M
  const __m256i p0 = paeth_pred(left, top, topleft);
754
79.7M
  const __m256i p1 = _mm256_permute4x64_epi64(p0, 0xe);
755
79.7M
  const __m256i p = _mm256_packus_epi16(p0, p1);
756
79.7M
  return _mm256_castsi256_si128(p);
757
79.7M
}
758
759
2.25M
static inline __m256i get_top_vector(const uint8_t *above) {
760
2.25M
  const __m128i x = _mm_load_si128((const __m128i *)above);
761
2.25M
  const __m128i zero = _mm_setzero_si128();
762
2.25M
  const __m128i t0 = _mm_unpacklo_epi8(x, zero);
763
2.25M
  const __m128i t1 = _mm_unpackhi_epi8(x, zero);
764
2.25M
  return _mm256_inserti128_si256(_mm256_castsi128_si256(t0), t1, 1);
765
2.25M
}
766
767
void aom_paeth_predictor_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
768
71.5k
                                   const uint8_t *above, const uint8_t *left) {
769
71.5k
  __m128i x = _mm_loadl_epi64((const __m128i *)left);
770
71.5k
  const __m256i l = _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
771
71.5k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
772
71.5k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
773
71.5k
  const __m256i one = _mm256_set1_epi16(1);
774
71.5k
  const __m256i top = get_top_vector(above);
775
776
71.5k
  int i;
777
643k
  for (i = 0; i < 8; ++i) {
778
572k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
779
572k
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
780
781
572k
    _mm_store_si128((__m128i *)dst, row);
782
572k
    dst += stride;
783
572k
    rep = _mm256_add_epi16(rep, one);
784
572k
  }
785
71.5k
}
786
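A note on the rep constant in the Paeth loops (a reading of the intrinsics, not original commentary): each 16-bit lane of rep starts at 0x8000, so _mm256_shuffle_epi8(l, rep) selects left byte i for the low result byte while the 0x80 index, having its sign bit set, zeroes the high byte; adding one per row steps to the next left pixel. In scalar terms each iteration of those loops amounts to the sketch below (hypothetical helper, built on the paeth_scalar() sketch above, not in this file):

// Scalar model of one 16-wide Paeth block.
static void paeth_16xh_scalar(uint8_t *dst, ptrdiff_t stride, int height,
                              const uint8_t *above, const uint8_t *left) {
  for (int r = 0; r < height; ++r, dst += stride)
    for (int c = 0; c < 16; ++c)
      dst[c] = paeth_scalar(left[r], above[c], above[-1]);
}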
787
3.76M
static inline __m256i get_left_vector(const uint8_t *left) {
788
3.76M
  const __m128i x = _mm_load_si128((const __m128i *)left);
789
3.76M
  return _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
790
3.76M
}
791
792
void aom_paeth_predictor_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
793
100k
                                    const uint8_t *above, const uint8_t *left) {
794
100k
  const __m256i l = get_left_vector(left);
795
100k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
796
100k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
797
100k
  const __m256i one = _mm256_set1_epi16(1);
798
100k
  const __m256i top = get_top_vector(above);
799
800
100k
  int i;
801
1.70M
  for (i = 0; i < 16; ++i) {
802
1.60M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
803
1.60M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
804
805
1.60M
    _mm_store_si128((__m128i *)dst, row);
806
1.60M
    dst += stride;
807
1.60M
    rep = _mm256_add_epi16(rep, one);
808
1.60M
  }
809
100k
}
810
811
void aom_paeth_predictor_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
812
942k
                                    const uint8_t *above, const uint8_t *left) {
813
942k
  __m256i l = get_left_vector(left);
814
942k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
815
942k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
816
942k
  const __m256i one = _mm256_set1_epi16(1);
817
942k
  const __m256i top = get_top_vector(above);
818
819
942k
  int i;
820
16.0M
  for (i = 0; i < 16; ++i) {
821
15.0M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
822
15.0M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
823
824
15.0M
    _mm_store_si128((__m128i *)dst, row);
825
15.0M
    dst += stride;
826
15.0M
    rep = _mm256_add_epi16(rep, one);
827
15.0M
  }
828
829
942k
  l = get_left_vector(left + 16);
830
942k
  rep = _mm256_set1_epi16((short)0x8000);
831
16.0M
  for (i = 0; i < 16; ++i) {
832
15.0M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
833
15.0M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
834
835
15.0M
    _mm_store_si128((__m128i *)dst, row);
836
15.0M
    dst += stride;
837
15.0M
    rep = _mm256_add_epi16(rep, one);
838
15.0M
  }
839
942k
}
840
841
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
842
void aom_paeth_predictor_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
843
231k
                                    const uint8_t *above, const uint8_t *left) {
844
231k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
845
231k
  const __m256i one = _mm256_set1_epi16(1);
846
231k
  const __m256i top = get_top_vector(above);
847
848
1.15M
  for (int j = 0; j < 4; ++j) {
849
925k
    const __m256i l = get_left_vector(left + j * 16);
850
925k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
851
15.7M
    for (int i = 0; i < 16; ++i) {
852
14.8M
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
853
14.8M
      const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
854
855
14.8M
      _mm_store_si128((__m128i *)dst, row);
856
14.8M
      dst += stride;
857
14.8M
      rep = _mm256_add_epi16(rep, one);
858
14.8M
    }
859
925k
  }
860
231k
}
861
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
862
863
// Return 32 8-bit pixels in one row (__m256i)
864
static inline __m256i paeth_32x1_pred(const __m256i *left, const __m256i *top0,
865
                                      const __m256i *top1,
866
433k
                                      const __m256i *topleft) {
867
433k
  __m256i p0 = paeth_pred(left, top0, topleft);
868
433k
  __m256i p1 = _mm256_permute4x64_epi64(p0, 0xe);
869
433k
  const __m256i x0 = _mm256_packus_epi16(p0, p1);
870
871
433k
  p0 = paeth_pred(left, top1, topleft);
872
433k
  p1 = _mm256_permute4x64_epi64(p0, 0xe);
873
433k
  const __m256i x1 = _mm256_packus_epi16(p0, p1);
874
875
433k
  return _mm256_permute2x128_si256(x0, x1, 0x20);
876
433k
}
877
878
void aom_paeth_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
879
27.0k
                                    const uint8_t *above, const uint8_t *left) {
880
27.0k
  const __m256i l = get_left_vector(left);
881
27.0k
  const __m256i t0 = get_top_vector(above);
882
27.0k
  const __m256i t1 = get_top_vector(above + 16);
883
27.0k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
884
27.0k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
885
27.0k
  const __m256i one = _mm256_set1_epi16(1);
886
887
27.0k
  int i;
888
460k
  for (i = 0; i < 16; ++i) {
889
433k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
890
891
433k
    const __m256i r = paeth_32x1_pred(&l16, &t0, &t1, &tl);
892
893
433k
    _mm256_storeu_si256((__m256i *)dst, r);
894
895
433k
    dst += stride;
896
433k
    rep = _mm256_add_epi16(rep, one);
897
433k
  }
898
27.0k
}
899
900
void aom_paeth_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
901
312k
                                    const uint8_t *above, const uint8_t *left) {
902
312k
  __m256i l = get_left_vector(left);
903
312k
  const __m256i t0 = get_top_vector(above);
904
312k
  const __m256i t1 = get_top_vector(above + 16);
905
312k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
906
312k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
907
312k
  const __m256i one = _mm256_set1_epi16(1);
908
909
312k
  int i;
910
5.31M
  for (i = 0; i < 16; ++i) {
911
5.00M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
912
913
5.00M
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
914
5.00M
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
915
916
5.00M
    _mm_store_si128((__m128i *)dst, r0);
917
5.00M
    _mm_store_si128((__m128i *)(dst + 16), r1);
918
919
5.00M
    dst += stride;
920
5.00M
    rep = _mm256_add_epi16(rep, one);
921
5.00M
  }
922
923
312k
  l = get_left_vector(left + 16);
924
312k
  rep = _mm256_set1_epi16((short)0x8000);
925
5.31M
  for (i = 0; i < 16; ++i) {
926
5.00M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
927
928
5.00M
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
929
5.00M
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
930
931
5.00M
    _mm_store_si128((__m128i *)dst, r0);
932
5.00M
    _mm_store_si128((__m128i *)(dst + 16), r1);
933
934
5.00M
    dst += stride;
935
5.00M
    rep = _mm256_add_epi16(rep, one);
936
5.00M
  }
937
312k
}
938
939
void aom_paeth_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
940
3.70k
                                    const uint8_t *above, const uint8_t *left) {
941
3.70k
  const __m256i t0 = get_top_vector(above);
942
3.70k
  const __m256i t1 = get_top_vector(above + 16);
943
3.70k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
944
3.70k
  const __m256i one = _mm256_set1_epi16(1);
945
946
3.70k
  int i, j;
947
18.5k
  for (j = 0; j < 4; ++j) {
948
14.8k
    const __m256i l = get_left_vector(left + j * 16);
949
14.8k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
950
252k
    for (i = 0; i < 16; ++i) {
951
237k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
952
953
237k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
954
237k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
955
956
237k
      _mm_store_si128((__m128i *)dst, r0);
957
237k
      _mm_store_si128((__m128i *)(dst + 16), r1);
958
959
237k
      dst += stride;
960
237k
      rep = _mm256_add_epi16(rep, one);
961
237k
    }
962
14.8k
  }
963
3.70k
}
964
965
void aom_paeth_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
966
4.29k
                                    const uint8_t *above, const uint8_t *left) {
967
4.29k
  const __m256i t0 = get_top_vector(above);
968
4.29k
  const __m256i t1 = get_top_vector(above + 16);
969
4.29k
  const __m256i t2 = get_top_vector(above + 32);
970
4.29k
  const __m256i t3 = get_top_vector(above + 48);
971
4.29k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
972
4.29k
  const __m256i one = _mm256_set1_epi16(1);
973
974
4.29k
  int i, j;
975
12.8k
  for (j = 0; j < 2; ++j) {
976
8.58k
    const __m256i l = get_left_vector(left + j * 16);
977
8.58k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
978
145k
    for (i = 0; i < 16; ++i) {
979
137k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
980
981
137k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
982
137k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
983
137k
      const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
984
137k
      const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
985
986
137k
      _mm_store_si128((__m128i *)dst, r0);
987
137k
      _mm_store_si128((__m128i *)(dst + 16), r1);
988
137k
      _mm_store_si128((__m128i *)(dst + 32), r2);
989
137k
      _mm_store_si128((__m128i *)(dst + 48), r3);
990
991
137k
      dst += stride;
992
137k
      rep = _mm256_add_epi16(rep, one);
993
137k
    }
994
8.58k
  }
995
4.29k
}
996
997
void aom_paeth_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
998
43.1k
                                    const uint8_t *above, const uint8_t *left) {
999
43.1k
  const __m256i t0 = get_top_vector(above);
1000
43.1k
  const __m256i t1 = get_top_vector(above + 16);
1001
43.1k
  const __m256i t2 = get_top_vector(above + 32);
1002
43.1k
  const __m256i t3 = get_top_vector(above + 48);
1003
43.1k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
1004
43.1k
  const __m256i one = _mm256_set1_epi16(1);
1005
1006
43.1k
  int i, j;
1007
215k
  for (j = 0; j < 4; ++j) {
1008
172k
    const __m256i l = get_left_vector(left + j * 16);
1009
172k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
1010
2.93M
    for (i = 0; i < 16; ++i) {
1011
2.75M
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
1012
1013
2.75M
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
1014
2.75M
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
1015
2.75M
      const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
1016
2.75M
      const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
1017
1018
2.75M
      _mm_store_si128((__m128i *)dst, r0);
1019
2.75M
      _mm_store_si128((__m128i *)(dst + 16), r1);
1020
2.75M
      _mm_store_si128((__m128i *)(dst + 32), r2);
1021
2.75M
      _mm_store_si128((__m128i *)(dst + 48), r3);
1022
1023
2.75M
      dst += stride;
1024
2.75M
      rep = _mm256_add_epi16(rep, one);
1025
2.75M
    }
1026
172k
  }
1027
43.1k
}
1028
1029
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1030
void aom_paeth_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
1031
8.68k
                                    const uint8_t *above, const uint8_t *left) {
1032
8.68k
  const __m256i t0 = get_top_vector(above);
1033
8.68k
  const __m256i t1 = get_top_vector(above + 16);
1034
8.68k
  const __m256i t2 = get_top_vector(above + 32);
1035
8.68k
  const __m256i t3 = get_top_vector(above + 48);
1036
8.68k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
1037
8.68k
  const __m256i one = _mm256_set1_epi16(1);
1038
1039
8.68k
  int i;
1040
8.68k
  const __m256i l = get_left_vector(left);
1041
8.68k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
1042
147k
  for (i = 0; i < 16; ++i) {
1043
138k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
1044
1045
138k
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
1046
138k
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
1047
138k
    const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
1048
138k
    const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
1049
1050
138k
    _mm_store_si128((__m128i *)dst, r0);
1051
138k
    _mm_store_si128((__m128i *)(dst + 16), r1);
1052
138k
    _mm_store_si128((__m128i *)(dst + 32), r2);
1053
138k
    _mm_store_si128((__m128i *)(dst + 48), r3);
1054
1055
138k
    dst += stride;
1056
138k
    rep = _mm256_add_epi16(rep, one);
1057
138k
  }
1058
8.68k
}
1059
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1060
1061
#if CONFIG_AV1_HIGHBITDEPTH
1062
1063
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_4xN_internal_avx2(
1064
347k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1065
347k
  const int frac_bits = 6 - upsample_above;
1066
347k
  const int max_base_x = ((N + 4) - 1) << upsample_above;
1067
1068
347k
  assert(dx > 0);
1069
  // pre-filter above pixels
1070
  // store in temp buffers:
1071
  //   above[x] * 32 + 16
1072
  //   above[x+1] - above[x]
1073
  // final pixels will be calculated as:
1074
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1075
347k
  __m256i a0, a1, a32, a16;
1076
347k
  __m256i diff, c3f;
1077
347k
  __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
1078
347k
  __m128i a0_128, a1_128;
1079
347k
  a16 = _mm256_set1_epi16(16);
1080
347k
  a_mbase_x = _mm_set1_epi16(above[max_base_x]);
1081
347k
  max_base_x128 = _mm_set1_epi16(max_base_x);
1082
347k
  c3f = _mm256_set1_epi16(0x3f);
1083
1084
347k
  int x = dx;
1085
2.62M
  for (int r = 0; r < N; r++) {
1086
2.27M
    __m256i b, res, shift;
1087
2.27M
    __m128i res1;
1088
1089
2.27M
    int base = x >> frac_bits;
1090
2.27M
    if (base >= max_base_x) {
1091
8.00k
      for (int i = r; i < N; ++i) {
1092
4.67k
        dst[i] = a_mbase_x;  // save 4 values
1093
4.67k
      }
1094
3.33k
      return;
1095
3.33k
    }
1096
1097
2.27M
    a0_128 = _mm_loadu_si128((__m128i *)(above + base));
1098
2.27M
    a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
1099
1100
2.27M
    if (upsample_above) {
1101
859k
      a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)HighbdEvenOddMaskx4[0]);
1102
859k
      a1_128 = _mm_srli_si128(a0_128, 8);
1103
1104
859k
      base_inc128 = _mm_setr_epi16(base, base + 2, base + 4, base + 6, base + 8,
1105
859k
                                   base + 10, base + 12, base + 14);
1106
859k
      shift = _mm256_srli_epi16(
1107
859k
          _mm256_and_si256(
1108
859k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above),
1109
859k
              _mm256_set1_epi16(0x3f)),
1110
859k
          1);
1111
1.41M
    } else {
1112
1.41M
      base_inc128 = _mm_setr_epi16(base, base + 1, base + 2, base + 3, base + 4,
1113
1.41M
                                   base + 5, base + 6, base + 7);
1114
1.41M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1115
1.41M
    }
1116
2.27M
    a0 = _mm256_castsi128_si256(a0_128);
1117
2.27M
    a1 = _mm256_castsi128_si256(a1_128);
1118
2.27M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1119
2.27M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1120
2.27M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1121
1122
2.27M
    b = _mm256_mullo_epi16(diff, shift);
1123
2.27M
    res = _mm256_add_epi16(a32, b);
1124
2.27M
    res = _mm256_srli_epi16(res, 5);
1125
2.27M
    res1 = _mm256_castsi256_si128(res);
1126
1127
2.27M
    mask128 = _mm_cmpgt_epi16(max_base_x128, base_inc128);
1128
2.27M
    dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
1129
2.27M
    x += dx;
1130
2.27M
  }
1131
347k
}
1132
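The pre-filter comment above (repeated in each z1 kernel) boils down to the per-row interpolation sketched below for the upsample_above == 0 case; presumably the bd < 12 split in the wrapper further down exists because above[x] * 32 only stays within 16 bits for 10-bit input. Names are illustrative, not part of this file.

// Scalar model of one highbd z1 row, upsample_above == 0:
//   base  = x >> 6            integer part of the sample position
//   shift = (x & 0x3f) >> 1   5-bit fractional weight
// Columns at or past max_base_x are clamped to above[max_base_x].
static void z1_row_scalar(uint16_t *dst, int width, const uint16_t *above,
                          int max_base_x, int x /* == r * dx */) {
  const int base = x >> 6;
  const int shift = (x & 0x3f) >> 1;
  for (int c = 0; c < width; ++c) {
    if (base + c >= max_base_x) {
      dst[c] = above[max_base_x];
    } else {
      dst[c] = (uint16_t)((above[base + c] * 32 + 16 +
                           (above[base + c + 1] - above[base + c]) * shift) >>
                          5);
    }
  }
}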
1133
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_4xN_internal_avx2(
1134
183k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1135
183k
  const int frac_bits = 6 - upsample_above;
1136
183k
  const int max_base_x = ((N + 4) - 1) << upsample_above;
1137
1138
183k
  assert(dx > 0);
1139
  // pre-filter above pixels
1140
  // store in temp buffers:
1141
  //   above[x] * 32 + 16
1142
  //   above[x+1] - above[x]
1143
  // final pixels will be calculated as:
1144
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1145
183k
  __m256i a0, a1, a32, a16;
1146
183k
  __m256i diff;
1147
183k
  __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
1148
1149
183k
  a16 = _mm256_set1_epi32(16);
1150
183k
  a_mbase_x = _mm_set1_epi16(above[max_base_x]);
1151
183k
  max_base_x128 = _mm_set1_epi32(max_base_x);
1152
1153
183k
  int x = dx;
1154
1.39M
  for (int r = 0; r < N; r++) {
1155
1.21M
    __m256i b, res, shift;
1156
1.21M
    __m128i res1;
1157
1158
1.21M
    int base = x >> frac_bits;
1159
1.21M
    if (base >= max_base_x) {
1160
2.94k
      for (int i = r; i < N; ++i) {
1161
2.06k
        dst[i] = a_mbase_x;  // save 4 values
1162
2.06k
      }
1163
871
      return;
1164
871
    }
1165
1166
1.21M
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1167
1.21M
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1168
1169
1.21M
    if (upsample_above) {
1170
289k
      a0 = _mm256_permutevar8x32_epi32(
1171
289k
          a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1172
289k
      a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
1173
289k
      base_inc128 = _mm_setr_epi32(base, base + 2, base + 4, base + 6);
1174
289k
      shift = _mm256_srli_epi32(
1175
289k
          _mm256_and_si256(
1176
289k
              _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
1177
289k
              _mm256_set1_epi32(0x3f)),
1178
289k
          1);
1179
926k
    } else {
1180
926k
      base_inc128 = _mm_setr_epi32(base, base + 1, base + 2, base + 3);
1181
926k
      shift = _mm256_srli_epi32(
1182
926k
          _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1183
926k
    }
1184
1185
1.21M
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1186
1.21M
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1187
1.21M
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1188
1189
1.21M
    b = _mm256_mullo_epi32(diff, shift);
1190
1.21M
    res = _mm256_add_epi32(a32, b);
1191
1.21M
    res = _mm256_srli_epi32(res, 5);
1192
1193
1.21M
    res1 = _mm256_castsi256_si128(res);
1194
1.21M
    res1 = _mm_packus_epi32(res1, res1);
1195
1196
1.21M
    mask128 = _mm_cmpgt_epi32(max_base_x128, base_inc128);
1197
1.21M
    mask128 = _mm_packs_epi32(mask128, mask128);  // narrow to 16 bit
1198
1.21M
    dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
1199
1.21M
    x += dx;
1200
1.21M
  }
1201
183k
}
1202
1203
static void highbd_dr_prediction_z1_4xN_avx2(int N, uint16_t *dst,
1204
                                             ptrdiff_t stride,
1205
                                             const uint16_t *above,
1206
                                             int upsample_above, int dx,
1207
178k
                                             int bd) {
1208
178k
  __m128i dstvec[16];
1209
178k
  if (bd < 12) {
1210
93.5k
    highbd_dr_prediction_z1_4xN_internal_avx2(N, dstvec, above, upsample_above,
1211
93.5k
                                              dx);
1212
93.5k
  } else {
1213
85.1k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(N, dstvec, above,
1214
85.1k
                                                    upsample_above, dx);
1215
85.1k
  }
1216
1.35M
  for (int i = 0; i < N; i++) {
1217
1.17M
    _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
1218
1.17M
  }
1219
178k
}
1220
1221
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_8xN_internal_avx2(
1222
274k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1223
274k
  const int frac_bits = 6 - upsample_above;
1224
274k
  const int max_base_x = ((8 + N) - 1) << upsample_above;
1225
1226
274k
  assert(dx > 0);
1227
  // pre-filter above pixels
1228
  // store in temp buffers:
1229
  //   above[x] * 32 + 16
1230
  //   above[x+1] - above[x]
1231
  // final pixels will be calculated as:
1232
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1233
274k
  __m256i a0, a1, a0_1, a1_1, a32, a16;
1234
274k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1235
1236
274k
  a16 = _mm256_set1_epi32(16);
1237
274k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1238
274k
  max_base_x256 = _mm256_set1_epi32(max_base_x);
1239
1240
274k
  int x = dx;
1241
2.99M
  for (int r = 0; r < N; r++) {
1242
2.72M
    __m256i b, res, res1, shift;
1243
1244
2.72M
    int base = x >> frac_bits;
1245
2.72M
    if (base >= max_base_x) {
1246
2.21k
      for (int i = r; i < N; ++i) {
1247
1.44k
        dst[i] = _mm256_castsi256_si128(a_mbase_x);  // save 8 values
1248
1.44k
      }
1249
770
      return;
1250
770
    }
1251
1252
2.71M
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1253
2.71M
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1254
1255
2.71M
    if (upsample_above) {
1256
329k
      a0 = _mm256_permutevar8x32_epi32(
1257
329k
          a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1258
329k
      a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
1259
1260
329k
      a0_1 =
1261
329k
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
1262
329k
      a0_1 = _mm256_permutevar8x32_epi32(
1263
329k
          a0_1, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1264
329k
      a1_1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0_1, 1));
1265
1266
329k
      a0 = _mm256_inserti128_si256(a0, _mm256_castsi256_si128(a0_1), 1);
1267
329k
      a1 = _mm256_inserti128_si256(a1, _mm256_castsi256_si128(a1_1), 1);
1268
329k
      base_inc256 =
1269
329k
          _mm256_setr_epi32(base, base + 2, base + 4, base + 6, base + 8,
1270
329k
                            base + 10, base + 12, base + 14);
1271
329k
      shift = _mm256_srli_epi32(
1272
329k
          _mm256_and_si256(
1273
329k
              _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
1274
329k
              _mm256_set1_epi32(0x3f)),
1275
329k
          1);
1276
2.38M
    } else {
1277
2.38M
      base_inc256 = _mm256_setr_epi32(base, base + 1, base + 2, base + 3,
1278
2.38M
                                      base + 4, base + 5, base + 6, base + 7);
1279
2.38M
      shift = _mm256_srli_epi32(
1280
2.38M
          _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1281
2.38M
    }
1282
1283
2.71M
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1284
2.71M
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1285
2.71M
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1286
1287
2.71M
    b = _mm256_mullo_epi32(diff, shift);
1288
2.71M
    res = _mm256_add_epi32(a32, b);
1289
2.71M
    res = _mm256_srli_epi32(res, 5);
1290
1291
2.71M
    res1 = _mm256_packus_epi32(
1292
2.71M
        res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
1293
1294
2.71M
    mask256 = _mm256_cmpgt_epi32(max_base_x256, base_inc256);
1295
2.71M
    mask256 = _mm256_packs_epi32(
1296
2.71M
        mask256, _mm256_castsi128_si256(
1297
2.71M
                     _mm256_extracti128_si256(mask256, 1)));  // go to 16 bit
1298
2.71M
    res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1299
2.71M
    dst[r] = _mm256_castsi256_si128(res1);
1300
2.71M
    x += dx;
1301
2.71M
  }
1302
274k
}
1303
1304
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_8xN_internal_avx2(
1305
324k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1306
324k
  const int frac_bits = 6 - upsample_above;
1307
324k
  const int max_base_x = ((8 + N) - 1) << upsample_above;
1308
1309
324k
  assert(dx > 0);
1310
  // pre-filter above pixels
1311
  // store in temp buffers:
1312
  //   above[x] * 32 + 16
1313
  //   above[x+1] - above[x]
1314
  // final pixels will be calculated as:
1315
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1316
324k
  __m256i a0, a1, a32, a16, c3f;
1317
324k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1318
324k
  __m128i a0_x128, a1_x128;
1319
1320
324k
  a16 = _mm256_set1_epi16(16);
1321
324k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1322
324k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1323
324k
  c3f = _mm256_set1_epi16(0x3f);
1324
1325
324k
  int x = dx;
1326
4.33M
  for (int r = 0; r < N; r++) {
1327
4.01M
    __m256i b, res, res1, shift;
1328
1329
4.01M
    int base = x >> frac_bits;
1330
4.01M
    if (base >= max_base_x) {
1331
4.48k
      for (int i = r; i < N; ++i) {
1332
3.21k
        dst[i] = _mm256_castsi256_si128(a_mbase_x);  // save 8 values
1333
3.21k
      }
1334
1.27k
      return;
1335
1.27k
    }
1336
1337
4.01M
    a0_x128 = _mm_loadu_si128((__m128i *)(above + base));
1338
4.01M
    if (upsample_above) {
1339
889k
      __m128i mask, atmp0, atmp1, atmp2, atmp3;
1340
889k
      a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 8));
1341
889k
      atmp0 = _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
1342
889k
      atmp1 = _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
1343
889k
      atmp2 =
1344
889k
          _mm_shuffle_epi8(a0_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
1345
889k
      atmp3 =
1346
889k
          _mm_shuffle_epi8(a1_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
1347
889k
      mask =
1348
889k
          _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[0], _mm_set1_epi8(15));
1349
889k
      a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
1350
889k
      mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[0] + 16),
1351
889k
                            _mm_set1_epi8(15));
1352
889k
      a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
1353
1354
889k
      base_inc256 = _mm256_setr_epi16(base, base + 2, base + 4, base + 6,
1355
889k
                                      base + 8, base + 10, base + 12, base + 14,
1356
889k
                                      0, 0, 0, 0, 0, 0, 0, 0);
1357
889k
      shift = _mm256_srli_epi16(
1358
889k
          _mm256_and_si256(
1359
889k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
1360
889k
          1);
1361
3.12M
    } else {
1362
3.12M
      a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 1));
1363
3.12M
      base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1364
3.12M
                                      base + 4, base + 5, base + 6, base + 7, 0,
1365
3.12M
                                      0, 0, 0, 0, 0, 0, 0);
1366
3.12M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1367
3.12M
    }
1368
4.01M
    a0 = _mm256_castsi128_si256(a0_x128);
1369
4.01M
    a1 = _mm256_castsi128_si256(a1_x128);
1370
1371
4.01M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1372
4.01M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1373
4.01M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1374
1375
4.01M
    b = _mm256_mullo_epi16(diff, shift);
1376
4.01M
    res = _mm256_add_epi16(a32, b);
1377
4.01M
    res = _mm256_srli_epi16(res, 5);
1378
1379
4.01M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1380
4.01M
    res1 = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1381
4.01M
    dst[r] = _mm256_castsi256_si128(res1);
1382
4.01M
    x += dx;
1383
4.01M
  }
1384
324k
}
1385
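In both 8xN internal kernels above, the upsample_above branch has to read every second sample of the 2x upsampled edge: output pixel c interpolates between above[base + 2*c] and above[base + 2*c + 1], which is what the permutevar / even-odd shuffles assemble into the a0 and a1 operands. A hedged scalar sketch of one 8-wide row (edge clamping against max_base_x omitted; the helper name is illustrative):

#include <stdint.h>

static void z1_row8_upsampled_sketch(uint16_t *row, const uint16_t *above,
                                     int base, int shift) {
  for (int c = 0; c < 8; ++c) {
    const int a0 = above[base + 2 * c];      // even offsets -> a0 operand
    const int a1 = above[base + 2 * c + 1];  // odd offsets  -> a1 operand
    row[c] = (uint16_t)((a0 * 32 + 16 + (a1 - a0) * shift) >> 5);
  }
}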
1386
static void highbd_dr_prediction_z1_8xN_avx2(int N, uint16_t *dst,
1387
                                             ptrdiff_t stride,
1388
                                             const uint16_t *above,
1389
                                             int upsample_above, int dx,
1390
251k
                                             int bd) {
1391
251k
  __m128i dstvec[32];
1392
251k
  if (bd < 12) {
1393
131k
    highbd_dr_prediction_z1_8xN_internal_avx2(N, dstvec, above, upsample_above,
1394
131k
                                              dx);
1395
131k
  } else {
1396
120k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(N, dstvec, above,
1397
120k
                                                    upsample_above, dx);
1398
120k
  }
1399
2.61M
  for (int i = 0; i < N; i++) {
1400
2.36M
    _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
1401
2.36M
  }
1402
251k
}
1403
1404
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_16xN_internal_avx2(
1405
129k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1406
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1407
129k
  (void)upsample_above;
1408
129k
  const int frac_bits = 6;
1409
129k
  const int max_base_x = ((16 + N) - 1);
1410
1411
  // pre-filter above pixels
1412
  // store in temp buffers:
1413
  //   above[x] * 32 + 16
1414
  //   above[x+1] - above[x]
1415
  // final pixels will be calculated as:
1416
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1417
129k
  __m256i a0, a0_1, a1, a1_1, a32, a16;
1418
129k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1419
1420
129k
  a16 = _mm256_set1_epi32(16);
1421
129k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1422
129k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1423
1424
129k
  int x = dx;
1425
1.56M
  for (int r = 0; r < N; r++) {
1426
1.43M
    __m256i b, res[2], res1;
1427
1428
1.43M
    int base = x >> frac_bits;
1429
1.43M
    if (base >= max_base_x) {
1430
637
      for (int i = r; i < N; ++i) {
1431
525
        dstvec[i] = a_mbase_x;  // save 16 values
1432
525
      }
1433
112
      return;
1434
112
    }
1435
1.43M
    __m256i shift = _mm256_srli_epi32(
1436
1.43M
        _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1437
1438
1.43M
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1439
1.43M
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1440
1441
1.43M
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1442
1.43M
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1443
1.43M
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1444
1.43M
    b = _mm256_mullo_epi32(diff, shift);
1445
1446
1.43M
    res[0] = _mm256_add_epi32(a32, b);
1447
1.43M
    res[0] = _mm256_srli_epi32(res[0], 5);
1448
1.43M
    res[0] = _mm256_packus_epi32(
1449
1.43M
        res[0], _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1450
1451
1.43M
    int mdif = max_base_x - base;
1452
1.43M
    if (mdif > 8) {
1453
1.43M
      a0_1 =
1454
1.43M
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
1455
1.43M
      a1_1 =
1456
1.43M
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 9)));
1457
1458
1.43M
      diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1459
1.43M
      a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1460
1.43M
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1461
1.43M
      b = _mm256_mullo_epi32(diff, shift);
1462
1463
1.43M
      res[1] = _mm256_add_epi32(a32, b);
1464
1.43M
      res[1] = _mm256_srli_epi32(res[1], 5);
1465
1.43M
      res[1] = _mm256_packus_epi32(
1466
1.43M
          res[1], _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1467
1.43M
    } else {
1468
2.53k
      res[1] = a_mbase_x;
1469
2.53k
    }
1470
1.43M
    res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1471
1.43M
                                   1);  // 16 16bit values
1472
1473
1.43M
    base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1474
1.43M
                                    base + 4, base + 5, base + 6, base + 7,
1475
1.43M
                                    base + 8, base + 9, base + 10, base + 11,
1476
1.43M
                                    base + 12, base + 13, base + 14, base + 15);
1477
1.43M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1478
1.43M
    dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1479
1.43M
    x += dx;
1480
1.43M
  }
1481
129k
}
1482
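The _mm256_packus_epi32(res, <upper lane of res>) idiom used above and in the other 32-bit kernels works around the within-lane behaviour of packing: pairing res with a copy of its own upper 128 bits leaves the eight saturated 16-bit results contiguous in the low 128 bits, ready for the following insert/blend/cast step. A hedged scalar reading of that low half, assuming res holds eight 32-bit values r[0..7]:

#include <stdint.h>

static void pack_low8_sketch(uint16_t *out, const int32_t *r) {
  for (int i = 0; i < 8; ++i) {
    int32_t v = r[i];
    if (v < 0) v = 0;        // unsigned saturation, as packus performs
    if (v > 65535) v = 65535;
    out[i] = (uint16_t)v;
  }
}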
1483
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_16xN_internal_avx2(
1484
266k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1485
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1486
266k
  (void)upsample_above;
1487
266k
  const int frac_bits = 6;
1488
266k
  const int max_base_x = ((16 + N) - 1);
1489
1490
  // pre-filter above pixels
1491
  // store in temp buffers:
1492
  //   above[x] * 32 + 16
1493
  //   above[x+1] - above[x]
1494
  // final pixels will be calculated as:
1495
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1496
266k
  __m256i a0, a1, a32, a16, c3f;
1497
266k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1498
1499
266k
  a16 = _mm256_set1_epi16(16);
1500
266k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1501
266k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1502
266k
  c3f = _mm256_set1_epi16(0x3f);
1503
1504
266k
  int x = dx;
1505
4.96M
  for (int r = 0; r < N; r++) {
1506
4.69M
    __m256i b, res;
1507
1508
4.69M
    int base = x >> frac_bits;
1509
4.69M
    if (base >= max_base_x) {
1510
2.63k
      for (int i = r; i < N; ++i) {
1511
2.08k
        dstvec[i] = a_mbase_x;  // save 16 values
1512
2.08k
      }
1513
549
      return;
1514
549
    }
1515
4.69M
    __m256i shift =
1516
4.69M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1517
1518
4.69M
    a0 = _mm256_loadu_si256((__m256i *)(above + base));
1519
4.69M
    a1 = _mm256_loadu_si256((__m256i *)(above + base + 1));
1520
1521
4.69M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1522
4.69M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1523
4.69M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1524
4.69M
    b = _mm256_mullo_epi16(diff, shift);
1525
1526
4.69M
    res = _mm256_add_epi16(a32, b);
1527
4.69M
    res = _mm256_srli_epi16(res, 5);  // 16 16bit values
1528
1529
4.69M
    base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1530
4.69M
                                    base + 4, base + 5, base + 6, base + 7,
1531
4.69M
                                    base + 8, base + 9, base + 10, base + 11,
1532
4.69M
                                    base + 12, base + 13, base + 14, base + 15);
1533
4.69M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1534
4.69M
    dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1535
4.69M
    x += dx;
1536
4.69M
  }
1537
266k
}
1538
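The cmpgt/blendv pair at the end of each row in the kernels above implements the edge clamp per lane: any output column whose source index has run past max_base_x receives the replicated last edge sample instead of the interpolated value. A hedged scalar equivalent (names illustrative):

#include <stdint.h>

static void clamp_past_edge_sketch(uint16_t *row, int width, int base,
                                   int max_base_x, const uint16_t *above) {
  for (int c = 0; c < width; ++c) {
    if (base + c >= max_base_x) row[c] = above[max_base_x];
  }
}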
1539
static void highbd_dr_prediction_z1_16xN_avx2(int N, uint16_t *dst,
1540
                                              ptrdiff_t stride,
1541
                                              const uint16_t *above,
1542
                                              int upsample_above, int dx,
1543
204k
                                              int bd) {
1544
204k
  __m256i dstvec[64];
1545
204k
  if (bd < 12) {
1546
118k
    highbd_dr_prediction_z1_16xN_internal_avx2(N, dstvec, above, upsample_above,
1547
118k
                                               dx);
1548
118k
  } else {
1549
86.8k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(N, dstvec, above,
1550
86.8k
                                                     upsample_above, dx);
1551
86.8k
  }
1552
2.89M
  for (int i = 0; i < N; i++) {
1553
2.69M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
1554
2.69M
  }
1555
204k
}
1556
1557
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_32xN_internal_avx2(
1558
19.7k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1559
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1560
19.7k
  (void)upsample_above;
1561
19.7k
  const int frac_bits = 6;
1562
19.7k
  const int max_base_x = ((32 + N) - 1);
1563
1564
  // pre-filter above pixels
1565
  // store in temp buffers:
1566
  //   above[x] * 32 + 16
1567
  //   above[x+1] - above[x]
1568
  // final pixels will be calculated as:
1569
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1570
19.7k
  __m256i a0, a0_1, a1, a1_1, a32, a16, c3f;
1571
19.7k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1572
1573
19.7k
  a16 = _mm256_set1_epi32(16);
1574
19.7k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1575
19.7k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1576
19.7k
  c3f = _mm256_set1_epi16(0x3f);
1577
1578
19.7k
  int x = dx;
1579
448k
  for (int r = 0; r < N; r++) {
1580
428k
    __m256i b, res[2], res1;
1581
1582
428k
    int base = x >> frac_bits;
1583
428k
    if (base >= max_base_x) {
1584
0
      for (int i = r; i < N; ++i) {
1585
0
        dstvec[i] = a_mbase_x;  // save 32 values
1586
0
        dstvec[i + N] = a_mbase_x;
1587
0
      }
1588
0
      return;
1589
0
    }
1590
1591
428k
    __m256i shift =
1592
428k
        _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
1593
1594
1.28M
    for (int j = 0; j < 32; j += 16) {
1595
856k
      int mdif = max_base_x - (base + j);
1596
856k
      if (mdif <= 0) {
1597
538
        res1 = a_mbase_x;
1598
856k
      } else {
1599
856k
        a0 = _mm256_cvtepu16_epi32(
1600
856k
            _mm_loadu_si128((__m128i *)(above + base + j)));
1601
856k
        a1 = _mm256_cvtepu16_epi32(
1602
856k
            _mm_loadu_si128((__m128i *)(above + base + 1 + j)));
1603
1604
856k
        diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1605
856k
        a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1606
856k
        a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1607
856k
        b = _mm256_mullo_epi32(diff, shift);
1608
1609
856k
        res[0] = _mm256_add_epi32(a32, b);
1610
856k
        res[0] = _mm256_srli_epi32(res[0], 5);
1611
856k
        res[0] = _mm256_packus_epi32(
1612
856k
            res[0],
1613
856k
            _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1614
856k
        if (mdif > 8) {
1615
854k
          a0_1 = _mm256_cvtepu16_epi32(
1616
854k
              _mm_loadu_si128((__m128i *)(above + base + 8 + j)));
1617
854k
          a1_1 = _mm256_cvtepu16_epi32(
1618
854k
              _mm_loadu_si128((__m128i *)(above + base + 9 + j)));
1619
1620
854k
          diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1621
854k
          a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1622
854k
          a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1623
854k
          b = _mm256_mullo_epi32(diff, shift);
1624
1625
854k
          res[1] = _mm256_add_epi32(a32, b);
1626
854k
          res[1] = _mm256_srli_epi32(res[1], 5);
1627
854k
          res[1] = _mm256_packus_epi32(
1628
854k
              res[1],
1629
854k
              _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1630
854k
        } else {
1631
2.35k
          res[1] = a_mbase_x;
1632
2.35k
        }
1633
856k
        res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1634
856k
                                       1);  // 16 16bit values
1635
856k
        base_inc256 = _mm256_setr_epi16(
1636
856k
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1637
856k
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1638
856k
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1639
856k
            base + j + 13, base + j + 14, base + j + 15);
1640
1641
856k
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1642
856k
        res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1643
856k
      }
1644
856k
      if (!j) {
1645
428k
        dstvec[r] = res1;
1646
428k
      } else {
1647
428k
        dstvec[r + N] = res1;
1648
428k
      }
1649
856k
    }
1650
428k
    x += dx;
1651
428k
  }
1652
19.7k
}
1653
1654
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_32xN_internal_avx2(
1655
189k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1656
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1657
189k
  (void)upsample_above;
1658
189k
  const int frac_bits = 6;
1659
189k
  const int max_base_x = ((32 + N) - 1);
1660
1661
  // pre-filter above pixels
1662
  // store in temp buffers:
1663
  //   above[x] * 32 + 16
1664
  //   above[x+1] - above[x]
1665
  // final pixels will be calculated as:
1666
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1667
189k
  __m256i a0, a1, a32, a16, c3f;
1668
189k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1669
1670
189k
  a16 = _mm256_set1_epi16(16);
1671
189k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1672
189k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1673
189k
  c3f = _mm256_set1_epi16(0x3f);
1674
1675
189k
  int x = dx;
1676
5.33M
  for (int r = 0; r < N; r++) {
1677
5.14M
    __m256i b, res;
1678
1679
5.14M
    int base = x >> frac_bits;
1680
5.14M
    if (base >= max_base_x) {
1681
0
      for (int i = r; i < N; ++i) {
1682
0
        dstvec[i] = a_mbase_x;  // save 32 values
1683
0
        dstvec[i + N] = a_mbase_x;
1684
0
      }
1685
0
      return;
1686
0
    }
1687
1688
5.14M
    __m256i shift =
1689
5.14M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1690
1691
15.4M
    for (int j = 0; j < 32; j += 16) {
1692
10.2M
      int mdif = max_base_x - (base + j);
1693
10.2M
      if (mdif <= 0) {
1694
826
        res = a_mbase_x;
1695
10.2M
      } else {
1696
10.2M
        a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
1697
10.2M
        a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
1698
1699
10.2M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1700
10.2M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1701
10.2M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1702
10.2M
        b = _mm256_mullo_epi16(diff, shift);
1703
1704
10.2M
        res = _mm256_add_epi16(a32, b);
1705
10.2M
        res = _mm256_srli_epi16(res, 5);
1706
1707
10.2M
        base_inc256 = _mm256_setr_epi16(
1708
10.2M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1709
10.2M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1710
10.2M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1711
10.2M
            base + j + 13, base + j + 14, base + j + 15);
1712
1713
10.2M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1714
10.2M
        res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1715
10.2M
      }
1716
10.2M
      if (!j) {
1717
5.14M
        dstvec[r] = res;
1718
5.14M
      } else {
1719
5.14M
        dstvec[r + N] = res;
1720
5.14M
      }
1721
10.2M
    }
1722
5.14M
    x += dx;
1723
5.14M
  }
1724
189k
}
1725
1726
static void highbd_dr_prediction_z1_32xN_avx2(int N, uint16_t *dst,
1727
                                              ptrdiff_t stride,
1728
                                              const uint16_t *above,
1729
                                              int upsample_above, int dx,
1730
79.0k
                                              int bd) {
1731
79.0k
  __m256i dstvec[128];
1732
79.0k
  if (bd < 12) {
1733
69.2k
    highbd_dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above,
1734
69.2k
                                               dx);
1735
69.2k
  } else {
1736
9.81k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(N, dstvec, above,
1737
9.81k
                                                     upsample_above, dx);
1738
9.81k
  }
1739
2.18M
  for (int i = 0; i < N; i++) {
1740
2.10M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
1741
2.10M
    _mm256_storeu_si256((__m256i *)(dst + stride * i + 16), dstvec[i + N]);
1742
2.10M
  }
1743
79.0k
}
1744
1745
static void highbd_dr_prediction_32bit_z1_64xN_avx2(int N, uint16_t *dst,
1746
                                                    ptrdiff_t stride,
1747
                                                    const uint16_t *above,
1748
                                                    int upsample_above,
1749
18.3k
                                                    int dx) {
1750
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1751
18.3k
  (void)upsample_above;
1752
18.3k
  const int frac_bits = 6;
1753
18.3k
  const int max_base_x = ((64 + N) - 1);
1754
1755
  // pre-filter above pixels
1756
  // store in temp buffers:
1757
  //   above[x] * 32 + 16
1758
  //   above[x+1] - above[x]
1759
  // final pixels will be calculated as:
1760
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1761
18.3k
  __m256i a0, a0_1, a1, a1_1, a32, a16;
1762
18.3k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1763
1764
18.3k
  a16 = _mm256_set1_epi32(16);
1765
18.3k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1766
18.3k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1767
1768
18.3k
  int x = dx;
1769
1.12M
  for (int r = 0; r < N; r++, dst += stride) {
1770
1.10M
    __m256i b, res[2], res1;
1771
1772
1.10M
    int base = x >> frac_bits;
1773
1.10M
    if (base >= max_base_x) {
1774
0
      for (int i = r; i < N; ++i) {
1775
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 32 values
1776
0
        _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
1777
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
1778
0
        _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
1779
0
        dst += stride;
1780
0
      }
1781
0
      return;
1782
0
    }
1783
1784
1.10M
    __m256i shift = _mm256_srli_epi32(
1785
1.10M
        _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1786
1787
1.10M
    __m128i a0_128, a0_1_128, a1_128, a1_1_128;
1788
5.54M
    for (int j = 0; j < 64; j += 16) {
1789
4.43M
      int mdif = max_base_x - (base + j);
1790
4.43M
      if (mdif <= 0) {
1791
3.20k
        _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
1792
4.43M
      } else {
1793
4.43M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
1794
4.43M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
1795
4.43M
        a0 = _mm256_cvtepu16_epi32(a0_128);
1796
4.43M
        a1 = _mm256_cvtepu16_epi32(a1_128);
1797
1798
4.43M
        diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1799
4.43M
        a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1800
4.43M
        a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1801
4.43M
        b = _mm256_mullo_epi32(diff, shift);
1802
1803
4.43M
        res[0] = _mm256_add_epi32(a32, b);
1804
4.43M
        res[0] = _mm256_srli_epi32(res[0], 5);
1805
4.43M
        res[0] = _mm256_packus_epi32(
1806
4.43M
            res[0],
1807
4.43M
            _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1808
4.43M
        if (mdif > 8) {
1809
4.42M
          a0_1_128 = _mm_loadu_si128((__m128i *)(above + base + 8 + j));
1810
4.42M
          a1_1_128 = _mm_loadu_si128((__m128i *)(above + base + 9 + j));
1811
4.42M
          a0_1 = _mm256_cvtepu16_epi32(a0_1_128);
1812
4.42M
          a1_1 = _mm256_cvtepu16_epi32(a1_1_128);
1813
1814
4.42M
          diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1815
4.42M
          a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1816
4.42M
          a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1817
4.42M
          b = _mm256_mullo_epi32(diff, shift);
1818
1819
4.42M
          res[1] = _mm256_add_epi32(a32, b);
1820
4.42M
          res[1] = _mm256_srli_epi32(res[1], 5);
1821
4.42M
          res[1] = _mm256_packus_epi32(
1822
4.42M
              res[1],
1823
4.42M
              _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1824
4.42M
        } else {
1825
4.99k
          res[1] = a_mbase_x;
1826
4.99k
        }
1827
4.43M
        res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1828
4.43M
                                       1);  // 16 16bit values
1829
4.43M
        base_inc256 = _mm256_setr_epi16(
1830
4.43M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1831
4.43M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1832
4.43M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1833
4.43M
            base + j + 13, base + j + 14, base + j + 15);
1834
1835
4.43M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1836
4.43M
        res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1837
4.43M
        _mm256_storeu_si256((__m256i *)(dst + j), res1);
1838
4.43M
      }
1839
4.43M
    }
1840
1.10M
    x += dx;
1841
1.10M
  }
1842
18.3k
}
1843
1844
static void highbd_dr_prediction_z1_64xN_avx2(int N, uint16_t *dst,
1845
                                              ptrdiff_t stride,
1846
                                              const uint16_t *above,
1847
46.3k
                                              int upsample_above, int dx) {
1848
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1849
46.3k
  (void)upsample_above;
1850
46.3k
  const int frac_bits = 6;
1851
46.3k
  const int max_base_x = ((64 + N) - 1);
1852
1853
  // pre-filter above pixels
1854
  // store in temp buffers:
1855
  //   above[x] * 32 + 16
1856
  //   above[x+1] - above[x]
1857
  // final pixels will be calculated as:
1858
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1859
46.3k
  __m256i a0, a1, a32, a16, c3f;
1860
46.3k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1861
1862
46.3k
  a16 = _mm256_set1_epi16(16);
1863
46.3k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1864
46.3k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1865
46.3k
  c3f = _mm256_set1_epi16(0x3f);
1866
1867
46.3k
  int x = dx;
1868
2.50M
  for (int r = 0; r < N; r++, dst += stride) {
1869
2.46M
    __m256i b, res;
1870
1871
2.46M
    int base = x >> frac_bits;
1872
2.46M
    if (base >= max_base_x) {
1873
0
      for (int i = r; i < N; ++i) {
1874
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 32 values
1875
0
        _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
1876
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
1877
0
        _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
1878
0
        dst += stride;
1879
0
      }
1880
0
      return;
1881
0
    }
1882
1883
2.46M
    __m256i shift =
1884
2.46M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1885
1886
12.3M
    for (int j = 0; j < 64; j += 16) {
1887
9.85M
      int mdif = max_base_x - (base + j);
1888
9.85M
      if (mdif <= 0) {
1889
3.02k
        _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
1890
9.84M
      } else {
1891
9.84M
        a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
1892
9.84M
        a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
1893
1894
9.84M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1895
9.84M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1896
9.84M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1897
9.84M
        b = _mm256_mullo_epi16(diff, shift);
1898
1899
9.84M
        res = _mm256_add_epi16(a32, b);
1900
9.84M
        res = _mm256_srli_epi16(res, 5);
1901
1902
9.84M
        base_inc256 = _mm256_setr_epi16(
1903
9.84M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1904
9.84M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1905
9.84M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1906
9.84M
            base + j + 13, base + j + 14, base + j + 15);
1907
1908
9.84M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1909
9.84M
        res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1910
9.84M
        _mm256_storeu_si256((__m256i *)(dst + j), res);  // 16 16bit values
1911
9.84M
      }
1912
9.85M
    }
1913
2.46M
    x += dx;
1914
2.46M
  }
1915
46.3k
}
1916
1917
// Directional prediction, zone 1: 0 < angle < 90
1918
void av1_highbd_dr_prediction_z1_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
1919
                                      int bh, const uint16_t *above,
1920
                                      const uint16_t *left, int upsample_above,
1921
736k
                                      int dx, int dy, int bd) {
1922
736k
  (void)left;
1923
736k
  (void)dy;
1924
1925
736k
  switch (bw) {
1926
178k
    case 4:
1927
178k
      highbd_dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above,
1928
178k
                                       dx, bd);
1929
178k
      break;
1930
251k
    case 8:
1931
251k
      highbd_dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above,
1932
251k
                                       dx, bd);
1933
251k
      break;
1934
204k
    case 16:
1935
204k
      highbd_dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above,
1936
204k
                                        dx, bd);
1937
204k
      break;
1938
76.5k
    case 32:
1939
76.5k
      highbd_dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above,
1940
76.5k
                                        dx, bd);
1941
76.5k
      break;
1942
23.9k
    case 64:
1943
23.9k
      if (bd < 12) {
1944
10.7k
        highbd_dr_prediction_z1_64xN_avx2(bh, dst, stride, above,
1945
10.7k
                                          upsample_above, dx);
1946
13.1k
      } else {
1947
13.1k
        highbd_dr_prediction_32bit_z1_64xN_avx2(bh, dst, stride, above,
1948
13.1k
                                                upsample_above, dx);
1949
13.1k
      }
1950
23.9k
      break;
1951
0
    default: break;
1952
736k
  }
1953
736k
  return;
1954
736k
}
1955
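For reference, a hedged scalar sketch of what the zone-1 dispatcher above produces, following the interpolation and clamping comments in the kernels; it is written for illustration only and is not the library's C reference implementation:

#include <stddef.h>
#include <stdint.h>

static void z1_scalar_sketch(uint16_t *dst, ptrdiff_t stride, int bw, int bh,
                             const uint16_t *above, int upsample_above,
                             int dx) {
  const int frac_bits = 6 - upsample_above;
  const int max_base_x = ((bw + bh) - 1) << upsample_above;
  const int base_inc = 1 << upsample_above;
  int x = dx;
  for (int r = 0; r < bh; ++r, dst += stride, x += dx) {
    int base = x >> frac_bits;
    const int shift = ((x << upsample_above) & 0x3f) >> 1;
    for (int c = 0; c < bw; ++c, base += base_inc) {
      if (base >= max_base_x) {
        dst[c] = above[max_base_x];  // past the edge: replicate last sample
      } else {
        dst[c] = (uint16_t)((above[base] * 32 + 16 +
                             (above[base + 1] - above[base]) * shift) >> 5);
      }
    }
  }
}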
1956
static void highbd_transpose_TX_16X16(const uint16_t *src, ptrdiff_t pitchSrc,
1957
588k
                                      uint16_t *dst, ptrdiff_t pitchDst) {
1958
588k
  __m256i r[16];
1959
588k
  __m256i d[16];
1960
10.0M
  for (int j = 0; j < 16; j++) {
1961
9.41M
    r[j] = _mm256_loadu_si256((__m256i *)(src + j * pitchSrc));
1962
9.41M
  }
1963
588k
  highbd_transpose16x16_avx2(r, d);
1964
10.0M
  for (int j = 0; j < 16; j++) {
1965
9.41M
    _mm256_storeu_si256((__m256i *)(dst + j * pitchDst), d[j]);
1966
9.41M
  }
1967
588k
}
1968
1969
static void highbd_transpose(const uint16_t *src, ptrdiff_t pitchSrc,
1970
                             uint16_t *dst, ptrdiff_t pitchDst, int width,
1971
43.2k
                             int height) {
1972
211k
  for (int j = 0; j < height; j += 16)
1973
756k
    for (int i = 0; i < width; i += 16)
1974
588k
      highbd_transpose_TX_16X16(src + i * pitchSrc + j, pitchSrc,
1975
588k
                                dst + j * pitchDst + i, pitchDst);
1976
43.2k
}
1977
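highbd_transpose() above copies a width x height tile in 16x16 blocks while swapping rows and columns; it is presumably what lets later zone kernels work on transposed coordinates when predicting from the left edge. A hedged scalar equivalent of the whole copy:

#include <stddef.h>
#include <stdint.h>

static void highbd_transpose_scalar_sketch(const uint16_t *src,
                                           ptrdiff_t pitchSrc, uint16_t *dst,
                                           ptrdiff_t pitchDst, int width,
                                           int height) {
  for (int j = 0; j < height; ++j)
    for (int i = 0; i < width; ++i)
      dst[j * pitchDst + i] = src[i * pitchSrc + j];
}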
1978
static void highbd_dr_prediction_32bit_z2_Nx4_avx2(
1979
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
1980
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
1981
310k
    int dy) {
1982
310k
  const int min_base_x = -(1 << upsample_above);
1983
310k
  const int min_base_y = -(1 << upsample_left);
1984
310k
  const int frac_bits_x = 6 - upsample_above;
1985
310k
  const int frac_bits_y = 6 - upsample_left;
1986
1987
310k
  assert(dx > 0);
1988
  // pre-filter above pixels
1989
  // store in temp buffers:
1990
  //   above[x] * 32 + 16
1991
  //   above[x+1] - above[x]
1992
  // final pixels will be calculated as:
1993
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1994
310k
  __m256i a0_x, a1_x, a32, a16;
1995
310k
  __m256i diff;
1996
310k
  __m128i c3f, min_base_y128;
1997
1998
310k
  a16 = _mm256_set1_epi32(16);
1999
310k
  c3f = _mm_set1_epi32(0x3f);
2000
310k
  min_base_y128 = _mm_set1_epi32(min_base_y);
2001
2002
1.83M
  for (int r = 0; r < N; r++) {
2003
1.52M
    __m256i b, res, shift;
2004
1.52M
    __m128i resx, resy, resxy;
2005
1.52M
    __m128i a0_x128, a1_x128;
2006
1.52M
    int y = r + 1;
2007
1.52M
    int base_x = (-y * dx) >> frac_bits_x;
2008
1.52M
    int base_shift = 0;
2009
1.52M
    if (base_x < (min_base_x - 1)) {
2010
1.25M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2011
1.25M
    }
2012
1.52M
    int base_min_diff =
2013
1.52M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2014
1.52M
    if (base_min_diff > 4) {
2015
1.02M
      base_min_diff = 4;
2016
1.02M
    } else {
2017
499k
      if (base_min_diff < 0) base_min_diff = 0;
2018
499k
    }
2019
2020
1.52M
    if (base_shift > 3) {
2021
1.02M
      a0_x = _mm256_setzero_si256();
2022
1.02M
      a1_x = _mm256_setzero_si256();
2023
1.02M
      shift = _mm256_setzero_si256();
2024
1.02M
    } else {
2025
499k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2026
499k
      if (upsample_above) {
2027
140k
        a0_x128 = _mm_shuffle_epi8(a0_x128,
2028
140k
                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
2029
140k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
2030
2031
140k
        shift = _mm256_castsi128_si256(_mm_srli_epi32(
2032
140k
            _mm_and_si128(
2033
140k
                _mm_slli_epi32(
2034
140k
                    _mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
2035
140k
                                   (2 << 6) - y * dx, (3 << 6) - y * dx),
2036
140k
                    upsample_above),
2037
140k
                c3f),
2038
140k
            1));
2039
359k
      } else {
2040
359k
        a0_x128 =
2041
359k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2042
359k
        a1_x128 = _mm_srli_si128(a0_x128, 2);
2043
2044
359k
        shift = _mm256_castsi128_si256(_mm_srli_epi32(
2045
359k
            _mm_and_si128(_mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
2046
359k
                                         (2 << 6) - y * dx, (3 << 6) - y * dx),
2047
359k
                          c3f),
2048
359k
            1));
2049
359k
      }
2050
499k
      a0_x = _mm256_cvtepu16_epi32(a0_x128);
2051
499k
      a1_x = _mm256_cvtepu16_epi32(a1_x128);
2052
499k
    }
2053
    // y calc
2054
1.52M
    __m128i a0_y, a1_y, shifty;
2055
1.52M
    if (base_x < min_base_x) {
2056
1.34M
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2057
1.34M
      DECLARE_ALIGNED(32, int, base_y_c[4]);
2058
1.34M
      r6 = _mm_set1_epi32(r << 6);
2059
1.34M
      dy128 = _mm_set1_epi32(dy);
2060
1.34M
      c1234 = _mm_setr_epi32(1, 2, 3, 4);
2061
1.34M
      y_c128 = _mm_sub_epi32(r6, _mm_mullo_epi32(c1234, dy128));
2062
1.34M
      base_y_c128 = _mm_srai_epi32(y_c128, frac_bits_y);
2063
1.34M
      mask128 = _mm_cmpgt_epi32(min_base_y128, base_y_c128);
2064
1.34M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2065
1.34M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2066
2067
1.34M
      a0_y = _mm_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
2068
1.34M
                            left[base_y_c[2]], left[base_y_c[3]]);
2069
1.34M
      a1_y = _mm_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2070
1.34M
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1]);
2071
2072
1.34M
      if (upsample_left) {
2073
311k
        shifty = _mm_srli_epi32(
2074
311k
            _mm_and_si128(_mm_slli_epi32(y_c128, upsample_left), c3f), 1);
2075
1.03M
      } else {
2076
1.03M
        shifty = _mm_srli_epi32(_mm_and_si128(y_c128, c3f), 1);
2077
1.03M
      }
2078
1.34M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2079
1.34M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2080
1.34M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2081
1.34M
    }
2082
2083
1.52M
    diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2084
1.52M
    a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2085
1.52M
    a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2086
2087
1.52M
    b = _mm256_mullo_epi32(diff, shift);
2088
1.52M
    res = _mm256_add_epi32(a32, b);
2089
1.52M
    res = _mm256_srli_epi32(res, 5);
2090
2091
1.52M
    resx = _mm256_castsi256_si128(res);
2092
1.52M
    resx = _mm_packus_epi32(resx, resx);
2093
2094
1.52M
    resy = _mm256_extracti128_si256(res, 1);
2095
1.52M
    resy = _mm_packus_epi32(resy, resy);
2096
2097
1.52M
    resxy =
2098
1.52M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2099
1.52M
    _mm_storel_epi64((__m128i *)(dst), resxy);
2100
1.52M
    dst += stride;
2101
1.52M
  }
2102
310k
}
2103
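The zone-2 kernels (this one and those that follow) pick, for every output pixel, either an interpolated sample from the above row or, once base_x falls below min_base_x, an interpolated sample from the left column; resx and resy hold the two candidates and HighbdBaseMask[base_min_diff] blends the first base_min_diff lanes from the left result. A hedged scalar sketch of that per-pixel choice (illustrative, not the library's C reference):

#include <stddef.h>
#include <stdint.h>

static void z2_scalar_sketch(uint16_t *dst, ptrdiff_t stride, int bw, int bh,
                             const uint16_t *above, const uint16_t *left,
                             int upsample_above, int upsample_left, int dx,
                             int dy) {
  const int min_base_x = -(1 << upsample_above);
  const int frac_bits_x = 6 - upsample_above;
  const int frac_bits_y = 6 - upsample_left;
  for (int r = 0; r < bh; ++r, dst += stride) {
    for (int c = 0; c < bw; ++c) {
      int val;
      const int x = (c << 6) - (r + 1) * dx;
      const int base_x = x >> frac_bits_x;
      if (base_x >= min_base_x) {  // still inside the above row
        const int shift = ((x * (1 << upsample_above)) & 0x3f) >> 1;
        val = above[base_x] * 32 + 16 +
              (above[base_x + 1] - above[base_x]) * shift;
      } else {  // fall back to the left column
        const int y = (r << 6) - (c + 1) * dy;
        const int base_y = y >> frac_bits_y;
        const int shift = ((y * (1 << upsample_left)) & 0x3f) >> 1;
        val = left[base_y] * 32 + 16 +
              (left[base_y + 1] - left[base_y]) * shift;
      }
      dst[c] = (uint16_t)(val >> 5);
    }
  }
}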
2104
static void highbd_dr_prediction_z2_Nx4_avx2(
2105
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2106
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2107
144k
    int dy) {
2108
144k
  const int min_base_x = -(1 << upsample_above);
2109
144k
  const int min_base_y = -(1 << upsample_left);
2110
144k
  const int frac_bits_x = 6 - upsample_above;
2111
144k
  const int frac_bits_y = 6 - upsample_left;
2112
2113
144k
  assert(dx > 0);
2114
  // pre-filter above pixels
2115
  // store in temp buffers:
2116
  //   above[x] * 32 + 16
2117
  //   above[x+1] - above[x]
2118
  // final pixels will be calculated as:
2119
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2120
144k
  __m256i a0_x, a1_x, a32, a16;
2121
144k
  __m256i diff;
2122
144k
  __m128i c3f, min_base_y128;
2123
2124
144k
  a16 = _mm256_set1_epi16(16);
2125
144k
  c3f = _mm_set1_epi16(0x3f);
2126
144k
  min_base_y128 = _mm_set1_epi16(min_base_y);
2127
2128
1.14M
  for (int r = 0; r < N; r++) {
2129
998k
    __m256i b, res, shift;
2130
998k
    __m128i resx, resy, resxy;
2131
998k
    __m128i a0_x128, a1_x128;
2132
998k
    int y = r + 1;
2133
998k
    int base_x = (-y * dx) >> frac_bits_x;
2134
998k
    int base_shift = 0;
2135
998k
    if (base_x < (min_base_x - 1)) {
2136
726k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2137
726k
    }
2138
998k
    int base_min_diff =
2139
998k
        (min_base_x - base_x + upsample_above) >> upsample_above;
2140
998k
    if (base_min_diff > 4) {
2141
465k
      base_min_diff = 4;
2142
532k
    } else {
2143
532k
      if (base_min_diff < 0) base_min_diff = 0;
2144
532k
    }
2145
2146
998k
    if (base_shift > 3) {
2147
465k
      a0_x = _mm256_setzero_si256();
2148
465k
      a1_x = _mm256_setzero_si256();
2149
465k
      shift = _mm256_setzero_si256();
2150
532k
    } else {
2151
532k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2152
532k
      if (upsample_above) {
2153
167k
        a0_x128 = _mm_shuffle_epi8(a0_x128,
2154
167k
                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
2155
167k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
2156
2157
167k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2158
167k
            _mm_and_si128(
2159
167k
                _mm_slli_epi16(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2160
167k
                                              (2 << 6) - y * dx,
2161
167k
                                              (3 << 6) - y * dx, 0, 0, 0, 0),
2162
167k
                               upsample_above),
2163
167k
                c3f),
2164
167k
            1));
2165
365k
      } else {
2166
365k
        a0_x128 =
2167
365k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2168
365k
        a1_x128 = _mm_srli_si128(a0_x128, 2);
2169
2170
365k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2171
365k
            _mm_and_si128(
2172
365k
                _mm_setr_epi16(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
2173
365k
                               (3 << 6) - y * dx, 0, 0, 0, 0),
2174
365k
                c3f),
2175
365k
            1));
2176
365k
      }
2177
532k
      a0_x = _mm256_castsi128_si256(a0_x128);
2178
532k
      a1_x = _mm256_castsi128_si256(a1_x128);
2179
532k
    }
2180
    // y calc
2181
998k
    __m128i a0_y, a1_y, shifty;
2182
998k
    if (base_x < min_base_x) {
2183
830k
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2184
830k
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
2185
830k
      r6 = _mm_set1_epi16(r << 6);
2186
830k
      dy128 = _mm_set1_epi16(dy);
2187
830k
      c1234 = _mm_setr_epi16(1, 2, 3, 4, 0, 0, 0, 0);
2188
830k
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
2189
830k
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
2190
830k
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
2191
830k
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2192
830k
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2193
2194
830k
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
2195
830k
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
2196
830k
      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2197
830k
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1], 0, 0,
2198
830k
                            0, 0);
2199
2200
830k
      if (upsample_left) {
2201
239k
        shifty = _mm_srli_epi16(
2202
239k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
2203
590k
      } else {
2204
590k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
2205
590k
      }
2206
830k
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2207
830k
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2208
830k
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2209
830k
    }
2210
2211
998k
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2212
998k
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2213
998k
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2214
2215
998k
    b = _mm256_mullo_epi16(diff, shift);
2216
998k
    res = _mm256_add_epi16(a32, b);
2217
998k
    res = _mm256_srli_epi16(res, 5);
2218
2219
998k
    resx = _mm256_castsi256_si128(res);
2220
998k
    resy = _mm256_extracti128_si256(res, 1);
2221
998k
    resxy =
2222
998k
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2223
998k
    _mm_storel_epi64((__m128i *)(dst), resxy);
2224
998k
    dst += stride;
2225
998k
  }
2226
144k
}
2227
2228
static void highbd_dr_prediction_32bit_z2_Nx8_avx2(
2229
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2230
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2231
264k
    int dy) {
2232
264k
  const int min_base_x = -(1 << upsample_above);
2233
264k
  const int min_base_y = -(1 << upsample_left);
2234
264k
  const int frac_bits_x = 6 - upsample_above;
2235
264k
  const int frac_bits_y = 6 - upsample_left;
2236
2237
  // pre-filter above pixels
2238
  // store in temp buffers:
2239
  //   above[x] * 32 + 16
2240
  //   above[x+1] - above[x]
2241
  // final pixels will be calculated as:
2242
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2243
264k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c3f, min_base_y256;
2244
264k
  __m256i diff;
2245
264k
  __m128i a0_x128, a1_x128;
2246
2247
264k
  a16 = _mm256_set1_epi32(16);
2248
264k
  c3f = _mm256_set1_epi32(0x3f);
2249
264k
  min_base_y256 = _mm256_set1_epi32(min_base_y);
2250
2251
2.79M
  for (int r = 0; r < N; r++) {
2252
2.53M
    __m256i b, res, shift;
2253
2.53M
    __m128i resx, resy, resxy;
2254
2.53M
    int y = r + 1;
2255
2.53M
    int base_x = (-y * dx) >> frac_bits_x;
2256
2.53M
    int base_shift = 0;
2257
2.53M
    if (base_x < (min_base_x - 1)) {
2258
1.97M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2259
1.97M
    }
2260
2.53M
    int base_min_diff =
2261
2.53M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2262
2.53M
    if (base_min_diff > 8) {
2263
1.26M
      base_min_diff = 8;
2264
1.26M
    } else {
2265
1.26M
      if (base_min_diff < 0) base_min_diff = 0;
2266
1.26M
    }
2267
2268
2.53M
    if (base_shift > 7) {
2269
1.26M
      resx = _mm_setzero_si128();
2270
1.26M
    } else {
2271
1.26M
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2272
1.26M
      if (upsample_above) {
2273
82.6k
        __m128i mask, atmp0, atmp1, atmp2, atmp3;
2274
82.6k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
2275
82.6k
        atmp0 = _mm_shuffle_epi8(a0_x128,
2276
82.6k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2277
82.6k
        atmp1 = _mm_shuffle_epi8(a1_x128,
2278
82.6k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2279
82.6k
        atmp2 = _mm_shuffle_epi8(
2280
82.6k
            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2281
82.6k
        atmp3 = _mm_shuffle_epi8(
2282
82.6k
            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2283
82.6k
        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
2284
82.6k
                              _mm_set1_epi8(15));
2285
82.6k
        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
2286
82.6k
        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
2287
82.6k
                              _mm_set1_epi8(15));
2288
82.6k
        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
2289
82.6k
        shift = _mm256_srli_epi32(
2290
82.6k
            _mm256_and_si256(
2291
82.6k
                _mm256_slli_epi32(
2292
82.6k
                    _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx,
2293
82.6k
                                      (2 << 6) - y * dx, (3 << 6) - y * dx,
2294
82.6k
                                      (4 << 6) - y * dx, (5 << 6) - y * dx,
2295
82.6k
                                      (6 << 6) - y * dx, (7 << 6) - y * dx),
2296
82.6k
                    upsample_above),
2297
82.6k
                c3f),
2298
82.6k
            1);
2299
1.18M
      } else {
2300
1.18M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
2301
1.18M
        a0_x128 =
2302
1.18M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2303
1.18M
        a1_x128 =
2304
1.18M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2305
2306
1.18M
        shift = _mm256_srli_epi32(
2307
1.18M
            _mm256_and_si256(
2308
1.18M
                _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
2309
1.18M
                                  (3 << 6) - y * dx, (4 << 6) - y * dx,
2310
1.18M
                                  (5 << 6) - y * dx, (6 << 6) - y * dx,
2311
1.18M
                                  (7 << 6) - y * dx),
2312
1.18M
                c3f),
2313
1.18M
            1);
2314
1.18M
      }
2315
1.26M
      a0_x = _mm256_cvtepu16_epi32(a0_x128);
2316
1.26M
      a1_x = _mm256_cvtepu16_epi32(a1_x128);
2317
2318
1.26M
      diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2319
1.26M
      a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2320
1.26M
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2321
2322
1.26M
      b = _mm256_mullo_epi32(diff, shift);
2323
1.26M
      res = _mm256_add_epi32(a32, b);
2324
1.26M
      res = _mm256_srli_epi32(res, 5);
2325
2326
1.26M
      resx = _mm256_castsi256_si128(_mm256_packus_epi32(
2327
1.26M
          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
2328
1.26M
    }
2329
    // y calc
2330
2.53M
    if (base_x < min_base_x) {
2331
2.17M
      DECLARE_ALIGNED(32, int, base_y_c[8]);
2332
2.17M
      __m256i r6, c256, dy256, y_c256, base_y_c256, mask256;
2333
2.17M
      r6 = _mm256_set1_epi32(r << 6);
2334
2.17M
      dy256 = _mm256_set1_epi32(dy);
2335
2.17M
      c256 = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
2336
2.17M
      y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2337
2.17M
      base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
2338
2.17M
      mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2339
2.17M
      base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2340
2.17M
      _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2341
2342
2.17M
      a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2343
2.17M
          left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2344
2.17M
          left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2345
2.17M
          left[base_y_c[6]], left[base_y_c[7]]));
2346
2.17M
      a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2347
2.17M
          left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
2348
2.17M
          left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2349
2.17M
          left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
2350
2351
2.17M
      if (upsample_left) {
2352
120k
        shift = _mm256_srli_epi32(
2353
120k
            _mm256_and_si256(_mm256_slli_epi32((y_c256), upsample_left), c3f),
2354
120k
            1);
2355
2.04M
      } else {
2356
2.04M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
2357
2.04M
      }
2358
2.17M
      diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2359
2.17M
      a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2360
2.17M
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2361
2362
2.17M
      b = _mm256_mullo_epi32(diff, shift);
2363
2.17M
      res = _mm256_add_epi32(a32, b);
2364
2.17M
      res = _mm256_srli_epi32(res, 5);
2365
2366
2.17M
      resy = _mm256_castsi256_si128(_mm256_packus_epi32(
2367
2.17M
          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
2368
2.17M
    } else {
2369
363k
      resy = resx;
2370
363k
    }
2371
2.53M
    resxy =
2372
2.53M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2373
2.53M
    _mm_storeu_si128((__m128i *)(dst), resxy);
2374
2.53M
    dst += stride;
2375
2.53M
  }
2376
264k
}
2377
2378
static void highbd_dr_prediction_z2_Nx8_avx2(
2379
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2380
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2381
213k
    int dy) {
2382
213k
  const int min_base_x = -(1 << upsample_above);
2383
213k
  const int min_base_y = -(1 << upsample_left);
2384
213k
  const int frac_bits_x = 6 - upsample_above;
2385
213k
  const int frac_bits_y = 6 - upsample_left;
2386
2387
  // pre-filter above pixels
2388
  // store in temp buffers:
2389
  //   above[x] * 32 + 16
2390
  //   above[x+1] - above[x]
2391
  // final pixels will be calculated as:
2392
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2393
213k
  __m128i c3f, min_base_y128;
2394
213k
  __m256i a0_x, a1_x, diff, a32, a16;
2395
213k
  __m128i a0_x128, a1_x128;
2396
2397
213k
  a16 = _mm256_set1_epi16(16);
2398
213k
  c3f = _mm_set1_epi16(0x3f);
2399
213k
  min_base_y128 = _mm_set1_epi16(min_base_y);
2400
2401
2.34M
  for (int r = 0; r < N; r++) {
2402
2.13M
    __m256i b, res, shift;
2403
2.13M
    __m128i resx, resy, resxy;
2404
2.13M
    int y = r + 1;
2405
2.13M
    int base_x = (-y * dx) >> frac_bits_x;
2406
2.13M
    int base_shift = 0;
2407
2.13M
    if (base_x < (min_base_x - 1)) {
2408
1.56M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2409
1.56M
    }
2410
2.13M
    int base_min_diff =
2411
2.13M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2412
2.13M
    if (base_min_diff > 8) {
2413
921k
      base_min_diff = 8;
2414
1.21M
    } else {
2415
1.21M
      if (base_min_diff < 0) base_min_diff = 0;
2416
1.21M
    }
2417
2418
2.13M
    if (base_shift > 7) {
2419
921k
      a0_x = _mm256_setzero_si256();
2420
921k
      a1_x = _mm256_setzero_si256();
2421
921k
      shift = _mm256_setzero_si256();
2422
1.21M
    } else {
2423
1.21M
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2424
1.21M
      if (upsample_above) {
2425
308k
        __m128i mask, atmp0, atmp1, atmp2, atmp3;
2426
308k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
2427
308k
        atmp0 = _mm_shuffle_epi8(a0_x128,
2428
308k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2429
308k
        atmp1 = _mm_shuffle_epi8(a1_x128,
2430
308k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2431
308k
        atmp2 = _mm_shuffle_epi8(
2432
308k
            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2433
308k
        atmp3 = _mm_shuffle_epi8(
2434
308k
            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2435
308k
        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
2436
308k
                              _mm_set1_epi8(15));
2437
308k
        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
2438
308k
        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
2439
308k
                              _mm_set1_epi8(15));
2440
308k
        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
2441
2442
308k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2443
308k
            _mm_and_si128(
2444
308k
                _mm_slli_epi16(
2445
308k
                    _mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2446
308k
                                   (2 << 6) - y * dx, (3 << 6) - y * dx,
2447
308k
                                   (4 << 6) - y * dx, (5 << 6) - y * dx,
2448
308k
                                   (6 << 6) - y * dx, (7 << 6) - y * dx),
2449
308k
                    upsample_above),
2450
308k
                c3f),
2451
308k
            1));
2452
906k
      } else {
2453
906k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
2454
906k
        a0_x128 =
2455
906k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2456
906k
        a1_x128 =
2457
906k
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2458
2459
906k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2460
906k
            _mm_and_si128(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2461
906k
                                         (2 << 6) - y * dx, (3 << 6) - y * dx,
2462
906k
                                         (4 << 6) - y * dx, (5 << 6) - y * dx,
2463
906k
                                         (6 << 6) - y * dx, (7 << 6) - y * dx),
2464
906k
                          c3f),
2465
906k
            1));
2466
906k
      }
2467
1.21M
      a0_x = _mm256_castsi128_si256(a0_x128);
2468
1.21M
      a1_x = _mm256_castsi128_si256(a1_x128);
2469
1.21M
    }
2470
2471
    // y calc
2472
2.13M
    __m128i a0_y, a1_y, shifty;
2473
2.13M
    if (base_x < min_base_x) {
2474
1.75M
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
2475
1.75M
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2476
1.75M
      r6 = _mm_set1_epi16(r << 6);
2477
1.75M
      dy128 = _mm_set1_epi16(dy);
2478
1.75M
      c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
2479
1.75M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
2480
1.75M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
2481
1.75M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
2482
1.75M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2483
1.75M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2484
2485
1.75M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
2486
1.75M
                            left[base_y_c[2]], left[base_y_c[3]],
2487
1.75M
                            left[base_y_c[4]], left[base_y_c[5]],
2488
1.75M
                            left[base_y_c[6]], left[base_y_c[7]]);
2489
1.75M
      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2490
1.75M
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1],
2491
1.75M
                            left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2492
1.75M
                            left[base_y_c[6] + 1], left[base_y_c[7] + 1]);
2493
2494
1.75M
      if (upsample_left) {
2495
429k
        shifty = _mm_srli_epi16(
2496
429k
            _mm_and_si128(_mm_slli_epi16((y_c128), upsample_left), c3f), 1);
2497
1.32M
      } else {
2498
1.32M
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
2499
1.32M
      }
2500
1.75M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2501
1.75M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2502
1.75M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2503
1.75M
    }
2504
2505
2.13M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2506
2.13M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2507
2.13M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2508
2509
2.13M
    b = _mm256_mullo_epi16(diff, shift);
2510
2.13M
    res = _mm256_add_epi16(a32, b);
2511
2.13M
    res = _mm256_srli_epi16(res, 5);
2512
2513
2.13M
    resx = _mm256_castsi256_si128(res);
2514
2.13M
    resy = _mm256_extracti128_si256(res, 1);
2515
2516
2.13M
    resxy =
2517
2.13M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2518
2.13M
    _mm_storeu_si128((__m128i *)(dst), resxy);
2519
2.13M
    dst += stride;
2520
2.13M
  }
2521
213k
}
2522
2523
static void highbd_dr_prediction_32bit_z2_HxW_avx2(
2524
    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2525
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2526
166k
    int dy) {
2527
  // here upsample_above and upsample_left are 0 by design of
2528
  // av1_use_intra_edge_upsample
2529
166k
  const int min_base_x = -1;
2530
166k
  const int min_base_y = -1;
2531
166k
  (void)upsample_above;
2532
166k
  (void)upsample_left;
2533
166k
  const int frac_bits_x = 6;
2534
166k
  const int frac_bits_y = 6;
2535
2536
  // pre-filter above pixels
2537
  // store in temp buffers:
2538
  //   above[x] * 32 + 16
2539
  //   above[x+1] - above[x]
2540
  // final pixels will be calculated as:
2541
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
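  // This kernel keeps the arithmetic in 32-bit lanes: with 12-bit samples the
  // interpolated value itself can exceed 16 bits ((2^12 - 1) * 32 + 16 =
  // 131056), whereas for bd < 12 it tops out at (2^10 - 1) * 32 + 16 = 32752.
  // The dispatcher below therefore routes bd < 12 to the 16-bit kernels and
  // 12-bit input here.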
2542
166k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a0_1_x, a1_1_x, a16, c1;
2543
166k
  __m256i diff, min_base_y256, c3f, dy256, c1234, c0123, c8;
2544
166k
  __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
2545
166k
  DECLARE_ALIGNED(32, int, base_y_c[16]);
2546
2547
166k
  a16 = _mm256_set1_epi32(16);
2548
166k
  c1 = _mm256_srli_epi32(a16, 4);
2549
166k
  c8 = _mm256_srli_epi32(a16, 1);
2550
166k
  min_base_y256 = _mm256_set1_epi32(min_base_y);
2551
166k
  c3f = _mm256_set1_epi32(0x3f);
2552
166k
  dy256 = _mm256_set1_epi32(dy);
2553
166k
  c0123 = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
2554
166k
  c1234 = _mm256_add_epi32(c0123, c1);
2555
2556
2.45M
  for (int r = 0; r < H; r++) {
2557
2.28M
    __m256i b, res, shift, ydx;
2558
2.28M
    __m256i resx[2], resy[2];
2559
2.28M
    __m256i resxy, j256, r6;
2560
7.08M
    for (int j = 0; j < W; j += 16) {
2561
4.79M
      j256 = _mm256_set1_epi32(j);
2562
4.79M
      int y = r + 1;
2563
4.79M
      ydx = _mm256_set1_epi32(y * dx);
2564
2565
4.79M
      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
2566
4.79M
      int base_shift = 0;
2567
4.79M
      if ((base_x) < (min_base_x - 1)) {
2568
3.03M
        base_shift = (min_base_x - base_x - 1);
2569
3.03M
      }
2570
4.79M
      int base_min_diff = (min_base_x - base_x);
2571
4.79M
      if (base_min_diff > 16) {
2572
1.99M
        base_min_diff = 16;
2573
2.80M
      } else {
2574
2.80M
        if (base_min_diff < 0) base_min_diff = 0;
2575
2.80M
      }
2576
2577
4.79M
      if (base_shift > 7) {
2578
2.40M
        resx[0] = _mm256_setzero_si256();
2579
2.40M
      } else {
2580
2.39M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2581
2.39M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
2582
2.39M
        a0_x128 =
2583
2.39M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2584
2.39M
        a1_x128 =
2585
2.39M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2586
2587
2.39M
        a0_x = _mm256_cvtepu16_epi32(a0_x128);
2588
2.39M
        a1_x = _mm256_cvtepu16_epi32(a1_x128);
2589
2590
2.39M
        r6 = _mm256_slli_epi32(_mm256_add_epi32(c0123, j256), 6);
2591
2.39M
        shift = _mm256_srli_epi32(
2592
2.39M
            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
2593
2594
2.39M
        diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2595
2.39M
        a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2596
2.39M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2597
2598
2.39M
        b = _mm256_mullo_epi32(diff, shift);
2599
2.39M
        res = _mm256_add_epi32(a32, b);
2600
2.39M
        res = _mm256_srli_epi32(res, 5);
2601
2602
2.39M
        resx[0] = _mm256_packus_epi32(
2603
2.39M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2604
2.39M
      }
2605
4.79M
      int base_shift8 = 0;
2606
4.79M
      if ((base_x + 8) < (min_base_x - 1)) {
2607
2.35M
        base_shift8 = (min_base_x - (base_x + 8) - 1);
2608
2.35M
      }
2609
4.79M
      if (base_shift8 > 7) {
2610
1.99M
        resx[1] = _mm256_setzero_si256();
2611
2.80M
      } else {
2612
2.80M
        a0_1_x128 =
2613
2.80M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 8));
2614
2.80M
        a1_1_x128 =
2615
2.80M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 9));
2616
2.80M
        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
2617
2.80M
                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
2618
2.80M
        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
2619
2.80M
                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
2620
2621
2.80M
        a0_1_x = _mm256_cvtepu16_epi32(a0_1_x128);
2622
2.80M
        a1_1_x = _mm256_cvtepu16_epi32(a1_1_x128);
2623
2624
2.80M
        r6 = _mm256_slli_epi32(
2625
2.80M
            _mm256_add_epi32(c0123, _mm256_add_epi32(j256, c8)), 6);
2626
2.80M
        shift = _mm256_srli_epi32(
2627
2.80M
            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
2628
2629
2.80M
        diff = _mm256_sub_epi32(a1_1_x, a0_1_x);  // a[x+1] - a[x]
2630
2.80M
        a32 = _mm256_slli_epi32(a0_1_x, 5);       // a[x] * 32
2631
2.80M
        a32 = _mm256_add_epi32(a32, a16);         // a[x] * 32 + 16
2632
2.80M
        b = _mm256_mullo_epi32(diff, shift);
2633
2634
2.80M
        resx[1] = _mm256_add_epi32(a32, b);
2635
2.80M
        resx[1] = _mm256_srli_epi32(resx[1], 5);
2636
2.80M
        resx[1] = _mm256_packus_epi32(
2637
2.80M
            resx[1],
2638
2.80M
            _mm256_castsi128_si256(_mm256_extracti128_si256(resx[1], 1)));
2639
2.80M
      }
2640
4.79M
      resx[0] =
2641
4.79M
          _mm256_inserti128_si256(resx[0], _mm256_castsi256_si128(resx[1]),
2642
4.79M
                                  1);  // 16 16bit values
2643
2644
      // y calc
2645
4.79M
      resy[0] = _mm256_setzero_si256();
2646
4.79M
      if ((base_x < min_base_x)) {
2647
3.19M
        __m256i c256, y_c256, y_c_1_256, base_y_c256, mask256;
2648
3.19M
        r6 = _mm256_set1_epi32(r << 6);
2649
3.19M
        c256 = _mm256_add_epi32(j256, c1234);
2650
3.19M
        y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2651
3.19M
        base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
2652
3.19M
        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2653
3.19M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2654
3.19M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2655
3.19M
        c256 = _mm256_add_epi32(c256, c8);
2656
3.19M
        y_c_1_256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2657
3.19M
        base_y_c256 = _mm256_srai_epi32(y_c_1_256, frac_bits_y);
2658
3.19M
        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2659
3.19M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2660
3.19M
        _mm256_store_si256((__m256i *)(base_y_c + 8), base_y_c256);
2661
2662
3.19M
        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2663
3.19M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2664
3.19M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2665
3.19M
            left[base_y_c[6]], left[base_y_c[7]]));
2666
3.19M
        a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2667
3.19M
            left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
2668
3.19M
            left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2669
3.19M
            left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
2670
2671
3.19M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
2672
2673
3.19M
        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2674
3.19M
        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2675
3.19M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2676
2677
3.19M
        b = _mm256_mullo_epi32(diff, shift);
2678
3.19M
        res = _mm256_add_epi32(a32, b);
2679
3.19M
        res = _mm256_srli_epi32(res, 5);
2680
2681
3.19M
        resy[0] = _mm256_packus_epi32(
2682
3.19M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2683
2684
3.19M
        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2685
3.19M
            left[base_y_c[8]], left[base_y_c[9]], left[base_y_c[10]],
2686
3.19M
            left[base_y_c[11]], left[base_y_c[12]], left[base_y_c[13]],
2687
3.19M
            left[base_y_c[14]], left[base_y_c[15]]));
2688
3.19M
        a1_y = _mm256_cvtepu16_epi32(
2689
3.19M
            _mm_setr_epi16(left[base_y_c[8] + 1], left[base_y_c[9] + 1],
2690
3.19M
                           left[base_y_c[10] + 1], left[base_y_c[11] + 1],
2691
3.19M
                           left[base_y_c[12] + 1], left[base_y_c[13] + 1],
2692
3.19M
                           left[base_y_c[14] + 1], left[base_y_c[15] + 1]));
2693
3.19M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c_1_256, c3f), 1);
2694
2695
3.19M
        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2696
3.19M
        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2697
3.19M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2698
2699
3.19M
        b = _mm256_mullo_epi32(diff, shift);
2700
3.19M
        res = _mm256_add_epi32(a32, b);
2701
3.19M
        res = _mm256_srli_epi32(res, 5);
2702
2703
3.19M
        resy[1] = _mm256_packus_epi32(
2704
3.19M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2705
2706
3.19M
        resy[0] =
2707
3.19M
            _mm256_inserti128_si256(resy[0], _mm256_castsi256_si128(resy[1]),
2708
3.19M
                                    1);  // 16 16bit values
2709
3.19M
      }
2710
2711
4.79M
      resxy = _mm256_blendv_epi8(resx[0], resy[0],
2712
4.79M
                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
2713
4.79M
      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
2714
4.79M
    }  // for j
2715
2.28M
    dst += stride;
2716
2.28M
  }
2717
166k
}
2718
2719
static void highbd_dr_prediction_z2_HxW_avx2(
2720
    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2721
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2722
390k
    int dy) {
2723
  // here upsample_above and upsample_left are 0 by design of
2724
  // av1_use_intra_edge_upsample
2725
390k
  const int min_base_x = -1;
2726
390k
  const int min_base_y = -1;
2727
390k
  (void)upsample_above;
2728
390k
  (void)upsample_left;
2729
390k
  const int frac_bits_x = 6;
2730
390k
  const int frac_bits_y = 6;
2731
2732
  // pre-filter above pixels
2733
  // store in temp buffers:
2734
  //   above[x] * 32 + 16
2735
  //   above[x+1] - above[x]
2736
  // final pixels will be calculated as:
2737
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
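  // 16-bit-lane counterpart of highbd_dr_prediction_32bit_z2_HxW_avx2 above;
  // the dispatcher uses it only for bd < 12, where every interpolated value
  // still fits in a 16-bit lane.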
2738
390k
  __m256i a0_x, a1_x, a32, a16, c3f, c1;
2739
390k
  __m256i diff, min_base_y256, dy256, c1234, c0123;
2740
390k
  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
2741
2742
390k
  a16 = _mm256_set1_epi16(16);
2743
390k
  c1 = _mm256_srli_epi16(a16, 4);
2744
390k
  min_base_y256 = _mm256_set1_epi16(min_base_y);
2745
390k
  c3f = _mm256_set1_epi16(0x3f);
2746
390k
  dy256 = _mm256_set1_epi16(dy);
2747
390k
  c0123 =
2748
390k
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
2749
390k
  c1234 = _mm256_add_epi16(c0123, c1);
2750
2751
8.18M
  for (int r = 0; r < H; r++) {
2752
7.79M
    __m256i b, res, shift;
2753
7.79M
    __m256i resx, resy, ydx;
2754
7.79M
    __m256i resxy, j256, r6;
2755
7.79M
    __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
2756
7.79M
    int y = r + 1;
2757
7.79M
    ydx = _mm256_set1_epi16((short)(y * dx));
2758
2759
21.5M
    for (int j = 0; j < W; j += 16) {
2760
13.7M
      j256 = _mm256_set1_epi16(j);
2761
13.7M
      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
2762
13.7M
      int base_shift = 0;
2763
13.7M
      if ((base_x) < (min_base_x - 1)) {
2764
9.96M
        base_shift = (min_base_x - base_x - 1);
2765
9.96M
      }
2766
13.7M
      int base_min_diff = (min_base_x - base_x);
2767
13.7M
      if (base_min_diff > 16) {
2768
7.17M
        base_min_diff = 16;
2769
7.17M
      } else {
2770
6.57M
        if (base_min_diff < 0) base_min_diff = 0;
2771
6.57M
      }
2772
2773
13.7M
      if (base_shift < 8) {
2774
5.51M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2775
5.51M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
2776
5.51M
        a0_x128 =
2777
5.51M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2778
5.51M
        a1_x128 =
2779
5.51M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2780
2781
5.51M
        a0_x = _mm256_castsi128_si256(a0_x128);
2782
5.51M
        a1_x = _mm256_castsi128_si256(a1_x128);
2783
8.22M
      } else {
2784
8.22M
        a0_x = _mm256_setzero_si256();
2785
8.22M
        a1_x = _mm256_setzero_si256();
2786
8.22M
      }
2787
2788
13.7M
      int base_shift1 = 0;
2789
13.7M
      if (base_shift > 8) {
2790
8.05M
        base_shift1 = base_shift - 8;
2791
8.05M
      }
2792
13.7M
      if (base_shift1 < 8) {
2793
6.57M
        a0_1_x128 =
2794
6.57M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 8));
2795
6.57M
        a1_1_x128 =
2796
6.57M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 9));
2797
6.57M
        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
2798
6.57M
                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
2799
6.57M
        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
2800
6.57M
                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
2801
2802
6.57M
        a0_x = _mm256_inserti128_si256(a0_x, a0_1_x128, 1);
2803
6.57M
        a1_x = _mm256_inserti128_si256(a1_x, a1_1_x128, 1);
2804
6.57M
      }
2805
13.7M
      r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
2806
13.7M
      shift = _mm256_srli_epi16(
2807
13.7M
          _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
2808
2809
13.7M
      diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2810
13.7M
      a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2811
13.7M
      a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2812
2813
13.7M
      b = _mm256_mullo_epi16(diff, shift);
2814
13.7M
      res = _mm256_add_epi16(a32, b);
2815
13.7M
      resx = _mm256_srli_epi16(res, 5);  // 16 16-bit values
2816
2817
      // y calc
2818
13.7M
      resy = _mm256_setzero_si256();
2819
13.7M
      __m256i a0_y, a1_y, shifty;
2820
13.7M
      if ((base_x < min_base_x)) {
2821
10.5M
        __m256i c256, y_c256, base_y_c256, mask256, mul16;
2822
10.5M
        r6 = _mm256_set1_epi16(r << 6);
2823
10.5M
        c256 = _mm256_add_epi16(j256, c1234);
2824
10.5M
        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
2825
10.5M
                                 _mm256_srli_epi16(min_base_y256, 1));
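        // min_base_y256 is all ones (set1_epi16(-1)), so the srli produces
        // 0x7fff: the unsigned product c * dy is clamped to INT16_MAX,
        // presumably so a wrapped 16-bit product cannot turn into a bogus
        // positive base_y_c (and an out-of-range left[] index); clamped lanes
        // come out negative, are zeroed by the andnot below, and are
        // discarded by the resx/resy blend anyway.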
2826
10.5M
        y_c256 = _mm256_sub_epi16(r6, mul16);
2827
10.5M
        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
2828
10.5M
        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
2829
10.5M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2830
10.5M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2831
2832
10.5M
        a0_y = _mm256_setr_epi16(
2833
10.5M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2834
10.5M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2835
10.5M
            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
2836
10.5M
            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
2837
10.5M
            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
2838
10.5M
            left[base_y_c[15]]);
2839
10.5M
        base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
2840
10.5M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2841
2842
10.5M
        a1_y = _mm256_setr_epi16(
2843
10.5M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2844
10.5M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2845
10.5M
            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
2846
10.5M
            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
2847
10.5M
            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
2848
10.5M
            left[base_y_c[15]]);
2849
2850
10.5M
        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
2851
2852
10.5M
        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
2853
10.5M
        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
2854
10.5M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2855
2856
10.5M
        b = _mm256_mullo_epi16(diff, shifty);
2857
10.5M
        res = _mm256_add_epi16(a32, b);
2858
10.5M
        resy = _mm256_srli_epi16(res, 5);
2859
10.5M
      }
2860
2861
13.7M
      resxy = _mm256_blendv_epi8(resx, resy,
2862
13.7M
                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
2863
13.7M
      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
2864
13.7M
    }  // for j
2865
7.79M
    dst += stride;
2866
7.79M
  }
2867
390k
}
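
// For reference, a plain-C model of the per-pixel computation that the two
// HxW zone-2 kernels above vectorize.  This sketch is not part of the
// upstream file: the function name is illustrative, and it assumes the HxW
// case (no edge upsampling, 6 fractional bits, min_base_x = min_base_y = -1,
// with above[-1]/left[-1] addressable as the top-left corner sample).
static void highbd_dr_z2_scalar_model(uint16_t *dst, ptrdiff_t stride, int bw,
                                      int bh, const uint16_t *above,
                                      const uint16_t *left, int dx, int dy) {
  for (int r = 0; r < bh; ++r) {
    for (int c = 0; c < bw; ++c) {
      // Project pixel (r, c) onto the top edge, 1/64-pel precision.
      const int x = (c << 6) - (r + 1) * dx;
      if ((x >> 6) >= -1) {  // lands on above[]: the "resx" path
        const int base = x >> 6;
        const int shift = (x & 0x3f) >> 1;  // 1/32-pel weight, 0..31
        dst[c] = (uint16_t)((above[base] * 32 + 16 +
                             (above[base + 1] - above[base]) * shift) >>
                            5);
      } else {  // falls off the left end: project onto left[], the "resy" path
        const int y = (r << 6) - (c + 1) * dy;
        const int base = y >> 6;
        const int shift = (y & 0x3f) >> 1;
        dst[c] = (uint16_t)((left[base] * 32 + 16 +
                             (left[base + 1] - left[base]) * shift) >>
                            5);
      }
    }
    dst += stride;
  }
}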
2868
2869
// Directional prediction, zone 2: 90 < angle < 180
2870
void av1_highbd_dr_prediction_z2_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
2871
                                      int bh, const uint16_t *above,
2872
                                      const uint16_t *left, int upsample_above,
2873
                                      int upsample_left, int dx, int dy,
2874
1.49M
                                      int bd) {
2875
1.49M
  (void)bd;
2876
1.49M
  assert(dx > 0);
2877
1.49M
  assert(dy > 0);
2878
1.49M
  switch (bw) {
2879
455k
    case 4:
2880
455k
      if (bd < 12) {
2881
144k
        highbd_dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left,
2882
144k
                                         upsample_above, upsample_left, dx, dy);
2883
310k
      } else {
2884
310k
        highbd_dr_prediction_32bit_z2_Nx4_avx2(bh, dst, stride, above, left,
2885
310k
                                               upsample_above, upsample_left,
2886
310k
                                               dx, dy);
2887
310k
      }
2888
455k
      break;
2889
478k
    case 8:
2890
478k
      if (bd < 12) {
2891
213k
        highbd_dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left,
2892
213k
                                         upsample_above, upsample_left, dx, dy);
2893
264k
      } else {
2894
264k
        highbd_dr_prediction_32bit_z2_Nx8_avx2(bh, dst, stride, above, left,
2895
264k
                                               upsample_above, upsample_left,
2896
264k
                                               dx, dy);
2897
264k
      }
2898
478k
      break;
2899
557k
    default:
2900
557k
      if (bd < 12) {
2901
390k
        highbd_dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
2902
390k
                                         upsample_above, upsample_left, dx, dy);
2903
390k
      } else {
2904
166k
        highbd_dr_prediction_32bit_z2_HxW_avx2(bh, bw, dst, stride, above, left,
2905
166k
                                               upsample_above, upsample_left,
2906
166k
                                               dx, dy);
2907
166k
      }
2908
557k
      break;
2909
1.49M
  }
2910
1.49M
}
2911
2912
//  Directional prediction, zone 3 functions
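// Zone 3 predicts from the left edge only (180 < angle < 270).  Rather than
// filling columns directly, the helpers below run the zone-1 row kernels on
// left[] and then transpose the result into dst via the highbd_transpose*
// helpers.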
2913
static void highbd_dr_prediction_z3_4x4_avx2(uint16_t *dst, ptrdiff_t stride,
2914
                                             const uint16_t *left,
2915
                                             int upsample_left, int dy,
2916
235k
                                             int bd) {
2917
235k
  __m128i dstvec[4], d[4];
2918
235k
  if (bd < 12) {
2919
180k
    highbd_dr_prediction_z1_4xN_internal_avx2(4, dstvec, left, upsample_left,
2920
180k
                                              dy);
2921
180k
  } else {
2922
55.3k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(4, dstvec, left,
2923
55.3k
                                                    upsample_left, dy);
2924
55.3k
  }
2925
235k
  highbd_transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2],
2926
235k
                                   &dstvec[3], &d[0], &d[1], &d[2], &d[3]);
2927
235k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
2928
235k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
2929
235k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
2930
235k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
2931
235k
  return;
2932
235k
}
2933
2934
static void highbd_dr_prediction_z3_8x8_avx2(uint16_t *dst, ptrdiff_t stride,
2935
                                             const uint16_t *left,
2936
                                             int upsample_left, int dy,
2937
191k
                                             int bd) {
2938
191k
  __m128i dstvec[8], d[8];
2939
191k
  if (bd < 12) {
2940
93.5k
    highbd_dr_prediction_z1_8xN_internal_avx2(8, dstvec, left, upsample_left,
2941
93.5k
                                              dy);
2942
98.1k
  } else {
2943
98.1k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(8, dstvec, left,
2944
98.1k
                                                    upsample_left, dy);
2945
98.1k
  }
2946
191k
  highbd_transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2947
191k
                           &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
2948
191k
                           &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
2949
191k
                           &d[7]);
2950
1.72M
  for (int i = 0; i < 8; i++) {
2951
1.53M
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
2952
1.53M
  }
2953
191k
}
2954
2955
static void highbd_dr_prediction_z3_4x8_avx2(uint16_t *dst, ptrdiff_t stride,
2956
                                             const uint16_t *left,
2957
                                             int upsample_left, int dy,
2958
29.6k
                                             int bd) {
2959
29.6k
  __m128i dstvec[4], d[8];
2960
29.6k
  if (bd < 12) {
2961
15.5k
    highbd_dr_prediction_z1_8xN_internal_avx2(4, dstvec, left, upsample_left,
2962
15.5k
                                              dy);
2963
15.5k
  } else {
2964
14.0k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(4, dstvec, left,
2965
14.0k
                                                    upsample_left, dy);
2966
14.0k
  }
2967
2968
29.6k
  highbd_transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2969
29.6k
                               &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
2970
29.6k
                               &d[7]);
2971
266k
  for (int i = 0; i < 8; i++) {
2972
236k
    _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
2973
236k
  }
2974
29.6k
}
2975
2976
static void highbd_dr_prediction_z3_8x4_avx2(uint16_t *dst, ptrdiff_t stride,
2977
                                             const uint16_t *left,
2978
                                             int upsample_left, int dy,
2979
59.1k
                                             int bd) {
2980
59.1k
  __m128i dstvec[8], d[4];
2981
59.1k
  if (bd < 12) {
2982
31.5k
    highbd_dr_prediction_z1_4xN_internal_avx2(8, dstvec, left, upsample_left,
2983
31.5k
                                              dy);
2984
31.5k
  } else {
2985
27.5k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(8, dstvec, left,
2986
27.5k
                                                    upsample_left, dy);
2987
27.5k
  }
2988
2989
59.1k
  highbd_transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2990
59.1k
                               &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
2991
59.1k
                               &d[0], &d[1], &d[2], &d[3]);
2992
59.1k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
2993
59.1k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride), d[1]);
2994
59.1k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride), d[2]);
2995
59.1k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride), d[3]);
2996
59.1k
}
2997
2998
static void highbd_dr_prediction_z3_8x16_avx2(uint16_t *dst, ptrdiff_t stride,
2999
                                              const uint16_t *left,
3000
                                              int upsample_left, int dy,
3001
40.1k
                                              int bd) {
3002
40.1k
  __m256i dstvec[8], d[8];
3003
40.1k
  if (bd < 12) {
3004
26.5k
    highbd_dr_prediction_z1_16xN_internal_avx2(8, dstvec, left, upsample_left,
3005
26.5k
                                               dy);
3006
26.5k
  } else {
3007
13.5k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(8, dstvec, left,
3008
13.5k
                                                     upsample_left, dy);
3009
13.5k
  }
3010
40.1k
  highbd_transpose8x16_16x8_avx2(dstvec, d);
3011
360k
  for (int i = 0; i < 8; i++) {
3012
320k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3013
320k
                     _mm256_castsi256_si128(d[i]));
3014
320k
  }
3015
360k
  for (int i = 8; i < 16; i++) {
3016
320k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3017
320k
                     _mm256_extracti128_si256(d[i - 8], 1));
3018
320k
  }
3019
40.1k
}
3020
3021
static void highbd_dr_prediction_z3_16x8_avx2(uint16_t *dst, ptrdiff_t stride,
3022
                                              const uint16_t *left,
3023
                                              int upsample_left, int dy,
3024
81.4k
                                              int bd) {
3025
81.4k
  __m128i dstvec[16], d[16];
3026
81.4k
  if (bd < 12) {
3027
47.2k
    highbd_dr_prediction_z1_8xN_internal_avx2(16, dstvec, left, upsample_left,
3028
47.2k
                                              dy);
3029
47.2k
  } else {
3030
34.1k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(16, dstvec, left,
3031
34.1k
                                                    upsample_left, dy);
3032
34.1k
  }
3033
244k
  for (int i = 0; i < 16; i += 8) {
3034
162k
    highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
3035
162k
                             &dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
3036
162k
                             &dstvec[6 + i], &dstvec[7 + i], &d[0 + i],
3037
162k
                             &d[1 + i], &d[2 + i], &d[3 + i], &d[4 + i],
3038
162k
                             &d[5 + i], &d[6 + i], &d[7 + i]);
3039
162k
  }
3040
733k
  for (int i = 0; i < 8; i++) {
3041
651k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
3042
651k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 8), d[i + 8]);
3043
651k
  }
3044
81.4k
}
3045
3046
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3047
static void highbd_dr_prediction_z3_4x16_avx2(uint16_t *dst, ptrdiff_t stride,
3048
                                              const uint16_t *left,
3049
                                              int upsample_left, int dy,
3050
23.7k
                                              int bd) {
3051
23.7k
  __m256i dstvec[4], d[4], d1;
3052
23.7k
  if (bd < 12) {
3053
14.6k
    highbd_dr_prediction_z1_16xN_internal_avx2(4, dstvec, left, upsample_left,
3054
14.6k
                                               dy);
3055
14.6k
  } else {
3056
9.07k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(4, dstvec, left,
3057
9.07k
                                                     upsample_left, dy);
3058
9.07k
  }
3059
23.7k
  highbd_transpose4x16_avx2(dstvec, d);
3060
118k
  for (int i = 0; i < 4; i++) {
3061
94.8k
    _mm_storel_epi64((__m128i *)(dst + i * stride),
3062
94.8k
                     _mm256_castsi256_si128(d[i]));
3063
94.8k
    d1 = _mm256_bsrli_epi128(d[i], 8);
3064
94.8k
    _mm_storel_epi64((__m128i *)(dst + (i + 4) * stride),
3065
94.8k
                     _mm256_castsi256_si128(d1));
3066
94.8k
    _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
3067
94.8k
                     _mm256_extracti128_si256(d[i], 1));
3068
94.8k
    _mm_storel_epi64((__m128i *)(dst + (i + 12) * stride),
3069
94.8k
                     _mm256_extracti128_si256(d1, 1));
3070
94.8k
  }
3071
23.7k
}
3072
3073
static void highbd_dr_prediction_z3_16x4_avx2(uint16_t *dst, ptrdiff_t stride,
3074
                                              const uint16_t *left,
3075
                                              int upsample_left, int dy,
3076
56.4k
                                              int bd) {
3077
56.4k
  __m128i dstvec[16], d[8];
3078
56.4k
  if (bd < 12) {
3079
41.2k
    highbd_dr_prediction_z1_4xN_internal_avx2(16, dstvec, left, upsample_left,
3080
41.2k
                                              dy);
3081
41.2k
  } else {
3082
15.1k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(16, dstvec, left,
3083
15.1k
                                                    upsample_left, dy);
3084
15.1k
  }
3085
56.4k
  highbd_transpose16x4_8x8_sse2(dstvec, d);
3086
3087
56.4k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
3088
56.4k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride + 8), d[1]);
3089
56.4k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride), d[2]);
3090
56.4k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride + 8), d[3]);
3091
56.4k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride), d[4]);
3092
56.4k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride + 8), d[5]);
3093
56.4k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride), d[6]);
3094
56.4k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride + 8), d[7]);
3095
56.4k
}
3096
3097
static void highbd_dr_prediction_z3_8x32_avx2(uint16_t *dst, ptrdiff_t stride,
3098
                                              const uint16_t *left,
3099
                                              int upsample_left, int dy,
3100
12.1k
                                              int bd) {
3101
12.1k
  __m256i dstvec[16], d[16];
3102
12.1k
  if (bd < 12) {
3103
9.66k
    highbd_dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left,
3104
9.66k
                                               dy);
3105
9.66k
  } else {
3106
2.51k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(8, dstvec, left,
3107
2.51k
                                                     upsample_left, dy);
3108
2.51k
  }
3109
3110
36.5k
  for (int i = 0; i < 16; i += 8) {
3111
24.3k
    highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
3112
24.3k
  }
3113
3114
109k
  for (int i = 0; i < 8; i++) {
3115
97.4k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3116
97.4k
                     _mm256_castsi256_si128(d[i]));
3117
97.4k
  }
3118
109k
  for (int i = 0; i < 8; i++) {
3119
97.4k
    _mm_storeu_si128((__m128i *)(dst + (i + 8) * stride),
3120
97.4k
                     _mm256_extracti128_si256(d[i], 1));
3121
97.4k
  }
3122
109k
  for (int i = 8; i < 16; i++) {
3123
97.4k
    _mm_storeu_si128((__m128i *)(dst + (i + 8) * stride),
3124
97.4k
                     _mm256_castsi256_si128(d[i]));
3125
97.4k
  }
3126
109k
  for (int i = 8; i < 16; i++) {
3127
97.4k
    _mm_storeu_si128((__m128i *)(dst + (i + 16) * stride),
3128
97.4k
                     _mm256_extracti128_si256(d[i], 1));
3129
97.4k
  }
3130
12.1k
}
3131
3132
static void highbd_dr_prediction_z3_32x8_avx2(uint16_t *dst, ptrdiff_t stride,
3133
                                              const uint16_t *left,
3134
                                              int upsample_left, int dy,
3135
44.4k
                                              int bd) {
3136
44.4k
  __m128i dstvec[32], d[32];
3137
44.4k
  if (bd < 12) {
3138
36.1k
    highbd_dr_prediction_z1_8xN_internal_avx2(32, dstvec, left, upsample_left,
3139
36.1k
                                              dy);
3140
36.1k
  } else {
3141
8.24k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(32, dstvec, left,
3142
8.24k
                                                    upsample_left, dy);
3143
8.24k
  }
3144
3145
222k
  for (int i = 0; i < 32; i += 8) {
3146
177k
    highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
3147
177k
                             &dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
3148
177k
                             &dstvec[6 + i], &dstvec[7 + i], &d[0 + i],
3149
177k
                             &d[1 + i], &d[2 + i], &d[3 + i], &d[4 + i],
3150
177k
                             &d[5 + i], &d[6 + i], &d[7 + i]);
3151
177k
  }
3152
399k
  for (int i = 0; i < 8; i++) {
3153
355k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
3154
355k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 8), d[i + 8]);
3155
355k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 16]);
3156
355k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 24), d[i + 24]);
3157
355k
  }
3158
44.4k
}
3159
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3160
3161
static void highbd_dr_prediction_z3_16x16_avx2(uint16_t *dst, ptrdiff_t stride,
3162
                                               const uint16_t *left,
3163
                                               int upsample_left, int dy,
3164
91.6k
                                               int bd) {
3165
91.6k
  __m256i dstvec[16], d[16];
3166
91.6k
  if (bd < 12) {
3167
74.6k
    highbd_dr_prediction_z1_16xN_internal_avx2(16, dstvec, left, upsample_left,
3168
74.6k
                                               dy);
3169
74.6k
  } else {
3170
17.0k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(16, dstvec, left,
3171
17.0k
                                                     upsample_left, dy);
3172
17.0k
  }
3173
3174
91.6k
  highbd_transpose16x16_avx2(dstvec, d);
3175
3176
1.55M
  for (int i = 0; i < 16; i++) {
3177
1.46M
    _mm256_storeu_si256((__m256i *)(dst + i * stride), d[i]);
3178
1.46M
  }
3179
91.6k
}
3180
3181
static void highbd_dr_prediction_z3_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
3182
                                               const uint16_t *left,
3183
                                               int upsample_left, int dy,
3184
92.5k
                                               int bd) {
3185
92.5k
  __m256i dstvec[64], d[16];
3186
92.5k
  if (bd < 12) {
3187
87.2k
    highbd_dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left,
3188
87.2k
                                               dy);
3189
87.2k
  } else {
3190
5.37k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(32, dstvec, left,
3191
5.37k
                                                     upsample_left, dy);
3192
5.37k
  }
3193
92.5k
  highbd_transpose16x16_avx2(dstvec, d);
3194
1.57M
  for (int j = 0; j < 16; j++) {
3195
1.48M
    _mm256_storeu_si256((__m256i *)(dst + j * stride), d[j]);
3196
1.48M
  }
3197
92.5k
  highbd_transpose16x16_avx2(dstvec + 16, d);
3198
1.57M
  for (int j = 0; j < 16; j++) {
3199
1.48M
    _mm256_storeu_si256((__m256i *)(dst + j * stride + 16), d[j]);
3200
1.48M
  }
3201
92.5k
  highbd_transpose16x16_avx2(dstvec + 32, d);
3202
1.57M
  for (int j = 0; j < 16; j++) {
3203
1.48M
    _mm256_storeu_si256((__m256i *)(dst + (j + 16) * stride), d[j]);
3204
1.48M
  }
3205
92.5k
  highbd_transpose16x16_avx2(dstvec + 48, d);
3206
1.57M
  for (int j = 0; j < 16; j++) {
3207
1.48M
    _mm256_storeu_si256((__m256i *)(dst + (j + 16) * stride + 16), d[j]);
3208
1.48M
  }
3209
92.5k
}
3210
3211
static void highbd_dr_prediction_z3_64x64_avx2(uint16_t *dst, ptrdiff_t stride,
3212
                                               const uint16_t *left,
3213
                                               int upsample_left, int dy,
3214
33.1k
                                               int bd) {
3215
33.1k
  DECLARE_ALIGNED(16, uint16_t, dstT[64 * 64]);
3216
33.1k
  if (bd < 12) {
3217
28.9k
    highbd_dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
3218
28.9k
  } else {
3219
4.19k
    highbd_dr_prediction_32bit_z1_64xN_avx2(64, dstT, 64, left, upsample_left,
3220
4.19k
                                            dy);
3221
4.19k
  }
3222
33.1k
  highbd_transpose(dstT, 64, dst, stride, 64, 64);
3223
33.1k
}
3224
3225
static void highbd_dr_prediction_z3_16x32_avx2(uint16_t *dst, ptrdiff_t stride,
3226
                                               const uint16_t *left,
3227
                                               int upsample_left, int dy,
3228
25.5k
                                               int bd) {
3229
25.5k
  __m256i dstvec[32], d[32];
3230
25.5k
  if (bd < 12) {
3231
23.5k
    highbd_dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left,
3232
23.5k
                                               dy);
3233
23.5k
  } else {
3234
2.01k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(16, dstvec, left,
3235
2.01k
                                                     upsample_left, dy);
3236
2.01k
  }
3237
127k
  for (int i = 0; i < 32; i += 8) {
3238
102k
    highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
3239
102k
  }
3240
  // store
3241
76.7k
  for (int j = 0; j < 32; j += 16) {
3242
460k
    for (int i = 0; i < 8; i++) {
3243
409k
      _mm_storeu_si128((__m128i *)(dst + (i + j) * stride),
3244
409k
                       _mm256_castsi256_si128(d[(i + j)]));
3245
409k
    }
3246
460k
    for (int i = 0; i < 8; i++) {
3247
409k
      _mm_storeu_si128((__m128i *)(dst + (i + j) * stride + 8),
3248
409k
                       _mm256_castsi256_si128(d[(i + j) + 8]));
3249
409k
    }
3250
460k
    for (int i = 8; i < 16; i++) {
3251
409k
      _mm256_storeu_si256(
3252
409k
          (__m256i *)(dst + (i + j) * stride),
3253
409k
          _mm256_inserti128_si256(
3254
409k
              d[(i + j)], _mm256_extracti128_si256(d[(i + j) - 8], 1), 0));
3255
409k
    }
3256
51.1k
  }
3257
25.5k
}
3258
3259
static void highbd_dr_prediction_z3_32x16_avx2(uint16_t *dst, ptrdiff_t stride,
3260
                                               const uint16_t *left,
3261
                                               int upsample_left, int dy,
3262
23.9k
                                               int bd) {
3263
23.9k
  __m256i dstvec[32], d[16];
3264
23.9k
  if (bd < 12) {
3265
21.3k
    highbd_dr_prediction_z1_16xN_internal_avx2(32, dstvec, left, upsample_left,
3266
21.3k
                                               dy);
3267
21.3k
  } else {
3268
2.67k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(32, dstvec, left,
3269
2.67k
                                                     upsample_left, dy);
3270
2.67k
  }
3271
71.9k
  for (int i = 0; i < 32; i += 16) {
3272
47.9k
    highbd_transpose16x16_avx2((dstvec + i), d);
3273
815k
    for (int j = 0; j < 16; j++) {
3274
767k
      _mm256_storeu_si256((__m256i *)(dst + j * stride + i), d[j]);
3275
767k
    }
3276
47.9k
  }
3277
23.9k
}
3278
3279
static void highbd_dr_prediction_z3_32x64_avx2(uint16_t *dst, ptrdiff_t stride,
3280
                                               const uint16_t *left,
3281
                                               int upsample_left, int dy,
3282
2.05k
                                               int bd) {
3283
2.05k
  uint16_t dstT[64 * 32];
3284
2.05k
  if (bd < 12) {
3285
1.63k
    highbd_dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
3286
1.63k
  } else {
3287
421
    highbd_dr_prediction_32bit_z1_64xN_avx2(32, dstT, 64, left, upsample_left,
3288
421
                                            dy);
3289
421
  }
3290
2.05k
  highbd_transpose(dstT, 64, dst, stride, 32, 64);
3291
2.05k
}
3292
3293
static void highbd_dr_prediction_z3_64x32_avx2(uint16_t *dst, ptrdiff_t stride,
3294
                                               const uint16_t *left,
3295
                                               int upsample_left, int dy,
3296
2.54k
                                               int bd) {
3297
2.54k
  DECLARE_ALIGNED(16, uint16_t, dstT[32 * 64]);
3298
2.54k
  highbd_dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy, bd);
3299
2.54k
  highbd_transpose(dstT, 32, dst, stride, 64, 32);
3300
2.54k
  return;
3301
2.54k
}
3302
3303
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3304
static void highbd_dr_prediction_z3_16x64_avx2(uint16_t *dst, ptrdiff_t stride,
3305
                                               const uint16_t *left,
3306
                                               int upsample_left, int dy,
3307
5.54k
                                               int bd) {
3308
5.54k
  DECLARE_ALIGNED(16, uint16_t, dstT[64 * 16]);
3309
5.54k
  if (bd < 12) {
3310
5.02k
    highbd_dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
3311
5.02k
  } else {
3312
523
    highbd_dr_prediction_32bit_z1_64xN_avx2(16, dstT, 64, left, upsample_left,
3313
523
                                            dy);
3314
523
  }
3315
5.54k
  highbd_transpose(dstT, 64, dst, stride, 16, 64);
3316
5.54k
}
3317
3318
static void highbd_dr_prediction_z3_64x16_avx2(uint16_t *dst, ptrdiff_t stride,
3319
                                               const uint16_t *left,
3320
                                               int upsample_left, int dy,
3321
12.4k
                                               int bd) {
3322
12.4k
  __m256i dstvec[64], d[16];
3323
12.4k
  if (bd < 12) {
3324
11.7k
    highbd_dr_prediction_z1_16xN_internal_avx2(64, dstvec, left, upsample_left,
3325
11.7k
                                               dy);
3326
11.7k
  } else {
3327
619
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(64, dstvec, left,
3328
619
                                                     upsample_left, dy);
3329
619
  }
3330
62.0k
  for (int i = 0; i < 64; i += 16) {
3331
49.6k
    highbd_transpose16x16_avx2((dstvec + i), d);
3332
844k
    for (int j = 0; j < 16; j++) {
3333
794k
      _mm256_storeu_si256((__m256i *)(dst + j * stride + i), d[j]);
3334
794k
    }
3335
49.6k
  }
3336
12.4k
}
3337
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3338
3339
void av1_highbd_dr_prediction_z3_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
3340
                                      int bh, const uint16_t *above,
3341
                                      const uint16_t *left, int upsample_left,
3342
1.06M
                                      int dx, int dy, int bd) {
3343
1.06M
  (void)above;
3344
1.06M
  (void)dx;
3345
3346
1.06M
  assert(dx == 1);
3347
1.06M
  assert(dy > 0);
3348
1.06M
  if (bw == bh) {
3349
644k
    switch (bw) {
3350
235k
      case 4:
3351
235k
        highbd_dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy,
3352
235k
                                         bd);
3353
235k
        break;
3354
191k
      case 8:
3355
191k
        highbd_dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy,
3356
191k
                                         bd);
3357
191k
        break;
3358
91.6k
      case 16:
3359
91.6k
        highbd_dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy,
3360
91.6k
                                           bd);
3361
91.6k
        break;
3362
92.5k
      case 32:
3363
92.5k
        highbd_dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy,
3364
92.5k
                                           bd);
3365
92.5k
        break;
3366
33.1k
      case 64:
3367
33.1k
        highbd_dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy,
3368
33.1k
                                           bd);
3369
33.1k
        break;
3370
644k
    }
3371
644k
  } else {
3372
419k
    if (bw < bh) {
3373
138k
      if (bw + bw == bh) {
3374
97.3k
        switch (bw) {
3375
29.6k
          case 4:
3376
29.6k
            highbd_dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left,
3377
29.6k
                                             dy, bd);
3378
29.6k
            break;
3379
40.1k
          case 8:
3380
40.1k
            highbd_dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left,
3381
40.1k
                                              dy, bd);
3382
40.1k
            break;
3383
25.5k
          case 16:
3384
25.5k
            highbd_dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left,
3385
25.5k
                                               dy, bd);
3386
25.5k
            break;
3387
2.05k
          case 32:
3388
2.05k
            highbd_dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left,
3389
2.05k
                                               dy, bd);
3390
2.05k
            break;
3391
97.3k
        }
3392
97.3k
      } else {
3393
41.4k
        switch (bw) {
3394
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3395
23.7k
          case 4:
3396
23.7k
            highbd_dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left,
3397
23.7k
                                              dy, bd);
3398
23.7k
            break;
3399
12.1k
          case 8:
3400
12.1k
            highbd_dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left,
3401
12.1k
                                              dy, bd);
3402
12.1k
            break;
3403
5.54k
          case 16:
3404
5.54k
            highbd_dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left,
3405
5.54k
                                               dy, bd);
3406
5.54k
            break;
3407
41.4k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3408
41.4k
        }
3409
41.4k
      }
3410
280k
    } else {
3411
280k
      if (bh + bh == bw) {
3412
167k
        switch (bh) {
3413
59.1k
          case 4:
3414
59.1k
            highbd_dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left,
3415
59.1k
                                             dy, bd);
3416
59.1k
            break;
3417
81.4k
          case 8:
3418
81.4k
            highbd_dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left,
3419
81.4k
                                              dy, bd);
3420
81.4k
            break;
3421
23.9k
          case 16:
3422
23.9k
            highbd_dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left,
3423
23.9k
                                               dy, bd);
3424
23.9k
            break;
3425
2.54k
          case 32:
3426
2.54k
            highbd_dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left,
3427
2.54k
                                               dy, bd);
3428
2.54k
            break;
3429
167k
        }
3430
167k
      } else {
3431
113k
        switch (bh) {
3432
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3433
56.4k
          case 4:
3434
56.4k
            highbd_dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left,
3435
56.4k
                                              dy, bd);
3436
56.4k
            break;
3437
44.4k
          case 8:
3438
44.4k
            highbd_dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left,
3439
44.4k
                                              dy, bd);
3440
44.4k
            break;
3441
12.4k
          case 16:
3442
12.4k
            highbd_dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left,
3443
12.4k
                                               dy, bd);
3444
12.4k
            break;
3445
113k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3446
113k
        }
3447
113k
      }
3448
280k
    }
3449
419k
  }
3450
1.06M
  return;
3451
1.06M
}
3452
#endif  // CONFIG_AV1_HIGHBITDEPTH
3453
3454
// Low bit depth functions
3455
static DECLARE_ALIGNED(32, uint8_t, BaseMask[33][32]) = {
3456
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3457
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3458
  { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3459
    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3460
  { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3461
    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3462
  { 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3463
    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3464
  { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3465
    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3466
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3467
    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3468
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3469
    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3470
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3471
    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0 },
3472
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0,
3473
    0,    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0 },
3474
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0,
3475
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0 },
3476
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
3477
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3478
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3479
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3480
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3481
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3482
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3483
    0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3484
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3485
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3486
    0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0,
3487
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3488
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3489
    0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0,
3490
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3491
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3492
    0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,
3493
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3494
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3495
    0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,
3496
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3497
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3498
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,
3499
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3500
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3501
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,
3502
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3503
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3504
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,
3505
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3506
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3507
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,
3508
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3509
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3510
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
3511
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3512
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3513
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3514
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3515
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3516
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3517
    0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0 },
3518
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3519
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3520
    0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0 },
3521
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3522
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3523
    0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0 },
3524
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3525
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3526
    0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0 },
3527
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3528
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3529
    0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0 },
3530
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3531
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3532
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0 },
3533
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3534
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3535
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0 },
3536
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3537
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3538
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0 },
3539
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3540
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3541
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0 },
3542
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3543
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3544
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
3545
};
3546
3547
/* clang-format on */
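// BaseMask[n] has its first n bytes set to 0xff.  The zone-1 kernels below use
// it with _mm_blendv_epi8 to keep the first base_max_diff interpolated pixels
// of a row and replace the rest with a_mbase_x, the replicated edge sample
// above[max_base_x].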
3548
static AOM_FORCE_INLINE void dr_prediction_z1_HxW_internal_avx2(
3549
    int H, int W, __m128i *dst, const uint8_t *above, int upsample_above,
3550
829k
    int dx) {
3551
829k
  const int frac_bits = 6 - upsample_above;
3552
829k
  const int max_base_x = ((W + H) - 1) << upsample_above;
3553
3554
829k
  assert(dx > 0);
3555
  // pre-filter above pixels
3556
  // store in temp buffers:
3557
  //   above[x] * 32 + 16
3558
  //   above[x+1] - above[x]
3559
  // final pixels will be calculated as:
3560
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
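  // Per row r (no upsampling): with x = (r + 1) * dx,
  //   base  = x >> 6           first edge sample used by this row
  //   shift = (x & 0x3f) >> 1  1/32-pel interpolation weight, 0..31
  // and column c of the row becomes
  //   (above[base + c] * (32 - shift) + above[base + c + 1] * shift + 16) >> 5
  // while base + c >= max_base_x falls back to above[max_base_x] via the
  // BaseMask blend at the end of the loop.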
3561
829k
  __m256i a0, a1, a32, a16;
3562
829k
  __m256i diff, c3f;
3563
829k
  __m128i a_mbase_x;
3564
3565
829k
  a16 = _mm256_set1_epi16(16);
3566
829k
  a_mbase_x = _mm_set1_epi8((int8_t)above[max_base_x]);
3567
829k
  c3f = _mm256_set1_epi16(0x3f);
3568
3569
829k
  int x = dx;
3570
11.0M
  for (int r = 0; r < W; r++) {
3571
10.2M
    __m256i b, res, shift;
3572
10.2M
    __m128i res1, a0_128, a1_128;
3573
3574
10.2M
    int base = x >> frac_bits;
3575
10.2M
    int base_max_diff = (max_base_x - base) >> upsample_above;
3576
10.2M
    if (base_max_diff <= 0) {
3577
10.7k
      for (int i = r; i < W; ++i) {
3578
7.14k
        dst[i] = a_mbase_x;  // fill the remaining rows with the last edge pixel
3579
7.14k
      }
3580
3.61k
      return;
3581
3.61k
    }
3582
10.2M
    if (base_max_diff > H) base_max_diff = H;
3583
10.2M
    a0_128 = _mm_loadu_si128((__m128i *)(above + base));
3584
10.2M
    a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
3585
3586
10.2M
    if (upsample_above) {
3587
1.67M
      a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)EvenOddMaskx[0]);
3588
1.67M
      a1_128 = _mm_srli_si128(a0_128, 8);
3589
3590
1.67M
      shift = _mm256_srli_epi16(
3591
1.67M
          _mm256_and_si256(
3592
1.67M
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
3593
1.67M
          1);
3594
8.52M
    } else {
3595
8.52M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3596
8.52M
    }
3597
10.2M
    a0 = _mm256_cvtepu8_epi16(a0_128);
3598
10.2M
    a1 = _mm256_cvtepu8_epi16(a1_128);
3599
3600
10.2M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3601
10.2M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3602
10.2M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3603
3604
10.2M
    b = _mm256_mullo_epi16(diff, shift);
3605
10.2M
    res = _mm256_add_epi16(a32, b);
3606
10.2M
    res = _mm256_srli_epi16(res, 5);
3607
3608
10.2M
    res = _mm256_packus_epi16(
3609
10.2M
        res, _mm256_castsi128_si256(
3610
10.2M
                 _mm256_extracti128_si256(res, 1)));  // pack down to 8 bit
3611
10.2M
    res1 = _mm256_castsi256_si128(res);               // 16 8bit values
3612
3613
10.2M
    dst[r] =
3614
10.2M
        _mm_blendv_epi8(a_mbase_x, res1, *(__m128i *)BaseMask[base_max_diff]);
3615
10.2M
    x += dx;
3616
10.2M
  }
3617
829k
}
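Editor's note: the comment block at the top of dr_prediction_z1_HxW_internal_avx2 describes the two-tap edge interpolation that all of the z1/z2/z3 kernels in this file share. As a reading aid, here is a minimal scalar sketch of one zone-1 output row, assuming upsample_above == 0 (so frac_bits == 6); dr_z1_row_scalar is a hypothetical name, not part of libaom.

#include <stdint.h>

/* One zone-1 row: the r-th row uses x = (r + 1) * dx. */
static void dr_z1_row_scalar(uint8_t *dst, int width, const uint8_t *above,
                             int max_base_x, int x) {
  const int base = x >> 6;            /* integer position on the top edge */
  const int shift = (x & 0x3f) >> 1;  /* 5-bit fractional weight, 0..31   */
  for (int c = 0; c < width; ++c) {
    const int p = base + c;
    if (p >= max_base_x) {
      dst[c] = above[max_base_x];     /* past the edge: replicate the last pixel */
    } else {
      dst[c] = (uint8_t)((above[p] * 32 + 16 +
                          (above[p + 1] - above[p]) * shift) >> 5);
    }
  }
}

The AVX2 version computes 16 (or 32/64) such columns at once: a0/a1 hold above[p] and above[p + 1] widened to 16 bits, and the BaseMask[base_max_diff] blend implements the p >= max_base_x replication.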
3618
3619
static void dr_prediction_z1_4xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3620
                                      const uint8_t *above, int upsample_above,
3621
118k
                                      int dx) {
3622
118k
  __m128i dstvec[16];
3623
3624
118k
  dr_prediction_z1_HxW_internal_avx2(4, N, dstvec, above, upsample_above, dx);
3625
828k
  for (int i = 0; i < N; i++) {
3626
709k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(dstvec[i]);
3627
709k
  }
3628
118k
}
3629
3630
static void dr_prediction_z1_8xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3631
                                      const uint8_t *above, int upsample_above,
3632
107k
                                      int dx) {
3633
107k
  __m128i dstvec[32];
3634
3635
107k
  dr_prediction_z1_HxW_internal_avx2(8, N, dstvec, above, upsample_above, dx);
3636
1.15M
  for (int i = 0; i < N; i++) {
3637
1.04M
    _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
3638
1.04M
  }
3639
107k
}
3640
3641
static void dr_prediction_z1_16xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3642
                                       const uint8_t *above, int upsample_above,
3643
107k
                                       int dx) {
3644
107k
  __m128i dstvec[64];
3645
3646
107k
  dr_prediction_z1_HxW_internal_avx2(16, N, dstvec, above, upsample_above, dx);
3647
1.62M
  for (int i = 0; i < N; i++) {
3648
1.51M
    _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
3649
1.51M
  }
3650
107k
}
3651
3652
static AOM_FORCE_INLINE void dr_prediction_z1_32xN_internal_avx2(
3653
173k
    int N, __m256i *dstvec, const uint8_t *above, int upsample_above, int dx) {
3654
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
3655
173k
  (void)upsample_above;
3656
173k
  const int frac_bits = 6;
3657
173k
  const int max_base_x = ((32 + N) - 1);
3658
3659
  // pre-filter above pixels
3660
  // store in temp buffers:
3661
  //   above[x] * 32 + 16
3662
  //   above[x+1] - above[x]
3663
  // final pixels will be calculated as:
3664
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3665
173k
  __m256i a0, a1, a32, a16;
3666
173k
  __m256i a_mbase_x, diff, c3f;
3667
3668
173k
  a16 = _mm256_set1_epi16(16);
3669
173k
  a_mbase_x = _mm256_set1_epi8((int8_t)above[max_base_x]);
3670
173k
  c3f = _mm256_set1_epi16(0x3f);
3671
3672
173k
  int x = dx;
3673
4.89M
  for (int r = 0; r < N; r++) {
3674
4.71M
    __m256i b, res, res16[2];
3675
4.71M
    __m128i a0_128, a1_128;
3676
3677
4.71M
    int base = x >> frac_bits;
3678
4.71M
    int base_max_diff = (max_base_x - base);
3679
4.71M
    if (base_max_diff <= 0) {
3680
0
      for (int i = r; i < N; ++i) {
3681
0
        dstvec[i] = a_mbase_x;  // save 32 values
3682
0
      }
3683
0
      return;
3684
0
    }
3685
4.71M
    if (base_max_diff > 32) base_max_diff = 32;
3686
4.71M
    __m256i shift =
3687
4.71M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3688
3689
14.1M
    for (int j = 0, jj = 0; j < 32; j += 16, jj++) {
3690
9.43M
      int mdiff = base_max_diff - j;
3691
9.43M
      if (mdiff <= 0) {
3692
588
        res16[jj] = a_mbase_x;
3693
9.43M
      } else {
3694
9.43M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
3695
9.43M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + j + 1));
3696
9.43M
        a0 = _mm256_cvtepu8_epi16(a0_128);
3697
9.43M
        a1 = _mm256_cvtepu8_epi16(a1_128);
3698
3699
9.43M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3700
9.43M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3701
9.43M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3702
9.43M
        b = _mm256_mullo_epi16(diff, shift);
3703
3704
9.43M
        res = _mm256_add_epi16(a32, b);
3705
9.43M
        res = _mm256_srli_epi16(res, 5);
3706
9.43M
        res16[jj] = _mm256_packus_epi16(
3707
9.43M
            res, _mm256_castsi128_si256(
3708
9.43M
                     _mm256_extracti128_si256(res, 1)));  // 16 8bit values
3709
9.43M
      }
3710
9.43M
    }
3711
4.71M
    res16[1] =
3712
4.71M
        _mm256_inserti128_si256(res16[0], _mm256_castsi256_si128(res16[1]),
3713
4.71M
                                1);  // 32 8bit values
3714
3715
4.71M
    dstvec[r] = _mm256_blendv_epi8(
3716
4.71M
        a_mbase_x, res16[1],
3717
4.71M
        *(__m256i *)BaseMask[base_max_diff]);  // 32 8bit values
3718
4.71M
    x += dx;
3719
4.71M
  }
3720
173k
}
3721
3722
static void dr_prediction_z1_32xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3723
                                       const uint8_t *above, int upsample_above,
3724
67.8k
                                       int dx) {
3725
67.8k
  __m256i dstvec[64];
3726
67.8k
  dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above, dx);
3727
1.93M
  for (int i = 0; i < N; i++) {
3728
1.86M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
3729
1.86M
  }
3730
67.8k
}
3731
3732
static void dr_prediction_z1_64xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3733
                                       const uint8_t *above, int upsample_above,
3734
39.5k
                                       int dx) {
3735
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
3736
39.5k
  (void)upsample_above;
3737
39.5k
  const int frac_bits = 6;
3738
39.5k
  const int max_base_x = ((64 + N) - 1);
3739
3740
  // pre-filter above pixels
3741
  // store in temp buffers:
3742
  //   above[x] * 32 + 16
3743
  //   above[x+1] - above[x]
3744
  // final pixels will be calculated as:
3745
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3746
39.5k
  __m256i a0, a1, a32, a16;
3747
39.5k
  __m256i a_mbase_x, diff, c3f;
3748
39.5k
  __m128i max_base_x128, base_inc128, mask128;
3749
3750
39.5k
  a16 = _mm256_set1_epi16(16);
3751
39.5k
  a_mbase_x = _mm256_set1_epi8((int8_t)above[max_base_x]);
3752
39.5k
  max_base_x128 = _mm_set1_epi8(max_base_x);
3753
39.5k
  c3f = _mm256_set1_epi16(0x3f);
3754
3755
39.5k
  int x = dx;
3756
2.15M
  for (int r = 0; r < N; r++, dst += stride) {
3757
2.11M
    __m256i b, res;
3758
2.11M
    int base = x >> frac_bits;
3759
2.11M
    if (base >= max_base_x) {
3760
0
      for (int i = r; i < N; ++i) {
3761
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 32 values
3762
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
3763
0
        dst += stride;
3764
0
      }
3765
0
      return;
3766
0
    }
3767
3768
2.11M
    __m256i shift =
3769
2.11M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3770
3771
2.11M
    __m128i a0_128, a1_128, res128;
3772
10.5M
    for (int j = 0; j < 64; j += 16) {
3773
8.47M
      int mdif = max_base_x - (base + j);
3774
8.47M
      if (mdif <= 0) {
3775
2.92k
        _mm_storeu_si128((__m128i *)(dst + j),
3776
2.92k
                         _mm256_castsi256_si128(a_mbase_x));
3777
8.47M
      } else {
3778
8.47M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
3779
8.47M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
3780
8.47M
        a0 = _mm256_cvtepu8_epi16(a0_128);
3781
8.47M
        a1 = _mm256_cvtepu8_epi16(a1_128);
3782
3783
8.47M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3784
8.47M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3785
8.47M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3786
8.47M
        b = _mm256_mullo_epi16(diff, shift);
3787
3788
8.47M
        res = _mm256_add_epi16(a32, b);
3789
8.47M
        res = _mm256_srli_epi16(res, 5);
3790
8.47M
        res = _mm256_packus_epi16(
3791
8.47M
            res, _mm256_castsi128_si256(
3792
8.47M
                     _mm256_extracti128_si256(res, 1)));  // 16 8bit values
3793
3794
8.47M
        base_inc128 =
3795
8.47M
            _mm_setr_epi8((int8_t)(base + j), (int8_t)(base + j + 1),
3796
8.47M
                          (int8_t)(base + j + 2), (int8_t)(base + j + 3),
3797
8.47M
                          (int8_t)(base + j + 4), (int8_t)(base + j + 5),
3798
8.47M
                          (int8_t)(base + j + 6), (int8_t)(base + j + 7),
3799
8.47M
                          (int8_t)(base + j + 8), (int8_t)(base + j + 9),
3800
8.47M
                          (int8_t)(base + j + 10), (int8_t)(base + j + 11),
3801
8.47M
                          (int8_t)(base + j + 12), (int8_t)(base + j + 13),
3802
8.47M
                          (int8_t)(base + j + 14), (int8_t)(base + j + 15));
3803
3804
8.47M
        mask128 = _mm_cmpgt_epi8(_mm_subs_epu8(max_base_x128, base_inc128),
3805
8.47M
                                 _mm_setzero_si128());
3806
8.47M
        res128 = _mm_blendv_epi8(_mm256_castsi256_si128(a_mbase_x),
3807
8.47M
                                 _mm256_castsi256_si128(res), mask128);
3808
8.47M
        _mm_storeu_si128((__m128i *)(dst + j), res128);
3809
8.47M
      }
3810
8.47M
    }
3811
2.11M
    x += dx;
3812
2.11M
  }
3813
39.5k
}
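Editor's note: the 64-wide kernel above does not use the BaseMask[] table (whose entries are 32 bytes wide); it builds the out-of-range mask arithmetically for each 16-pixel chunk with a saturating subtract and a compare. Per byte this reduces to the scalar test below; edge_mask_scalar is a hypothetical name.

#include <stdint.h>

static void edge_mask_scalar(uint8_t mask[16], int base, int j, int max_base_x) {
  for (int i = 0; i < 16; ++i) {
    /* _mm_cmpgt_epi8(_mm_subs_epu8(max_base_x, base + j + i), 0):
       keep the interpolated pixel while base + j + i < max_base_x,
       otherwise fall back to above[max_base_x]. */
    mask[i] = (base + j + i < max_base_x) ? 0xff : 0x00;
  }
}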
3814
3815
// Directional prediction, zone 1: 0 < angle < 90
3816
void av1_dr_prediction_z1_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
3817
                               const uint8_t *above, const uint8_t *left,
3818
411k
                               int upsample_above, int dx, int dy) {
3819
411k
  (void)left;
3820
411k
  (void)dy;
3821
411k
  switch (bw) {
3822
118k
    case 4:
3823
118k
      dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above, dx);
3824
118k
      break;
3825
107k
    case 8:
3826
107k
      dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above, dx);
3827
107k
      break;
3828
107k
    case 16:
3829
107k
      dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above, dx);
3830
107k
      break;
3831
65.5k
    case 32:
3832
65.5k
      dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above, dx);
3833
65.5k
      break;
3834
13.0k
    case 64:
3835
13.0k
      dr_prediction_z1_64xN_avx2(bh, dst, stride, above, upsample_above, dx);
3836
13.0k
      break;
3837
0
    default: break;
3838
411k
  }
3839
411k
  return;
3840
411k
}
3841
3842
static void dr_prediction_z2_Nx4_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3843
                                      const uint8_t *above, const uint8_t *left,
3844
                                      int upsample_above, int upsample_left,
3845
342k
                                      int dx, int dy) {
3846
342k
  const int min_base_x = -(1 << upsample_above);
3847
342k
  const int min_base_y = -(1 << upsample_left);
3848
342k
  const int frac_bits_x = 6 - upsample_above;
3849
342k
  const int frac_bits_y = 6 - upsample_left;
3850
3851
342k
  assert(dx > 0);
3852
  // pre-filter above pixels
3853
  // store in temp buffers:
3854
  //   above[x] * 32 + 16
3855
  //   above[x+1] - above[x]
3856
  // final pixels will be calculated as:
3857
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3858
342k
  __m128i a0_x, a1_x, a32, a16, diff;
3859
342k
  __m128i c3f, min_base_y128, c1234, dy128;
3860
3861
342k
  a16 = _mm_set1_epi16(16);
3862
342k
  c3f = _mm_set1_epi16(0x3f);
3863
342k
  min_base_y128 = _mm_set1_epi16(min_base_y);
3864
342k
  c1234 = _mm_setr_epi16(0, 1, 2, 3, 4, 0, 0, 0);
3865
342k
  dy128 = _mm_set1_epi16(dy);
3866
3867
2.23M
  for (int r = 0; r < N; r++) {
3868
1.89M
    __m128i b, res, shift, r6, ydx;
3869
1.89M
    __m128i resx, resy, resxy;
3870
1.89M
    __m128i a0_x128, a1_x128;
3871
1.89M
    int y = r + 1;
3872
1.89M
    int base_x = (-y * dx) >> frac_bits_x;
3873
1.89M
    int base_shift = 0;
3874
1.89M
    if (base_x < (min_base_x - 1)) {
3875
1.49M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
3876
1.49M
    }
3877
1.89M
    int base_min_diff =
3878
1.89M
        (min_base_x - base_x + upsample_above) >> upsample_above;
3879
1.89M
    if (base_min_diff > 4) {
3880
1.02M
      base_min_diff = 4;
3881
1.02M
    } else {
3882
863k
      if (base_min_diff < 0) base_min_diff = 0;
3883
863k
    }
3884
3885
1.89M
    if (base_shift > 3) {
3886
1.02M
      a0_x = _mm_setzero_si128();
3887
1.02M
      a1_x = _mm_setzero_si128();
3888
1.02M
      shift = _mm_setzero_si128();
3889
1.02M
    } else {
3890
863k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
3891
863k
      ydx = _mm_set1_epi16(y * dx);
3892
863k
      r6 = _mm_slli_epi16(c1234, 6);
3893
3894
863k
      if (upsample_above) {
3895
253k
        a0_x128 =
3896
253k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
3897
253k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
3898
3899
253k
        shift = _mm_srli_epi16(
3900
253k
            _mm_and_si128(
3901
253k
                _mm_slli_epi16(_mm_sub_epi16(r6, ydx), upsample_above), c3f),
3902
253k
            1);
3903
610k
      } else {
3904
610k
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
3905
610k
        a1_x128 = _mm_srli_si128(a0_x128, 1);
3906
3907
610k
        shift = _mm_srli_epi16(_mm_and_si128(_mm_sub_epi16(r6, ydx), c3f), 1);
3908
610k
      }
3909
863k
      a0_x = _mm_cvtepu8_epi16(a0_x128);
3910
863k
      a1_x = _mm_cvtepu8_epi16(a1_x128);
3911
863k
    }
3912
    // y calc
3913
1.89M
    __m128i a0_y, a1_y, shifty;
3914
1.89M
    if (base_x < min_base_x) {
3915
1.66M
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
3916
1.66M
      __m128i y_c128, base_y_c128, mask128, c1234_;
3917
1.66M
      c1234_ = _mm_srli_si128(c1234, 2);
3918
1.66M
      r6 = _mm_set1_epi16(r << 6);
3919
1.66M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234_, dy128));
3920
1.66M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
3921
1.66M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
3922
1.66M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
3923
1.66M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
3924
3925
1.66M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
3926
1.66M
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
3927
1.66M
      base_y_c128 = _mm_add_epi16(base_y_c128, _mm_srli_epi16(a16, 4));
3928
1.66M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
3929
1.66M
      a1_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
3930
1.66M
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
3931
3932
1.66M
      if (upsample_left) {
3933
987k
        shifty = _mm_srli_epi16(
3934
987k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
3935
987k
      } else {
3936
679k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
3937
679k
      }
3938
1.66M
      a0_x = _mm_unpacklo_epi64(a0_x, a0_y);
3939
1.66M
      a1_x = _mm_unpacklo_epi64(a1_x, a1_y);
3940
1.66M
      shift = _mm_unpacklo_epi64(shift, shifty);
3941
1.66M
    }
3942
3943
1.89M
    diff = _mm_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
3944
1.89M
    a32 = _mm_slli_epi16(a0_x, 5);     // a[x] * 32
3945
1.89M
    a32 = _mm_add_epi16(a32, a16);     // a[x] * 32 + 16
3946
3947
1.89M
    b = _mm_mullo_epi16(diff, shift);
3948
1.89M
    res = _mm_add_epi16(a32, b);
3949
1.89M
    res = _mm_srli_epi16(res, 5);
3950
3951
1.89M
    resx = _mm_packus_epi16(res, res);
3952
1.89M
    resy = _mm_srli_si128(resx, 4);
3953
3954
1.89M
    resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
3955
1.89M
    *(int *)(dst) = _mm_cvtsi128_si32(resxy);
3956
1.89M
    dst += stride;
3957
1.89M
  }
3958
342k
}
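Editor's note: zone 2 blends two projections: columns whose projection still lands on the top edge are interpolated from above[], the remaining columns from left[]. A minimal per-pixel scalar model, assuming upsample_above == upsample_left == 0 (so both frac_bits are 6 and min_base is -1); predict_z2_pixel_scalar is a hypothetical name, not part of libaom.

#include <stdint.h>

static uint8_t predict_z2_pixel_scalar(int r, int c, const uint8_t *above,
                                       const uint8_t *left, int dx, int dy) {
  const int x = (c << 6) - (r + 1) * dx;  /* projection onto the top edge */
  const int base_x = x >> 6;
  if (base_x >= -1) {                     /* above[-1] is the top-left sample */
    const int shift = (x & 0x3f) >> 1;
    return (uint8_t)((above[base_x] * 32 + 16 +
                      (above[base_x + 1] - above[base_x]) * shift) >> 5);
  }
  /* otherwise project onto the left edge */
  const int y = (r << 6) - (c + 1) * dy;
  const int base_y = y >> 6;              /* the SIMD code clamps this at its minimum */
  const int shift = (y & 0x3f) >> 1;
  return (uint8_t)((left[base_y] * 32 + 16 +
                    (left[base_y + 1] - left[base_y]) * shift) >> 5);
}

In the vector code the same decision is made 4/8/16 pixels at a time: resx holds the above-based results, resy the left-based ones, and BaseMask[base_min_diff] selects resy for the first base_min_diff columns of the row.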
3959
3960
static void dr_prediction_z2_Nx8_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3961
                                      const uint8_t *above, const uint8_t *left,
3962
                                      int upsample_above, int upsample_left,
3963
237k
                                      int dx, int dy) {
3964
237k
  const int min_base_x = -(1 << upsample_above);
3965
237k
  const int min_base_y = -(1 << upsample_left);
3966
237k
  const int frac_bits_x = 6 - upsample_above;
3967
237k
  const int frac_bits_y = 6 - upsample_left;
3968
3969
  // pre-filter above pixels
3970
  // store in temp buffers:
3971
  //   above[x] * 32 + 16
3972
  //   above[x+1] - above[x]
3973
  // final pixels will be calculated as:
3974
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3975
237k
  __m256i diff, a32, a16;
3976
237k
  __m256i a0_x, a1_x;
3977
237k
  __m128i a0_x128, a1_x128, min_base_y128, c3f;
3978
237k
  __m128i c1234, dy128;
3979
3980
237k
  a16 = _mm256_set1_epi16(16);
3981
237k
  c3f = _mm_set1_epi16(0x3f);
3982
237k
  min_base_y128 = _mm_set1_epi16(min_base_y);
3983
237k
  dy128 = _mm_set1_epi16(dy);
3984
237k
  c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
3985
3986
2.48M
  for (int r = 0; r < N; r++) {
3987
2.24M
    __m256i b, res, shift;
3988
2.24M
    __m128i resx, resy, resxy, r6, ydx;
3989
3990
2.24M
    int y = r + 1;
3991
2.24M
    int base_x = (-y * dx) >> frac_bits_x;
3992
2.24M
    int base_shift = 0;
3993
2.24M
    if (base_x < (min_base_x - 1)) {
3994
1.68M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
3995
1.68M
    }
3996
2.24M
    int base_min_diff =
3997
2.24M
        (min_base_x - base_x + upsample_above) >> upsample_above;
3998
2.24M
    if (base_min_diff > 8) {
3999
976k
      base_min_diff = 8;
4000
1.27M
    } else {
4001
1.27M
      if (base_min_diff < 0) base_min_diff = 0;
4002
1.27M
    }
4003
4004
2.24M
    if (base_shift > 7) {
4005
976k
      a0_x = _mm256_setzero_si256();
4006
976k
      a1_x = _mm256_setzero_si256();
4007
976k
      shift = _mm256_setzero_si256();
4008
1.27M
    } else {
4009
1.27M
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
4010
1.27M
      ydx = _mm_set1_epi16(y * dx);
4011
1.27M
      r6 = _mm_slli_epi16(_mm_srli_si128(c1234, 2), 6);
4012
1.27M
      if (upsample_above) {
4013
394k
        a0_x128 =
4014
394k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
4015
394k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
4016
4017
394k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
4018
394k
            _mm_and_si128(
4019
394k
                _mm_slli_epi16(_mm_sub_epi16(r6, ydx), upsample_above), c3f),
4020
394k
            1));
4021
878k
      } else {
4022
878k
        a1_x128 = _mm_srli_si128(a0_x128, 1);
4023
878k
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
4024
878k
        a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
4025
4026
878k
        shift = _mm256_castsi128_si256(
4027
878k
            _mm_srli_epi16(_mm_and_si128(_mm_sub_epi16(r6, ydx), c3f), 1));
4028
878k
      }
4029
1.27M
      a0_x = _mm256_castsi128_si256(_mm_cvtepu8_epi16(a0_x128));
4030
1.27M
      a1_x = _mm256_castsi128_si256(_mm_cvtepu8_epi16(a1_x128));
4031
1.27M
    }
4032
4033
    // y calc
4034
2.24M
    __m128i a0_y, a1_y, shifty;
4035
2.24M
    if (base_x < min_base_x) {
4036
1.87M
      DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
4037
1.87M
      __m128i y_c128, base_y_c128, mask128;
4038
1.87M
      r6 = _mm_set1_epi16(r << 6);
4039
1.87M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
4040
1.87M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
4041
1.87M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
4042
1.87M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
4043
1.87M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
4044
4045
1.87M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
4046
1.87M
                            left[base_y_c[2]], left[base_y_c[3]],
4047
1.87M
                            left[base_y_c[4]], left[base_y_c[5]],
4048
1.87M
                            left[base_y_c[6]], left[base_y_c[7]]);
4049
1.87M
      base_y_c128 = _mm_add_epi16(
4050
1.87M
          base_y_c128, _mm_srli_epi16(_mm256_castsi256_si128(a16), 4));
4051
1.87M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
4052
4053
1.87M
      a1_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
4054
1.87M
                            left[base_y_c[2]], left[base_y_c[3]],
4055
1.87M
                            left[base_y_c[4]], left[base_y_c[5]],
4056
1.87M
                            left[base_y_c[6]], left[base_y_c[7]]);
4057
4058
1.87M
      if (upsample_left) {
4059
504k
        shifty = _mm_srli_epi16(
4060
504k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
4061
1.37M
      } else {
4062
1.37M
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
4063
1.37M
      }
4064
4065
1.87M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
4066
1.87M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
4067
1.87M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
4068
1.87M
    }
4069
4070
2.24M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
4071
2.24M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
4072
2.24M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4073
4074
2.24M
    b = _mm256_mullo_epi16(diff, shift);
4075
2.24M
    res = _mm256_add_epi16(a32, b);
4076
2.24M
    res = _mm256_srli_epi16(res, 5);
4077
4078
2.24M
    resx = _mm_packus_epi16(_mm256_castsi256_si128(res),
4079
2.24M
                            _mm256_castsi256_si128(res));
4080
2.24M
    resy = _mm256_extracti128_si256(res, 1);
4081
2.24M
    resy = _mm_packus_epi16(resy, resy);
4082
4083
2.24M
    resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
4084
2.24M
    _mm_storel_epi64((__m128i *)(dst), resxy);
4085
2.24M
    dst += stride;
4086
2.24M
  }
4087
237k
}
4088
4089
static void dr_prediction_z2_HxW_avx2(int H, int W, uint8_t *dst,
4090
                                      ptrdiff_t stride, const uint8_t *above,
4091
                                      const uint8_t *left, int upsample_above,
4092
412k
                                      int upsample_left, int dx, int dy) {
4093
  // here upsample_above and upsample_left are 0 by design of
4094
  // av1_use_intra_edge_upsample
4095
412k
  const int min_base_x = -1;
4096
412k
  const int min_base_y = -1;
4097
412k
  (void)upsample_above;
4098
412k
  (void)upsample_left;
4099
412k
  const int frac_bits_x = 6;
4100
412k
  const int frac_bits_y = 6;
4101
4102
412k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c1234, c0123;
4103
412k
  __m256i diff, min_base_y256, c3f, shifty, dy256, c1;
4104
412k
  __m128i a0_x128, a1_x128;
4105
4106
412k
  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
4107
412k
  a16 = _mm256_set1_epi16(16);
4108
412k
  c1 = _mm256_srli_epi16(a16, 4);
4109
412k
  min_base_y256 = _mm256_set1_epi16(min_base_y);
4110
412k
  c3f = _mm256_set1_epi16(0x3f);
4111
412k
  dy256 = _mm256_set1_epi16(dy);
4112
412k
  c0123 =
4113
412k
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4114
412k
  c1234 = _mm256_add_epi16(c0123, c1);
4115
4116
8.36M
  for (int r = 0; r < H; r++) {
4117
7.95M
    __m256i b, res, shift, j256, r6, ydx;
4118
7.95M
    __m128i resx, resy;
4119
7.95M
    __m128i resxy;
4120
7.95M
    int y = r + 1;
4121
7.95M
    ydx = _mm256_set1_epi16((int16_t)(y * dx));
4122
4123
7.95M
    int base_x = (-y * dx) >> frac_bits_x;
4124
23.0M
    for (int j = 0; j < W; j += 16) {
4125
15.0M
      j256 = _mm256_set1_epi16(j);
4126
15.0M
      int base_shift = 0;
4127
15.0M
      if ((base_x + j) < (min_base_x - 1)) {
4128
10.9M
        base_shift = (min_base_x - (base_x + j) - 1);
4129
10.9M
      }
4130
15.0M
      int base_min_diff = (min_base_x - base_x - j);
4131
15.0M
      if (base_min_diff > 16) {
4132
7.91M
        base_min_diff = 16;
4133
7.91M
      } else {
4134
7.16M
        if (base_min_diff < 0) base_min_diff = 0;
4135
7.16M
      }
4136
4137
15.0M
      if (base_shift < 16) {
4138
7.16M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + j));
4139
7.16M
        a1_x128 =
4140
7.16M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1 + j));
4141
7.16M
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
4142
7.16M
        a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
4143
4144
7.16M
        a0_x = _mm256_cvtepu8_epi16(a0_x128);
4145
7.16M
        a1_x = _mm256_cvtepu8_epi16(a1_x128);
4146
4147
7.16M
        r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
4148
7.16M
        shift = _mm256_srli_epi16(
4149
7.16M
            _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
4150
4151
7.16M
        diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
4152
7.16M
        a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
4153
7.16M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4154
4155
7.16M
        b = _mm256_mullo_epi16(diff, shift);
4156
7.16M
        res = _mm256_add_epi16(a32, b);
4157
7.16M
        res = _mm256_srli_epi16(res, 5);  // 16 16-bit values
4158
7.16M
        resx = _mm256_castsi256_si128(_mm256_packus_epi16(
4159
7.16M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
4160
7.91M
      } else {
4161
7.91M
        resx = _mm_setzero_si128();
4162
7.91M
      }
4163
4164
      // y calc
4165
15.0M
      if (base_x < min_base_x) {
4166
13.9M
        __m256i c256, y_c256, base_y_c256, mask256, mul16;
4167
13.9M
        r6 = _mm256_set1_epi16(r << 6);
4168
13.9M
        c256 = _mm256_add_epi16(j256, c1234);
4169
13.9M
        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
4170
13.9M
                                 _mm256_srli_epi16(min_base_y256, 1));
4171
13.9M
        y_c256 = _mm256_sub_epi16(r6, mul16);
4172
4173
13.9M
        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
4174
13.9M
        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
4175
4176
13.9M
        base_y_c256 = _mm256_blendv_epi8(base_y_c256, min_base_y256, mask256);
4177
13.9M
        int16_t min_y = (int16_t)_mm_extract_epi16(
4178
13.9M
            _mm256_extracti128_si256(base_y_c256, 1), 7);
4179
13.9M
        int16_t max_y =
4180
13.9M
            (int16_t)_mm_extract_epi16(_mm256_castsi256_si128(base_y_c256), 0);
4181
13.9M
        int16_t offset_diff = max_y - min_y;
4182
4183
13.9M
        if (offset_diff < 16) {
4184
13.1M
          __m256i min_y256 = _mm256_set1_epi16(min_y);
4185
4186
13.1M
          __m256i base_y_offset = _mm256_sub_epi16(base_y_c256, min_y256);
4187
13.1M
          __m128i base_y_offset128 =
4188
13.1M
              _mm_packs_epi16(_mm256_extracti128_si256(base_y_offset, 0),
4189
13.1M
                              _mm256_extracti128_si256(base_y_offset, 1));
4190
4191
13.1M
          __m128i a0_y128 = _mm_maskload_epi32(
4192
13.1M
              (int *)(left + min_y), *(__m128i *)LoadMaskz2[offset_diff / 4]);
4193
13.1M
          __m128i a1_y128 =
4194
13.1M
              _mm_maskload_epi32((int *)(left + min_y + 1),
4195
13.1M
                                 *(__m128i *)LoadMaskz2[offset_diff / 4]);
4196
13.1M
          a0_y128 = _mm_shuffle_epi8(a0_y128, base_y_offset128);
4197
13.1M
          a1_y128 = _mm_shuffle_epi8(a1_y128, base_y_offset128);
4198
13.1M
          a0_y = _mm256_cvtepu8_epi16(a0_y128);
4199
13.1M
          a1_y = _mm256_cvtepu8_epi16(a1_y128);
4200
13.1M
        } else {
4201
829k
          base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
4202
829k
          _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
4203
4204
829k
          a0_y = _mm256_setr_epi16(
4205
829k
              left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
4206
829k
              left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
4207
829k
              left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
4208
829k
              left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
4209
829k
              left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
4210
829k
              left[base_y_c[15]]);
4211
829k
          base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
4212
829k
          _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
4213
4214
829k
          a1_y = _mm256_setr_epi16(
4215
829k
              left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
4216
829k
              left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
4217
829k
              left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
4218
829k
              left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
4219
829k
              left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
4220
829k
              left[base_y_c[15]]);
4221
829k
        }
4222
13.9M
        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
4223
4224
13.9M
        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
4225
13.9M
        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
4226
13.9M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4227
4228
13.9M
        b = _mm256_mullo_epi16(diff, shifty);
4229
13.9M
        res = _mm256_add_epi16(a32, b);
4230
13.9M
        res = _mm256_srli_epi16(res, 5);  // 16 16-bit values
4231
13.9M
        resy = _mm256_castsi256_si128(_mm256_packus_epi16(
4232
13.9M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
4233
13.9M
      } else {
4234
1.12M
        resy = _mm_setzero_si128();
4235
1.12M
      }
4236
15.0M
      resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
4237
15.0M
      _mm_storeu_si128((__m128i *)(dst + j), resxy);
4238
15.0M
    }  // for j
4239
7.95M
    dst += stride;
4240
7.95M
  }
4241
412k
}
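Editor's note: the offset_diff < 16 branch in dr_prediction_z2_HxW_avx2 is a gather optimization. Because dy > 0, base_y_c[0] is the largest index (max_y) and base_y_c[15] the smallest (min_y); when max_y - min_y < 16, all sixteen required left[] samples sit within one 16-byte window starting at left[min_y], so two masked loads plus a byte shuffle replace sixteen scalar lookups. Both branches compute the same thing as this scalar restatement (gather_left_scalar is a hypothetical name):

#include <stdint.h>

static void gather_left_scalar(uint8_t a0_y[16], uint8_t a1_y[16],
                               const uint8_t *left, const int16_t base_y_c[16]) {
  for (int i = 0; i < 16; ++i) {
    a0_y[i] = left[base_y_c[i]];      /* sample at the integer position */
    a1_y[i] = left[base_y_c[i] + 1];  /* and its right-hand neighbour   */
  }
}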
4242
4243
// Directional prediction, zone 2: 90 < angle < 180
4244
void av1_dr_prediction_z2_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
4245
                               const uint8_t *above, const uint8_t *left,
4246
                               int upsample_above, int upsample_left, int dx,
4247
991k
                               int dy) {
4248
991k
  assert(dx > 0);
4249
991k
  assert(dy > 0);
4250
991k
  switch (bw) {
4251
342k
    case 4:
4252
342k
      dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left, upsample_above,
4253
342k
                                upsample_left, dx, dy);
4254
342k
      break;
4255
237k
    case 8:
4256
237k
      dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left, upsample_above,
4257
237k
                                upsample_left, dx, dy);
4258
237k
      break;
4259
412k
    default:
4260
412k
      dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
4261
412k
                                upsample_above, upsample_left, dx, dy);
4262
412k
      break;
4263
991k
  }
4264
991k
  return;
4265
991k
}
4266
4267
// z3 functions
4268
182k
static inline void transpose16x32_avx2(__m256i *x, __m256i *d) {
4269
182k
  __m256i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
4270
182k
  __m256i w10, w11, w12, w13, w14, w15;
4271
4272
182k
  w0 = _mm256_unpacklo_epi8(x[0], x[1]);
4273
182k
  w1 = _mm256_unpacklo_epi8(x[2], x[3]);
4274
182k
  w2 = _mm256_unpacklo_epi8(x[4], x[5]);
4275
182k
  w3 = _mm256_unpacklo_epi8(x[6], x[7]);
4276
4277
182k
  w8 = _mm256_unpacklo_epi8(x[8], x[9]);
4278
182k
  w9 = _mm256_unpacklo_epi8(x[10], x[11]);
4279
182k
  w10 = _mm256_unpacklo_epi8(x[12], x[13]);
4280
182k
  w11 = _mm256_unpacklo_epi8(x[14], x[15]);
4281
4282
182k
  w4 = _mm256_unpacklo_epi16(w0, w1);
4283
182k
  w5 = _mm256_unpacklo_epi16(w2, w3);
4284
182k
  w12 = _mm256_unpacklo_epi16(w8, w9);
4285
182k
  w13 = _mm256_unpacklo_epi16(w10, w11);
4286
4287
182k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4288
182k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4289
182k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4290
182k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4291
4292
  // Store first 4-line result
4293
182k
  d[0] = _mm256_unpacklo_epi64(w6, w14);
4294
182k
  d[1] = _mm256_unpackhi_epi64(w6, w14);
4295
182k
  d[2] = _mm256_unpacklo_epi64(w7, w15);
4296
182k
  d[3] = _mm256_unpackhi_epi64(w7, w15);
4297
4298
182k
  w4 = _mm256_unpackhi_epi16(w0, w1);
4299
182k
  w5 = _mm256_unpackhi_epi16(w2, w3);
4300
182k
  w12 = _mm256_unpackhi_epi16(w8, w9);
4301
182k
  w13 = _mm256_unpackhi_epi16(w10, w11);
4302
4303
182k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4304
182k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4305
182k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4306
182k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4307
4308
  // Store second 4-line result
4309
182k
  d[4] = _mm256_unpacklo_epi64(w6, w14);
4310
182k
  d[5] = _mm256_unpackhi_epi64(w6, w14);
4311
182k
  d[6] = _mm256_unpacklo_epi64(w7, w15);
4312
182k
  d[7] = _mm256_unpackhi_epi64(w7, w15);
4313
4314
  // upper half
4315
182k
  w0 = _mm256_unpackhi_epi8(x[0], x[1]);
4316
182k
  w1 = _mm256_unpackhi_epi8(x[2], x[3]);
4317
182k
  w2 = _mm256_unpackhi_epi8(x[4], x[5]);
4318
182k
  w3 = _mm256_unpackhi_epi8(x[6], x[7]);
4319
4320
182k
  w8 = _mm256_unpackhi_epi8(x[8], x[9]);
4321
182k
  w9 = _mm256_unpackhi_epi8(x[10], x[11]);
4322
182k
  w10 = _mm256_unpackhi_epi8(x[12], x[13]);
4323
182k
  w11 = _mm256_unpackhi_epi8(x[14], x[15]);
4324
4325
182k
  w4 = _mm256_unpacklo_epi16(w0, w1);
4326
182k
  w5 = _mm256_unpacklo_epi16(w2, w3);
4327
182k
  w12 = _mm256_unpacklo_epi16(w8, w9);
4328
182k
  w13 = _mm256_unpacklo_epi16(w10, w11);
4329
4330
182k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4331
182k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4332
182k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4333
182k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4334
4335
  // Store first 4-line result
4336
182k
  d[8] = _mm256_unpacklo_epi64(w6, w14);
4337
182k
  d[9] = _mm256_unpackhi_epi64(w6, w14);
4338
182k
  d[10] = _mm256_unpacklo_epi64(w7, w15);
4339
182k
  d[11] = _mm256_unpackhi_epi64(w7, w15);
4340
4341
182k
  w4 = _mm256_unpackhi_epi16(w0, w1);
4342
182k
  w5 = _mm256_unpackhi_epi16(w2, w3);
4343
182k
  w12 = _mm256_unpackhi_epi16(w8, w9);
4344
182k
  w13 = _mm256_unpackhi_epi16(w10, w11);
4345
4346
182k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4347
182k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4348
182k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4349
182k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4350
4351
  // Store second 4-line result
4352
182k
  d[12] = _mm256_unpacklo_epi64(w6, w14);
4353
182k
  d[13] = _mm256_unpackhi_epi64(w6, w14);
4354
182k
  d[14] = _mm256_unpacklo_epi64(w7, w15);
4355
182k
  d[15] = _mm256_unpackhi_epi64(w7, w15);
4356
182k
}
4357
4358
static void dr_prediction_z3_4x4_avx2(uint8_t *dst, ptrdiff_t stride,
4359
                                      const uint8_t *left, int upsample_left,
4360
73.2k
                                      int dy) {
4361
73.2k
  __m128i dstvec[4], d[4];
4362
4363
73.2k
  dr_prediction_z1_HxW_internal_avx2(4, 4, dstvec, left, upsample_left, dy);
4364
73.2k
  transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
4365
73.2k
                            &d[0], &d[1], &d[2], &d[3]);
4366
4367
73.2k
  *(int *)(dst + stride * 0) = _mm_cvtsi128_si32(d[0]);
4368
73.2k
  *(int *)(dst + stride * 1) = _mm_cvtsi128_si32(d[1]);
4369
73.2k
  *(int *)(dst + stride * 2) = _mm_cvtsi128_si32(d[2]);
4370
73.2k
  *(int *)(dst + stride * 3) = _mm_cvtsi128_si32(d[3]);
4371
73.2k
  return;
4372
73.2k
}
4373
4374
static void dr_prediction_z3_8x8_avx2(uint8_t *dst, ptrdiff_t stride,
4375
                                      const uint8_t *left, int upsample_left,
4376
88.1k
                                      int dy) {
4377
88.1k
  __m128i dstvec[8], d[8];
4378
4379
88.1k
  dr_prediction_z1_HxW_internal_avx2(8, 8, dstvec, left, upsample_left, dy);
4380
88.1k
  transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4],
4381
88.1k
                    &dstvec[5], &dstvec[6], &dstvec[7], &d[0], &d[1], &d[2],
4382
88.1k
                    &d[3]);
4383
4384
88.1k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
4385
88.1k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), _mm_srli_si128(d[0], 8));
4386
88.1k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[1]);
4387
88.1k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), _mm_srli_si128(d[1], 8));
4388
88.1k
  _mm_storel_epi64((__m128i *)(dst + 4 * stride), d[2]);
4389
88.1k
  _mm_storel_epi64((__m128i *)(dst + 5 * stride), _mm_srli_si128(d[2], 8));
4390
88.1k
  _mm_storel_epi64((__m128i *)(dst + 6 * stride), d[3]);
4391
88.1k
  _mm_storel_epi64((__m128i *)(dst + 7 * stride), _mm_srli_si128(d[3], 8));
4392
88.1k
}
4393
4394
static void dr_prediction_z3_4x8_avx2(uint8_t *dst, ptrdiff_t stride,
4395
                                      const uint8_t *left, int upsample_left,
4396
21.8k
                                      int dy) {
4397
21.8k
  __m128i dstvec[4], d[8];
4398
4399
21.8k
  dr_prediction_z1_HxW_internal_avx2(8, 4, dstvec, left, upsample_left, dy);
4400
21.8k
  transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &d[0],
4401
21.8k
                        &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
4402
196k
  for (int i = 0; i < 8; i++) {
4403
174k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
4404
174k
  }
4405
21.8k
}
4406
4407
static void dr_prediction_z3_8x4_avx2(uint8_t *dst, ptrdiff_t stride,
4408
                                      const uint8_t *left, int upsample_left,
4409
34.8k
                                      int dy) {
4410
34.8k
  __m128i dstvec[8], d[4];
4411
4412
34.8k
  dr_prediction_z1_HxW_internal_avx2(4, 8, dstvec, left, upsample_left, dy);
4413
34.8k
  transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
4414
34.8k
                        &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7], &d[0],
4415
34.8k
                        &d[1], &d[2], &d[3]);
4416
34.8k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
4417
34.8k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
4418
34.8k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
4419
34.8k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
4420
34.8k
}
4421
4422
static void dr_prediction_z3_8x16_avx2(uint8_t *dst, ptrdiff_t stride,
4423
                                       const uint8_t *left, int upsample_left,
4424
22.6k
                                       int dy) {
4425
22.6k
  __m128i dstvec[8], d[8];
4426
4427
22.6k
  dr_prediction_z1_HxW_internal_avx2(16, 8, dstvec, left, upsample_left, dy);
4428
22.6k
  transpose8x16_16x8_sse2(dstvec, dstvec + 1, dstvec + 2, dstvec + 3,
4429
22.6k
                          dstvec + 4, dstvec + 5, dstvec + 6, dstvec + 7, d,
4430
22.6k
                          d + 1, d + 2, d + 3, d + 4, d + 5, d + 6, d + 7);
4431
203k
  for (int i = 0; i < 8; i++) {
4432
181k
    _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
4433
181k
    _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
4434
181k
                     _mm_srli_si128(d[i], 8));
4435
181k
  }
4436
22.6k
}
4437
4438
static void dr_prediction_z3_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
4439
                                       const uint8_t *left, int upsample_left,
4440
42.1k
                                       int dy) {
4441
42.1k
  __m128i dstvec[16], d[16];
4442
4443
42.1k
  dr_prediction_z1_HxW_internal_avx2(8, 16, dstvec, left, upsample_left, dy);
4444
42.1k
  transpose16x8_8x16_sse2(
4445
42.1k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4446
42.1k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4447
42.1k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4448
42.1k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4449
4450
379k
  for (int i = 0; i < 8; i++) {
4451
337k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4452
337k
  }
4453
42.1k
}
4454
4455
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4456
static void dr_prediction_z3_4x16_avx2(uint8_t *dst, ptrdiff_t stride,
4457
                                       const uint8_t *left, int upsample_left,
4458
16.6k
                                       int dy) {
4459
16.6k
  __m128i dstvec[4], d[16];
4460
4461
16.6k
  dr_prediction_z1_HxW_internal_avx2(16, 4, dstvec, left, upsample_left, dy);
4462
16.6k
  transpose4x16_sse2(dstvec, d);
4463
283k
  for (int i = 0; i < 16; i++) {
4464
266k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
4465
266k
  }
4466
16.6k
}
4467
4468
static void dr_prediction_z3_16x4_avx2(uint8_t *dst, ptrdiff_t stride,
4469
                                       const uint8_t *left, int upsample_left,
4470
50.9k
                                       int dy) {
4471
50.9k
  __m128i dstvec[16], d[8];
4472
4473
50.9k
  dr_prediction_z1_HxW_internal_avx2(4, 16, dstvec, left, upsample_left, dy);
4474
254k
  for (int i = 4; i < 8; i++) {
4475
203k
    d[i] = _mm_setzero_si128();
4476
203k
  }
4477
50.9k
  transpose16x8_8x16_sse2(
4478
50.9k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4479
50.9k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4480
50.9k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4481
50.9k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4482
4483
254k
  for (int i = 0; i < 4; i++) {
4484
203k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4485
203k
  }
4486
50.9k
}
4487
4488
static void dr_prediction_z3_8x32_avx2(uint8_t *dst, ptrdiff_t stride,
4489
                                       const uint8_t *left, int upsample_left,
4490
9.83k
                                       int dy) {
4491
9.83k
  __m256i dstvec[16], d[16];
4492
4493
9.83k
  dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left, dy);
4494
88.5k
  for (int i = 8; i < 16; i++) {
4495
78.7k
    dstvec[i] = _mm256_setzero_si256();
4496
78.7k
  }
4497
9.83k
  transpose16x32_avx2(dstvec, d);
4498
4499
167k
  for (int i = 0; i < 16; i++) {
4500
157k
    _mm_storel_epi64((__m128i *)(dst + i * stride),
4501
157k
                     _mm256_castsi256_si128(d[i]));
4502
157k
  }
4503
167k
  for (int i = 0; i < 16; i++) {
4504
157k
    _mm_storel_epi64((__m128i *)(dst + (i + 16) * stride),
4505
157k
                     _mm256_extracti128_si256(d[i], 1));
4506
157k
  }
4507
9.83k
}
4508
4509
static void dr_prediction_z3_32x8_avx2(uint8_t *dst, ptrdiff_t stride,
4510
                                       const uint8_t *left, int upsample_left,
4511
38.1k
                                       int dy) {
4512
38.1k
  __m128i dstvec[32], d[16];
4513
4514
38.1k
  dr_prediction_z1_HxW_internal_avx2(8, 32, dstvec, left, upsample_left, dy);
4515
4516
38.1k
  transpose16x8_8x16_sse2(
4517
38.1k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4518
38.1k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4519
38.1k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4520
38.1k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4521
38.1k
  transpose16x8_8x16_sse2(
4522
38.1k
      &dstvec[0 + 16], &dstvec[1 + 16], &dstvec[2 + 16], &dstvec[3 + 16],
4523
38.1k
      &dstvec[4 + 16], &dstvec[5 + 16], &dstvec[6 + 16], &dstvec[7 + 16],
4524
38.1k
      &dstvec[8 + 16], &dstvec[9 + 16], &dstvec[10 + 16], &dstvec[11 + 16],
4525
38.1k
      &dstvec[12 + 16], &dstvec[13 + 16], &dstvec[14 + 16], &dstvec[15 + 16],
4526
38.1k
      &d[0 + 8], &d[1 + 8], &d[2 + 8], &d[3 + 8], &d[4 + 8], &d[5 + 8],
4527
38.1k
      &d[6 + 8], &d[7 + 8]);
4528
4529
343k
  for (int i = 0; i < 8; i++) {
4530
305k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4531
305k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 8]);
4532
305k
  }
4533
38.1k
}
4534
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4535
4536
static void dr_prediction_z3_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
4537
                                        const uint8_t *left, int upsample_left,
4538
76.7k
                                        int dy) {
4539
76.7k
  __m128i dstvec[16], d[16];
4540
4541
76.7k
  dr_prediction_z1_HxW_internal_avx2(16, 16, dstvec, left, upsample_left, dy);
4542
76.7k
  transpose16x16_sse2(dstvec, d);
4543
4544
1.30M
  for (int i = 0; i < 16; i++) {
4545
1.22M
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4546
1.22M
  }
4547
76.7k
}
4548
4549
static void dr_prediction_z3_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
4550
                                        const uint8_t *left, int upsample_left,
4551
76.9k
                                        int dy) {
4552
76.9k
  __m256i dstvec[32], d[32];
4553
4554
76.9k
  dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left, dy);
4555
76.9k
  transpose16x32_avx2(dstvec, d);
4556
76.9k
  transpose16x32_avx2(dstvec + 16, d + 16);
4557
1.30M
  for (int j = 0; j < 16; j++) {
4558
1.23M
    _mm_storeu_si128((__m128i *)(dst + j * stride),
4559
1.23M
                     _mm256_castsi256_si128(d[j]));
4560
1.23M
    _mm_storeu_si128((__m128i *)(dst + j * stride + 16),
4561
1.23M
                     _mm256_castsi256_si128(d[j + 16]));
4562
1.23M
  }
4563
1.30M
  for (int j = 0; j < 16; j++) {
4564
1.23M
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
4565
1.23M
                     _mm256_extracti128_si256(d[j], 1));
4566
1.23M
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride + 16),
4567
1.23M
                     _mm256_extracti128_si256(d[j + 16], 1));
4568
1.23M
  }
4569
76.9k
}
4570
4571
static void dr_prediction_z3_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
4572
                                        const uint8_t *left, int upsample_left,
4573
20.9k
                                        int dy) {
4574
20.9k
  DECLARE_ALIGNED(16, uint8_t, dstT[64 * 64]);
4575
20.9k
  dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
4576
20.9k
  transpose(dstT, 64, dst, stride, 64, 64);
4577
20.9k
}
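Editor's note: dr_prediction_z3_64x64_avx2 makes the zone-3 strategy explicit: run the zone-1 predictor along the left edge into a scratch buffer, then transpose the result into dst. The other z3 kernels in this file do the same thing with in-register transposes. The end result matches this column-major scalar sketch, assuming upsample_left == 0; dr_z3_scalar is a hypothetical name, not part of libaom.

#include <stddef.h>
#include <stdint.h>

static void dr_z3_scalar(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
                         const uint8_t *left, int dy) {
  const int max_base_y = bw + bh - 1;      /* last usable left[] index */
  for (int c = 0, y = dy; c < bw; ++c, y += dy) {
    const int base = y >> 6;
    const int shift = (y & 0x3f) >> 1;
    for (int r = 0; r < bh; ++r) {
      const int p = base + r;
      dst[r * stride + c] =
          (p < max_base_y)
              ? (uint8_t)((left[p] * 32 + 16 +
                           (left[p + 1] - left[p]) * shift) >> 5)
              : left[max_base_y];
    }
  }
}

Transposing a row-major zone-1 result instead of writing column by column keeps the SIMD loads and stores contiguous, which is why every z3 kernel here is built from a z1 kernel plus a transpose.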
4578
4579
static void dr_prediction_z3_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
4580
                                        const uint8_t *left, int upsample_left,
4581
19.2k
                                        int dy) {
4582
19.2k
  __m256i dstvec[16], d[16];
4583
4584
19.2k
  dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left, dy);
4585
19.2k
  transpose16x32_avx2(dstvec, d);
4586
  // store
4587
327k
  for (int j = 0; j < 16; j++) {
4588
308k
    _mm_storeu_si128((__m128i *)(dst + j * stride),
4589
308k
                     _mm256_castsi256_si128(d[j]));
4590
308k
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
4591
308k
                     _mm256_extracti128_si256(d[j], 1));
4592
308k
  }
4593
19.2k
}
4594
4595
static void dr_prediction_z3_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
4596
                                        const uint8_t *left, int upsample_left,
4597
17.5k
                                        int dy) {
4598
17.5k
  __m128i dstvec[32], d[16];
4599
4600
17.5k
  dr_prediction_z1_HxW_internal_avx2(16, 32, dstvec, left, upsample_left, dy);
4601
52.5k
  for (int i = 0; i < 32; i += 16) {
4602
35.0k
    transpose16x16_sse2((dstvec + i), d);
4603
595k
    for (int j = 0; j < 16; j++) {
4604
560k
      _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
4605
560k
    }
4606
35.0k
  }
4607
17.5k
}
4608
4609
static void dr_prediction_z3_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
4610
                                        const uint8_t *left, int upsample_left,
4611
1.51k
                                        int dy) {
4612
1.51k
  uint8_t dstT[64 * 32];
4613
1.51k
  dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
4614
1.51k
  transpose(dstT, 64, dst, stride, 32, 64);
4615
1.51k
}
4616
4617
static void dr_prediction_z3_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
4618
                                        const uint8_t *left, int upsample_left,
4619
2.25k
                                        int dy) {
4620
2.25k
  uint8_t dstT[32 * 64];
4621
2.25k
  dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy);
4622
2.25k
  transpose(dstT, 32, dst, stride, 64, 32);
4623
2.25k
  return;
4624
2.25k
}
4625
4626
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4627
static void dr_prediction_z3_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
4628
                                        const uint8_t *left, int upsample_left,
4629
4.02k
                                        int dy) {
4630
4.02k
  uint8_t dstT[64 * 16];
4631
4.02k
  dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
4632
4.02k
  transpose(dstT, 64, dst, stride, 16, 64);
4633
4.02k
}
4634
4635
static void dr_prediction_z3_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
4636
                                        const uint8_t *left, int upsample_left,
4637
13.0k
                                        int dy) {
4638
13.0k
  __m128i dstvec[64], d[16];
4639
4640
13.0k
  dr_prediction_z1_HxW_internal_avx2(16, 64, dstvec, left, upsample_left, dy);
4641
65.3k
  for (int i = 0; i < 64; i += 16) {
4642
52.2k
    transpose16x16_sse2((dstvec + i), d);
4643
888k
    for (int j = 0; j < 16; j++) {
4644
836k
      _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
4645
836k
    }
4646
52.2k
  }
4647
13.0k
}
4648
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4649
4650
void av1_dr_prediction_z3_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
4651
                               const uint8_t *above, const uint8_t *left,
4652
630k
                               int upsample_left, int dx, int dy) {
4653
630k
  (void)above;
4654
630k
  (void)dx;
4655
630k
  assert(dx == 1);
4656
630k
  assert(dy > 0);
4657
4658
630k
  if (bw == bh) {
4659
336k
    switch (bw) {
4660
73.2k
      case 4:
4661
73.2k
        dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy);
4662
73.2k
        break;
4663
88.1k
      case 8:
4664
88.1k
        dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy);
4665
88.1k
        break;
4666
76.7k
      case 16:
4667
76.7k
        dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy);
4668
76.7k
        break;
4669
76.9k
      case 32:
4670
76.9k
        dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy);
4671
76.9k
        break;
4672
20.9k
      case 64:
4673
20.9k
        dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy);
4674
20.9k
        break;
4675
336k
    }
4676
336k
  } else {
4677
294k
    if (bw < bh) {
4678
95.8k
      if (bw + bw == bh) {
4679
65.2k
        switch (bw) {
4680
21.8k
          case 4:
4681
21.8k
            dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left, dy);
4682
21.8k
            break;
4683
22.6k
          case 8:
4684
22.6k
            dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left, dy);
4685
22.6k
            break;
4686
19.2k
          case 16:
4687
19.2k
            dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left, dy);
4688
19.2k
            break;
4689
1.51k
          case 32:
4690
1.51k
            dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left, dy);
4691
1.51k
            break;
4692
65.2k
        }
4693
65.2k
      } else {
4694
30.5k
        switch (bw) {
4695
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4696
16.6k
          case 4:
4697
16.6k
            dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left, dy);
4698
16.6k
            break;
4699
9.83k
          case 8:
4700
9.83k
            dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left, dy);
4701
9.83k
            break;
4702
4.02k
          case 16:
4703
4.02k
            dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left, dy);
4704
4.02k
            break;
4705
30.5k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4706
30.5k
        }
4707
30.5k
      }
4708
198k
    } else {
4709
198k
      if (bh + bh == bw) {
4710
96.8k
        switch (bh) {
4711
34.8k
          case 4:
4712
34.8k
            dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left, dy);
4713
34.8k
            break;
4714
42.1k
          case 8:
4715
42.1k
            dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left, dy);
4716
42.1k
            break;
4717
17.5k
          case 16:
4718
17.5k
            dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left, dy);
4719
17.5k
            break;
4720
2.25k
          case 32:
4721
2.25k
            dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left, dy);
4722
2.25k
            break;
4723
96.8k
        }
4724
102k
      } else {
4725
102k
        switch (bh) {
4726
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4727
50.9k
          case 4:
4728
50.9k
            dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left, dy);
4729
50.9k
            break;
4730
38.1k
          case 8:
4731
38.1k
            dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left, dy);
4732
38.1k
            break;
4733
13.0k
          case 16:
4734
13.0k
            dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left, dy);
4735
13.0k
            break;
4736
102k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4737
102k
        }
4738
102k
      }
4739
198k
    }
4740
294k
  }
4741
630k
}