Coverage Report

Created: 2026-02-14 07:00

/src/aom/aom_dsp/x86/intrapred_avx2.c
Line | Count | Source
1
/*
2
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
14
#include "config/av1_rtcd.h"
15
#include "aom_dsp/x86/intrapred_x86.h"
16
#include "aom_dsp/x86/intrapred_utils.h"
17
#include "aom_dsp/x86/lpf_common_sse2.h"
18
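// dc_sum_64() sums the 64 border bytes: _mm256_sad_epu8 against zero gives
// 8-byte partial sums in 64-bit lanes, which are then folded so the grand
// total (at most 64 * 255, so it fits in 16 bits) ends up in the low
// element of each 128-bit lane.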
19
287k
static inline __m256i dc_sum_64(const uint8_t *ref) {
20
287k
  const __m256i x0 = _mm256_loadu_si256((const __m256i *)ref);
21
287k
  const __m256i x1 = _mm256_loadu_si256((const __m256i *)(ref + 32));
22
287k
  const __m256i zero = _mm256_setzero_si256();
23
287k
  __m256i y0 = _mm256_sad_epu8(x0, zero);
24
287k
  __m256i y1 = _mm256_sad_epu8(x1, zero);
25
287k
  y0 = _mm256_add_epi64(y0, y1);
26
287k
  __m256i u0 = _mm256_permute2x128_si256(y0, y0, 1);
27
287k
  y0 = _mm256_add_epi64(u0, y0);
28
287k
  u0 = _mm256_unpackhi_epi64(y0, y0);
29
287k
  return _mm256_add_epi16(y0, u0);
30
287k
}
31
32
1.84M
static inline __m256i dc_sum_32(const uint8_t *ref) {
33
1.84M
  const __m256i x = _mm256_loadu_si256((const __m256i *)ref);
34
1.84M
  const __m256i zero = _mm256_setzero_si256();
35
1.84M
  __m256i y = _mm256_sad_epu8(x, zero);
36
1.84M
  __m256i u = _mm256_permute2x128_si256(y, y, 1);
37
1.84M
  y = _mm256_add_epi64(u, y);
38
1.84M
  u = _mm256_unpackhi_epi64(y, y);
39
1.84M
  return _mm256_add_epi16(y, u);
40
1.84M
}
41
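// The row_store_* helpers write one pre-computed 32- or 64-byte row to
// every line of the destination block.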
42
static inline void row_store_32xh(const __m256i *r, int height, uint8_t *dst,
43
1.18M
                                  ptrdiff_t stride) {
44
37.1M
  for (int i = 0; i < height; ++i) {
45
35.9M
    _mm256_storeu_si256((__m256i *)dst, *r);
46
35.9M
    dst += stride;
47
35.9M
  }
48
1.18M
}
49
50
static inline void row_store_32x2xh(const __m256i *r0, const __m256i *r1,
51
                                    int height, uint8_t *dst,
52
3.21k
                                    ptrdiff_t stride) {
53
155k
  for (int i = 0; i < height; ++i) {
54
152k
    _mm256_storeu_si256((__m256i *)dst, *r0);
55
152k
    _mm256_storeu_si256((__m256i *)(dst + 32), *r1);
56
152k
    dst += stride;
57
152k
  }
58
3.21k
}
59
60
static inline void row_store_64xh(const __m256i *r, int height, uint8_t *dst,
61
198k
                                  ptrdiff_t stride) {
62
9.88M
  for (int i = 0; i < height; ++i) {
63
9.68M
    _mm256_storeu_si256((__m256i *)dst, *r);
64
9.68M
    _mm256_storeu_si256((__m256i *)(dst + 32), *r);
65
9.68M
    dst += stride;
66
9.68M
  }
67
198k
}
68
69
#if CONFIG_AV1_HIGHBITDEPTH
70
static DECLARE_ALIGNED(16, uint8_t, HighbdLoadMaskx[8][16]) = {
71
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
72
  { 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
73
  { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
74
  { 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
75
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7 },
76
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5 },
77
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3 },
78
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
79
};
80
81
static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx4[4][16]) = {
82
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
83
  { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 4, 5, 8, 9, 12, 13 },
84
  { 0, 1, 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 6, 7, 10, 11 },
85
  { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 0, 1, 8, 9 }
86
};
87
88
static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx[8][32]) = {
89
  { 0, 1, 4, 5, 8,  9,  12, 13, 16, 17, 20, 21, 24, 25, 28, 29,
90
    2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 },
91
  { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27,
92
    0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 },
93
  { 0, 1, 0, 1, 4, 5, 8,  9,  12, 13, 16, 17, 20, 21, 24, 25,
94
    0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27 },
95
  { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23,
96
    0, 1, 0, 1, 0, 1, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25 },
97
  { 0, 1, 0, 1, 0, 1, 0, 1, 8,  9,  12, 13, 16, 17, 20, 21,
98
    0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19, 22, 23 },
99
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19,
100
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17, 20, 21 },
101
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17,
102
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15, 18, 19 },
103
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15,
104
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 16, 17 }
105
};
106
107
static DECLARE_ALIGNED(32, uint16_t, HighbdBaseMask[17][16]) = {
108
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
109
  { 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
110
  { 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
111
  { 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
112
  { 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
113
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
114
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115
    0 },
116
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0,
117
    0, 0 },
118
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0,
119
    0, 0, 0, 0 },
120
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0,
121
    0, 0, 0, 0, 0, 0 },
122
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
123
    0xffff, 0, 0, 0, 0, 0, 0 },
124
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
125
    0xffff, 0xffff, 0, 0, 0, 0, 0 },
126
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
127
    0xffff, 0xffff, 0xffff, 0, 0, 0, 0 },
128
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
129
    0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0 },
130
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
131
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0 },
132
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
133
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 },
134
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
135
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }
136
};
137
138
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
139
59.6k
static inline void highbd_transpose16x4_8x8_sse2(__m128i *x, __m128i *d) {
140
59.6k
  __m128i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
141
142
59.6k
  r0 = _mm_unpacklo_epi16(x[0], x[1]);
143
59.6k
  r1 = _mm_unpacklo_epi16(x[2], x[3]);
144
59.6k
  r2 = _mm_unpacklo_epi16(x[4], x[5]);
145
59.6k
  r3 = _mm_unpacklo_epi16(x[6], x[7]);
146
147
59.6k
  r4 = _mm_unpacklo_epi16(x[8], x[9]);
148
59.6k
  r5 = _mm_unpacklo_epi16(x[10], x[11]);
149
59.6k
  r6 = _mm_unpacklo_epi16(x[12], x[13]);
150
59.6k
  r7 = _mm_unpacklo_epi16(x[14], x[15]);
151
152
59.6k
  r8 = _mm_unpacklo_epi32(r0, r1);
153
59.6k
  r9 = _mm_unpackhi_epi32(r0, r1);
154
59.6k
  r10 = _mm_unpacklo_epi32(r2, r3);
155
59.6k
  r11 = _mm_unpackhi_epi32(r2, r3);
156
157
59.6k
  r12 = _mm_unpacklo_epi32(r4, r5);
158
59.6k
  r13 = _mm_unpackhi_epi32(r4, r5);
159
59.6k
  r14 = _mm_unpacklo_epi32(r6, r7);
160
59.6k
  r15 = _mm_unpackhi_epi32(r6, r7);
161
162
59.6k
  r0 = _mm_unpacklo_epi64(r8, r9);
163
59.6k
  r1 = _mm_unpackhi_epi64(r8, r9);
164
59.6k
  r2 = _mm_unpacklo_epi64(r10, r11);
165
59.6k
  r3 = _mm_unpackhi_epi64(r10, r11);
166
167
59.6k
  r4 = _mm_unpacklo_epi64(r12, r13);
168
59.6k
  r5 = _mm_unpackhi_epi64(r12, r13);
169
59.6k
  r6 = _mm_unpacklo_epi64(r14, r15);
170
59.6k
  r7 = _mm_unpackhi_epi64(r14, r15);
171
172
59.6k
  d[0] = _mm_unpacklo_epi64(r0, r2);
173
59.6k
  d[1] = _mm_unpacklo_epi64(r4, r6);
174
59.6k
  d[2] = _mm_unpacklo_epi64(r1, r3);
175
59.6k
  d[3] = _mm_unpacklo_epi64(r5, r7);
176
177
59.6k
  d[4] = _mm_unpackhi_epi64(r0, r2);
178
59.6k
  d[5] = _mm_unpackhi_epi64(r4, r6);
179
59.6k
  d[6] = _mm_unpackhi_epi64(r1, r3);
180
59.6k
  d[7] = _mm_unpackhi_epi64(r5, r7);
181
59.6k
}
182
183
25.9k
static inline void highbd_transpose4x16_avx2(__m256i *x, __m256i *d) {
184
25.9k
  __m256i w0, w1, w2, w3, ww0, ww1;
185
186
25.9k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);  // 00 10 01 11 02 12 03 13
187
25.9k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);  // 20 30 21 31 22 32 23 33
188
25.9k
  w2 = _mm256_unpackhi_epi16(x[0], x[1]);  // 40 50 41 51 42 52 43 53
189
25.9k
  w3 = _mm256_unpackhi_epi16(x[2], x[3]);  // 60 70 61 71 62 72 63 73
190
191
25.9k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 00 10 20 30 01 11 21 31
192
25.9k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 40 50 60 70 41 51 61 71
193
194
25.9k
  d[0] = _mm256_unpacklo_epi64(ww0, ww1);  // 00 10 20 30 40 50 60 70
195
25.9k
  d[1] = _mm256_unpackhi_epi64(ww0, ww1);  // 01 11 21 31 41 51 61 71
196
197
25.9k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 02 12 22 32 03 13 23 33
198
25.9k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 42 52 62 72 43 53 63 73
199
200
25.9k
  d[2] = _mm256_unpacklo_epi64(ww0, ww1);  // 02 12 22 32 42 52 62 72
201
25.9k
  d[3] = _mm256_unpackhi_epi64(ww0, ww1);  // 03 13 23 33 43 53 63 73
202
25.9k
}
203
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
204
205
157k
static inline void highbd_transpose8x16_16x8_avx2(__m256i *x, __m256i *d) {
206
157k
  __m256i w0, w1, w2, w3, ww0, ww1;
207
208
157k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);  // 00 10 01 11 02 12 03 13
209
157k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);  // 20 30 21 31 22 32 23 33
210
157k
  w2 = _mm256_unpacklo_epi16(x[4], x[5]);  // 40 50 41 51 42 52 43 53
211
157k
  w3 = _mm256_unpacklo_epi16(x[6], x[7]);  // 60 70 61 71 62 72 63 73
212
213
157k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 00 10 20 30 01 11 21 31
214
157k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 40 50 60 70 41 51 61 71
215
216
157k
  d[0] = _mm256_unpacklo_epi64(ww0, ww1);  // 00 10 20 30 40 50 60 70
217
157k
  d[1] = _mm256_unpackhi_epi64(ww0, ww1);  // 01 11 21 31 41 51 61 71
218
219
157k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 02 12 22 32 03 13 23 33
220
157k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 42 52 62 72 43 53 63 73
221
222
157k
  d[2] = _mm256_unpacklo_epi64(ww0, ww1);  // 02 12 22 32 42 52 62 72
223
157k
  d[3] = _mm256_unpackhi_epi64(ww0, ww1);  // 03 13 23 33 43 53 63 73
224
225
157k
  w0 = _mm256_unpackhi_epi16(x[0], x[1]);  // 04 14 05 15 06 16 07 17
226
157k
  w1 = _mm256_unpackhi_epi16(x[2], x[3]);  // 24 34 25 35 26 36 27 37
227
157k
  w2 = _mm256_unpackhi_epi16(x[4], x[5]);  // 44 54 45 55 46 56 47 57
228
157k
  w3 = _mm256_unpackhi_epi16(x[6], x[7]);  // 64 74 65 75 66 76 67 77
229
230
157k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 04 14 24 34 05 15 25 35
231
157k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 44 54 64 74 45 55 65 75
232
233
157k
  d[4] = _mm256_unpacklo_epi64(ww0, ww1);  // 04 14 24 34 44 54 64 74
234
157k
  d[5] = _mm256_unpackhi_epi64(ww0, ww1);  // 05 15 25 35 45 55 65 75
235
236
157k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 06 16 26 36 07 17 27 37
237
157k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 46 56 66 76 47 57 67 77
238
239
157k
  d[6] = _mm256_unpacklo_epi64(ww0, ww1);  // 06 16 26 36 46 56 66 76
240
157k
  d[7] = _mm256_unpackhi_epi64(ww0, ww1);  // 07 17 27 37 47 57 67 77
241
157k
}
242
243
981k
static inline void highbd_transpose16x16_avx2(__m256i *x, __m256i *d) {
244
981k
  __m256i w0, w1, w2, w3, ww0, ww1;
245
981k
  __m256i dd[16];
246
981k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);
247
981k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);
248
981k
  w2 = _mm256_unpacklo_epi16(x[4], x[5]);
249
981k
  w3 = _mm256_unpacklo_epi16(x[6], x[7]);
250
251
981k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  //
252
981k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  //
253
254
981k
  dd[0] = _mm256_unpacklo_epi64(ww0, ww1);
255
981k
  dd[1] = _mm256_unpackhi_epi64(ww0, ww1);
256
257
981k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  //
258
981k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  //
259
260
981k
  dd[2] = _mm256_unpacklo_epi64(ww0, ww1);
261
981k
  dd[3] = _mm256_unpackhi_epi64(ww0, ww1);
262
263
981k
  w0 = _mm256_unpackhi_epi16(x[0], x[1]);
264
981k
  w1 = _mm256_unpackhi_epi16(x[2], x[3]);
265
981k
  w2 = _mm256_unpackhi_epi16(x[4], x[5]);
266
981k
  w3 = _mm256_unpackhi_epi16(x[6], x[7]);
267
268
981k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  //
269
981k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  //
270
271
981k
  dd[4] = _mm256_unpacklo_epi64(ww0, ww1);
272
981k
  dd[5] = _mm256_unpackhi_epi64(ww0, ww1);
273
274
981k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  //
275
981k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  //
276
277
981k
  dd[6] = _mm256_unpacklo_epi64(ww0, ww1);
278
981k
  dd[7] = _mm256_unpackhi_epi64(ww0, ww1);
279
280
981k
  w0 = _mm256_unpacklo_epi16(x[8], x[9]);
281
981k
  w1 = _mm256_unpacklo_epi16(x[10], x[11]);
282
981k
  w2 = _mm256_unpacklo_epi16(x[12], x[13]);
283
981k
  w3 = _mm256_unpacklo_epi16(x[14], x[15]);
284
285
981k
  ww0 = _mm256_unpacklo_epi32(w0, w1);
286
981k
  ww1 = _mm256_unpacklo_epi32(w2, w3);
287
288
981k
  dd[8] = _mm256_unpacklo_epi64(ww0, ww1);
289
981k
  dd[9] = _mm256_unpackhi_epi64(ww0, ww1);
290
291
981k
  ww0 = _mm256_unpackhi_epi32(w0, w1);
292
981k
  ww1 = _mm256_unpackhi_epi32(w2, w3);
293
294
981k
  dd[10] = _mm256_unpacklo_epi64(ww0, ww1);
295
981k
  dd[11] = _mm256_unpackhi_epi64(ww0, ww1);
296
297
981k
  w0 = _mm256_unpackhi_epi16(x[8], x[9]);
298
981k
  w1 = _mm256_unpackhi_epi16(x[10], x[11]);
299
981k
  w2 = _mm256_unpackhi_epi16(x[12], x[13]);
300
981k
  w3 = _mm256_unpackhi_epi16(x[14], x[15]);
301
302
981k
  ww0 = _mm256_unpacklo_epi32(w0, w1);
303
981k
  ww1 = _mm256_unpacklo_epi32(w2, w3);
304
305
981k
  dd[12] = _mm256_unpacklo_epi64(ww0, ww1);
306
981k
  dd[13] = _mm256_unpackhi_epi64(ww0, ww1);
307
308
981k
  ww0 = _mm256_unpackhi_epi32(w0, w1);
309
981k
  ww1 = _mm256_unpackhi_epi32(w2, w3);
310
311
981k
  dd[14] = _mm256_unpacklo_epi64(ww0, ww1);
312
981k
  dd[15] = _mm256_unpackhi_epi64(ww0, ww1);
313
314
8.83M
  for (int i = 0; i < 8; i++) {
315
7.85M
    d[i] = _mm256_insertf128_si256(dd[i], _mm256_castsi256_si128(dd[i + 8]), 1);
316
7.85M
    d[i + 8] = _mm256_insertf128_si256(dd[i + 8],
317
7.85M
                                       _mm256_extracti128_si256(dd[i], 1), 0);
318
7.85M
  }
319
981k
}
320
#endif  // CONFIG_AV1_HIGHBITDEPTH
321
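// In the DC predictors below, the border sums from dc_sum_*() sit in the low
// 16-bit element of each 128-bit lane, so adding the rounding constant and
// shifting (e.g. +32, >>6 for 32 above + 32 left pixels) yields the average;
// _mm256_shuffle_epi8 with an all-zero mask then broadcasts that byte across
// the whole output row.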
322
void aom_dc_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
323
825k
                                 const uint8_t *above, const uint8_t *left) {
324
825k
  const __m256i sum_above = dc_sum_32(above);
325
825k
  __m256i sum_left = dc_sum_32(left);
326
825k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
327
825k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
328
825k
  sum_left = _mm256_add_epi16(sum_left, thirtytwo);
329
825k
  sum_left = _mm256_srai_epi16(sum_left, 6);
330
825k
  const __m256i zero = _mm256_setzero_si256();
331
825k
  __m256i row = _mm256_shuffle_epi8(sum_left, zero);
332
825k
  row_store_32xh(&row, 32, dst, stride);
333
825k
}
334
335
void aom_dc_top_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
336
                                     const uint8_t *above,
337
56.0k
                                     const uint8_t *left) {
338
56.0k
  __m256i sum = dc_sum_32(above);
339
56.0k
  (void)left;
340
341
56.0k
  const __m256i sixteen = _mm256_set1_epi16(16);
342
56.0k
  sum = _mm256_add_epi16(sum, sixteen);
343
56.0k
  sum = _mm256_srai_epi16(sum, 5);
344
56.0k
  const __m256i zero = _mm256_setzero_si256();
345
56.0k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
346
56.0k
  row_store_32xh(&row, 32, dst, stride);
347
56.0k
}
348
349
void aom_dc_left_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
350
                                      const uint8_t *above,
351
103k
                                      const uint8_t *left) {
352
103k
  __m256i sum = dc_sum_32(left);
353
103k
  (void)above;
354
355
103k
  const __m256i sixteen = _mm256_set1_epi16(16);
356
103k
  sum = _mm256_add_epi16(sum, sixteen);
357
103k
  sum = _mm256_srai_epi16(sum, 5);
358
103k
  const __m256i zero = _mm256_setzero_si256();
359
103k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
360
103k
  row_store_32xh(&row, 32, dst, stride);
361
103k
}
362
363
void aom_dc_128_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
364
                                     const uint8_t *above,
365
20.5k
                                     const uint8_t *left) {
366
20.5k
  (void)above;
367
20.5k
  (void)left;
368
20.5k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
369
20.5k
  row_store_32xh(&row, 32, dst, stride);
370
20.5k
}
371
372
void aom_v_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
373
24.8k
                                const uint8_t *above, const uint8_t *left) {
374
24.8k
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
375
24.8k
  (void)left;
376
24.8k
  row_store_32xh(&row, 32, dst, stride);
377
24.8k
}
378
379
// There are 32 rows together. This function does lines
380
// 0,1,2,3 and 16,17,18,19. The next call does
381
// 4,5,6,7 and 20,21,22,23, so four calls cover
382
// all 32 rows.
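// Each left pixel was already replicated four times by the unpack steps in
// the caller, so the shuffle index vector m advances by 4 per row to
// broadcast the next left pixel across the 32-byte row; the second store at
// dst + (stride << 4) fills the matching row 16 lines further down.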
383
static inline void h_predictor_32x8line(const __m256i *row, uint8_t *dst,
384
527k
                                        ptrdiff_t stride) {
385
527k
  __m256i t[4];
386
527k
  __m256i m = _mm256_setzero_si256();
387
527k
  const __m256i inc = _mm256_set1_epi8(4);
388
527k
  int i;
389
390
2.63M
  for (i = 0; i < 4; i++) {
391
2.10M
    t[i] = _mm256_shuffle_epi8(*row, m);
392
2.10M
    __m256i r0 = _mm256_permute2x128_si256(t[i], t[i], 0);
393
2.10M
    __m256i r1 = _mm256_permute2x128_si256(t[i], t[i], 0x11);
394
2.10M
    _mm256_storeu_si256((__m256i *)dst, r0);
395
2.10M
    _mm256_storeu_si256((__m256i *)(dst + (stride << 4)), r1);
396
2.10M
    dst += stride;
397
2.10M
    m = _mm256_add_epi8(m, inc);
398
2.10M
  }
399
527k
}
400
401
void aom_h_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
402
131k
                                const uint8_t *above, const uint8_t *left) {
403
131k
  (void)above;
404
131k
  const __m256i left_col = _mm256_loadu_si256((__m256i const *)left);
405
406
131k
  __m256i u = _mm256_unpacklo_epi8(left_col, left_col);
407
408
131k
  __m256i v = _mm256_unpacklo_epi8(u, u);
409
131k
  h_predictor_32x8line(&v, dst, stride);
410
131k
  dst += stride << 2;
411
412
131k
  v = _mm256_unpackhi_epi8(u, u);
413
131k
  h_predictor_32x8line(&v, dst, stride);
414
131k
  dst += stride << 2;
415
416
131k
  u = _mm256_unpackhi_epi8(left_col, left_col);
417
418
131k
  v = _mm256_unpacklo_epi8(u, u);
419
131k
  h_predictor_32x8line(&v, dst, stride);
420
131k
  dst += stride << 2;
421
422
131k
  v = _mm256_unpackhi_epi8(u, u);
423
131k
  h_predictor_32x8line(&v, dst, stride);
424
131k
}
425
426
// -----------------------------------------------------------------------------
427
// Rectangle
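// For rectangular blocks the border length (width + height) is not a power
// of two, so the rounded average is computed with scalar arithmetic, e.g.
// (sum + 24) / 48 for the 32x16 case below.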
428
void aom_dc_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
429
120k
                                 const uint8_t *above, const uint8_t *left) {
430
120k
  const __m128i top_sum = dc_sum_32_sse2(above);
431
120k
  __m128i left_sum = dc_sum_16_sse2(left);
432
120k
  left_sum = _mm_add_epi16(top_sum, left_sum);
433
120k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(left_sum);
434
120k
  sum += 24;
435
120k
  sum /= 48;
436
120k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
437
120k
  row_store_32xh(&row, 16, dst, stride);
438
120k
}
439
440
void aom_dc_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
441
6.73k
                                 const uint8_t *above, const uint8_t *left) {
442
6.73k
  const __m256i sum_above = dc_sum_32(above);
443
6.73k
  __m256i sum_left = dc_sum_64(left);
444
6.73k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
445
6.73k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
446
6.73k
  sum += 48;
447
6.73k
  sum /= 96;
448
6.73k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
449
6.73k
  row_store_32xh(&row, 64, dst, stride);
450
6.73k
}
451
452
void aom_dc_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
453
91.9k
                                 const uint8_t *above, const uint8_t *left) {
454
91.9k
  const __m256i sum_above = dc_sum_64(above);
455
91.9k
  __m256i sum_left = dc_sum_64(left);
456
91.9k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
457
91.9k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
458
91.9k
  sum += 64;
459
91.9k
  sum /= 128;
460
91.9k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
461
91.9k
  row_store_64xh(&row, 64, dst, stride);
462
91.9k
}
463
464
void aom_dc_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
465
13.4k
                                 const uint8_t *above, const uint8_t *left) {
466
13.4k
  const __m256i sum_above = dc_sum_64(above);
467
13.4k
  __m256i sum_left = dc_sum_32(left);
468
13.4k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
469
13.4k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
470
13.4k
  sum += 48;
471
13.4k
  sum /= 96;
472
13.4k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
473
13.4k
  row_store_64xh(&row, 32, dst, stride);
474
13.4k
}
475
476
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
477
void aom_dc_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
478
48.2k
                                 const uint8_t *above, const uint8_t *left) {
479
48.2k
  const __m256i sum_above = dc_sum_64(above);
480
48.2k
  __m256i sum_left = _mm256_castsi128_si256(dc_sum_16_sse2(left));
481
48.2k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
482
48.2k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
483
48.2k
  sum += 40;
484
48.2k
  sum /= 80;
485
48.2k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
486
48.2k
  row_store_64xh(&row, 16, dst, stride);
487
48.2k
}
488
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
489
490
void aom_dc_top_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
491
                                     const uint8_t *above,
492
7.11k
                                     const uint8_t *left) {
493
7.11k
  __m256i sum = dc_sum_32(above);
494
7.11k
  (void)left;
495
496
7.11k
  const __m256i sixteen = _mm256_set1_epi16(16);
497
7.11k
  sum = _mm256_add_epi16(sum, sixteen);
498
7.11k
  sum = _mm256_srai_epi16(sum, 5);
499
7.11k
  const __m256i zero = _mm256_setzero_si256();
500
7.11k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
501
7.11k
  row_store_32xh(&row, 16, dst, stride);
502
7.11k
}
503
504
void aom_dc_top_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
505
                                     const uint8_t *above,
506
1.07k
                                     const uint8_t *left) {
507
1.07k
  __m256i sum = dc_sum_32(above);
508
1.07k
  (void)left;
509
510
1.07k
  const __m256i sixteen = _mm256_set1_epi16(16);
511
1.07k
  sum = _mm256_add_epi16(sum, sixteen);
512
1.07k
  sum = _mm256_srai_epi16(sum, 5);
513
1.07k
  const __m256i zero = _mm256_setzero_si256();
514
1.07k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
515
1.07k
  row_store_32xh(&row, 64, dst, stride);
516
1.07k
}
517
518
void aom_dc_top_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
519
                                     const uint8_t *above,
520
11.9k
                                     const uint8_t *left) {
521
11.9k
  __m256i sum = dc_sum_64(above);
522
11.9k
  (void)left;
523
524
11.9k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
525
11.9k
  sum = _mm256_add_epi16(sum, thirtytwo);
526
11.9k
  sum = _mm256_srai_epi16(sum, 6);
527
11.9k
  const __m256i zero = _mm256_setzero_si256();
528
11.9k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
529
11.9k
  row_store_64xh(&row, 64, dst, stride);
530
11.9k
}
531
532
void aom_dc_top_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
533
                                     const uint8_t *above,
534
553
                                     const uint8_t *left) {
535
553
  __m256i sum = dc_sum_64(above);
536
553
  (void)left;
537
538
553
  const __m256i thirtytwo = _mm256_set1_epi16(32);
539
553
  sum = _mm256_add_epi16(sum, thirtytwo);
540
553
  sum = _mm256_srai_epi16(sum, 6);
541
553
  const __m256i zero = _mm256_setzero_si256();
542
553
  __m256i row = _mm256_shuffle_epi8(sum, zero);
543
553
  row_store_64xh(&row, 32, dst, stride);
544
553
}
545
546
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
547
void aom_dc_top_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
548
                                     const uint8_t *above,
549
2.20k
                                     const uint8_t *left) {
550
2.20k
  __m256i sum = dc_sum_64(above);
551
2.20k
  (void)left;
552
553
2.20k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
554
2.20k
  sum = _mm256_add_epi16(sum, thirtytwo);
555
2.20k
  sum = _mm256_srai_epi16(sum, 6);
556
2.20k
  const __m256i zero = _mm256_setzero_si256();
557
2.20k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
558
2.20k
  row_store_64xh(&row, 16, dst, stride);
559
2.20k
}
560
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
561
562
void aom_dc_left_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
563
                                      const uint8_t *above,
564
5.77k
                                      const uint8_t *left) {
565
5.77k
  __m128i sum = dc_sum_16_sse2(left);
566
5.77k
  (void)above;
567
568
5.77k
  const __m128i eight = _mm_set1_epi16(8);
569
5.77k
  sum = _mm_add_epi16(sum, eight);
570
5.77k
  sum = _mm_srai_epi16(sum, 4);
571
5.77k
  const __m128i zero = _mm_setzero_si128();
572
5.77k
  const __m128i r = _mm_shuffle_epi8(sum, zero);
573
5.77k
  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);
574
5.77k
  row_store_32xh(&row, 16, dst, stride);
575
5.77k
}
576
577
void aom_dc_left_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
578
                                      const uint8_t *above,
579
1.45k
                                      const uint8_t *left) {
580
1.45k
  __m256i sum = dc_sum_64(left);
581
1.45k
  (void)above;
582
583
1.45k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
584
1.45k
  sum = _mm256_add_epi16(sum, thirtytwo);
585
1.45k
  sum = _mm256_srai_epi16(sum, 6);
586
1.45k
  const __m256i zero = _mm256_setzero_si256();
587
1.45k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
588
1.45k
  row_store_32xh(&row, 64, dst, stride);
589
1.45k
}
590
591
void aom_dc_left_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
592
                                      const uint8_t *above,
593
18.9k
                                      const uint8_t *left) {
594
18.9k
  __m256i sum = dc_sum_64(left);
595
18.9k
  (void)above;
596
597
18.9k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
598
18.9k
  sum = _mm256_add_epi16(sum, thirtytwo);
599
18.9k
  sum = _mm256_srai_epi16(sum, 6);
600
18.9k
  const __m256i zero = _mm256_setzero_si256();
601
18.9k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
602
18.9k
  row_store_64xh(&row, 64, dst, stride);
603
18.9k
}
604
605
void aom_dc_left_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
606
                                      const uint8_t *above,
607
791
                                      const uint8_t *left) {
608
791
  __m256i sum = dc_sum_32(left);
609
791
  (void)above;
610
611
791
  const __m256i sixteen = _mm256_set1_epi16(16);
612
791
  sum = _mm256_add_epi16(sum, sixteen);
613
791
  sum = _mm256_srai_epi16(sum, 5);
614
791
  const __m256i zero = _mm256_setzero_si256();
615
791
  __m256i row = _mm256_shuffle_epi8(sum, zero);
616
791
  row_store_64xh(&row, 32, dst, stride);
617
791
}
618
619
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
620
void aom_dc_left_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
621
                                      const uint8_t *above,
622
255
                                      const uint8_t *left) {
623
255
  __m128i sum = dc_sum_16_sse2(left);
624
255
  (void)above;
625
626
255
  const __m128i eight = _mm_set1_epi16(8);
627
255
  sum = _mm_add_epi16(sum, eight);
628
255
  sum = _mm_srai_epi16(sum, 4);
629
255
  const __m128i zero = _mm_setzero_si128();
630
255
  const __m128i r = _mm_shuffle_epi8(sum, zero);
631
255
  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);
632
255
  row_store_64xh(&row, 16, dst, stride);
633
255
}
634
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
635
636
void aom_dc_128_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
637
                                     const uint8_t *above,
638
4.08k
                                     const uint8_t *left) {
639
4.08k
  (void)above;
640
4.08k
  (void)left;
641
4.08k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
642
4.08k
  row_store_32xh(&row, 16, dst, stride);
643
4.08k
}
644
645
void aom_dc_128_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
646
                                     const uint8_t *above,
647
500
                                     const uint8_t *left) {
648
500
  (void)above;
649
500
  (void)left;
650
500
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
651
500
  row_store_32xh(&row, 64, dst, stride);
652
500
}
653
654
void aom_dc_128_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
655
                                     const uint8_t *above,
656
7.53k
                                     const uint8_t *left) {
657
7.53k
  (void)above;
658
7.53k
  (void)left;
659
7.53k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
660
7.53k
  row_store_64xh(&row, 64, dst, stride);
661
7.53k
}
662
663
void aom_dc_128_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
664
                                     const uint8_t *above,
665
1.28k
                                     const uint8_t *left) {
666
1.28k
  (void)above;
667
1.28k
  (void)left;
668
1.28k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
669
1.28k
  row_store_64xh(&row, 32, dst, stride);
670
1.28k
}
671
672
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
673
void aom_dc_128_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
674
                                     const uint8_t *above,
675
1.07k
                                     const uint8_t *left) {
676
1.07k
  (void)above;
677
1.07k
  (void)left;
678
1.07k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
679
1.07k
  row_store_64xh(&row, 16, dst, stride);
680
1.07k
}
681
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
682
683
void aom_v_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
684
8.09k
                                const uint8_t *above, const uint8_t *left) {
685
8.09k
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
686
8.09k
  (void)left;
687
8.09k
  row_store_32xh(&row, 16, dst, stride);
688
8.09k
}
689
690
void aom_v_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
691
461
                                const uint8_t *above, const uint8_t *left) {
692
461
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
693
461
  (void)left;
694
461
  row_store_32xh(&row, 64, dst, stride);
695
461
}
696
697
void aom_v_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
698
1.94k
                                const uint8_t *above, const uint8_t *left) {
699
1.94k
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
700
1.94k
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
701
1.94k
  (void)left;
702
1.94k
  row_store_32x2xh(&row0, &row1, 64, dst, stride);
703
1.94k
}
704
705
void aom_v_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
706
507
                                const uint8_t *above, const uint8_t *left) {
707
507
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
708
507
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
709
507
  (void)left;
710
507
  row_store_32x2xh(&row0, &row1, 32, dst, stride);
711
507
}
712
713
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
714
void aom_v_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
715
767
                                const uint8_t *above, const uint8_t *left) {
716
767
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
717
767
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
718
767
  (void)left;
719
767
  row_store_32x2xh(&row0, &row1, 16, dst, stride);
720
767
}
721
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
722
723
// -----------------------------------------------------------------------------
724
// PAETH_PRED
725
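// The Paeth predictor picks, per pixel, whichever of left, top and top-left
// is closest to base = left + top - topleft. paeth_pred() computes the three
// absolute differences and selects with compare/and/or masks, preferring
// left, then top, then top-left on ties.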
726
// Return 16 16-bit pixels in one row (__m256i)
727
static inline __m256i paeth_pred(const __m256i *left, const __m256i *top,
728
47.0M
                                 const __m256i *topleft) {
729
47.0M
  const __m256i base =
730
47.0M
      _mm256_sub_epi16(_mm256_add_epi16(*top, *left), *topleft);
731
732
47.0M
  __m256i pl = _mm256_abs_epi16(_mm256_sub_epi16(base, *left));
733
47.0M
  __m256i pt = _mm256_abs_epi16(_mm256_sub_epi16(base, *top));
734
47.0M
  __m256i ptl = _mm256_abs_epi16(_mm256_sub_epi16(base, *topleft));
735
736
47.0M
  __m256i mask1 = _mm256_cmpgt_epi16(pl, pt);
737
47.0M
  mask1 = _mm256_or_si256(mask1, _mm256_cmpgt_epi16(pl, ptl));
738
47.0M
  __m256i mask2 = _mm256_cmpgt_epi16(pt, ptl);
739
740
47.0M
  pl = _mm256_andnot_si256(mask1, *left);
741
742
47.0M
  ptl = _mm256_and_si256(mask2, *topleft);
743
47.0M
  pt = _mm256_andnot_si256(mask2, *top);
744
47.0M
  pt = _mm256_or_si256(pt, ptl);
745
47.0M
  pt = _mm256_and_si256(mask1, pt);
746
747
47.0M
  return _mm256_or_si256(pt, pl);
748
47.0M
}
749
750
// Return 16 8-bit pixels in one row (__m128i)
751
static inline __m128i paeth_16x1_pred(const __m256i *left, const __m256i *top,
752
46.4M
                                      const __m256i *topleft) {
753
46.4M
  const __m256i p0 = paeth_pred(left, top, topleft);
754
46.4M
  const __m256i p1 = _mm256_permute4x64_epi64(p0, 0xe);
755
46.4M
  const __m256i p = _mm256_packus_epi16(p0, p1);
756
46.4M
  return _mm256_castsi256_si128(p);
757
46.4M
}
758
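// get_top_vector() zero-extends 16 above pixels to 16 bits, with pixels 0-7
// in the low 128-bit lane and pixels 8-15 in the high lane.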
759
1.33M
static inline __m256i get_top_vector(const uint8_t *above) {
760
1.33M
  const __m128i x = _mm_load_si128((const __m128i *)above);
761
1.33M
  const __m128i zero = _mm_setzero_si128();
762
1.33M
  const __m128i t0 = _mm_unpacklo_epi8(x, zero);
763
1.33M
  const __m128i t1 = _mm_unpackhi_epi8(x, zero);
764
1.33M
  return _mm256_inserti128_si256(_mm256_castsi128_si256(t0), t1, 1);
765
1.33M
}
766
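// The 'rep' shuffle mask starts at 0x8000 in every 16-bit element: the high
// byte (0x80) zero-fills and the low byte selects left[0], so each shuffle
// broadcasts one left pixel as a 16-bit value across the register.
// Incrementing rep by one per row advances to the next left pixel.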
767
void aom_paeth_predictor_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
768
54.9k
                                   const uint8_t *above, const uint8_t *left) {
769
54.9k
  __m128i x = _mm_loadl_epi64((const __m128i *)left);
770
54.9k
  const __m256i l = _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
771
54.9k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
772
54.9k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
773
54.9k
  const __m256i one = _mm256_set1_epi16(1);
774
54.9k
  const __m256i top = get_top_vector(above);
775
776
54.9k
  int i;
777
494k
  for (i = 0; i < 8; ++i) {
778
439k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
779
439k
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
780
781
439k
    _mm_store_si128((__m128i *)dst, row);
782
439k
    dst += stride;
783
439k
    rep = _mm256_add_epi16(rep, one);
784
439k
  }
785
54.9k
}
786
787
2.34M
static inline __m256i get_left_vector(const uint8_t *left) {
788
2.34M
  const __m128i x = _mm_load_si128((const __m128i *)left);
789
2.34M
  return _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
790
2.34M
}
791
792
void aom_paeth_predictor_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
793
74.3k
                                    const uint8_t *above, const uint8_t *left) {
794
74.3k
  const __m256i l = get_left_vector(left);
795
74.3k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
796
74.3k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
797
74.3k
  const __m256i one = _mm256_set1_epi16(1);
798
74.3k
  const __m256i top = get_top_vector(above);
799
800
74.3k
  int i;
801
1.26M
  for (i = 0; i < 16; ++i) {
802
1.18M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
803
1.18M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
804
805
1.18M
    _mm_store_si128((__m128i *)dst, row);
806
1.18M
    dst += stride;
807
1.18M
    rep = _mm256_add_epi16(rep, one);
808
1.18M
  }
809
74.3k
}
810
811
void aom_paeth_predictor_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
812
630k
                                    const uint8_t *above, const uint8_t *left) {
813
630k
  __m256i l = get_left_vector(left);
814
630k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
815
630k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
816
630k
  const __m256i one = _mm256_set1_epi16(1);
817
630k
  const __m256i top = get_top_vector(above);
818
819
630k
  int i;
820
10.7M
  for (i = 0; i < 16; ++i) {
821
10.0M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
822
10.0M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
823
824
10.0M
    _mm_store_si128((__m128i *)dst, row);
825
10.0M
    dst += stride;
826
10.0M
    rep = _mm256_add_epi16(rep, one);
827
10.0M
  }
828
829
630k
  l = get_left_vector(left + 16);
830
630k
  rep = _mm256_set1_epi16((short)0x8000);
831
10.7M
  for (i = 0; i < 16; ++i) {
832
10.0M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
833
10.0M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
834
835
10.0M
    _mm_store_si128((__m128i *)dst, row);
836
10.0M
    dst += stride;
837
10.0M
    rep = _mm256_add_epi16(rep, one);
838
10.0M
  }
839
630k
}
840
841
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
842
void aom_paeth_predictor_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
843
156k
                                    const uint8_t *above, const uint8_t *left) {
844
156k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
845
156k
  const __m256i one = _mm256_set1_epi16(1);
846
156k
  const __m256i top = get_top_vector(above);
847
848
782k
  for (int j = 0; j < 4; ++j) {
849
625k
    const __m256i l = get_left_vector(left + j * 16);
850
625k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
851
10.6M
    for (int i = 0; i < 16; ++i) {
852
10.0M
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
853
10.0M
      const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
854
855
10.0M
      _mm_store_si128((__m128i *)dst, row);
856
10.0M
      dst += stride;
857
10.0M
      rep = _mm256_add_epi16(rep, one);
858
10.0M
    }
859
625k
  }
860
156k
}
861
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
862
863
// Return 32 8-bit pixels in one row (__m256i)
864
static inline __m256i paeth_32x1_pred(const __m256i *left, const __m256i *top0,
865
                                      const __m256i *top1,
866
268k
                                      const __m256i *topleft) {
867
268k
  __m256i p0 = paeth_pred(left, top0, topleft);
868
268k
  __m256i p1 = _mm256_permute4x64_epi64(p0, 0xe);
869
268k
  const __m256i x0 = _mm256_packus_epi16(p0, p1);
870
871
268k
  p0 = paeth_pred(left, top1, topleft);
872
268k
  p1 = _mm256_permute4x64_epi64(p0, 0xe);
873
268k
  const __m256i x1 = _mm256_packus_epi16(p0, p1);
874
875
268k
  return _mm256_permute2x128_si256(x0, x1, 0x20);
876
268k
}
877
878
void aom_paeth_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
879
16.7k
                                    const uint8_t *above, const uint8_t *left) {
880
16.7k
  const __m256i l = get_left_vector(left);
881
16.7k
  const __m256i t0 = get_top_vector(above);
882
16.7k
  const __m256i t1 = get_top_vector(above + 16);
883
16.7k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
884
16.7k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
885
16.7k
  const __m256i one = _mm256_set1_epi16(1);
886
887
16.7k
  int i;
888
285k
  for (i = 0; i < 16; ++i) {
889
268k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
890
891
268k
    const __m256i r = paeth_32x1_pred(&l16, &t0, &t1, &tl);
892
893
268k
    _mm256_storeu_si256((__m256i *)dst, r);
894
895
268k
    dst += stride;
896
268k
    rep = _mm256_add_epi16(rep, one);
897
268k
  }
898
16.7k
}
899
900
void aom_paeth_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
901
127k
                                    const uint8_t *above, const uint8_t *left) {
902
127k
  __m256i l = get_left_vector(left);
903
127k
  const __m256i t0 = get_top_vector(above);
904
127k
  const __m256i t1 = get_top_vector(above + 16);
905
127k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
906
127k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
907
127k
  const __m256i one = _mm256_set1_epi16(1);
908
909
127k
  int i;
910
2.17M
  for (i = 0; i < 16; ++i) {
911
2.04M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
912
913
2.04M
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
914
2.04M
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
915
916
2.04M
    _mm_store_si128((__m128i *)dst, r0);
917
2.04M
    _mm_store_si128((__m128i *)(dst + 16), r1);
918
919
2.04M
    dst += stride;
920
2.04M
    rep = _mm256_add_epi16(rep, one);
921
2.04M
  }
922
923
127k
  l = get_left_vector(left + 16);
924
127k
  rep = _mm256_set1_epi16((short)0x8000);
925
2.17M
  for (i = 0; i < 16; ++i) {
926
2.04M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
927
928
2.04M
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
929
2.04M
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
930
931
2.04M
    _mm_store_si128((__m128i *)dst, r0);
932
2.04M
    _mm_store_si128((__m128i *)(dst + 16), r1);
933
934
2.04M
    dst += stride;
935
2.04M
    rep = _mm256_add_epi16(rep, one);
936
2.04M
  }
937
127k
}
938
939
void aom_paeth_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
940
3.57k
                                    const uint8_t *above, const uint8_t *left) {
941
3.57k
  const __m256i t0 = get_top_vector(above);
942
3.57k
  const __m256i t1 = get_top_vector(above + 16);
943
3.57k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
944
3.57k
  const __m256i one = _mm256_set1_epi16(1);
945
946
3.57k
  int i, j;
947
17.8k
  for (j = 0; j < 4; ++j) {
948
14.3k
    const __m256i l = get_left_vector(left + j * 16);
949
14.3k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
950
243k
    for (i = 0; i < 16; ++i) {
951
228k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
952
953
228k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
954
228k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
955
956
228k
      _mm_store_si128((__m128i *)dst, r0);
957
228k
      _mm_store_si128((__m128i *)(dst + 16), r1);
958
959
228k
      dst += stride;
960
228k
      rep = _mm256_add_epi16(rep, one);
961
228k
    }
962
14.3k
  }
963
3.57k
}
964
965
void aom_paeth_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
966
3.29k
                                    const uint8_t *above, const uint8_t *left) {
967
3.29k
  const __m256i t0 = get_top_vector(above);
968
3.29k
  const __m256i t1 = get_top_vector(above + 16);
969
3.29k
  const __m256i t2 = get_top_vector(above + 32);
970
3.29k
  const __m256i t3 = get_top_vector(above + 48);
971
3.29k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
972
3.29k
  const __m256i one = _mm256_set1_epi16(1);
973
974
3.29k
  int i, j;
975
9.87k
  for (j = 0; j < 2; ++j) {
976
6.58k
    const __m256i l = get_left_vector(left + j * 16);
977
6.58k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
978
111k
    for (i = 0; i < 16; ++i) {
979
105k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
980
981
105k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
982
105k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
983
105k
      const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
984
105k
      const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
985
986
105k
      _mm_store_si128((__m128i *)dst, r0);
987
105k
      _mm_store_si128((__m128i *)(dst + 16), r1);
988
105k
      _mm_store_si128((__m128i *)(dst + 32), r2);
989
105k
      _mm_store_si128((__m128i *)(dst + 48), r3);
990
991
105k
      dst += stride;
992
105k
      rep = _mm256_add_epi16(rep, one);
993
105k
    }
994
6.58k
  }
995
3.29k
}
996
997
void aom_paeth_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
998
20.2k
                                    const uint8_t *above, const uint8_t *left) {
999
20.2k
  const __m256i t0 = get_top_vector(above);
1000
20.2k
  const __m256i t1 = get_top_vector(above + 16);
1001
20.2k
  const __m256i t2 = get_top_vector(above + 32);
1002
20.2k
  const __m256i t3 = get_top_vector(above + 48);
1003
20.2k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
1004
20.2k
  const __m256i one = _mm256_set1_epi16(1);
1005
1006
20.2k
  int i, j;
1007
101k
  for (j = 0; j < 4; ++j) {
1008
80.8k
    const __m256i l = get_left_vector(left + j * 16);
1009
80.8k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
1010
1.37M
    for (i = 0; i < 16; ++i) {
1011
1.29M
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
1012
1013
1.29M
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
1014
1.29M
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
1015
1.29M
      const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
1016
1.29M
      const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
1017
1018
1.29M
      _mm_store_si128((__m128i *)dst, r0);
1019
1.29M
      _mm_store_si128((__m128i *)(dst + 16), r1);
1020
1.29M
      _mm_store_si128((__m128i *)(dst + 32), r2);
1021
1.29M
      _mm_store_si128((__m128i *)(dst + 48), r3);
1022
1023
1.29M
      dst += stride;
1024
1.29M
      rep = _mm256_add_epi16(rep, one);
1025
1.29M
    }
1026
80.8k
  }
1027
20.2k
}
1028
1029
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1030
void aom_paeth_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
1031
6.45k
                                    const uint8_t *above, const uint8_t *left) {
1032
6.45k
  const __m256i t0 = get_top_vector(above);
1033
6.45k
  const __m256i t1 = get_top_vector(above + 16);
1034
6.45k
  const __m256i t2 = get_top_vector(above + 32);
1035
6.45k
  const __m256i t3 = get_top_vector(above + 48);
1036
6.45k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
1037
6.45k
  const __m256i one = _mm256_set1_epi16(1);
1038
1039
6.45k
  int i;
1040
6.45k
  const __m256i l = get_left_vector(left);
1041
6.45k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
1042
109k
  for (i = 0; i < 16; ++i) {
1043
103k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
1044
1045
103k
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
1046
103k
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
1047
103k
    const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
1048
103k
    const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
1049
1050
103k
    _mm_store_si128((__m128i *)dst, r0);
1051
103k
    _mm_store_si128((__m128i *)(dst + 16), r1);
1052
103k
    _mm_store_si128((__m128i *)(dst + 32), r2);
1053
103k
    _mm_store_si128((__m128i *)(dst + 48), r3);
1054
1055
103k
    dst += stride;
1056
103k
    rep = _mm256_add_epi16(rep, one);
1057
103k
  }
1058
6.45k
}
1059
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1060
1061
#if CONFIG_AV1_HIGHBITDEPTH
1062
1063
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_4xN_internal_avx2(
1064
281k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1065
281k
  const int frac_bits = 6 - upsample_above;
1066
281k
  const int max_base_x = ((N + 4) - 1) << upsample_above;
1067
1068
281k
  assert(dx > 0);
1069
  // pre-filter above pixels
1070
  // store in temp buffers:
1071
  //   above[x] * 32 + 16
1072
  //   above[x+1] - above[x]
1073
  // final pixels will be calculated as:
1074
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1075
281k
  __m256i a0, a1, a32, a16;
1076
281k
  __m256i diff, c3f;
1077
281k
  __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
1078
281k
  __m128i a0_128, a1_128;
1079
281k
  a16 = _mm256_set1_epi16(16);
1080
281k
  a_mbase_x = _mm_set1_epi16(above[max_base_x]);
1081
281k
  max_base_x128 = _mm_set1_epi16(max_base_x);
1082
281k
  c3f = _mm256_set1_epi16(0x3f);
1083
1084
281k
  int x = dx;
1085
2.36M
  for (int r = 0; r < N; r++) {
1086
2.09M
    __m256i b, res, shift;
1087
2.09M
    __m128i res1;
1088
1089
2.09M
    int base = x >> frac_bits;
1090
2.09M
    if (base >= max_base_x) {
1091
9.12k
      for (int i = r; i < N; ++i) {
1092
5.41k
        dst[i] = a_mbase_x;  // save 4 values
1093
5.41k
      }
1094
3.70k
      return;
1095
3.70k
    }
1096
1097
2.08M
    a0_128 = _mm_loadu_si128((__m128i *)(above + base));
1098
2.08M
    a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
1099
1100
2.08M
    if (upsample_above) {
1101
730k
      a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)HighbdEvenOddMaskx4[0]);
1102
730k
      a1_128 = _mm_srli_si128(a0_128, 8);
1103
1104
730k
      base_inc128 = _mm_setr_epi16(base, base + 2, base + 4, base + 6, base + 8,
1105
730k
                                   base + 10, base + 12, base + 14);
1106
730k
      shift = _mm256_srli_epi16(
1107
730k
          _mm256_and_si256(
1108
730k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above),
1109
730k
              _mm256_set1_epi16(0x3f)),
1110
730k
          1);
1111
1.35M
    } else {
1112
1.35M
      base_inc128 = _mm_setr_epi16(base, base + 1, base + 2, base + 3, base + 4,
1113
1.35M
                                   base + 5, base + 6, base + 7);
1114
1.35M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1115
1.35M
    }
1116
2.08M
    a0 = _mm256_castsi128_si256(a0_128);
1117
2.08M
    a1 = _mm256_castsi128_si256(a1_128);
1118
2.08M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1119
2.08M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1120
2.08M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1121
1122
2.08M
    b = _mm256_mullo_epi16(diff, shift);
1123
2.08M
    res = _mm256_add_epi16(a32, b);
1124
2.08M
    res = _mm256_srli_epi16(res, 5);
1125
2.08M
    res1 = _mm256_castsi256_si128(res);
1126
1127
2.08M
    mask128 = _mm_cmpgt_epi16(max_base_x128, base_inc128);
1128
2.08M
    dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
1129
2.08M
    x += dx;
1130
2.08M
  }
1131
281k
}
1132
1133
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_4xN_internal_avx2(
1134
88.1k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1135
88.1k
  const int frac_bits = 6 - upsample_above;
1136
88.1k
  const int max_base_x = ((N + 4) - 1) << upsample_above;
1137
1138
88.1k
  assert(dx > 0);
1139
  // pre-filter above pixels
1140
  // store in temp buffers:
1141
  //   above[x] * 32 + 16
1142
  //   above[x+1] - above[x]
1143
  // final pixels will be calculated as:
1144
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1145
88.1k
  __m256i a0, a1, a32, a16;
1146
88.1k
  __m256i diff;
1147
88.1k
  __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
1148
1149
88.1k
  a16 = _mm256_set1_epi32(16);
1150
88.1k
  a_mbase_x = _mm_set1_epi16(above[max_base_x]);
1151
88.1k
  max_base_x128 = _mm_set1_epi32(max_base_x);
1152
1153
88.1k
  int x = dx;
1154
742k
  for (int r = 0; r < N; r++) {
1155
655k
    __m256i b, res, shift;
1156
655k
    __m128i res1;
1157
1158
655k
    int base = x >> frac_bits;
1159
655k
    if (base >= max_base_x) {
1160
3.33k
      for (int i = r; i < N; ++i) {
1161
2.33k
        dst[i] = a_mbase_x;  // save 4 values
1162
2.33k
      }
1163
998
      return;
1164
998
    }
1165
1166
654k
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1167
654k
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1168
1169
654k
    if (upsample_above) {
1170
135k
      a0 = _mm256_permutevar8x32_epi32(
1171
135k
          a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1172
135k
      a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
1173
135k
      base_inc128 = _mm_setr_epi32(base, base + 2, base + 4, base + 6);
1174
135k
      shift = _mm256_srli_epi32(
1175
135k
          _mm256_and_si256(
1176
135k
              _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
1177
135k
              _mm256_set1_epi32(0x3f)),
1178
135k
          1);
1179
518k
    } else {
1180
518k
      base_inc128 = _mm_setr_epi32(base, base + 1, base + 2, base + 3);
1181
518k
      shift = _mm256_srli_epi32(
1182
518k
          _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1183
518k
    }
1184
1185
654k
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1186
654k
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1187
654k
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1188
1189
654k
    b = _mm256_mullo_epi32(diff, shift);
1190
654k
    res = _mm256_add_epi32(a32, b);
1191
654k
    res = _mm256_srli_epi32(res, 5);
1192
1193
654k
    res1 = _mm256_castsi256_si128(res);
1194
654k
    res1 = _mm_packus_epi32(res1, res1);
1195
1196
654k
    mask128 = _mm_cmpgt_epi32(max_base_x128, base_inc128);
1197
654k
    mask128 = _mm_packs_epi32(mask128, mask128);  // goto 16 bit
1198
654k
    dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
1199
654k
    x += dx;
1200
654k
  }
1201
88.1k
}
1202
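// Dispatch: for bit depths below 12 the interpolation (value * 32 + 16) fits
// in 16-bit lanes, so the faster 16-bit path is used; 12-bit input can
// overflow 16 bits, so the 32-bit variant handles it.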
1203
static void highbd_dr_prediction_z1_4xN_avx2(int N, uint16_t *dst,
1204
                                             ptrdiff_t stride,
1205
                                             const uint16_t *above,
1206
                                             int upsample_above, int dx,
1207
123k
                                             int bd) {
1208
123k
  __m128i dstvec[16];
1209
123k
  if (bd < 12) {
1210
76.7k
    highbd_dr_prediction_z1_4xN_internal_avx2(N, dstvec, above, upsample_above,
1211
76.7k
                                              dx);
1212
76.7k
  } else {
1213
46.9k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(N, dstvec, above,
1214
46.9k
                                                    upsample_above, dx);
1215
46.9k
  }
1216
1.00M
  for (int i = 0; i < N; i++) {
1217
880k
    _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
1218
880k
  }
1219
123k
}
1220
1221
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_8xN_internal_avx2(
1222
106k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1223
106k
  const int frac_bits = 6 - upsample_above;
1224
106k
  const int max_base_x = ((8 + N) - 1) << upsample_above;
1225
1226
106k
  assert(dx > 0);
1227
  // pre-filter above pixels
1228
  // store in temp buffers:
1229
  //   above[x] * 32 + 16
1230
  //   above[x+1] - above[x]
1231
  // final pixels will be calculated as:
1232
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1233
106k
  __m256i a0, a1, a0_1, a1_1, a32, a16;
1234
106k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1235
1236
106k
  a16 = _mm256_set1_epi32(16);
1237
106k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1238
106k
  max_base_x256 = _mm256_set1_epi32(max_base_x);
1239
1240
106k
  int x = dx;
1241
1.27M
  for (int r = 0; r < N; r++) {
1242
1.17M
    __m256i b, res, res1, shift;
1243
1244
1.17M
    int base = x >> frac_bits;
1245
1.17M
    if (base >= max_base_x) {
1246
3.16k
      for (int i = r; i < N; ++i) {
1247
2.38k
        dst[i] = _mm256_castsi256_si128(a_mbase_x);  // save 8 values
1248
2.38k
      }
1249
785
      return;
1250
785
    }
1251
1252
1.17M
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1253
1.17M
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1254
1255
1.17M
    if (upsample_above) {
1256
208k
      a0 = _mm256_permutevar8x32_epi32(
1257
208k
          a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1258
208k
      a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
1259
1260
208k
      a0_1 =
1261
208k
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
1262
208k
      a0_1 = _mm256_permutevar8x32_epi32(
1263
208k
          a0_1, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1264
208k
      a1_1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0_1, 1));
1265
1266
208k
      a0 = _mm256_inserti128_si256(a0, _mm256_castsi256_si128(a0_1), 1);
1267
208k
      a1 = _mm256_inserti128_si256(a1, _mm256_castsi256_si128(a1_1), 1);
1268
208k
      base_inc256 =
1269
208k
          _mm256_setr_epi32(base, base + 2, base + 4, base + 6, base + 8,
1270
208k
                            base + 10, base + 12, base + 14);
1271
208k
      shift = _mm256_srli_epi32(
1272
208k
          _mm256_and_si256(
1273
208k
              _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
1274
208k
              _mm256_set1_epi32(0x3f)),
1275
208k
          1);
1276
962k
    } else {
1277
962k
      base_inc256 = _mm256_setr_epi32(base, base + 1, base + 2, base + 3,
1278
962k
                                      base + 4, base + 5, base + 6, base + 7);
1279
962k
      shift = _mm256_srli_epi32(
1280
962k
          _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1281
962k
    }
1282
1283
1.17M
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1284
1.17M
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1285
1.17M
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1286
1287
1.17M
    b = _mm256_mullo_epi32(diff, shift);
1288
1.17M
    res = _mm256_add_epi32(a32, b);
1289
1.17M
    res = _mm256_srli_epi32(res, 5);
1290
1291
1.17M
    res1 = _mm256_packus_epi32(
1292
1.17M
        res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
1293
1294
1.17M
    mask256 = _mm256_cmpgt_epi32(max_base_x256, base_inc256);
1295
1.17M
    mask256 = _mm256_packs_epi32(
1296
1.17M
        mask256, _mm256_castsi128_si256(
1297
1.17M
                     _mm256_extracti128_si256(mask256, 1)));  // narrow to 16 bit
1298
1.17M
    res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1299
1.17M
    dst[r] = _mm256_castsi256_si128(res1);
1300
1.17M
    x += dx;
1301
1.17M
  }
1302
106k
}
1303
1304
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_8xN_internal_avx2(
1305
342k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1306
342k
  const int frac_bits = 6 - upsample_above;
1307
342k
  const int max_base_x = ((8 + N) - 1) << upsample_above;
1308
1309
342k
  assert(dx > 0);
1310
  // pre-filter above pixels
1311
  // store in temp buffers:
1312
  //   above[x] * 32 + 16
1313
  //   above[x+1] - above[x]
1314
  // final pixels will be calculated as:
1315
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1316
342k
  __m256i a0, a1, a32, a16, c3f;
1317
342k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1318
342k
  __m128i a0_x128, a1_x128;
1319
1320
342k
  a16 = _mm256_set1_epi16(16);
1321
342k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1322
342k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1323
342k
  c3f = _mm256_set1_epi16(0x3f);
1324
1325
342k
  int x = dx;
1326
4.83M
  for (int r = 0; r < N; r++) {
1327
4.49M
    __m256i b, res, res1, shift;
1328
1329
4.49M
    int base = x >> frac_bits;
1330
4.49M
    if (base >= max_base_x) {
1331
7.19k
      for (int i = r; i < N; ++i) {
1332
5.26k
        dst[i] = _mm256_castsi256_si128(a_mbase_x);  // save 8 values
1333
5.26k
      }
1334
1.93k
      return;
1335
1.93k
    }
1336
1337
4.49M
    a0_x128 = _mm_loadu_si128((__m128i *)(above + base));
1338
4.49M
    if (upsample_above) {
1339
908k
      __m128i mask, atmp0, atmp1, atmp2, atmp3;
1340
908k
      a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 8));
1341
908k
      atmp0 = _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
1342
908k
      atmp1 = _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
1343
908k
      atmp2 =
1344
908k
          _mm_shuffle_epi8(a0_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
1345
908k
      atmp3 =
1346
908k
          _mm_shuffle_epi8(a1_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
1347
908k
      mask =
1348
908k
          _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[0], _mm_set1_epi8(15));
1349
908k
      a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
1350
908k
      mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[0] + 16),
1351
908k
                            _mm_set1_epi8(15));
1352
908k
      a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
1353
1354
908k
      base_inc256 = _mm256_setr_epi16(base, base + 2, base + 4, base + 6,
1355
908k
                                      base + 8, base + 10, base + 12, base + 14,
1356
908k
                                      0, 0, 0, 0, 0, 0, 0, 0);
1357
908k
      shift = _mm256_srli_epi16(
1358
908k
          _mm256_and_si256(
1359
908k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
1360
908k
          1);
1361
3.58M
    } else {
1362
3.58M
      a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 1));
1363
3.58M
      base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1364
3.58M
                                      base + 4, base + 5, base + 6, base + 7, 0,
1365
3.58M
                                      0, 0, 0, 0, 0, 0, 0);
1366
3.58M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1367
3.58M
    }
1368
4.49M
    a0 = _mm256_castsi128_si256(a0_x128);
1369
4.49M
    a1 = _mm256_castsi128_si256(a1_x128);
1370
1371
4.49M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1372
4.49M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1373
4.49M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1374
1375
4.49M
    b = _mm256_mullo_epi16(diff, shift);
1376
4.49M
    res = _mm256_add_epi16(a32, b);
1377
4.49M
    res = _mm256_srli_epi16(res, 5);
1378
1379
4.49M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1380
4.49M
    res1 = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1381
4.49M
    dst[r] = _mm256_castsi256_si128(res1);
1382
4.49M
    x += dx;
1383
4.49M
  }
1384
342k
}
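In the upsample_above branch of the kernel above, the intra edge has been upsampled to double resolution, so consecutive output columns consume every other entry of above[]. The HighbdEvenOddMaskx shuffles and blends assemble the two interpolation operands in one pass; what they gather is, in scalar terms (illustrative sketch, the helper name is not libaom's):

#include <stdint.h>

/* upsample_above == 1: a0 receives the samples at the eight prediction
 * positions, a1 their immediate right-hand neighbours. */
static void gather_upsampled_pair(const uint16_t *above, int base,
                                  uint16_t a0[8], uint16_t a1[8]) {
  for (int c = 0; c < 8; ++c) {
    a0[c] = above[base + 2 * c];
    a1[c] = above[base + 2 * c + 1];
  }
}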
1385
1386
static void highbd_dr_prediction_z1_8xN_avx2(int N, uint16_t *dst,
1387
                                             ptrdiff_t stride,
1388
                                             const uint16_t *above,
1389
                                             int upsample_above, int dx,
1390
183k
                                             int bd) {
1391
183k
  __m128i dstvec[32];
1392
183k
  if (bd < 12) {
1393
136k
    highbd_dr_prediction_z1_8xN_internal_avx2(N, dstvec, above, upsample_above,
1394
136k
                                              dx);
1395
136k
  } else {
1396
47.5k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(N, dstvec, above,
1397
47.5k
                                                    upsample_above, dx);
1398
47.5k
  }
1399
2.00M
  for (int i = 0; i < N; i++) {
1400
1.82M
    _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
1401
1.82M
  }
1402
183k
}
1403
1404
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_16xN_internal_avx2(
1405
77.4k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1406
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1407
77.4k
  (void)upsample_above;
1408
77.4k
  const int frac_bits = 6;
1409
77.4k
  const int max_base_x = ((16 + N) - 1);
1410
1411
  // pre-filter above pixels
1412
  // store in temp buffers:
1413
  //   above[x] * 32 + 16
1414
  //   above[x+1] - above[x]
1415
  // final pixels will be calculated as:
1416
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1417
77.4k
  __m256i a0, a0_1, a1, a1_1, a32, a16;
1418
77.4k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1419
1420
77.4k
  a16 = _mm256_set1_epi32(16);
1421
77.4k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1422
77.4k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1423
1424
77.4k
  int x = dx;
1425
959k
  for (int r = 0; r < N; r++) {
1426
882k
    __m256i b, res[2], res1;
1427
1428
882k
    int base = x >> frac_bits;
1429
882k
    if (base >= max_base_x) {
1430
1.01k
      for (int i = r; i < N; ++i) {
1431
796
        dstvec[i] = a_mbase_x;  // save 16 values
1432
796
      }
1433
215
      return;
1434
215
    }
1435
881k
    __m256i shift = _mm256_srli_epi32(
1436
881k
        _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1437
1438
881k
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1439
881k
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1440
1441
881k
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1442
881k
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1443
881k
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1444
881k
    b = _mm256_mullo_epi32(diff, shift);
1445
1446
881k
    res[0] = _mm256_add_epi32(a32, b);
1447
881k
    res[0] = _mm256_srli_epi32(res[0], 5);
1448
881k
    res[0] = _mm256_packus_epi32(
1449
881k
        res[0], _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1450
1451
881k
    int mdif = max_base_x - base;
1452
881k
    if (mdif > 8) {
1453
879k
      a0_1 =
1454
879k
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
1455
879k
      a1_1 =
1456
879k
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 9)));
1457
1458
879k
      diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1459
879k
      a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1460
879k
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1461
879k
      b = _mm256_mullo_epi32(diff, shift);
1462
1463
879k
      res[1] = _mm256_add_epi32(a32, b);
1464
879k
      res[1] = _mm256_srli_epi32(res[1], 5);
1465
879k
      res[1] = _mm256_packus_epi32(
1466
879k
          res[1], _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1467
879k
    } else {
1468
2.35k
      res[1] = a_mbase_x;
1469
2.35k
    }
1470
881k
    res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1471
881k
                                   1);  // 16 16bit values
1472
1473
881k
    base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1474
881k
                                    base + 4, base + 5, base + 6, base + 7,
1475
881k
                                    base + 8, base + 9, base + 10, base + 11,
1476
881k
                                    base + 12, base + 13, base + 14, base + 15);
1477
881k
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1478
881k
    dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1479
881k
    x += dx;
1480
881k
  }
1481
77.4k
}
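The res[0]/res[1] packing above works around _mm256_packus_epi32 operating within 128-bit lanes: packus(A, B) produces [pack(A0..A3), pack(B0..B3) | pack(A4..A7), pack(B4..B7)]. Packing res with its own extracted upper half therefore leaves the eight saturated 16-bit results contiguous in the low 128 bits, and _mm256_inserti128_si256 then joins two such halves into one 16-value row.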
1482
1483
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_16xN_internal_avx2(
1484
295k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1485
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1486
295k
  (void)upsample_above;
1487
295k
  const int frac_bits = 6;
1488
295k
  const int max_base_x = ((16 + N) - 1);
1489
1490
  // pre-filter above pixels
1491
  // store in temp buffers:
1492
  //   above[x] * 32 + 16
1493
  //   above[x+1] - above[x]
1494
  // final pixels will be calculated as:
1495
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1496
295k
  __m256i a0, a1, a32, a16, c3f;
1497
295k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1498
1499
295k
  a16 = _mm256_set1_epi16(16);
1500
295k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1501
295k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1502
295k
  c3f = _mm256_set1_epi16(0x3f);
1503
1504
295k
  int x = dx;
1505
5.78M
  for (int r = 0; r < N; r++) {
1506
5.49M
    __m256i b, res;
1507
1508
5.49M
    int base = x >> frac_bits;
1509
5.49M
    if (base >= max_base_x) {
1510
2.06k
      for (int i = r; i < N; ++i) {
1511
1.60k
        dstvec[i] = a_mbase_x;  // save 16 values
1512
1.60k
      }
1513
458
      return;
1514
458
    }
1515
5.48M
    __m256i shift =
1516
5.48M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1517
1518
5.48M
    a0 = _mm256_loadu_si256((__m256i *)(above + base));
1519
5.48M
    a1 = _mm256_loadu_si256((__m256i *)(above + base + 1));
1520
1521
5.48M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1522
5.48M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1523
5.48M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1524
5.48M
    b = _mm256_mullo_epi16(diff, shift);
1525
1526
5.48M
    res = _mm256_add_epi16(a32, b);
1527
5.48M
    res = _mm256_srli_epi16(res, 5);  // 16 16bit values
1528
1529
5.48M
    base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1530
5.48M
                                    base + 4, base + 5, base + 6, base + 7,
1531
5.48M
                                    base + 8, base + 9, base + 10, base + 11,
1532
5.48M
                                    base + 12, base + 13, base + 14, base + 15);
1533
5.48M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1534
5.48M
    dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1535
5.48M
    x += dx;
1536
5.48M
  }
1537
295k
}
1538
1539
static void highbd_dr_prediction_z1_16xN_avx2(int N, uint16_t *dst,
1540
                                              ptrdiff_t stride,
1541
                                              const uint16_t *above,
1542
                                              int upsample_above, int dx,
1543
163k
                                              int bd) {
1544
163k
  __m256i dstvec[64];
1545
163k
  if (bd < 12) {
1546
123k
    highbd_dr_prediction_z1_16xN_internal_avx2(N, dstvec, above, upsample_above,
1547
123k
                                               dx);
1548
123k
  } else {
1549
40.2k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(N, dstvec, above,
1550
40.2k
                                                     upsample_above, dx);
1551
40.2k
  }
1552
2.39M
  for (int i = 0; i < N; i++) {
1553
2.23M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
1554
2.23M
  }
1555
163k
}
1556
1557
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_32xN_internal_avx2(
1558
19.5k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1559
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1560
19.5k
  (void)upsample_above;
1561
19.5k
  const int frac_bits = 6;
1562
19.5k
  const int max_base_x = ((32 + N) - 1);
1563
1564
  // pre-filter above pixels
1565
  // store in temp buffers:
1566
  //   above[x] * 32 + 16
1567
  //   above[x+1] - above[x]
1568
  // final pixels will be calculated as:
1569
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1570
19.5k
  __m256i a0, a0_1, a1, a1_1, a32, a16, c3f;
1571
19.5k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1572
1573
19.5k
  a16 = _mm256_set1_epi32(16);
1574
19.5k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1575
19.5k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1576
19.5k
  c3f = _mm256_set1_epi16(0x3f);
1577
1578
19.5k
  int x = dx;
1579
438k
  for (int r = 0; r < N; r++) {
1580
418k
    __m256i b, res[2], res1;
1581
1582
418k
    int base = x >> frac_bits;
1583
418k
    if (base >= max_base_x) {
1584
0
      for (int i = r; i < N; ++i) {
1585
0
        dstvec[i] = a_mbase_x;  // save 32 values
1586
0
        dstvec[i + N] = a_mbase_x;
1587
0
      }
1588
0
      return;
1589
0
    }
1590
1591
418k
    __m256i shift =
1592
418k
        _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
1593
1594
1.25M
    for (int j = 0; j < 32; j += 16) {
1595
837k
      int mdif = max_base_x - (base + j);
1596
837k
      if (mdif <= 0) {
1597
316
        res1 = a_mbase_x;
1598
837k
      } else {
1599
837k
        a0 = _mm256_cvtepu16_epi32(
1600
837k
            _mm_loadu_si128((__m128i *)(above + base + j)));
1601
837k
        a1 = _mm256_cvtepu16_epi32(
1602
837k
            _mm_loadu_si128((__m128i *)(above + base + 1 + j)));
1603
1604
837k
        diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1605
837k
        a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1606
837k
        a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1607
837k
        b = _mm256_mullo_epi32(diff, shift);
1608
1609
837k
        res[0] = _mm256_add_epi32(a32, b);
1610
837k
        res[0] = _mm256_srli_epi32(res[0], 5);
1611
837k
        res[0] = _mm256_packus_epi32(
1612
837k
            res[0],
1613
837k
            _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1614
837k
        if (mdif > 8) {
1615
834k
          a0_1 = _mm256_cvtepu16_epi32(
1616
834k
              _mm_loadu_si128((__m128i *)(above + base + 8 + j)));
1617
834k
          a1_1 = _mm256_cvtepu16_epi32(
1618
834k
              _mm_loadu_si128((__m128i *)(above + base + 9 + j)));
1619
1620
834k
          diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1621
834k
          a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1622
834k
          a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1623
834k
          b = _mm256_mullo_epi32(diff, shift);
1624
1625
834k
          res[1] = _mm256_add_epi32(a32, b);
1626
834k
          res[1] = _mm256_srli_epi32(res[1], 5);
1627
834k
          res[1] = _mm256_packus_epi32(
1628
834k
              res[1],
1629
834k
              _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1630
834k
        } else {
1631
2.34k
          res[1] = a_mbase_x;
1632
2.34k
        }
1633
837k
        res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1634
837k
                                       1);  // 16 16bit values
1635
837k
        base_inc256 = _mm256_setr_epi16(
1636
837k
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1637
837k
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1638
837k
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1639
837k
            base + j + 13, base + j + 14, base + j + 15);
1640
1641
837k
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1642
837k
        res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1643
837k
      }
1644
837k
      if (!j) {
1645
418k
        dstvec[r] = res1;
1646
418k
      } else {
1647
418k
        dstvec[r + N] = res1;
1648
418k
      }
1649
837k
    }
1650
418k
    x += dx;
1651
418k
  }
1652
19.5k
}
1653
1654
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_32xN_internal_avx2(
1655
177k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1656
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1657
177k
  (void)upsample_above;
1658
177k
  const int frac_bits = 6;
1659
177k
  const int max_base_x = ((32 + N) - 1);
1660
1661
  // pre-filter above pixels
1662
  // store in temp buffers:
1663
  //   above[x] * 32 + 16
1664
  //   above[x+1] - above[x]
1665
  // final pixels will be calculated as:
1666
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1667
177k
  __m256i a0, a1, a32, a16, c3f;
1668
177k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1669
1670
177k
  a16 = _mm256_set1_epi16(16);
1671
177k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1672
177k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1673
177k
  c3f = _mm256_set1_epi16(0x3f);
1674
1675
177k
  int x = dx;
1676
4.88M
  for (int r = 0; r < N; r++) {
1677
4.70M
    __m256i b, res;
1678
1679
4.70M
    int base = x >> frac_bits;
1680
4.70M
    if (base >= max_base_x) {
1681
0
      for (int i = r; i < N; ++i) {
1682
0
        dstvec[i] = a_mbase_x;  // save 32 values
1683
0
        dstvec[i + N] = a_mbase_x;
1684
0
      }
1685
0
      return;
1686
0
    }
1687
1688
4.70M
    __m256i shift =
1689
4.70M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1690
1691
14.1M
    for (int j = 0; j < 32; j += 16) {
1692
9.40M
      int mdif = max_base_x - (base + j);
1693
9.40M
      if (mdif <= 0) {
1694
1.01k
        res = a_mbase_x;
1695
9.40M
      } else {
1696
9.40M
        a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
1697
9.40M
        a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
1698
1699
9.40M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1700
9.40M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1701
9.40M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1702
9.40M
        b = _mm256_mullo_epi16(diff, shift);
1703
1704
9.40M
        res = _mm256_add_epi16(a32, b);
1705
9.40M
        res = _mm256_srli_epi16(res, 5);
1706
1707
9.40M
        base_inc256 = _mm256_setr_epi16(
1708
9.40M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1709
9.40M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1710
9.40M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1711
9.40M
            base + j + 13, base + j + 14, base + j + 15);
1712
1713
9.40M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1714
9.40M
        res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1715
9.40M
      }
1716
9.40M
      if (!j) {
1717
4.70M
        dstvec[r] = res;
1718
4.70M
      } else {
1719
4.70M
        dstvec[r + N] = res;
1720
4.70M
      }
1721
9.40M
    }
1722
4.70M
    x += dx;
1723
4.70M
  }
1724
177k
}
1725
1726
static void highbd_dr_prediction_z1_32xN_avx2(int N, uint16_t *dst,
1727
                                              ptrdiff_t stride,
1728
                                              const uint16_t *above,
1729
                                              int upsample_above, int dx,
1730
79.1k
                                              int bd) {
1731
79.1k
  __m256i dstvec[128];
1732
79.1k
  if (bd < 12) {
1733
69.6k
    highbd_dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above,
1734
69.6k
                                               dx);
1735
69.6k
  } else {
1736
9.56k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(N, dstvec, above,
1737
9.56k
                                                     upsample_above, dx);
1738
9.56k
  }
1739
2.13M
  for (int i = 0; i < N; i++) {
1740
2.05M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
1741
2.05M
    _mm256_storeu_si256((__m256i *)(dst + stride * i + 16), dstvec[i + N]);
1742
2.05M
  }
1743
79.1k
}
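For the 32-wide sizes each predicted row spans two registers, so the internal kernels return the left 16 columns in dstvec[r] and the right 16 columns in dstvec[r + N], and the store loop above writes the pair side by side. Keeping the whole block in dstvec instead of storing it row by row also allows the zone-3 path to reuse these *_internal_* functions and transpose the result afterwards; that reuse is the likely reason for the internal/wrapper split, though this listing does not show the zone-3 callers.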
1744
1745
static void highbd_dr_prediction_32bit_z1_64xN_avx2(int N, uint16_t *dst,
1746
                                                    ptrdiff_t stride,
1747
                                                    const uint16_t *above,
1748
                                                    int upsample_above,
1749
16.5k
                                                    int dx) {
1750
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1751
16.5k
  (void)upsample_above;
1752
16.5k
  const int frac_bits = 6;
1753
16.5k
  const int max_base_x = ((64 + N) - 1);
1754
1755
  // pre-filter above pixels
1756
  // store in temp buffers:
1757
  //   above[x] * 32 + 16
1758
  //   above[x+1] - above[x]
1759
  // final pixels will be calculated as:
1760
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1761
16.5k
  __m256i a0, a0_1, a1, a1_1, a32, a16;
1762
16.5k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1763
1764
16.5k
  a16 = _mm256_set1_epi32(16);
1765
16.5k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1766
16.5k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1767
1768
16.5k
  int x = dx;
1769
995k
  for (int r = 0; r < N; r++, dst += stride) {
1770
978k
    __m256i b, res[2], res1;
1771
1772
978k
    int base = x >> frac_bits;
1773
978k
    if (base >= max_base_x) {
1774
0
      for (int i = r; i < N; ++i) {
1775
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 64 values
1776
0
        _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
1777
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
1778
0
        _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
1779
0
        dst += stride;
1780
0
      }
1781
0
      return;
1782
0
    }
1783
1784
978k
    __m256i shift = _mm256_srli_epi32(
1785
978k
        _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1786
1787
978k
    __m128i a0_128, a0_1_128, a1_128, a1_1_128;
1788
4.89M
    for (int j = 0; j < 64; j += 16) {
1789
3.91M
      int mdif = max_base_x - (base + j);
1790
3.91M
      if (mdif <= 0) {
1791
3.07k
        _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
1792
3.91M
      } else {
1793
3.91M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
1794
3.91M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
1795
3.91M
        a0 = _mm256_cvtepu16_epi32(a0_128);
1796
3.91M
        a1 = _mm256_cvtepu16_epi32(a1_128);
1797
1798
3.91M
        diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1799
3.91M
        a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1800
3.91M
        a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1801
3.91M
        b = _mm256_mullo_epi32(diff, shift);
1802
1803
3.91M
        res[0] = _mm256_add_epi32(a32, b);
1804
3.91M
        res[0] = _mm256_srli_epi32(res[0], 5);
1805
3.91M
        res[0] = _mm256_packus_epi32(
1806
3.91M
            res[0],
1807
3.91M
            _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1808
3.91M
        if (mdif > 8) {
1809
3.90M
          a0_1_128 = _mm_loadu_si128((__m128i *)(above + base + 8 + j));
1810
3.90M
          a1_1_128 = _mm_loadu_si128((__m128i *)(above + base + 9 + j));
1811
3.90M
          a0_1 = _mm256_cvtepu16_epi32(a0_1_128);
1812
3.90M
          a1_1 = _mm256_cvtepu16_epi32(a1_1_128);
1813
1814
3.90M
          diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1815
3.90M
          a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1816
3.90M
          a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1817
3.90M
          b = _mm256_mullo_epi32(diff, shift);
1818
1819
3.90M
          res[1] = _mm256_add_epi32(a32, b);
1820
3.90M
          res[1] = _mm256_srli_epi32(res[1], 5);
1821
3.90M
          res[1] = _mm256_packus_epi32(
1822
3.90M
              res[1],
1823
3.90M
              _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1824
3.90M
        } else {
1825
5.11k
          res[1] = a_mbase_x;
1826
5.11k
        }
1827
3.91M
        res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1828
3.91M
                                       1);  // 16 16bit values
1829
3.91M
        base_inc256 = _mm256_setr_epi16(
1830
3.91M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1831
3.91M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1832
3.91M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1833
3.91M
            base + j + 13, base + j + 14, base + j + 15);
1834
1835
3.91M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1836
3.91M
        res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1837
3.91M
        _mm256_storeu_si256((__m256i *)(dst + j), res1);
1838
3.91M
      }
1839
3.91M
    }
1840
978k
    x += dx;
1841
978k
  }
1842
16.5k
}
1843
1844
static void highbd_dr_prediction_z1_64xN_avx2(int N, uint16_t *dst,
1845
                                              ptrdiff_t stride,
1846
                                              const uint16_t *above,
1847
36.8k
                                              int upsample_above, int dx) {
1848
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1849
36.8k
  (void)upsample_above;
1850
36.8k
  const int frac_bits = 6;
1851
36.8k
  const int max_base_x = ((64 + N) - 1);
1852
1853
  // pre-filter above pixels
1854
  // store in temp buffers:
1855
  //   above[x] * 32 + 16
1856
  //   above[x+1] - above[x]
1857
  // final pixels will be calculated as:
1858
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1859
36.8k
  __m256i a0, a1, a32, a16, c3f;
1860
36.8k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1861
1862
36.8k
  a16 = _mm256_set1_epi16(16);
1863
36.8k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1864
36.8k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1865
36.8k
  c3f = _mm256_set1_epi16(0x3f);
1866
1867
36.8k
  int x = dx;
1868
1.86M
  for (int r = 0; r < N; r++, dst += stride) {
1869
1.82M
    __m256i b, res;
1870
1871
1.82M
    int base = x >> frac_bits;
1872
1.82M
    if (base >= max_base_x) {
1873
0
      for (int i = r; i < N; ++i) {
1874
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 64 values
1875
0
        _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
1876
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
1877
0
        _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
1878
0
        dst += stride;
1879
0
      }
1880
0
      return;
1881
0
    }
1882
1883
1.82M
    __m256i shift =
1884
1.82M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1885
1886
9.12M
    for (int j = 0; j < 64; j += 16) {
1887
7.30M
      int mdif = max_base_x - (base + j);
1888
7.30M
      if (mdif <= 0) {
1889
2.89k
        _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
1890
7.29M
      } else {
1891
7.29M
        a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
1892
7.29M
        a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
1893
1894
7.29M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1895
7.29M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1896
7.29M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1897
7.29M
        b = _mm256_mullo_epi16(diff, shift);
1898
1899
7.29M
        res = _mm256_add_epi16(a32, b);
1900
7.29M
        res = _mm256_srli_epi16(res, 5);
1901
1902
7.29M
        base_inc256 = _mm256_setr_epi16(
1903
7.29M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1904
7.29M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1905
7.29M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1906
7.29M
            base + j + 13, base + j + 14, base + j + 15);
1907
1908
7.29M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1909
7.29M
        res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1910
7.29M
        _mm256_storeu_si256((__m256i *)(dst + j), res);  // 16 16bit values
1911
7.29M
      }
1912
7.30M
    }
1913
1.82M
    x += dx;
1914
1.82M
  }
1915
36.8k
}
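Unlike the 4- to 32-wide paths, both 64-wide kernels write each finished row straight to dst inside the loop rather than collecting the block first: buffering a 64x64 block would take 256 __m256i (8 KB) of temporary storage, so streaming the rows out is presumably the cheaper choice.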
1916
1917
// Directional prediction, zone 1: 0 < angle < 90
1918
void av1_highbd_dr_prediction_z1_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
1919
                                      int bh, const uint16_t *above,
1920
                                      const uint16_t *left, int upsample_above,
1921
570k
                                      int dx, int dy, int bd) {
1922
570k
  (void)left;
1923
570k
  (void)dy;
1924
1925
570k
  switch (bw) {
1926
123k
    case 4:
1927
123k
      highbd_dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above,
1928
123k
                                       dx, bd);
1929
123k
      break;
1930
183k
    case 8:
1931
183k
      highbd_dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above,
1932
183k
                                       dx, bd);
1933
183k
      break;
1934
163k
    case 16:
1935
163k
      highbd_dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above,
1936
163k
                                        dx, bd);
1937
163k
      break;
1938
75.8k
    case 32:
1939
75.8k
      highbd_dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above,
1940
75.8k
                                        dx, bd);
1941
75.8k
      break;
1942
23.5k
    case 64:
1943
23.5k
      if (bd < 12) {
1944
12.1k
        highbd_dr_prediction_z1_64xN_avx2(bh, dst, stride, above,
1945
12.1k
                                          upsample_above, dx);
1946
12.1k
      } else {
1947
11.4k
        highbd_dr_prediction_32bit_z1_64xN_avx2(bh, dst, stride, above,
1948
11.4k
                                                upsample_above, dx);
1949
11.4k
      }
1950
23.5k
      break;
1951
0
    default: break;
1952
570k
  }
1953
570k
  return;
1954
570k
}
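A minimal usage sketch of the dispatcher above (illustrative only: the flat edge values, the buffer sizes and the choice dx = 64 for a 45-degree direction are the editor's assumptions, not taken from libaom):

#include <stddef.h>
#include <stdint.h>

void av1_highbd_dr_prediction_z1_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
                                      int bh, const uint16_t *above,
                                      const uint16_t *left, int upsample_above,
                                      int dx, int dy, int bd);

/* Predict an 8x8 10-bit block from a flat top edge. dx is the per-row step in
 * 1/64-pel units (64 should correspond to a 45-degree direction); dy is
 * ignored in zone 1. */
static void example_z1_call(void) {
  uint16_t dst[8 * 8];
  uint16_t above[32], left[32];
  for (int i = 0; i < 32; ++i) above[i] = left[i] = 512;
  av1_highbd_dr_prediction_z1_avx2(dst, /*stride=*/8, /*bw=*/8, /*bh=*/8, above,
                                   left, /*upsample_above=*/0, /*dx=*/64,
                                   /*dy=*/0, /*bd=*/10);
}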
1955
1956
static void highbd_transpose_TX_16X16(const uint16_t *src, ptrdiff_t pitchSrc,
1957
424k
                                      uint16_t *dst, ptrdiff_t pitchDst) {
1958
424k
  __m256i r[16];
1959
424k
  __m256i d[16];
1960
7.21M
  for (int j = 0; j < 16; j++) {
1961
6.79M
    r[j] = _mm256_loadu_si256((__m256i *)(src + j * pitchSrc));
1962
6.79M
  }
1963
424k
  highbd_transpose16x16_avx2(r, d);
1964
7.21M
  for (int j = 0; j < 16; j++) {
1965
6.79M
    _mm256_storeu_si256((__m256i *)(dst + j * pitchDst), d[j]);
1966
6.79M
  }
1967
424k
}
1968
1969
static void highbd_transpose(const uint16_t *src, ptrdiff_t pitchSrc,
1970
                             uint16_t *dst, ptrdiff_t pitchDst, int width,
1971
33.1k
                             int height) {
1972
158k
  for (int j = 0; j < height; j += 16)
1973
550k
    for (int i = 0; i < width; i += 16)
1974
424k
      highbd_transpose_TX_16X16(src + i * pitchSrc + j, pitchSrc,
1975
424k
                                dst + j * pitchDst + i, pitchDst);
1976
33.1k
}
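highbd_transpose() above tiles the copy into 16x16 transposes; element for element it is equivalent to this scalar loop (illustrative sketch, not libaom code):

#include <stddef.h>
#include <stdint.h>

static void highbd_transpose_scalar(const uint16_t *src, ptrdiff_t pitchSrc,
                                    uint16_t *dst, ptrdiff_t pitchDst,
                                    int width, int height) {
  for (int j = 0; j < height; ++j)
    for (int i = 0; i < width; ++i)
      dst[j * pitchDst + i] = src[i * pitchSrc + j];
}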
1977
1978
static void highbd_dr_prediction_32bit_z2_Nx4_avx2(
1979
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
1980
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
1981
75.7k
    int dy) {
1982
75.7k
  const int min_base_x = -(1 << upsample_above);
1983
75.7k
  const int min_base_y = -(1 << upsample_left);
1984
75.7k
  const int frac_bits_x = 6 - upsample_above;
1985
75.7k
  const int frac_bits_y = 6 - upsample_left;
1986
1987
75.7k
  assert(dx > 0);
1988
  // pre-filter above pixels
1989
  // store in temp buffers:
1990
  //   above[x] * 32 + 16
1991
  //   above[x+1] - above[x]
1992
  // final pixels will be calculated as:
1993
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1994
75.7k
  __m256i a0_x, a1_x, a32, a16;
1995
75.7k
  __m256i diff;
1996
75.7k
  __m128i c3f, min_base_y128;
1997
1998
75.7k
  a16 = _mm256_set1_epi32(16);
1999
75.7k
  c3f = _mm_set1_epi32(0x3f);
2000
75.7k
  min_base_y128 = _mm_set1_epi32(min_base_y);
2001
2002
549k
  for (int r = 0; r < N; r++) {
2003
473k
    __m256i b, res, shift;
2004
473k
    __m128i resx, resy, resxy;
2005
473k
    __m128i a0_x128, a1_x128;
2006
473k
    int y = r + 1;
2007
473k
    int base_x = (-y * dx) >> frac_bits_x;
2008
473k
    int base_shift = 0;
2009
473k
    if (base_x < (min_base_x - 1)) {
2010
357k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2011
357k
    }
2012
473k
    int base_min_diff =
2013
473k
        (min_base_x - base_x + upsample_above) >> upsample_above;
2014
473k
    if (base_min_diff > 4) {
2015
278k
      base_min_diff = 4;
2016
278k
    } else {
2017
194k
      if (base_min_diff < 0) base_min_diff = 0;
2018
194k
    }
2019
2020
473k
    if (base_shift > 3) {
2021
278k
      a0_x = _mm256_setzero_si256();
2022
278k
      a1_x = _mm256_setzero_si256();
2023
278k
      shift = _mm256_setzero_si256();
2024
278k
    } else {
2025
194k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2026
194k
      if (upsample_above) {
2027
40.2k
        a0_x128 = _mm_shuffle_epi8(a0_x128,
2028
40.2k
                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
2029
40.2k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
2030
2031
40.2k
        shift = _mm256_castsi128_si256(_mm_srli_epi32(
2032
40.2k
            _mm_and_si128(
2033
40.2k
                _mm_slli_epi32(
2034
40.2k
                    _mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
2035
40.2k
                                   (2 << 6) - y * dx, (3 << 6) - y * dx),
2036
40.2k
                    upsample_above),
2037
40.2k
                c3f),
2038
40.2k
            1));
2039
154k
      } else {
2040
154k
        a0_x128 =
2041
154k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2042
154k
        a1_x128 = _mm_srli_si128(a0_x128, 2);
2043
2044
154k
        shift = _mm256_castsi128_si256(_mm_srli_epi32(
2045
154k
            _mm_and_si128(_mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
2046
154k
                                         (2 << 6) - y * dx, (3 << 6) - y * dx),
2047
154k
                          c3f),
2048
154k
            1));
2049
154k
      }
2050
194k
      a0_x = _mm256_cvtepu16_epi32(a0_x128);
2051
194k
      a1_x = _mm256_cvtepu16_epi32(a1_x128);
2052
194k
    }
2053
    // y calc
2054
473k
    __m128i a0_y, a1_y, shifty;
2055
473k
    if (base_x < min_base_x) {
2056
393k
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2057
393k
      DECLARE_ALIGNED(32, int, base_y_c[4]);
2058
393k
      r6 = _mm_set1_epi32(r << 6);
2059
393k
      dy128 = _mm_set1_epi32(dy);
2060
393k
      c1234 = _mm_setr_epi32(1, 2, 3, 4);
2061
393k
      y_c128 = _mm_sub_epi32(r6, _mm_mullo_epi32(c1234, dy128));
2062
393k
      base_y_c128 = _mm_srai_epi32(y_c128, frac_bits_y);
2063
393k
      mask128 = _mm_cmpgt_epi32(min_base_y128, base_y_c128);
2064
393k
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2065
393k
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2066
2067
393k
      a0_y = _mm_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
2068
393k
                            left[base_y_c[2]], left[base_y_c[3]]);
2069
393k
      a1_y = _mm_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2070
393k
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1]);
2071
2072
393k
      if (upsample_left) {
2073
60.8k
        shifty = _mm_srli_epi32(
2074
60.8k
            _mm_and_si128(_mm_slli_epi32(y_c128, upsample_left), c3f), 1);
2075
333k
      } else {
2076
333k
        shifty = _mm_srli_epi32(_mm_and_si128(y_c128, c3f), 1);
2077
333k
      }
2078
393k
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2079
393k
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2080
393k
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2081
393k
    }
2082
2083
473k
    diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2084
473k
    a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2085
473k
    a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2086
2087
473k
    b = _mm256_mullo_epi32(diff, shift);
2088
473k
    res = _mm256_add_epi32(a32, b);
2089
473k
    res = _mm256_srli_epi32(res, 5);
2090
2091
473k
    resx = _mm256_castsi256_si128(res);
2092
473k
    resx = _mm_packus_epi32(resx, resx);
2093
2094
473k
    resy = _mm256_extracti128_si256(res, 1);
2095
473k
    resy = _mm_packus_epi32(resy, resy);
2096
2097
473k
    resxy =
2098
473k
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2099
473k
    _mm_storel_epi64((__m128i *)(dst), resxy);
2100
473k
    dst += stride;
2101
473k
  }
2102
75.7k
}
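The zone-2 kernels, starting with the one above, interpolate from the top edge while the projected position is still over it and switch to the left edge once it is not; base_min_diff and the HighbdBaseMask blend at the end perform that per-column selection. A scalar sketch without edge upsampling (illustrative only; it assumes above[-1] and left[-1] hold the top-left sample, as the AV1 edge layout provides):

#include <stddef.h>
#include <stdint.h>

static void z2_predict_scalar(uint16_t *dst, ptrdiff_t stride, int bw, int bh,
                              const uint16_t *above, const uint16_t *left,
                              int dx, int dy) {
  for (int r = 0; r < bh; ++r, dst += stride) {
    for (int c = 0; c < bw; ++c) {
      int val;
      const int x = (c << 6) - (r + 1) * dx;  /* projection onto the top edge */
      const int base_x = x >> 6;
      if (base_x >= -1) {
        const int shift = (x & 0x3f) >> 1;
        val = (above[base_x] * 32 + 16 +
               (above[base_x + 1] - above[base_x]) * shift) >> 5;
      } else {
        /* off the top edge: project onto the left column instead */
        const int y = (r << 6) - (c + 1) * dy;
        int base_y = y >> 6;
        if (base_y < -1) base_y = -1;  /* defensive; in range for codec inputs */
        const int shift = (y & 0x3f) >> 1;
        val = (left[base_y] * 32 + 16 +
               (left[base_y + 1] - left[base_y]) * shift) >> 5;
      }
      dst[c] = (uint16_t)val;
    }
  }
}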
2103
2104
static void highbd_dr_prediction_z2_Nx4_avx2(
2105
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2106
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2107
172k
    int dy) {
2108
172k
  const int min_base_x = -(1 << upsample_above);
2109
172k
  const int min_base_y = -(1 << upsample_left);
2110
172k
  const int frac_bits_x = 6 - upsample_above;
2111
172k
  const int frac_bits_y = 6 - upsample_left;
2112
2113
172k
  assert(dx > 0);
2114
  // pre-filter above pixels
2115
  // store in temp buffers:
2116
  //   above[x] * 32 + 16
2117
  //   above[x+1] - above[x]
2118
  // final pixels will be calculated as:
2119
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2120
172k
  __m256i a0_x, a1_x, a32, a16;
2121
172k
  __m256i diff;
2122
172k
  __m128i c3f, min_base_y128;
2123
2124
172k
  a16 = _mm256_set1_epi16(16);
2125
172k
  c3f = _mm_set1_epi16(0x3f);
2126
172k
  min_base_y128 = _mm_set1_epi16(min_base_y);
2127
2128
1.36M
  for (int r = 0; r < N; r++) {
2129
1.18M
    __m256i b, res, shift;
2130
1.18M
    __m128i resx, resy, resxy;
2131
1.18M
    __m128i a0_x128, a1_x128;
2132
1.18M
    int y = r + 1;
2133
1.18M
    int base_x = (-y * dx) >> frac_bits_x;
2134
1.18M
    int base_shift = 0;
2135
1.18M
    if (base_x < (min_base_x - 1)) {
2136
891k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2137
891k
    }
2138
1.18M
    int base_min_diff =
2139
1.18M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2140
1.18M
    if (base_min_diff > 4) {
2141
585k
      base_min_diff = 4;
2142
603k
    } else {
2143
603k
      if (base_min_diff < 0) base_min_diff = 0;
2144
603k
    }
2145
2146
1.18M
    if (base_shift > 3) {
2147
585k
      a0_x = _mm256_setzero_si256();
2148
585k
      a1_x = _mm256_setzero_si256();
2149
585k
      shift = _mm256_setzero_si256();
2150
603k
    } else {
2151
603k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2152
603k
      if (upsample_above) {
2153
193k
        a0_x128 = _mm_shuffle_epi8(a0_x128,
2154
193k
                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
2155
193k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
2156
2157
193k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2158
193k
            _mm_and_si128(
2159
193k
                _mm_slli_epi16(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2160
193k
                                              (2 << 6) - y * dx,
2161
193k
                                              (3 << 6) - y * dx, 0, 0, 0, 0),
2162
193k
                               upsample_above),
2163
193k
                c3f),
2164
193k
            1));
2165
410k
      } else {
2166
410k
        a0_x128 =
2167
410k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2168
410k
        a1_x128 = _mm_srli_si128(a0_x128, 2);
2169
2170
410k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2171
410k
            _mm_and_si128(
2172
410k
                _mm_setr_epi16(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
2173
410k
                               (3 << 6) - y * dx, 0, 0, 0, 0),
2174
410k
                c3f),
2175
410k
            1));
2176
410k
      }
2177
603k
      a0_x = _mm256_castsi128_si256(a0_x128);
2178
603k
      a1_x = _mm256_castsi128_si256(a1_x128);
2179
603k
    }
2180
    // y calc
2181
1.18M
    __m128i a0_y, a1_y, shifty;
2182
1.18M
    if (base_x < min_base_x) {
2183
1.01M
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2184
1.01M
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
2185
1.01M
      r6 = _mm_set1_epi16(r << 6);
2186
1.01M
      dy128 = _mm_set1_epi16(dy);
2187
1.01M
      c1234 = _mm_setr_epi16(1, 2, 3, 4, 0, 0, 0, 0);
2188
1.01M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
2189
1.01M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
2190
1.01M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
2191
1.01M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2192
1.01M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2193
2194
1.01M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
2195
1.01M
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
2196
1.01M
      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2197
1.01M
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1], 0, 0,
2198
1.01M
                            0, 0);
2199
2200
1.01M
      if (upsample_left) {
2201
304k
        shifty = _mm_srli_epi16(
2202
304k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
2203
705k
      } else {
2204
705k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
2205
705k
      }
2206
1.01M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2207
1.01M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2208
1.01M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2209
1.01M
    }
2210
2211
1.18M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2212
1.18M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2213
1.18M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2214
2215
1.18M
    b = _mm256_mullo_epi16(diff, shift);
2216
1.18M
    res = _mm256_add_epi16(a32, b);
2217
1.18M
    res = _mm256_srli_epi16(res, 5);
2218
2219
1.18M
    resx = _mm256_castsi256_si128(res);
2220
1.18M
    resy = _mm256_extracti128_si256(res, 1);
2221
1.18M
    resxy =
2222
1.18M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2223
1.18M
    _mm_storel_epi64((__m128i *)(dst), resxy);
2224
1.18M
    dst += stride;
2225
1.18M
  }
2226
172k
}
2227
2228
static void highbd_dr_prediction_32bit_z2_Nx8_avx2(
2229
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2230
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2231
77.1k
    int dy) {
2232
77.1k
  const int min_base_x = -(1 << upsample_above);
2233
77.1k
  const int min_base_y = -(1 << upsample_left);
2234
77.1k
  const int frac_bits_x = 6 - upsample_above;
2235
77.1k
  const int frac_bits_y = 6 - upsample_left;
2236
2237
  // pre-filter above pixels
2238
  // store in temp buffers:
2239
  //   above[x] * 32 + 16
2240
  //   above[x+1] - above[x]
2241
  // final pixels will be calculated as:
2242
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2243
77.1k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c3f, min_base_y256;
2244
77.1k
  __m256i diff;
2245
77.1k
  __m128i a0_x128, a1_x128;
2246
2247
77.1k
  a16 = _mm256_set1_epi32(16);
2248
77.1k
  c3f = _mm256_set1_epi32(0x3f);
2249
77.1k
  min_base_y256 = _mm256_set1_epi32(min_base_y);
2250
2251
886k
  for (int r = 0; r < N; r++) {
2252
809k
    __m256i b, res, shift;
2253
809k
    __m128i resx, resy, resxy;
2254
809k
    int y = r + 1;
2255
809k
    int base_x = (-y * dx) >> frac_bits_x;
2256
809k
    int base_shift = 0;
2257
809k
    if (base_x < (min_base_x - 1)) {
2258
631k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2259
631k
    }
2260
809k
    int base_min_diff =
2261
809k
        (min_base_x - base_x + upsample_above) >> upsample_above;
2262
809k
    if (base_min_diff > 8) {
2263
385k
      base_min_diff = 8;
2264
424k
    } else {
2265
424k
      if (base_min_diff < 0) base_min_diff = 0;
2266
424k
    }
2267
2268
809k
    if (base_shift > 7) {
2269
385k
      resx = _mm_setzero_si128();
2270
424k
    } else {
2271
424k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2272
424k
      if (upsample_above) {
2273
49.9k
        __m128i mask, atmp0, atmp1, atmp2, atmp3;
2274
49.9k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
2275
49.9k
        atmp0 = _mm_shuffle_epi8(a0_x128,
2276
49.9k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2277
49.9k
        atmp1 = _mm_shuffle_epi8(a1_x128,
2278
49.9k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2279
49.9k
        atmp2 = _mm_shuffle_epi8(
2280
49.9k
            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2281
49.9k
        atmp3 = _mm_shuffle_epi8(
2282
49.9k
            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2283
49.9k
        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
2284
49.9k
                              _mm_set1_epi8(15));
2285
49.9k
        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
2286
49.9k
        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
2287
49.9k
                              _mm_set1_epi8(15));
2288
49.9k
        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
2289
49.9k
        shift = _mm256_srli_epi32(
2290
49.9k
            _mm256_and_si256(
2291
49.9k
                _mm256_slli_epi32(
2292
49.9k
                    _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx,
2293
49.9k
                                      (2 << 6) - y * dx, (3 << 6) - y * dx,
2294
49.9k
                                      (4 << 6) - y * dx, (5 << 6) - y * dx,
2295
49.9k
                                      (6 << 6) - y * dx, (7 << 6) - y * dx),
2296
49.9k
                    upsample_above),
2297
49.9k
                c3f),
2298
49.9k
            1);
2299
374k
      } else {
2300
374k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
2301
374k
        a0_x128 =
2302
374k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2303
374k
        a1_x128 =
2304
374k
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2305
2306
374k
        shift = _mm256_srli_epi32(
2307
374k
            _mm256_and_si256(
2308
374k
                _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
2309
374k
                                  (3 << 6) - y * dx, (4 << 6) - y * dx,
2310
374k
                                  (5 << 6) - y * dx, (6 << 6) - y * dx,
2311
374k
                                  (7 << 6) - y * dx),
2312
374k
                c3f),
2313
374k
            1);
2314
374k
      }
2315
424k
      a0_x = _mm256_cvtepu16_epi32(a0_x128);
2316
424k
      a1_x = _mm256_cvtepu16_epi32(a1_x128);
2317
2318
424k
      diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2319
424k
      a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2320
424k
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2321
2322
424k
      b = _mm256_mullo_epi32(diff, shift);
2323
424k
      res = _mm256_add_epi32(a32, b);
2324
424k
      res = _mm256_srli_epi32(res, 5);
2325
2326
424k
      resx = _mm256_castsi256_si128(_mm256_packus_epi32(
2327
424k
          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
2328
424k
    }
2329
    // y calc
2330
809k
    if (base_x < min_base_x) {
2331
698k
      DECLARE_ALIGNED(32, int, base_y_c[8]);
2332
698k
      __m256i r6, c256, dy256, y_c256, base_y_c256, mask256;
2333
698k
      r6 = _mm256_set1_epi32(r << 6);
2334
698k
      dy256 = _mm256_set1_epi32(dy);
2335
698k
      c256 = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
2336
698k
      y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2337
698k
      base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
2338
698k
      mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2339
698k
      base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2340
698k
      _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2341
2342
698k
      a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2343
698k
          left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2344
698k
          left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2345
698k
          left[base_y_c[6]], left[base_y_c[7]]));
2346
698k
      a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2347
698k
          left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
2348
698k
          left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2349
698k
          left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
2350
2351
698k
      if (upsample_left) {
2352
68.5k
        shift = _mm256_srli_epi32(
2353
68.5k
            _mm256_and_si256(_mm256_slli_epi32((y_c256), upsample_left), c3f),
2354
68.5k
            1);
2355
629k
      } else {
2356
629k
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
2357
629k
      }
2358
698k
      diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2359
698k
      a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2360
698k
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2361
2362
698k
      b = _mm256_mullo_epi32(diff, shift);
2363
698k
      res = _mm256_add_epi32(a32, b);
2364
698k
      res = _mm256_srli_epi32(res, 5);
2365
2366
698k
      resy = _mm256_castsi256_si128(_mm256_packus_epi32(
2367
698k
          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
2368
698k
    } else {
2369
111k
      resy = resx;
2370
111k
    }
2371
809k
    resxy =
2372
809k
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2373
809k
    _mm_storeu_si128((__m128i *)(dst), resxy);
2374
809k
    dst += stride;
2375
809k
  }
2376
77.1k
}
2377
2378
static void highbd_dr_prediction_z2_Nx8_avx2(
2379
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2380
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2381
259k
    int dy) {
2382
259k
  const int min_base_x = -(1 << upsample_above);
2383
259k
  const int min_base_y = -(1 << upsample_left);
2384
259k
  const int frac_bits_x = 6 - upsample_above;
2385
259k
  const int frac_bits_y = 6 - upsample_left;
2386
2387
  // pre-filter above pixels
2388
  // store in temp buffers:
2389
  //   above[x] * 32 + 16
2390
  //   above[x+1] - above[x]
2391
  // final pixels will be calculated as:
2392
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2393
259k
  __m128i c3f, min_base_y128;
2394
259k
  __m256i a0_x, a1_x, diff, a32, a16;
2395
259k
  __m128i a0_x128, a1_x128;
2396
2397
259k
  a16 = _mm256_set1_epi16(16);
2398
259k
  c3f = _mm_set1_epi16(0x3f);
2399
259k
  min_base_y128 = _mm_set1_epi16(min_base_y);
2400
2401
2.78M
  for (int r = 0; r < N; r++) {
2402
2.52M
    __m256i b, res, shift;
2403
2.52M
    __m128i resx, resy, resxy;
2404
2.52M
    int y = r + 1;
2405
2.52M
    int base_x = (-y * dx) >> frac_bits_x;
2406
2.52M
    int base_shift = 0;
2407
2.52M
    if (base_x < (min_base_x - 1)) {
2408
1.94M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2409
1.94M
    }
2410
2.52M
    int base_min_diff =
2411
2.52M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2412
2.52M
    if (base_min_diff > 8) {
2413
1.19M
      base_min_diff = 8;
2414
1.33M
    } else {
2415
1.33M
      if (base_min_diff < 0) base_min_diff = 0;
2416
1.33M
    }
2417
2418
2.52M
    if (base_shift > 7) {
2419
1.19M
      a0_x = _mm256_setzero_si256();
2420
1.19M
      a1_x = _mm256_setzero_si256();
2421
1.19M
      shift = _mm256_setzero_si256();
2422
1.33M
    } else {
2423
1.33M
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2424
1.33M
      if (upsample_above) {
2425
371k
        __m128i mask, atmp0, atmp1, atmp2, atmp3;
2426
371k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
2427
371k
        atmp0 = _mm_shuffle_epi8(a0_x128,
2428
371k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2429
371k
        atmp1 = _mm_shuffle_epi8(a1_x128,
2430
371k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2431
371k
        atmp2 = _mm_shuffle_epi8(
2432
371k
            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2433
371k
        atmp3 = _mm_shuffle_epi8(
2434
371k
            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2435
371k
        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
2436
371k
                              _mm_set1_epi8(15));
2437
371k
        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
2438
371k
        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
2439
371k
                              _mm_set1_epi8(15));
2440
371k
        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
2441
2442
371k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2443
371k
            _mm_and_si128(
2444
371k
                _mm_slli_epi16(
2445
371k
                    _mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2446
371k
                                   (2 << 6) - y * dx, (3 << 6) - y * dx,
2447
371k
                                   (4 << 6) - y * dx, (5 << 6) - y * dx,
2448
371k
                                   (6 << 6) - y * dx, (7 << 6) - y * dx),
2449
371k
                    upsample_above),
2450
371k
                c3f),
2451
371k
            1));
2452
960k
      } else {
2453
960k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
2454
960k
        a0_x128 =
2455
960k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2456
960k
        a1_x128 =
2457
960k
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2458
2459
960k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2460
960k
            _mm_and_si128(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2461
960k
                                         (2 << 6) - y * dx, (3 << 6) - y * dx,
2462
960k
                                         (4 << 6) - y * dx, (5 << 6) - y * dx,
2463
960k
                                         (6 << 6) - y * dx, (7 << 6) - y * dx),
2464
960k
                          c3f),
2465
960k
            1));
2466
960k
      }
2467
1.33M
      a0_x = _mm256_castsi128_si256(a0_x128);
2468
1.33M
      a1_x = _mm256_castsi128_si256(a1_x128);
2469
1.33M
    }
2470
2471
    // y calc
2472
2.52M
    __m128i a0_y, a1_y, shifty;
2473
2.52M
    if (base_x < min_base_x) {
2474
2.14M
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
2475
2.14M
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2476
2.14M
      r6 = _mm_set1_epi16(r << 6);
2477
2.14M
      dy128 = _mm_set1_epi16(dy);
2478
2.14M
      c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
2479
2.14M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
2480
2.14M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
2481
2.14M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
2482
2.14M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2483
2.14M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2484
2485
2.14M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
2486
2.14M
                            left[base_y_c[2]], left[base_y_c[3]],
2487
2.14M
                            left[base_y_c[4]], left[base_y_c[5]],
2488
2.14M
                            left[base_y_c[6]], left[base_y_c[7]]);
2489
2.14M
      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2490
2.14M
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1],
2491
2.14M
                            left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2492
2.14M
                            left[base_y_c[6] + 1], left[base_y_c[7] + 1]);
2493
2494
2.14M
      if (upsample_left) {
2495
555k
        shifty = _mm_srli_epi16(
2496
555k
            _mm_and_si128(_mm_slli_epi16((y_c128), upsample_left), c3f), 1);
2497
1.58M
      } else {
2498
1.58M
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
2499
1.58M
      }
2500
2.14M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2501
2.14M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2502
2.14M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2503
2.14M
    }
2504
2505
2.52M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2506
2.52M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2507
2.52M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2508
2509
2.52M
    b = _mm256_mullo_epi16(diff, shift);
2510
2.52M
    res = _mm256_add_epi16(a32, b);
2511
2.52M
    res = _mm256_srli_epi16(res, 5);
2512
2513
2.52M
    resx = _mm256_castsi256_si128(res);
2514
2.52M
    resy = _mm256_extracti128_si256(res, 1);
2515
2516
2.52M
    resxy =
2517
2.52M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2518
2.52M
    _mm_storeu_si128((__m128i *)(dst), resxy);
2519
2.52M
    dst += stride;
2520
2.52M
  }
2521
259k
}
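A minimal scalar sketch of the zone-2 rule that the SIMD path above vectorizes, assuming upsample_above == upsample_left == 0 (upsampling only changes how shift and the base index are derived); the function name and variable names are illustrative, not part of libaom's API:

static void z2_scalar_sketch(uint16_t *dst, ptrdiff_t stride, int bw, int bh,
                             const uint16_t *above, const uint16_t *left,
                             int dx, int dy) {
  const int min_base_x = -1;
  for (int r = 0; r < bh; ++r) {
    for (int c = 0; c < bw; ++c) {
      const int y = r + 1;
      const int x_frac = (c << 6) - y * dx;  // 6-bit fixed-point projection
      const int base_x = x_frac >> 6;
      int a0, a1, shift;
      if (base_x >= min_base_x) {
        // still on the above edge: interpolate horizontally
        a0 = above[base_x];
        a1 = above[base_x + 1];
        shift = (x_frac & 0x3f) >> 1;
      } else {
        // fell off the above edge: interpolate along the left edge
        const int x = c + 1;
        const int y_frac = (r << 6) - x * dy;
        const int base_y = y_frac >> 6;
        a0 = left[base_y];
        a1 = left[base_y + 1];
        shift = (y_frac & 0x3f) >> 1;
      }
      // same formula as the comments in the SIMD code:
      // (a[x] * 32 + 16 + (a[x+1] - a[x]) * shift) >> 5
      dst[c] = (uint16_t)((a0 * 32 + 16 + (a1 - a0) * shift) >> 5);
    }
    dst += stride;
  }
}

The AVX2 code computes both branches for a whole vector of columns at once and then uses HighbdBaseMask[base_min_diff] to blend the left-edge lanes over the above-edge lanes.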
2522
2523
static void highbd_dr_prediction_32bit_z2_HxW_avx2(
2524
    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2525
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2526
80.2k
    int dy) {
2527
  // here upsample_above and upsample_left are 0 by design of
2528
  // av1_use_intra_edge_upsample
2529
80.2k
  const int min_base_x = -1;
2530
80.2k
  const int min_base_y = -1;
2531
80.2k
  (void)upsample_above;
2532
80.2k
  (void)upsample_left;
2533
80.2k
  const int frac_bits_x = 6;
2534
80.2k
  const int frac_bits_y = 6;
2535
2536
  // pre-filter above pixels
2537
  // store in temp buffers:
2538
  //   above[x] * 32 + 16
2539
  //   above[x+1] - above[x]
2540
  // final pixels will be calculated as:
2541
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2542
80.2k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a0_1_x, a1_1_x, a16, c1;
2543
80.2k
  __m256i diff, min_base_y256, c3f, dy256, c1234, c0123, c8;
2544
80.2k
  __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
2545
80.2k
  DECLARE_ALIGNED(32, int, base_y_c[16]);
2546
2547
80.2k
  a16 = _mm256_set1_epi32(16);
2548
80.2k
  c1 = _mm256_srli_epi32(a16, 4);
2549
80.2k
  c8 = _mm256_srli_epi32(a16, 1);
2550
80.2k
  min_base_y256 = _mm256_set1_epi32(min_base_y);
2551
80.2k
  c3f = _mm256_set1_epi32(0x3f);
2552
80.2k
  dy256 = _mm256_set1_epi32(dy);
2553
80.2k
  c0123 = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
2554
80.2k
  c1234 = _mm256_add_epi32(c0123, c1);
2555
2556
1.61M
  for (int r = 0; r < H; r++) {
2557
1.53M
    __m256i b, res, shift, ydx;
2558
1.53M
    __m256i resx[2], resy[2];
2559
1.53M
    __m256i resxy, j256, r6;
2560
5.32M
    for (int j = 0; j < W; j += 16) {
2561
3.79M
      j256 = _mm256_set1_epi32(j);
2562
3.79M
      int y = r + 1;
2563
3.79M
      ydx = _mm256_set1_epi32(y * dx);
2564
2565
3.79M
      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
2566
3.79M
      int base_shift = 0;
2567
3.79M
      if ((base_x) < (min_base_x - 1)) {
2568
2.43M
        base_shift = (min_base_x - base_x - 1);
2569
2.43M
      }
2570
3.79M
      int base_min_diff = (min_base_x - base_x);
2571
3.79M
      if (base_min_diff > 16) {
2572
1.64M
        base_min_diff = 16;
2573
2.14M
      } else {
2574
2.14M
        if (base_min_diff < 0) base_min_diff = 0;
2575
2.14M
      }
2576
2577
3.79M
      if (base_shift > 7) {
2578
1.96M
        resx[0] = _mm256_setzero_si256();
2579
1.96M
      } else {
2580
1.82M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2581
1.82M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
2582
1.82M
        a0_x128 =
2583
1.82M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2584
1.82M
        a1_x128 =
2585
1.82M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2586
2587
1.82M
        a0_x = _mm256_cvtepu16_epi32(a0_x128);
2588
1.82M
        a1_x = _mm256_cvtepu16_epi32(a1_x128);
2589
2590
1.82M
        r6 = _mm256_slli_epi32(_mm256_add_epi32(c0123, j256), 6);
2591
1.82M
        shift = _mm256_srli_epi32(
2592
1.82M
            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
2593
2594
1.82M
        diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2595
1.82M
        a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2596
1.82M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2597
2598
1.82M
        b = _mm256_mullo_epi32(diff, shift);
2599
1.82M
        res = _mm256_add_epi32(a32, b);
2600
1.82M
        res = _mm256_srli_epi32(res, 5);
2601
2602
1.82M
        resx[0] = _mm256_packus_epi32(
2603
1.82M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2604
1.82M
      }
2605
3.79M
      int base_shift8 = 0;
2606
3.79M
      if ((base_x + 8) < (min_base_x - 1)) {
2607
1.91M
        base_shift8 = (min_base_x - (base_x + 8) - 1);
2608
1.91M
      }
2609
3.79M
      if (base_shift8 > 7) {
2610
1.64M
        resx[1] = _mm256_setzero_si256();
2611
2.14M
      } else {
2612
2.14M
        a0_1_x128 =
2613
2.14M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 8));
2614
2.14M
        a1_1_x128 =
2615
2.14M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 9));
2616
2.14M
        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
2617
2.14M
                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
2618
2.14M
        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
2619
2.14M
                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
2620
2621
2.14M
        a0_1_x = _mm256_cvtepu16_epi32(a0_1_x128);
2622
2.14M
        a1_1_x = _mm256_cvtepu16_epi32(a1_1_x128);
2623
2624
2.14M
        r6 = _mm256_slli_epi32(
2625
2.14M
            _mm256_add_epi32(c0123, _mm256_add_epi32(j256, c8)), 6);
2626
2.14M
        shift = _mm256_srli_epi32(
2627
2.14M
            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
2628
2629
2.14M
        diff = _mm256_sub_epi32(a1_1_x, a0_1_x);  // a[x+1] - a[x]
2630
2.14M
        a32 = _mm256_slli_epi32(a0_1_x, 5);       // a[x] * 32
2631
2.14M
        a32 = _mm256_add_epi32(a32, a16);         // a[x] * 32 + 16
2632
2.14M
        b = _mm256_mullo_epi32(diff, shift);
2633
2634
2.14M
        resx[1] = _mm256_add_epi32(a32, b);
2635
2.14M
        resx[1] = _mm256_srli_epi32(resx[1], 5);
2636
2.14M
        resx[1] = _mm256_packus_epi32(
2637
2.14M
            resx[1],
2638
2.14M
            _mm256_castsi128_si256(_mm256_extracti128_si256(resx[1], 1)));
2639
2.14M
      }
2640
3.79M
      resx[0] =
2641
3.79M
          _mm256_inserti128_si256(resx[0], _mm256_castsi256_si128(resx[1]),
2642
3.79M
                                  1);  // 16 16bit values
2643
2644
      // y calc: left-edge ("y") contribution for the lanes where base_x < min_base_x
2645
3.79M
      resy[0] = _mm256_setzero_si256();
2646
3.79M
      if ((base_x < min_base_x)) {
2647
2.55M
        __m256i c256, y_c256, y_c_1_256, base_y_c256, mask256;
2648
2.55M
        r6 = _mm256_set1_epi32(r << 6);
2649
2.55M
        c256 = _mm256_add_epi32(j256, c1234);
2650
2.55M
        y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2651
2.55M
        base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
2652
2.55M
        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2653
2.55M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2654
2.55M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2655
2.55M
        c256 = _mm256_add_epi32(c256, c8);
2656
2.55M
        y_c_1_256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2657
2.55M
        base_y_c256 = _mm256_srai_epi32(y_c_1_256, frac_bits_y);
2658
2.55M
        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2659
2.55M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2660
2.55M
        _mm256_store_si256((__m256i *)(base_y_c + 8), base_y_c256);
2661
2662
2.55M
        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2663
2.55M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2664
2.55M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2665
2.55M
            left[base_y_c[6]], left[base_y_c[7]]));
2666
2.55M
        a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2667
2.55M
            left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
2668
2.55M
            left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2669
2.55M
            left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
2670
2671
2.55M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
2672
2673
2.55M
        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2674
2.55M
        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2675
2.55M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2676
2677
2.55M
        b = _mm256_mullo_epi32(diff, shift);
2678
2.55M
        res = _mm256_add_epi32(a32, b);
2679
2.55M
        res = _mm256_srli_epi32(res, 5);
2680
2681
2.55M
        resy[0] = _mm256_packus_epi32(
2682
2.55M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2683
2684
2.55M
        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2685
2.55M
            left[base_y_c[8]], left[base_y_c[9]], left[base_y_c[10]],
2686
2.55M
            left[base_y_c[11]], left[base_y_c[12]], left[base_y_c[13]],
2687
2.55M
            left[base_y_c[14]], left[base_y_c[15]]));
2688
2.55M
        a1_y = _mm256_cvtepu16_epi32(
2689
2.55M
            _mm_setr_epi16(left[base_y_c[8] + 1], left[base_y_c[9] + 1],
2690
2.55M
                           left[base_y_c[10] + 1], left[base_y_c[11] + 1],
2691
2.55M
                           left[base_y_c[12] + 1], left[base_y_c[13] + 1],
2692
2.55M
                           left[base_y_c[14] + 1], left[base_y_c[15] + 1]));
2693
2.55M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c_1_256, c3f), 1);
2694
2695
2.55M
        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2696
2.55M
        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2697
2.55M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2698
2699
2.55M
        b = _mm256_mullo_epi32(diff, shift);
2700
2.55M
        res = _mm256_add_epi32(a32, b);
2701
2.55M
        res = _mm256_srli_epi32(res, 5);
2702
2703
2.55M
        resy[1] = _mm256_packus_epi32(
2704
2.55M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2705
2706
2.55M
        resy[0] =
2707
2.55M
            _mm256_inserti128_si256(resy[0], _mm256_castsi256_si128(resy[1]),
2708
2.55M
                                    1);  // 16 16bit values
2709
2.55M
      }
2710
2711
3.79M
      resxy = _mm256_blendv_epi8(resx[0], resy[0],
2712
3.79M
                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
2713
3.79M
      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
2714
3.79M
    }  // for j
2715
1.53M
    dst += stride;
2716
1.53M
  }
2717
80.2k
}
2718
2719
static void highbd_dr_prediction_z2_HxW_avx2(
2720
    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2721
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2722
453k
    int dy) {
2723
  // here upsample_above and upsample_left are 0 by design of
2724
  // av1_use_intra_edge_upsample
2725
453k
  const int min_base_x = -1;
2726
453k
  const int min_base_y = -1;
2727
453k
  (void)upsample_above;
2728
453k
  (void)upsample_left;
2729
453k
  const int frac_bits_x = 6;
2730
453k
  const int frac_bits_y = 6;
2731
2732
  // pre-filter above pixels
2733
  // store in temp buffers:
2734
  //   above[x] * 32 + 16
2735
  //   above[x+1] - above[x]
2736
  // final pixels will be calculated as:
2737
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2738
453k
  __m256i a0_x, a1_x, a32, a16, c3f, c1;
2739
453k
  __m256i diff, min_base_y256, dy256, c1234, c0123;
2740
453k
  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
2741
2742
453k
  a16 = _mm256_set1_epi16(16);
2743
453k
  c1 = _mm256_srli_epi16(a16, 4);
2744
453k
  min_base_y256 = _mm256_set1_epi16(min_base_y);
2745
453k
  c3f = _mm256_set1_epi16(0x3f);
2746
453k
  dy256 = _mm256_set1_epi16(dy);
2747
453k
  c0123 =
2748
453k
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
2749
453k
  c1234 = _mm256_add_epi16(c0123, c1);
2750
2751
8.70M
  for (int r = 0; r < H; r++) {
2752
8.25M
    __m256i b, res, shift;
2753
8.25M
    __m256i resx, resy, ydx;
2754
8.25M
    __m256i resxy, j256, r6;
2755
8.25M
    __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
2756
8.25M
    int y = r + 1;
2757
8.25M
    ydx = _mm256_set1_epi16((short)(y * dx));
2758
2759
22.6M
    for (int j = 0; j < W; j += 16) {
2760
14.4M
      j256 = _mm256_set1_epi16(j);
2761
14.4M
      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
2762
14.4M
      int base_shift = 0;
2763
14.4M
      if ((base_x) < (min_base_x - 1)) {
2764
10.8M
        base_shift = (min_base_x - (base_x)-1);
2765
10.8M
      }
2766
14.4M
      int base_min_diff = (min_base_x - base_x);
2767
14.4M
      if (base_min_diff > 16) {
2768
8.13M
        base_min_diff = 16;
2769
8.13M
      } else {
2770
6.31M
        if (base_min_diff < 0) base_min_diff = 0;
2771
6.31M
      }
2772
2773
14.4M
      if (base_shift < 8) {
2774
5.20M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2775
5.20M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
2776
5.20M
        a0_x128 =
2777
5.20M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2778
5.20M
        a1_x128 =
2779
5.20M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2780
2781
5.20M
        a0_x = _mm256_castsi128_si256(a0_x128);
2782
5.20M
        a1_x = _mm256_castsi128_si256(a1_x128);
2783
9.23M
      } else {
2784
9.23M
        a0_x = _mm256_setzero_si256();
2785
9.23M
        a1_x = _mm256_setzero_si256();
2786
9.23M
      }
2787
2788
14.4M
      int base_shift1 = 0;
2789
14.4M
      if (base_shift > 8) {
2790
9.07M
        base_shift1 = base_shift - 8;
2791
9.07M
      }
2792
14.4M
      if (base_shift1 < 8) {
2793
6.31M
        a0_1_x128 =
2794
6.31M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 8));
2795
6.31M
        a1_1_x128 =
2796
6.31M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 9));
2797
6.31M
        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
2798
6.31M
                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
2799
6.31M
        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
2800
6.31M
                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
2801
2802
6.31M
        a0_x = _mm256_inserti128_si256(a0_x, a0_1_x128, 1);
2803
6.31M
        a1_x = _mm256_inserti128_si256(a1_x, a1_1_x128, 1);
2804
6.31M
      }
2805
14.4M
      r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
2806
14.4M
      shift = _mm256_srli_epi16(
2807
14.4M
          _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
2808
2809
14.4M
      diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2810
14.4M
      a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2811
14.4M
      a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2812
2813
14.4M
      b = _mm256_mullo_epi16(diff, shift);
2814
14.4M
      res = _mm256_add_epi16(a32, b);
2815
14.4M
      resx = _mm256_srli_epi16(res, 5);  // 16 16-bit values
2816
2817
      // y calc: left-edge ("y") contribution for the lanes where base_x < min_base_x
2818
14.4M
      resy = _mm256_setzero_si256();
2819
14.4M
      __m256i a0_y, a1_y, shifty;
2820
14.4M
      if ((base_x < min_base_x)) {
2821
11.4M
        __m256i c256, y_c256, base_y_c256, mask256, mul16;
2822
11.4M
        r6 = _mm256_set1_epi16(r << 6);
2823
11.4M
        c256 = _mm256_add_epi16(j256, c1234);
2824
11.4M
        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
2825
11.4M
                                 _mm256_srli_epi16(min_base_y256, 1));
2826
11.4M
        y_c256 = _mm256_sub_epi16(r6, mul16);
2827
11.4M
        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
2828
11.4M
        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
2829
11.4M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2830
11.4M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2831
2832
11.4M
        a0_y = _mm256_setr_epi16(
2833
11.4M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2834
11.4M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2835
11.4M
            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
2836
11.4M
            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
2837
11.4M
            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
2838
11.4M
            left[base_y_c[15]]);
2839
11.4M
        base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
2840
11.4M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2841
2842
11.4M
        a1_y = _mm256_setr_epi16(
2843
11.4M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2844
11.4M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2845
11.4M
            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
2846
11.4M
            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
2847
11.4M
            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
2848
11.4M
            left[base_y_c[15]]);
2849
2850
11.4M
        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
2851
2852
11.4M
        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
2853
11.4M
        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
2854
11.4M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2855
2856
11.4M
        b = _mm256_mullo_epi16(diff, shifty);
2857
11.4M
        res = _mm256_add_epi16(a32, b);
2858
11.4M
        resy = _mm256_srli_epi16(res, 5);
2859
11.4M
      }
2860
2861
14.4M
      resxy = _mm256_blendv_epi8(resx, resy,
2862
14.4M
                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
2863
14.4M
      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
2864
14.4M
    }  // for j
2865
8.25M
    dst += stride;
2866
8.25M
  }
2867
453k
}
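The mask-driven blend at the end of each j iteration picks, lane by lane, between the above-edge result (resx) and the left-edge result (resy). By analogy with the BaseMask table defined later in this file, HighbdBaseMask[k] presumably sets its first k 16-bit lanes to all ones, so the blend reduces to a per-lane select; a scalar picture with illustrative array names:

  // the first base_min_diff output lanes come from the left-edge ("y")
  // result, the remaining lanes from the above-edge ("x") result
  for (int lane = 0; lane < 16; ++lane) {
    out[lane] = (lane < base_min_diff) ? resy_lane[lane] : resx_lane[lane];
  }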
2868
2869
// Directional prediction, zone 2: 90 < angle < 180
2870
void av1_highbd_dr_prediction_z2_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
2871
                                      int bh, const uint16_t *above,
2872
                                      const uint16_t *left, int upsample_above,
2873
                                      int upsample_left, int dx, int dy,
2874
1.11M
                                      int bd) {
2875
1.11M
  (void)bd;
2876
1.11M
  assert(dx > 0);
2877
1.11M
  assert(dy > 0);
2878
1.11M
  switch (bw) {
2879
248k
    case 4:
2880
248k
      if (bd < 12) {
2881
172k
        highbd_dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left,
2882
172k
                                         upsample_above, upsample_left, dx, dy);
2883
172k
      } else {
2884
75.7k
        highbd_dr_prediction_32bit_z2_Nx4_avx2(bh, dst, stride, above, left,
2885
75.7k
                                               upsample_above, upsample_left,
2886
75.7k
                                               dx, dy);
2887
75.7k
      }
2888
248k
      break;
2889
336k
    case 8:
2890
336k
      if (bd < 12) {
2891
259k
        highbd_dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left,
2892
259k
                                         upsample_above, upsample_left, dx, dy);
2893
259k
      } else {
2894
77.1k
        highbd_dr_prediction_32bit_z2_Nx8_avx2(bh, dst, stride, above, left,
2895
77.1k
                                               upsample_above, upsample_left,
2896
77.1k
                                               dx, dy);
2897
77.1k
      }
2898
336k
      break;
2899
534k
    default:
2900
534k
      if (bd < 12) {
2901
453k
        highbd_dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
2902
453k
                                         upsample_above, upsample_left, dx, dy);
2903
453k
      } else {
2904
80.2k
        highbd_dr_prediction_32bit_z2_HxW_avx2(bh, bw, dst, stride, above, left,
2905
80.2k
                                               upsample_above, upsample_left,
2906
80.2k
                                               dx, dy);
2907
80.2k
      }
2908
534k
      break;
2909
1.11M
  }
2910
1.11M
}
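The bd < 12 split in the dispatcher above exists because the 16-bit lane arithmetic in the highbd_dr_prediction_z2_* variants can overflow for 12-bit content: the intermediate a[x] * 32 + 16 reaches 4095 * 32 + 16 = 131,056, which does not fit in a 16-bit lane, while the 10-bit maximum 1023 * 32 + 16 = 32,752 does. Blocks with bd >= 12 therefore take the 32-bit (_epi32) variants.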
2911
2912
//  Directional prediction, zone 3 functions
2913
static void highbd_dr_prediction_z3_4x4_avx2(uint16_t *dst, ptrdiff_t stride,
2914
                                             const uint16_t *left,
2915
                                             int upsample_left, int dy,
2916
143k
                                             int bd) {
2917
143k
  __m128i dstvec[4], d[4];
2918
143k
  if (bd < 12) {
2919
122k
    highbd_dr_prediction_z1_4xN_internal_avx2(4, dstvec, left, upsample_left,
2920
122k
                                              dy);
2921
122k
  } else {
2922
21.1k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(4, dstvec, left,
2923
21.1k
                                                    upsample_left, dy);
2924
21.1k
  }
2925
143k
  highbd_transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2],
2926
143k
                                   &dstvec[3], &d[0], &d[1], &d[2], &d[3]);
2927
143k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
2928
143k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
2929
143k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
2930
143k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
2931
143k
  return;
2932
143k
}
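All of the zone-3 helpers follow the same two-step pattern: run the corresponding zone-1 predictor along `left`, then transpose the result into `dst`. The small sizes keep the intermediate block in registers, as in the 4x4 case above; the largest sizes go through a stack buffer. As a sketch of that second shape, the bd < 12 branch of the 64x64 path further down reduces to (using only helpers defined in this file):

  DECLARE_ALIGNED(16, uint16_t, tmp[64 * 64]);
  // predict the block as if the left column were the top row ...
  highbd_dr_prediction_z1_64xN_avx2(64, tmp, 64, left, upsample_left, dy);
  // ... then transpose it into the destination
  highbd_transpose(tmp, 64, dst, stride, 64, 64);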
2933
2934
static void highbd_dr_prediction_z3_8x8_avx2(uint16_t *dst, ptrdiff_t stride,
2935
                                             const uint16_t *left,
2936
                                             int upsample_left, int dy,
2937
127k
                                             int bd) {
2938
127k
  __m128i dstvec[8], d[8];
2939
127k
  if (bd < 12) {
2940
93.9k
    highbd_dr_prediction_z1_8xN_internal_avx2(8, dstvec, left, upsample_left,
2941
93.9k
                                              dy);
2942
93.9k
  } else {
2943
33.2k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(8, dstvec, left,
2944
33.2k
                                                    upsample_left, dy);
2945
33.2k
  }
2946
127k
  highbd_transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2947
127k
                           &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
2948
127k
                           &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
2949
127k
                           &d[7]);
2950
1.14M
  for (int i = 0; i < 8; i++) {
2951
1.01M
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
2952
1.01M
  }
2953
127k
}
2954
2955
static void highbd_dr_prediction_z3_4x8_avx2(uint16_t *dst, ptrdiff_t stride,
2956
                                             const uint16_t *left,
2957
                                             int upsample_left, int dy,
2958
24.1k
                                             int bd) {
2959
24.1k
  __m128i dstvec[4], d[8];
2960
24.1k
  if (bd < 12) {
2961
19.8k
    highbd_dr_prediction_z1_8xN_internal_avx2(4, dstvec, left, upsample_left,
2962
19.8k
                                              dy);
2963
19.8k
  } else {
2964
4.21k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(4, dstvec, left,
2965
4.21k
                                                    upsample_left, dy);
2966
4.21k
  }
2967
2968
24.1k
  highbd_transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2969
24.1k
                               &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
2970
24.1k
                               &d[7]);
2971
217k
  for (int i = 0; i < 8; i++) {
2972
192k
    _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
2973
192k
  }
2974
24.1k
}
2975
2976
static void highbd_dr_prediction_z3_8x4_avx2(uint16_t *dst, ptrdiff_t stride,
2977
                                             const uint16_t *left,
2978
                                             int upsample_left, int dy,
2979
42.4k
                                             int bd) {
2980
42.4k
  __m128i dstvec[8], d[4];
2981
42.4k
  if (bd < 12) {
2982
34.2k
    highbd_dr_prediction_z1_4xN_internal_avx2(8, dstvec, left, upsample_left,
2983
34.2k
                                              dy);
2984
34.2k
  } else {
2985
8.20k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(8, dstvec, left,
2986
8.20k
                                                    upsample_left, dy);
2987
8.20k
  }
2988
2989
42.4k
  highbd_transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2990
42.4k
                               &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
2991
42.4k
                               &d[0], &d[1], &d[2], &d[3]);
2992
42.4k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
2993
42.4k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride), d[1]);
2994
42.4k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride), d[2]);
2995
42.4k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride), d[3]);
2996
42.4k
}
2997
2998
static void highbd_dr_prediction_z3_8x16_avx2(uint16_t *dst, ptrdiff_t stride,
2999
                                              const uint16_t *left,
3000
                                              int upsample_left, int dy,
3001
34.5k
                                              int bd) {
3002
34.5k
  __m256i dstvec[8], d[8];
3003
34.5k
  if (bd < 12) {
3004
23.0k
    highbd_dr_prediction_z1_16xN_internal_avx2(8, dstvec, left, upsample_left,
3005
23.0k
                                               dy);
3006
23.0k
  } else {
3007
11.4k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(8, dstvec, left,
3008
11.4k
                                                     upsample_left, dy);
3009
11.4k
  }
3010
34.5k
  highbd_transpose8x16_16x8_avx2(dstvec, d);
3011
310k
  for (int i = 0; i < 8; i++) {
3012
276k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3013
276k
                     _mm256_castsi256_si128(d[i]));
3014
276k
  }
3015
310k
  for (int i = 8; i < 16; i++) {
3016
276k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3017
276k
                     _mm256_extracti128_si256(d[i - 8], 1));
3018
276k
  }
3019
34.5k
}
3020
3021
static void highbd_dr_prediction_z3_16x8_avx2(uint16_t *dst, ptrdiff_t stride,
3022
                                              const uint16_t *left,
3023
                                              int upsample_left, int dy,
3024
56.8k
                                              int bd) {
3025
56.8k
  __m128i dstvec[16], d[16];
3026
56.8k
  if (bd < 12) {
3027
40.4k
    highbd_dr_prediction_z1_8xN_internal_avx2(16, dstvec, left, upsample_left,
3028
40.4k
                                              dy);
3029
40.4k
  } else {
3030
16.4k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(16, dstvec, left,
3031
16.4k
                                                    upsample_left, dy);
3032
16.4k
  }
3033
170k
  for (int i = 0; i < 16; i += 8) {
3034
113k
    highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
3035
113k
                             &dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
3036
113k
                             &dstvec[6 + i], &dstvec[7 + i], &d[0 + i],
3037
113k
                             &d[1 + i], &d[2 + i], &d[3 + i], &d[4 + i],
3038
113k
                             &d[5 + i], &d[6 + i], &d[7 + i]);
3039
113k
  }
3040
511k
  for (int i = 0; i < 8; i++) {
3041
454k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
3042
454k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 8), d[i + 8]);
3043
454k
  }
3044
56.8k
}
3045
3046
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3047
static void highbd_dr_prediction_z3_4x16_avx2(uint16_t *dst, ptrdiff_t stride,
3048
                                              const uint16_t *left,
3049
                                              int upsample_left, int dy,
3050
25.9k
                                              int bd) {
3051
25.9k
  __m256i dstvec[4], d[4], d1;
3052
25.9k
  if (bd < 12) {
3053
15.1k
    highbd_dr_prediction_z1_16xN_internal_avx2(4, dstvec, left, upsample_left,
3054
15.1k
                                               dy);
3055
15.1k
  } else {
3056
10.7k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(4, dstvec, left,
3057
10.7k
                                                     upsample_left, dy);
3058
10.7k
  }
3059
25.9k
  highbd_transpose4x16_avx2(dstvec, d);
3060
129k
  for (int i = 0; i < 4; i++) {
3061
103k
    _mm_storel_epi64((__m128i *)(dst + i * stride),
3062
103k
                     _mm256_castsi256_si128(d[i]));
3063
103k
    d1 = _mm256_bsrli_epi128(d[i], 8);
3064
103k
    _mm_storel_epi64((__m128i *)(dst + (i + 4) * stride),
3065
103k
                     _mm256_castsi256_si128(d1));
3066
103k
    _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
3067
103k
                     _mm256_extracti128_si256(d[i], 1));
3068
103k
    _mm_storel_epi64((__m128i *)(dst + (i + 12) * stride),
3069
103k
                     _mm256_extracti128_si256(d1, 1));
3070
103k
  }
3071
25.9k
}
3072
3073
static void highbd_dr_prediction_z3_16x4_avx2(uint16_t *dst, ptrdiff_t stride,
3074
                                              const uint16_t *left,
3075
                                              int upsample_left, int dy,
3076
59.6k
                                              int bd) {
3077
59.6k
  __m128i dstvec[16], d[8];
3078
59.6k
  if (bd < 12) {
3079
47.7k
    highbd_dr_prediction_z1_4xN_internal_avx2(16, dstvec, left, upsample_left,
3080
47.7k
                                              dy);
3081
47.7k
  } else {
3082
11.8k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(16, dstvec, left,
3083
11.8k
                                                    upsample_left, dy);
3084
11.8k
  }
3085
59.6k
  highbd_transpose16x4_8x8_sse2(dstvec, d);
3086
3087
59.6k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
3088
59.6k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride + 8), d[1]);
3089
59.6k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride), d[2]);
3090
59.6k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride + 8), d[3]);
3091
59.6k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride), d[4]);
3092
59.6k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride + 8), d[5]);
3093
59.6k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride), d[6]);
3094
59.6k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride + 8), d[7]);
3095
59.6k
}
3096
3097
static void highbd_dr_prediction_z3_8x32_avx2(uint16_t *dst, ptrdiff_t stride,
3098
                                              const uint16_t *left,
3099
                                              int upsample_left, int dy,
3100
13.3k
                                              int bd) {
3101
13.3k
  __m256i dstvec[16], d[16];
3102
13.3k
  if (bd < 12) {
3103
10.1k
    highbd_dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left,
3104
10.1k
                                               dy);
3105
10.1k
  } else {
3106
3.14k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(8, dstvec, left,
3107
3.14k
                                                     upsample_left, dy);
3108
3.14k
  }
3109
3110
39.9k
  for (int i = 0; i < 16; i += 8) {
3111
26.6k
    highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
3112
26.6k
  }
3113
3114
119k
  for (int i = 0; i < 8; i++) {
3115
106k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3116
106k
                     _mm256_castsi256_si128(d[i]));
3117
106k
  }
3118
119k
  for (int i = 0; i < 8; i++) {
3119
106k
    _mm_storeu_si128((__m128i *)(dst + (i + 8) * stride),
3120
106k
                     _mm256_extracti128_si256(d[i], 1));
3121
106k
  }
3122
119k
  for (int i = 8; i < 16; i++) {
3123
106k
    _mm_storeu_si128((__m128i *)(dst + (i + 8) * stride),
3124
106k
                     _mm256_castsi256_si128(d[i]));
3125
106k
  }
3126
119k
  for (int i = 8; i < 16; i++) {
3127
106k
    _mm_storeu_si128((__m128i *)(dst + (i + 16) * stride),
3128
106k
                     _mm256_extracti128_si256(d[i], 1));
3129
106k
  }
3130
13.3k
}
3131
3132
static void highbd_dr_prediction_z3_32x8_avx2(uint16_t *dst, ptrdiff_t stride,
3133
                                              const uint16_t *left,
3134
                                              int upsample_left, int dy,
3135
57.0k
                                              int bd) {
3136
57.0k
  __m128i dstvec[32], d[32];
3137
57.0k
  if (bd < 12) {
3138
52.1k
    highbd_dr_prediction_z1_8xN_internal_avx2(32, dstvec, left, upsample_left,
3139
52.1k
                                              dy);
3140
52.1k
  } else {
3141
4.86k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(32, dstvec, left,
3142
4.86k
                                                    upsample_left, dy);
3143
4.86k
  }
3144
3145
285k
  for (int i = 0; i < 32; i += 8) {
3146
228k
    highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
3147
228k
                             &dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
3148
228k
                             &dstvec[6 + i], &dstvec[7 + i], &d[0 + i],
3149
228k
                             &d[1 + i], &d[2 + i], &d[3 + i], &d[4 + i],
3150
228k
                             &d[5 + i], &d[6 + i], &d[7 + i]);
3151
228k
  }
3152
513k
  for (int i = 0; i < 8; i++) {
3153
456k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
3154
456k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 8), d[i + 8]);
3155
456k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 16]);
3156
456k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 24), d[i + 24]);
3157
456k
  }
3158
57.0k
}
3159
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3160
3161
static void highbd_dr_prediction_z3_16x16_avx2(uint16_t *dst, ptrdiff_t stride,
3162
                                               const uint16_t *left,
3163
                                               int upsample_left, int dy,
3164
102k
                                               int bd) {
3165
102k
  __m256i dstvec[16], d[16];
3166
102k
  if (bd < 12) {
3167
90.8k
    highbd_dr_prediction_z1_16xN_internal_avx2(16, dstvec, left, upsample_left,
3168
90.8k
                                               dy);
3169
90.8k
  } else {
3170
11.9k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(16, dstvec, left,
3171
11.9k
                                                     upsample_left, dy);
3172
11.9k
  }
3173
3174
102k
  highbd_transpose16x16_avx2(dstvec, d);
3175
3176
1.74M
  for (int i = 0; i < 16; i++) {
3177
1.64M
    _mm256_storeu_si256((__m256i *)(dst + i * stride), d[i]);
3178
1.64M
  }
3179
102k
}
3180
3181
static void highbd_dr_prediction_z3_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
3182
                                               const uint16_t *left,
3183
                                               int upsample_left, int dy,
3184
80.4k
                                               int bd) {
3185
80.4k
  __m256i dstvec[64], d[16];
3186
80.4k
  if (bd < 12) {
3187
75.6k
    highbd_dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left,
3188
75.6k
                                               dy);
3189
75.6k
  } else {
3190
4.79k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(32, dstvec, left,
3191
4.79k
                                                     upsample_left, dy);
3192
4.79k
  }
3193
80.4k
  highbd_transpose16x16_avx2(dstvec, d);
3194
1.36M
  for (int j = 0; j < 16; j++) {
3195
1.28M
    _mm256_storeu_si256((__m256i *)(dst + j * stride), d[j]);
3196
1.28M
  }
3197
80.4k
  highbd_transpose16x16_avx2(dstvec + 16, d);
3198
1.36M
  for (int j = 0; j < 16; j++) {
3199
1.28M
    _mm256_storeu_si256((__m256i *)(dst + j * stride + 16), d[j]);
3200
1.28M
  }
3201
80.4k
  highbd_transpose16x16_avx2(dstvec + 32, d);
3202
1.36M
  for (int j = 0; j < 16; j++) {
3203
1.28M
    _mm256_storeu_si256((__m256i *)(dst + (j + 16) * stride), d[j]);
3204
1.28M
  }
3205
80.4k
  highbd_transpose16x16_avx2(dstvec + 48, d);
3206
1.36M
  for (int j = 0; j < 16; j++) {
3207
1.28M
    _mm256_storeu_si256((__m256i *)(dst + (j + 16) * stride + 16), d[j]);
3208
1.28M
  }
3209
80.4k
}
3210
3211
static void highbd_dr_prediction_z3_64x64_avx2(uint16_t *dst, ptrdiff_t stride,
3212
                                               const uint16_t *left,
3213
                                               int upsample_left, int dy,
3214
22.3k
                                               int bd) {
3215
22.3k
  DECLARE_ALIGNED(16, uint16_t, dstT[64 * 64]);
3216
22.3k
  if (bd < 12) {
3217
18.3k
    highbd_dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
3218
18.3k
  } else {
3219
4.06k
    highbd_dr_prediction_32bit_z1_64xN_avx2(64, dstT, 64, left, upsample_left,
3220
4.06k
                                            dy);
3221
4.06k
  }
3222
22.3k
  highbd_transpose(dstT, 64, dst, stride, 64, 64);
3223
22.3k
}
3224
3225
static void highbd_dr_prediction_z3_16x32_avx2(uint16_t *dst, ptrdiff_t stride,
3226
                                               const uint16_t *left,
3227
                                               int upsample_left, int dy,
3228
24.1k
                                               int bd) {
3229
24.1k
  __m256i dstvec[32], d[32];
3230
24.1k
  if (bd < 12) {
3231
22.0k
    highbd_dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left,
3232
22.0k
                                               dy);
3233
22.0k
  } else {
3234
2.04k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(16, dstvec, left,
3235
2.04k
                                                     upsample_left, dy);
3236
2.04k
  }
3237
120k
  for (int i = 0; i < 32; i += 8) {
3238
96.4k
    highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
3239
96.4k
  }
3240
  // store the transposed rows
3241
72.3k
  for (int j = 0; j < 32; j += 16) {
3242
433k
    for (int i = 0; i < 8; i++) {
3243
385k
      _mm_storeu_si128((__m128i *)(dst + (i + j) * stride),
3244
385k
                       _mm256_castsi256_si128(d[(i + j)]));
3245
385k
    }
3246
433k
    for (int i = 0; i < 8; i++) {
3247
385k
      _mm_storeu_si128((__m128i *)(dst + (i + j) * stride + 8),
3248
385k
                       _mm256_castsi256_si128(d[(i + j) + 8]));
3249
385k
    }
3250
433k
    for (int i = 8; i < 16; i++) {
3251
385k
      _mm256_storeu_si256(
3252
385k
          (__m256i *)(dst + (i + j) * stride),
3253
385k
          _mm256_inserti128_si256(
3254
385k
              d[(i + j)], _mm256_extracti128_si256(d[(i + j) - 8], 1), 0));
3255
385k
    }
3256
48.2k
  }
3257
24.1k
}
3258
3259
static void highbd_dr_prediction_z3_32x16_avx2(uint16_t *dst, ptrdiff_t stride,
3260
                                               const uint16_t *left,
3261
                                               int upsample_left, int dy,
3262
26.2k
                                               int bd) {
3263
26.2k
  __m256i dstvec[32], d[16];
3264
26.2k
  if (bd < 12) {
3265
23.9k
    highbd_dr_prediction_z1_16xN_internal_avx2(32, dstvec, left, upsample_left,
3266
23.9k
                                               dy);
3267
23.9k
  } else {
3268
2.34k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(32, dstvec, left,
3269
2.34k
                                                     upsample_left, dy);
3270
2.34k
  }
3271
78.7k
  for (int i = 0; i < 32; i += 16) {
3272
52.4k
    highbd_transpose16x16_avx2((dstvec + i), d);
3273
892k
    for (int j = 0; j < 16; j++) {
3274
839k
      _mm256_storeu_si256((__m256i *)(dst + j * stride + i), d[j]);
3275
839k
    }
3276
52.4k
  }
3277
26.2k
}
3278
3279
static void highbd_dr_prediction_z3_32x64_avx2(uint16_t *dst, ptrdiff_t stride,
3280
                                               const uint16_t *left,
3281
                                               int upsample_left, int dy,
3282
2.48k
                                               int bd) {
3283
2.48k
  uint16_t dstT[64 * 32];
3284
2.48k
  if (bd < 12) {
3285
2.16k
    highbd_dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
3286
2.16k
  } else {
3287
328
    highbd_dr_prediction_32bit_z1_64xN_avx2(32, dstT, 64, left, upsample_left,
3288
328
                                            dy);
3289
328
  }
3290
2.48k
  highbd_transpose(dstT, 64, dst, stride, 32, 64);
3291
2.48k
}
3292
3293
static void highbd_dr_prediction_z3_64x32_avx2(uint16_t *dst, ptrdiff_t stride,
3294
                                               const uint16_t *left,
3295
                                               int upsample_left, int dy,
3296
3.34k
                                               int bd) {
3297
3.34k
  DECLARE_ALIGNED(16, uint16_t, dstT[32 * 64]);
3298
3.34k
  highbd_dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy, bd);
3299
3.34k
  highbd_transpose(dstT, 32, dst, stride, 64, 32);
3300
3.34k
  return;
3301
3.34k
}
3302
3303
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3304
static void highbd_dr_prediction_z3_16x64_avx2(uint16_t *dst, ptrdiff_t stride,
3305
                                               const uint16_t *left,
3306
                                               int upsample_left, int dy,
3307
4.90k
                                               int bd) {
3308
4.90k
  DECLARE_ALIGNED(16, uint16_t, dstT[64 * 16]);
3309
4.90k
  if (bd < 12) {
3310
4.19k
    highbd_dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
3311
4.19k
  } else {
3312
709
    highbd_dr_prediction_32bit_z1_64xN_avx2(16, dstT, 64, left, upsample_left,
3313
709
                                            dy);
3314
709
  }
3315
4.90k
  highbd_transpose(dstT, 64, dst, stride, 16, 64);
3316
4.90k
}
3317
3318
static void highbd_dr_prediction_z3_64x16_avx2(uint16_t *dst, ptrdiff_t stride,
3319
                                               const uint16_t *left,
3320
                                               int upsample_left, int dy,
3321
19.9k
                                               int bd) {
3322
19.9k
  __m256i dstvec[64], d[16];
3323
19.9k
  if (bd < 12) {
3324
19.3k
    highbd_dr_prediction_z1_16xN_internal_avx2(64, dstvec, left, upsample_left,
3325
19.3k
                                               dy);
3326
19.3k
  } else {
3327
614
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(64, dstvec, left,
3328
614
                                                     upsample_left, dy);
3329
614
  }
3330
99.6k
  for (int i = 0; i < 64; i += 16) {
3331
79.7k
    highbd_transpose16x16_avx2((dstvec + i), d);
3332
1.35M
    for (int j = 0; j < 16; j++) {
3333
1.27M
      _mm256_storeu_si256((__m256i *)(dst + j * stride + i), d[j]);
3334
1.27M
    }
3335
79.7k
  }
3336
19.9k
}
3337
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3338
3339
void av1_highbd_dr_prediction_z3_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
3340
                                      int bh, const uint16_t *above,
3341
                                      const uint16_t *left, int upsample_left,
3342
871k
                                      int dx, int dy, int bd) {
3343
871k
  (void)above;
3344
871k
  (void)dx;
3345
3346
871k
  assert(dx == 1);
3347
871k
  assert(dy > 0);
3348
871k
  if (bw == bh) {
3349
476k
    switch (bw) {
3350
143k
      case 4:
3351
143k
        highbd_dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy,
3352
143k
                                         bd);
3353
143k
        break;
3354
127k
      case 8:
3355
127k
        highbd_dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy,
3356
127k
                                         bd);
3357
127k
        break;
3358
102k
      case 16:
3359
102k
        highbd_dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy,
3360
102k
                                           bd);
3361
102k
        break;
3362
80.4k
      case 32:
3363
80.4k
        highbd_dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy,
3364
80.4k
                                           bd);
3365
80.4k
        break;
3366
22.3k
      case 64:
3367
22.3k
        highbd_dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy,
3368
22.3k
                                           bd);
3369
22.3k
        break;
3370
476k
    }
3371
476k
  } else {
3372
394k
    if (bw < bh) {
3373
129k
      if (bw + bw == bh) {
3374
85.2k
        switch (bw) {
3375
24.1k
          case 4:
3376
24.1k
            highbd_dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left,
3377
24.1k
                                             dy, bd);
3378
24.1k
            break;
3379
34.5k
          case 8:
3380
34.5k
            highbd_dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left,
3381
34.5k
                                              dy, bd);
3382
34.5k
            break;
3383
24.1k
          case 16:
3384
24.1k
            highbd_dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left,
3385
24.1k
                                               dy, bd);
3386
24.1k
            break;
3387
2.48k
          case 32:
3388
2.48k
            highbd_dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left,
3389
2.48k
                                               dy, bd);
3390
2.48k
            break;
3391
85.2k
        }
3392
85.2k
      } else {
3393
44.1k
        switch (bw) {
3394
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3395
25.9k
          case 4:
3396
25.9k
            highbd_dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left,
3397
25.9k
                                              dy, bd);
3398
25.9k
            break;
3399
13.3k
          case 8:
3400
13.3k
            highbd_dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left,
3401
13.3k
                                              dy, bd);
3402
13.3k
            break;
3403
4.90k
          case 16:
3404
4.90k
            highbd_dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left,
3405
4.90k
                                               dy, bd);
3406
4.90k
            break;
3407
44.1k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3408
44.1k
        }
3409
44.1k
      }
3410
265k
    } else {
3411
265k
      if (bh + bh == bw) {
3412
128k
        switch (bh) {
3413
42.4k
          case 4:
3414
42.4k
            highbd_dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left,
3415
42.4k
                                             dy, bd);
3416
42.4k
            break;
3417
56.8k
          case 8:
3418
56.8k
            highbd_dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left,
3419
56.8k
                                              dy, bd);
3420
56.8k
            break;
3421
26.2k
          case 16:
3422
26.2k
            highbd_dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left,
3423
26.2k
                                               dy, bd);
3424
26.2k
            break;
3425
3.34k
          case 32:
3426
3.34k
            highbd_dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left,
3427
3.34k
                                               dy, bd);
3428
3.34k
            break;
3429
128k
        }
3430
136k
      } else {
3431
136k
        switch (bh) {
3432
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3433
59.6k
          case 4:
3434
59.6k
            highbd_dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left,
3435
59.6k
                                              dy, bd);
3436
59.6k
            break;
3437
57.0k
          case 8:
3438
57.0k
            highbd_dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left,
3439
57.0k
                                              dy, bd);
3440
57.0k
            break;
3441
19.9k
          case 16:
3442
19.9k
            highbd_dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left,
3443
19.9k
                                               dy, bd);
3444
19.9k
            break;
3445
136k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3446
136k
        }
3447
136k
      }
3448
265k
    }
3449
394k
  }
3450
871k
  return;
3451
871k
}
3452
#endif  // CONFIG_AV1_HIGHBITDEPTH
3453
3454
// Low bit depth functions
3455
static DECLARE_ALIGNED(32, uint8_t, BaseMask[33][32]) = {
3456
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3457
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3458
  { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3459
    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3460
  { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3461
    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3462
  { 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3463
    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3464
  { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3465
    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3466
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3467
    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3468
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3469
    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3470
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3471
    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0 },
3472
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0,
3473
    0,    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0 },
3474
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0,
3475
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0 },
3476
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
3477
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3478
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3479
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3480
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3481
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3482
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3483
    0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3484
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3485
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3486
    0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0,
3487
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3488
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3489
    0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0,
3490
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3491
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3492
    0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,
3493
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3494
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3495
    0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,
3496
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3497
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3498
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,
3499
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3500
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3501
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,
3502
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3503
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3504
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,
3505
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3506
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3507
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,
3508
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3509
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3510
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
3511
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3512
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3513
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3514
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3515
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3516
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3517
    0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0 },
3518
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3519
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3520
    0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0 },
3521
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3522
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3523
    0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0 },
3524
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3525
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3526
    0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0 },
3527
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3528
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3529
    0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0 },
3530
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3531
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3532
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0 },
3533
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3534
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3535
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0 },
3536
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3537
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3538
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0 },
3539
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3540
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3541
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0 },
3542
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3543
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3544
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
3545
};
3546
3547
/* clang-format on */
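BaseMask[k] consists of k leading 0xff bytes followed by zeros. In dr_prediction_z1_HxW_internal_avx2 below it is used with _mm_blendv_epi8 to keep the first base_max_diff interpolated pixels of a row and pad everything past max_base_x with the replicated last above-sample. A scalar picture of that select, with illustrative names:

  for (int i = 0; i < 16; ++i) {
    row[i] = (i < base_max_diff) ? interpolated[i] : above[max_base_x];
  }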
3548
static AOM_FORCE_INLINE void dr_prediction_z1_HxW_internal_avx2(
3549
    int H, int W, __m128i *dst, const uint8_t *above, int upsample_above,
3550
819k
    int dx) {
3551
819k
  const int frac_bits = 6 - upsample_above;
3552
819k
  const int max_base_x = ((W + H) - 1) << upsample_above;
3553
3554
819k
  assert(dx > 0);
3555
  // pre-filter above pixels
3556
  // store in temp buffers:
3557
  //   above[x] * 32 + 16
3558
  //   above[x+1] - above[x]
3559
  // final pixels will be calculated as:
3560
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3561
819k
  __m256i a0, a1, a32, a16;
3562
819k
  __m256i diff, c3f;
3563
819k
  __m128i a_mbase_x;
3564
3565
819k
  a16 = _mm256_set1_epi16(16);
3566
819k
  a_mbase_x = _mm_set1_epi8((int8_t)above[max_base_x]);
3567
819k
  c3f = _mm256_set1_epi16(0x3f);
3568
3569
819k
  int x = dx;
3570
11.4M
  for (int r = 0; r < W; r++) {
3571
10.6M
    __m256i b, res, shift;
3572
10.6M
    __m128i res1, a0_128, a1_128;
3573
3574
10.6M
    int base = x >> frac_bits;
3575
10.6M
    int base_max_diff = (max_base_x - base) >> upsample_above;
3576
10.6M
    if (base_max_diff <= 0) {
3577
20.8k
      for (int i = r; i < W; ++i) {
3578
14.4k
        dst[i] = a_mbase_x;  // save 4 values
3579
14.4k
      }
3580
6.36k
      return;
3581
6.36k
    }
3582
10.6M
    if (base_max_diff > H) base_max_diff = H;
3583
10.6M
    a0_128 = _mm_loadu_si128((__m128i *)(above + base));
3584
10.6M
    a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
3585
3586
10.6M
    if (upsample_above) {
3587
1.44M
      a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)EvenOddMaskx[0]);
3588
1.44M
      a1_128 = _mm_srli_si128(a0_128, 8);
3589
3590
1.44M
      shift = _mm256_srli_epi16(
3591
1.44M
          _mm256_and_si256(
3592
1.44M
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
3593
1.44M
          1);
3594
9.22M
    } else {
3595
9.22M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3596
9.22M
    }
3597
10.6M
    a0 = _mm256_cvtepu8_epi16(a0_128);
3598
10.6M
    a1 = _mm256_cvtepu8_epi16(a1_128);
3599
3600
10.6M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3601
10.6M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3602
10.6M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3603
3604
10.6M
    b = _mm256_mullo_epi16(diff, shift);
3605
10.6M
    res = _mm256_add_epi16(a32, b);
3606
10.6M
    res = _mm256_srli_epi16(res, 5);
3607
3608
10.6M
    res = _mm256_packus_epi16(
3609
10.6M
        res, _mm256_castsi128_si256(
3610
10.6M
                 _mm256_extracti128_si256(res, 1)));  // goto 8 bit
3611
10.6M
    res1 = _mm256_castsi256_si128(res);               // 16 8bit values
3612
3613
10.6M
    dst[r] =
3614
10.6M
        _mm_blendv_epi8(a_mbase_x, res1, *(__m128i *)BaseMask[base_max_diff]);
3615
10.6M
    x += dx;
3616
10.6M
  }
3617
819k
}
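The helper above evaluates the comment's formula sixteen pixels at a time in 16-bit lanes: shift is the halved fractional projection ((r + 1) * dx & 0x3f) >> 1 and the sums are narrowed back to bytes with _mm256_packus_epi16. A scalar model of the same per-pixel arithmetic, with upsampling and the max_base_x clamp left out, illustration only:

#include <stdint.h>

// Scalar model of the zone 1 interpolation computed above: a 6-bit
// fixed-point blend between two neighbouring reference samples.
static uint8_t z1_pixel(const uint8_t *above, int r, int c, int dx) {
  const int x = (r + 1) * dx;          // projected position in 1/64 pel
  const int base = (x >> 6) + c;       // integer sample index
  const int shift = (x & 0x3f) >> 1;   // fractional part, halved to 0..31
  return (uint8_t)((above[base] * 32 + 16 +
                    (above[base + 1] - above[base]) * shift) >> 5);
}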
3618
3619
static void dr_prediction_z1_4xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3620
                                      const uint8_t *above, int upsample_above,
3621
83.0k
                                      int dx) {
3622
83.0k
  __m128i dstvec[16];
3623
3624
83.0k
  dr_prediction_z1_HxW_internal_avx2(4, N, dstvec, above, upsample_above, dx);
3625
667k
  for (int i = 0; i < N; i++) {
3626
584k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(dstvec[i]);
3627
584k
  }
3628
83.0k
}
3629
3630
static void dr_prediction_z1_8xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3631
                                      const uint8_t *above, int upsample_above,
3632
128k
                                      int dx) {
3633
128k
  __m128i dstvec[32];
3634
3635
128k
  dr_prediction_z1_HxW_internal_avx2(8, N, dstvec, above, upsample_above, dx);
3636
1.49M
  for (int i = 0; i < N; i++) {
3637
1.36M
    _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
3638
1.36M
  }
3639
128k
}
3640
3641
static void dr_prediction_z1_16xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3642
                                       const uint8_t *above, int upsample_above,
3643
118k
                                       int dx) {
3644
118k
  __m128i dstvec[64];
3645
3646
118k
  dr_prediction_z1_HxW_internal_avx2(16, N, dstvec, above, upsample_above, dx);
3647
1.80M
  for (int i = 0; i < N; i++) {
3648
1.68M
    _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
3649
1.68M
  }
3650
118k
}
3651
3652
static AOM_FORCE_INLINE void dr_prediction_z1_32xN_internal_avx2(
3653
168k
    int N, __m256i *dstvec, const uint8_t *above, int upsample_above, int dx) {
3654
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
3655
168k
  (void)upsample_above;
3656
168k
  const int frac_bits = 6;
3657
168k
  const int max_base_x = ((32 + N) - 1);
3658
3659
  // pre-filter above pixels
3660
  // store in temp buffers:
3661
  //   above[x] * 32 + 16
3662
  //   above[x+1] - above[x]
3663
  // final pixels will be calculated as:
3664
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3665
168k
  __m256i a0, a1, a32, a16;
3666
168k
  __m256i a_mbase_x, diff, c3f;
3667
3668
168k
  a16 = _mm256_set1_epi16(16);
3669
168k
  a_mbase_x = _mm256_set1_epi8((int8_t)above[max_base_x]);
3670
168k
  c3f = _mm256_set1_epi16(0x3f);
3671
3672
168k
  int x = dx;
3673
4.69M
  for (int r = 0; r < N; r++) {
3674
4.52M
    __m256i b, res, res16[2];
3675
4.52M
    __m128i a0_128, a1_128;
3676
3677
4.52M
    int base = x >> frac_bits;
3678
4.52M
    int base_max_diff = (max_base_x - base);
3679
4.52M
    if (base_max_diff <= 0) {
3680
0
      for (int i = r; i < N; ++i) {
3681
0
        dstvec[i] = a_mbase_x;  // save 32 values
3682
0
      }
3683
0
      return;
3684
0
    }
3685
4.52M
    if (base_max_diff > 32) base_max_diff = 32;
3686
4.52M
    __m256i shift =
3687
4.52M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3688
3689
13.5M
    for (int j = 0, jj = 0; j < 32; j += 16, jj++) {
3690
9.04M
      int mdiff = base_max_diff - j;
3691
9.04M
      if (mdiff <= 0) {
3692
739
        res16[jj] = a_mbase_x;
3693
9.04M
      } else {
3694
9.04M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
3695
9.04M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + j + 1));
3696
9.04M
        a0 = _mm256_cvtepu8_epi16(a0_128);
3697
9.04M
        a1 = _mm256_cvtepu8_epi16(a1_128);
3698
3699
9.04M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3700
9.04M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3701
9.04M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3702
9.04M
        b = _mm256_mullo_epi16(diff, shift);
3703
3704
9.04M
        res = _mm256_add_epi16(a32, b);
3705
9.04M
        res = _mm256_srli_epi16(res, 5);
3706
9.04M
        res16[jj] = _mm256_packus_epi16(
3707
9.04M
            res, _mm256_castsi128_si256(
3708
9.04M
                     _mm256_extracti128_si256(res, 1)));  // 16 8bit values
3709
9.04M
      }
3710
9.04M
    }
3711
4.52M
    res16[1] =
3712
4.52M
        _mm256_inserti128_si256(res16[0], _mm256_castsi256_si128(res16[1]),
3713
4.52M
                                1);  // 32 8bit values
3714
3715
4.52M
    dstvec[r] = _mm256_blendv_epi8(
3716
4.52M
        a_mbase_x, res16[1],
3717
4.52M
        *(__m256i *)BaseMask[base_max_diff]);  // 32 8bit values
3718
4.52M
    x += dx;
3719
4.52M
  }
3720
168k
}
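Because _mm256_cvtepu8_epi16 only widens 16 bytes at a time, each 32-pixel row above is produced as two 16-pixel halves (res16[0] and res16[1], both left in the low 128-bit lane by _mm256_packus_epi16) and stitched into one register with _mm256_inserti128_si256 before the BaseMask blend. The equivalent assembly step in scalar form, illustration only:

#include <stdint.h>

// Scalar model of the half-row stitch done with _mm256_inserti128_si256:
// columns 0..15 come from the first half, 16..31 from the second.
static void assemble_row32(uint8_t out[32], const uint8_t half0[16],
                           const uint8_t half1[16]) {
  for (int c = 0; c < 16; ++c) {
    out[c] = half0[c];
    out[16 + c] = half1[c];
  }
}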
3721
3722
static void dr_prediction_z1_32xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3723
                                       const uint8_t *above, int upsample_above,
3724
74.0k
                                       int dx) {
3725
74.0k
  __m256i dstvec[64];
3726
74.0k
  dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above, dx);
3727
2.09M
  for (int i = 0; i < N; i++) {
3728
2.01M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
3729
2.01M
  }
3730
74.0k
}
3731
3732
static void dr_prediction_z1_64xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3733
                                       const uint8_t *above, int upsample_above,
3734
38.8k
                                       int dx) {
3735
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
3736
38.8k
  (void)upsample_above;
3737
38.8k
  const int frac_bits = 6;
3738
38.8k
  const int max_base_x = ((64 + N) - 1);
3739
3740
  // pre-filter above pixels
3741
  // store in temp buffers:
3742
  //   above[x] * 32 + 16
3743
  //   above[x+1] - above[x]
3744
  // final pixels will be calculated as:
3745
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3746
38.8k
  __m256i a0, a1, a32, a16;
3747
38.8k
  __m256i a_mbase_x, diff, c3f;
3748
38.8k
  __m128i max_base_x128, base_inc128, mask128;
3749
3750
38.8k
  a16 = _mm256_set1_epi16(16);
3751
38.8k
  a_mbase_x = _mm256_set1_epi8((int8_t)above[max_base_x]);
3752
38.8k
  max_base_x128 = _mm_set1_epi8(max_base_x);
3753
38.8k
  c3f = _mm256_set1_epi16(0x3f);
3754
3755
38.8k
  int x = dx;
3756
2.14M
  for (int r = 0; r < N; r++, dst += stride) {
3757
2.10M
    __m256i b, res;
3758
2.10M
    int base = x >> frac_bits;
3759
2.10M
    if (base >= max_base_x) {
3760
0
      for (int i = r; i < N; ++i) {
3761
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 32 values
3762
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
3763
0
        dst += stride;
3764
0
      }
3765
0
      return;
3766
0
    }
3767
3768
2.10M
    __m256i shift =
3769
2.10M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3770
3771
2.10M
    __m128i a0_128, a1_128, res128;
3772
10.5M
    for (int j = 0; j < 64; j += 16) {
3773
8.41M
      int mdif = max_base_x - (base + j);
3774
8.41M
      if (mdif <= 0) {
3775
2.98k
        _mm_storeu_si128((__m128i *)(dst + j),
3776
2.98k
                         _mm256_castsi256_si128(a_mbase_x));
3777
8.40M
      } else {
3778
8.40M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
3779
8.40M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
3780
8.40M
        a0 = _mm256_cvtepu8_epi16(a0_128);
3781
8.40M
        a1 = _mm256_cvtepu8_epi16(a1_128);
3782
3783
8.40M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3784
8.40M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3785
8.40M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3786
8.40M
        b = _mm256_mullo_epi16(diff, shift);
3787
3788
8.40M
        res = _mm256_add_epi16(a32, b);
3789
8.40M
        res = _mm256_srli_epi16(res, 5);
3790
8.40M
        res = _mm256_packus_epi16(
3791
8.40M
            res, _mm256_castsi128_si256(
3792
8.40M
                     _mm256_extracti128_si256(res, 1)));  // 16 8bit values
3793
3794
8.40M
        base_inc128 =
3795
8.40M
            _mm_setr_epi8((int8_t)(base + j), (int8_t)(base + j + 1),
3796
8.40M
                          (int8_t)(base + j + 2), (int8_t)(base + j + 3),
3797
8.40M
                          (int8_t)(base + j + 4), (int8_t)(base + j + 5),
3798
8.40M
                          (int8_t)(base + j + 6), (int8_t)(base + j + 7),
3799
8.40M
                          (int8_t)(base + j + 8), (int8_t)(base + j + 9),
3800
8.40M
                          (int8_t)(base + j + 10), (int8_t)(base + j + 11),
3801
8.40M
                          (int8_t)(base + j + 12), (int8_t)(base + j + 13),
3802
8.40M
                          (int8_t)(base + j + 14), (int8_t)(base + j + 15));
3803
3804
8.40M
        mask128 = _mm_cmpgt_epi8(_mm_subs_epu8(max_base_x128, base_inc128),
3805
8.40M
                                 _mm_setzero_si128());
3806
8.40M
        res128 = _mm_blendv_epi8(_mm256_castsi256_si128(a_mbase_x),
3807
8.40M
                                 _mm256_castsi256_si128(res), mask128);
3808
8.40M
        _mm_storeu_si128((__m128i *)(dst + j), res128);
3809
8.40M
      }
3810
8.41M
    }
3811
2.10M
    x += dx;
3812
2.10M
  }
3813
38.8k
}
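Unlike the narrower kernels, the 64-wide path writes each 16-pixel chunk straight to dst and derives the clamp on the fly: base_inc128 carries the absolute sample indices base+j .. base+j+15, _mm_subs_epu8 saturates max_base_x - index to zero once an index reaches max_base_x, and _mm_cmpgt_epi8 against zero turns that into the per-byte keep mask. Scalar equivalent of the per-pixel decision, illustration only:

#include <stdint.h>

// Scalar model of the mask built from base_inc128 above: a pixel keeps its
// interpolated value only while its source index is below max_base_x.
static uint8_t z1_clamp_select(uint8_t interp, uint8_t edge, int sample_index,
                               int max_base_x) {
  return (sample_index < max_base_x) ? interp : edge;
}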
3814
3815
// Directional prediction, zone 1: 0 < angle < 90
3816
void av1_dr_prediction_z1_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
3817
                               const uint8_t *above, const uint8_t *left,
3818
415k
                               int upsample_above, int dx, int dy) {
3819
415k
  (void)left;
3820
415k
  (void)dy;
3821
415k
  switch (bw) {
3822
83.0k
    case 4:
3823
83.0k
      dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above, dx);
3824
83.0k
      break;
3825
128k
    case 8:
3826
128k
      dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above, dx);
3827
128k
      break;
3828
118k
    case 16:
3829
118k
      dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above, dx);
3830
118k
      break;
3831
71.5k
    case 32:
3832
71.5k
      dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above, dx);
3833
71.5k
      break;
3834
13.7k
    case 64:
3835
13.7k
      dr_prediction_z1_64xN_avx2(bh, dst, stride, above, upsample_above, dx);
3836
13.7k
      break;
3837
0
    default: break;
3838
415k
  }
3839
415k
  return;
3840
415k
}
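A hedged usage sketch for the zone 1 entry point: the caller provides an above[] row padded well past bw + bh samples, a dst block of bh rows, and dx in 1/64-pel units derived from the prediction angle. The real padding rules and angle tables live in the calling code, so the sizes and dx below are illustrative assumptions only:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Prototype as exposed through the AV1 RTCD dispatch.
void av1_dr_prediction_z1_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
                               const uint8_t *above, const uint8_t *left,
                               int upsample_above, int dx, int dy);

static void example_z1_call(void) {
  uint8_t above[160], left[64], dst[32 * 32];  // assumed, generous padding
  memset(above, 128, sizeof(above));
  memset(left, 128, sizeof(left));
  // 32x32 block, no edge upsampling; dx = 64 advances one sample per row
  // (roughly a 45 degree zone 1 angle). dy is unused in zone 1.
  av1_dr_prediction_z1_avx2(dst, /*stride=*/32, /*bw=*/32, /*bh=*/32, above,
                            left, /*upsample_above=*/0, /*dx=*/64, /*dy=*/0);
}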
3841
3842
static void dr_prediction_z2_Nx4_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3843
                                      const uint8_t *above, const uint8_t *left,
3844
                                      int upsample_above, int upsample_left,
3845
222k
                                      int dx, int dy) {
3846
222k
  const int min_base_x = -(1 << upsample_above);
3847
222k
  const int min_base_y = -(1 << upsample_left);
3848
222k
  const int frac_bits_x = 6 - upsample_above;
3849
222k
  const int frac_bits_y = 6 - upsample_left;
3850
3851
222k
  assert(dx > 0);
3852
  // pre-filter above pixels
3853
  // store in temp buffers:
3854
  //   above[x] * 32 + 16
3855
  //   above[x+1] - above[x]
3856
  // final pixels will be calculated as:
3857
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3858
222k
  __m128i a0_x, a1_x, a32, a16, diff;
3859
222k
  __m128i c3f, min_base_y128, c1234, dy128;
3860
3861
222k
  a16 = _mm_set1_epi16(16);
3862
222k
  c3f = _mm_set1_epi16(0x3f);
3863
222k
  min_base_y128 = _mm_set1_epi16(min_base_y);
3864
222k
  c1234 = _mm_setr_epi16(0, 1, 2, 3, 4, 0, 0, 0);
3865
222k
  dy128 = _mm_set1_epi16(dy);
3866
3867
1.58M
  for (int r = 0; r < N; r++) {
3868
1.36M
    __m128i b, res, shift, r6, ydx;
3869
1.36M
    __m128i resx, resy, resxy;
3870
1.36M
    __m128i a0_x128, a1_x128;
3871
1.36M
    int y = r + 1;
3872
1.36M
    int base_x = (-y * dx) >> frac_bits_x;
3873
1.36M
    int base_shift = 0;
3874
1.36M
    if (base_x < (min_base_x - 1)) {
3875
1.01M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
3876
1.01M
    }
3877
1.36M
    int base_min_diff =
3878
1.36M
        (min_base_x - base_x + upsample_above) >> upsample_above;
3879
1.36M
    if (base_min_diff > 4) {
3880
679k
      base_min_diff = 4;
3881
688k
    } else {
3882
688k
      if (base_min_diff < 0) base_min_diff = 0;
3883
688k
    }
3884
3885
1.36M
    if (base_shift > 3) {
3886
679k
      a0_x = _mm_setzero_si128();
3887
679k
      a1_x = _mm_setzero_si128();
3888
679k
      shift = _mm_setzero_si128();
3889
688k
    } else {
3890
688k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
3891
688k
      ydx = _mm_set1_epi16(y * dx);
3892
688k
      r6 = _mm_slli_epi16(c1234, 6);
3893
3894
688k
      if (upsample_above) {
3895
262k
        a0_x128 =
3896
262k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
3897
262k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
3898
3899
262k
        shift = _mm_srli_epi16(
3900
262k
            _mm_and_si128(
3901
262k
                _mm_slli_epi16(_mm_sub_epi16(r6, ydx), upsample_above), c3f),
3902
262k
            1);
3903
426k
      } else {
3904
426k
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
3905
426k
        a1_x128 = _mm_srli_si128(a0_x128, 1);
3906
3907
426k
        shift = _mm_srli_epi16(_mm_and_si128(_mm_sub_epi16(r6, ydx), c3f), 1);
3908
426k
      }
3909
688k
      a0_x = _mm_cvtepu8_epi16(a0_x128);
3910
688k
      a1_x = _mm_cvtepu8_epi16(a1_x128);
3911
688k
    }
3912
    // y calc
3913
1.36M
    __m128i a0_y, a1_y, shifty;
3914
1.36M
    if (base_x < min_base_x) {
3915
1.14M
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
3916
1.14M
      __m128i y_c128, base_y_c128, mask128, c1234_;
3917
1.14M
      c1234_ = _mm_srli_si128(c1234, 2);
3918
1.14M
      r6 = _mm_set1_epi16(r << 6);
3919
1.14M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234_, dy128));
3920
1.14M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
3921
1.14M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
3922
1.14M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
3923
1.14M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
3924
3925
1.14M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
3926
1.14M
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
3927
1.14M
      base_y_c128 = _mm_add_epi16(base_y_c128, _mm_srli_epi16(a16, 4));
3928
1.14M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
3929
1.14M
      a1_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
3930
1.14M
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
3931
3932
1.14M
      if (upsample_left) {
3933
509k
        shifty = _mm_srli_epi16(
3934
509k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
3935
639k
      } else {
3936
639k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
3937
639k
      }
3938
1.14M
      a0_x = _mm_unpacklo_epi64(a0_x, a0_y);
3939
1.14M
      a1_x = _mm_unpacklo_epi64(a1_x, a1_y);
3940
1.14M
      shift = _mm_unpacklo_epi64(shift, shifty);
3941
1.14M
    }
3942
3943
1.36M
    diff = _mm_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
3944
1.36M
    a32 = _mm_slli_epi16(a0_x, 5);     // a[x] * 32
3945
1.36M
    a32 = _mm_add_epi16(a32, a16);     // a[x] * 32 + 16
3946
3947
1.36M
    b = _mm_mullo_epi16(diff, shift);
3948
1.36M
    res = _mm_add_epi16(a32, b);
3949
1.36M
    res = _mm_srli_epi16(res, 5);
3950
3951
1.36M
    resx = _mm_packus_epi16(res, res);
3952
1.36M
    resy = _mm_srli_si128(resx, 4);
3953
3954
1.36M
    resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
3955
1.36M
    *(int *)(dst) = _mm_cvtsi128_si32(resxy);
3956
1.36M
    dst += stride;
3957
1.36M
  }
3958
222k
}
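Zone 2 splits every output pixel between the two reference arrays: for row r and column c the projection c*64 - (r+1)*dx is tried against the above row first, and only when it lands left of min_base_x is the pixel interpolated from left[] using dy instead; the BaseMask blend at the end of the row loop merges the two partial results. A scalar model of that selection, with upsampling and the clamp at the start of left[] omitted, illustration only:

#include <stdint.h>

// Scalar model of the zone 2 above/left split computed above (no
// upsampling; above[-1] is the top-left reference sample in AV1's layout).
static uint8_t z2_pixel(const uint8_t *above, const uint8_t *left, int r,
                        int c, int dx, int dy) {
  const int x = (c << 6) - (r + 1) * dx;  // projected x in 1/64 pel
  if ((x >> 6) >= -1) {                   // still covered by the above row
    const int base = x >> 6, shift = (x & 0x3f) >> 1;
    return (uint8_t)((above[base] * 32 + 16 +
                      (above[base + 1] - above[base]) * shift) >> 5);
  }
  const int y = (r << 6) - (c + 1) * dy;  // fall back to the left column
  const int base = y >> 6, shift = (y & 0x3f) >> 1;
  return (uint8_t)((left[base] * 32 + 16 +
                    (left[base + 1] - left[base]) * shift) >> 5);
}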
3959
3960
static void dr_prediction_z2_Nx8_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3961
                                      const uint8_t *above, const uint8_t *left,
3962
                                      int upsample_above, int upsample_left,
3963
244k
                                      int dx, int dy) {
3964
244k
  const int min_base_x = -(1 << upsample_above);
3965
244k
  const int min_base_y = -(1 << upsample_left);
3966
244k
  const int frac_bits_x = 6 - upsample_above;
3967
244k
  const int frac_bits_y = 6 - upsample_left;
3968
3969
  // pre-filter above pixels
3970
  // store in temp buffers:
3971
  //   above[x] * 32 + 16
3972
  //   above[x+1] - above[x]
3973
  // final pixels will be calculated as:
3974
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3975
244k
  __m256i diff, a32, a16;
3976
244k
  __m256i a0_x, a1_x;
3977
244k
  __m128i a0_x128, a1_x128, min_base_y128, c3f;
3978
244k
  __m128i c1234, dy128;
3979
3980
244k
  a16 = _mm256_set1_epi16(16);
3981
244k
  c3f = _mm_set1_epi16(0x3f);
3982
244k
  min_base_y128 = _mm_set1_epi16(min_base_y);
3983
244k
  dy128 = _mm_set1_epi16(dy);
3984
244k
  c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
3985
3986
2.54M
  for (int r = 0; r < N; r++) {
3987
2.29M
    __m256i b, res, shift;
3988
2.29M
    __m128i resx, resy, resxy, r6, ydx;
3989
3990
2.29M
    int y = r + 1;
3991
2.29M
    int base_x = (-y * dx) >> frac_bits_x;
3992
2.29M
    int base_shift = 0;
3993
2.29M
    if (base_x < (min_base_x - 1)) {
3994
1.72M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
3995
1.72M
    }
3996
2.29M
    int base_min_diff =
3997
2.29M
        (min_base_x - base_x + upsample_above) >> upsample_above;
3998
2.29M
    if (base_min_diff > 8) {
3999
1.02M
      base_min_diff = 8;
4000
1.26M
    } else {
4001
1.26M
      if (base_min_diff < 0) base_min_diff = 0;
4002
1.26M
    }
4003
4004
2.29M
    if (base_shift > 7) {
4005
1.02M
      a0_x = _mm256_setzero_si256();
4006
1.02M
      a1_x = _mm256_setzero_si256();
4007
1.02M
      shift = _mm256_setzero_si256();
4008
1.26M
    } else {
4009
1.26M
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
4010
1.26M
      ydx = _mm_set1_epi16(y * dx);
4011
1.26M
      r6 = _mm_slli_epi16(_mm_srli_si128(c1234, 2), 6);
4012
1.26M
      if (upsample_above) {
4013
385k
        a0_x128 =
4014
385k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
4015
385k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
4016
4017
385k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
4018
385k
            _mm_and_si128(
4019
385k
                _mm_slli_epi16(_mm_sub_epi16(r6, ydx), upsample_above), c3f),
4020
385k
            1));
4021
882k
      } else {
4022
882k
        a1_x128 = _mm_srli_si128(a0_x128, 1);
4023
882k
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
4024
882k
        a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
4025
4026
882k
        shift = _mm256_castsi128_si256(
4027
882k
            _mm_srli_epi16(_mm_and_si128(_mm_sub_epi16(r6, ydx), c3f), 1));
4028
882k
      }
4029
1.26M
      a0_x = _mm256_castsi128_si256(_mm_cvtepu8_epi16(a0_x128));
4030
1.26M
      a1_x = _mm256_castsi128_si256(_mm_cvtepu8_epi16(a1_x128));
4031
1.26M
    }
4032
4033
    // y calc
4034
2.29M
    __m128i a0_y, a1_y, shifty;
4035
2.29M
    if (base_x < min_base_x) {
4036
1.92M
      DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
4037
1.92M
      __m128i y_c128, base_y_c128, mask128;
4038
1.92M
      r6 = _mm_set1_epi16(r << 6);
4039
1.92M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
4040
1.92M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
4041
1.92M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
4042
1.92M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
4043
1.92M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
4044
4045
1.92M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
4046
1.92M
                            left[base_y_c[2]], left[base_y_c[3]],
4047
1.92M
                            left[base_y_c[4]], left[base_y_c[5]],
4048
1.92M
                            left[base_y_c[6]], left[base_y_c[7]]);
4049
1.92M
      base_y_c128 = _mm_add_epi16(
4050
1.92M
          base_y_c128, _mm_srli_epi16(_mm256_castsi256_si128(a16), 4));
4051
1.92M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
4052
4053
1.92M
      a1_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
4054
1.92M
                            left[base_y_c[2]], left[base_y_c[3]],
4055
1.92M
                            left[base_y_c[4]], left[base_y_c[5]],
4056
1.92M
                            left[base_y_c[6]], left[base_y_c[7]]);
4057
4058
1.92M
      if (upsample_left) {
4059
533k
        shifty = _mm_srli_epi16(
4060
533k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
4061
1.39M
      } else {
4062
1.39M
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
4063
1.39M
      }
4064
4065
1.92M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
4066
1.92M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
4067
1.92M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
4068
1.92M
    }
4069
4070
2.29M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
4071
2.29M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
4072
2.29M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4073
4074
2.29M
    b = _mm256_mullo_epi16(diff, shift);
4075
2.29M
    res = _mm256_add_epi16(a32, b);
4076
2.29M
    res = _mm256_srli_epi16(res, 5);
4077
4078
2.29M
    resx = _mm_packus_epi16(_mm256_castsi256_si128(res),
4079
2.29M
                            _mm256_castsi256_si128(res));
4080
2.29M
    resy = _mm256_extracti128_si256(res, 1);
4081
2.29M
    resy = _mm_packus_epi16(resy, resy);
4082
4083
2.29M
    resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
4084
2.29M
    _mm_storel_epi64((__m128i *)(dst), resxy);
4085
2.29M
    dst += stride;
4086
2.29M
  }
4087
244k
}
4088
4089
static void dr_prediction_z2_HxW_avx2(int H, int W, uint8_t *dst,
4090
                                      ptrdiff_t stride, const uint8_t *above,
4091
                                      const uint8_t *left, int upsample_above,
4092
397k
                                      int upsample_left, int dx, int dy) {
4093
  // here upsample_above and upsample_left are 0 by design of
4094
  // av1_use_intra_edge_upsample
4095
397k
  const int min_base_x = -1;
4096
397k
  const int min_base_y = -1;
4097
397k
  (void)upsample_above;
4098
397k
  (void)upsample_left;
4099
397k
  const int frac_bits_x = 6;
4100
397k
  const int frac_bits_y = 6;
4101
4102
397k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c1234, c0123;
4103
397k
  __m256i diff, min_base_y256, c3f, shifty, dy256, c1;
4104
397k
  __m128i a0_x128, a1_x128;
4105
4106
397k
  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
4107
397k
  a16 = _mm256_set1_epi16(16);
4108
397k
  c1 = _mm256_srli_epi16(a16, 4);
4109
397k
  min_base_y256 = _mm256_set1_epi16(min_base_y);
4110
397k
  c3f = _mm256_set1_epi16(0x3f);
4111
397k
  dy256 = _mm256_set1_epi16(dy);
4112
397k
  c0123 =
4113
397k
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4114
397k
  c1234 = _mm256_add_epi16(c0123, c1);
4115
4116
7.58M
  for (int r = 0; r < H; r++) {
4117
7.18M
    __m256i b, res, shift, j256, r6, ydx;
4118
7.18M
    __m128i resx, resy;
4119
7.18M
    __m128i resxy;
4120
7.18M
    int y = r + 1;
4121
7.18M
    ydx = _mm256_set1_epi16((int16_t)(y * dx));
4122
4123
7.18M
    int base_x = (-y * dx) >> frac_bits_x;
4124
20.2M
    for (int j = 0; j < W; j += 16) {
4125
13.0M
      j256 = _mm256_set1_epi16(j);
4126
13.0M
      int base_shift = 0;
4127
13.0M
      if ((base_x + j) < (min_base_x - 1)) {
4128
9.51M
        base_shift = (min_base_x - (base_x + j) - 1);
4129
9.51M
      }
4130
13.0M
      int base_min_diff = (min_base_x - base_x - j);
4131
13.0M
      if (base_min_diff > 16) {
4132
6.80M
        base_min_diff = 16;
4133
6.80M
      } else {
4134
6.25M
        if (base_min_diff < 0) base_min_diff = 0;
4135
6.25M
      }
4136
4137
13.0M
      if (base_shift < 16) {
4138
6.25M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + j));
4139
6.25M
        a1_x128 =
4140
6.25M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1 + j));
4141
6.25M
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
4142
6.25M
        a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
4143
4144
6.25M
        a0_x = _mm256_cvtepu8_epi16(a0_x128);
4145
6.25M
        a1_x = _mm256_cvtepu8_epi16(a1_x128);
4146
4147
6.25M
        r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
4148
6.25M
        shift = _mm256_srli_epi16(
4149
6.25M
            _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
4150
4151
6.25M
        diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
4152
6.25M
        a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
4153
6.25M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4154
4155
6.25M
        b = _mm256_mullo_epi16(diff, shift);
4156
6.25M
        res = _mm256_add_epi16(a32, b);
4157
6.25M
        res = _mm256_srli_epi16(res, 5);  // 16 16-bit values
4158
6.25M
        resx = _mm256_castsi256_si128(_mm256_packus_epi16(
4159
6.25M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
4160
6.80M
      } else {
4161
6.80M
        resx = _mm_setzero_si128();
4162
6.80M
      }
4163
4164
      // y calc
4165
13.0M
      if (base_x < min_base_x) {
4166
12.1M
        __m256i c256, y_c256, base_y_c256, mask256, mul16;
4167
12.1M
        r6 = _mm256_set1_epi16(r << 6);
4168
12.1M
        c256 = _mm256_add_epi16(j256, c1234);
4169
12.1M
        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
4170
12.1M
                                 _mm256_srli_epi16(min_base_y256, 1));
4171
12.1M
        y_c256 = _mm256_sub_epi16(r6, mul16);
4172
4173
12.1M
        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
4174
12.1M
        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
4175
4176
12.1M
        base_y_c256 = _mm256_blendv_epi8(base_y_c256, min_base_y256, mask256);
4177
12.1M
        int16_t min_y = (int16_t)_mm_extract_epi16(
4178
12.1M
            _mm256_extracti128_si256(base_y_c256, 1), 7);
4179
12.1M
        int16_t max_y =
4180
12.1M
            (int16_t)_mm_extract_epi16(_mm256_castsi256_si128(base_y_c256), 0);
4181
12.1M
        int16_t offset_diff = max_y - min_y;
4182
4183
12.1M
        if (offset_diff < 16) {
4184
11.4M
          __m256i min_y256 = _mm256_set1_epi16(min_y);
4185
4186
11.4M
          __m256i base_y_offset = _mm256_sub_epi16(base_y_c256, min_y256);
4187
11.4M
          __m128i base_y_offset128 =
4188
11.4M
              _mm_packs_epi16(_mm256_extracti128_si256(base_y_offset, 0),
4189
11.4M
                              _mm256_extracti128_si256(base_y_offset, 1));
4190
4191
11.4M
          __m128i a0_y128 = _mm_maskload_epi32(
4192
11.4M
              (int *)(left + min_y), *(__m128i *)LoadMaskz2[offset_diff / 4]);
4193
11.4M
          __m128i a1_y128 =
4194
11.4M
              _mm_maskload_epi32((int *)(left + min_y + 1),
4195
11.4M
                                 *(__m128i *)LoadMaskz2[offset_diff / 4]);
4196
11.4M
          a0_y128 = _mm_shuffle_epi8(a0_y128, base_y_offset128);
4197
11.4M
          a1_y128 = _mm_shuffle_epi8(a1_y128, base_y_offset128);
4198
11.4M
          a0_y = _mm256_cvtepu8_epi16(a0_y128);
4199
11.4M
          a1_y = _mm256_cvtepu8_epi16(a1_y128);
4200
11.4M
        } else {
4201
666k
          base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
4202
666k
          _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
4203
4204
666k
          a0_y = _mm256_setr_epi16(
4205
666k
              left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
4206
666k
              left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
4207
666k
              left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
4208
666k
              left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
4209
666k
              left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
4210
666k
              left[base_y_c[15]]);
4211
666k
          base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
4212
666k
          _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
4213
4214
666k
          a1_y = _mm256_setr_epi16(
4215
666k
              left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
4216
666k
              left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
4217
666k
              left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
4218
666k
              left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
4219
666k
              left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
4220
666k
              left[base_y_c[15]]);
4221
666k
        }
4222
12.1M
        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
4223
4224
12.1M
        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
4225
12.1M
        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
4226
12.1M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4227
4228
12.1M
        b = _mm256_mullo_epi16(diff, shifty);
4229
12.1M
        res = _mm256_add_epi16(a32, b);
4230
12.1M
        res = _mm256_srli_epi16(res, 5);  // 16 16-bit values
4231
12.1M
        resy = _mm256_castsi256_si128(_mm256_packus_epi16(
4232
12.1M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
4233
12.1M
      } else {
4234
964k
        resy = _mm_setzero_si128();
4235
964k
      }
4236
13.0M
      resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
4237
13.0M
      _mm_storeu_si128((__m128i *)(dst + j), resxy);
4238
13.0M
    }  // for j
4239
7.18M
    dst += stride;
4240
7.18M
  }
4241
397k
}
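Most of the work in the left-column half of this kernel is gathering sixteen left[] samples at arbitrary indices. When the indices of a 16-pixel chunk span fewer than 16 entries (offset_diff < 16), the code loads the window starting at left[min_y] once with _mm_maskload_epi32 and reorders it per lane with _mm_shuffle_epi8 using the offsets base_y - min_y; only widely spread indices fall back to the element-by-element _mm256_setr_epi16 gather. The index decomposition in scalar form, illustration only:

#include <stdint.h>

// Scalar model of the windowed gather used when max_y - min_y < 16: one
// contiguous load starting at left[min_y], then a byte shuffle by offset.
static void gather_left_window(uint8_t out[16], const uint8_t *left,
                               const int16_t base_y[16], int16_t min_y) {
  uint8_t window[16];
  for (int i = 0; i < 16; ++i) window[i] = left[min_y + i];          // load
  for (int i = 0; i < 16; ++i) out[i] = window[base_y[i] - min_y];   // shuffle
}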
4242
4243
// Directional prediction, zone 2: 90 < angle < 180
4244
void av1_dr_prediction_z2_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
4245
                               const uint8_t *above, const uint8_t *left,
4246
                               int upsample_above, int upsample_left, int dx,
4247
864k
                               int dy) {
4248
864k
  assert(dx > 0);
4249
864k
  assert(dy > 0);
4250
864k
  switch (bw) {
4251
222k
    case 4:
4252
222k
      dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left, upsample_above,
4253
222k
                                upsample_left, dx, dy);
4254
222k
      break;
4255
244k
    case 8:
4256
244k
      dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left, upsample_above,
4257
244k
                                upsample_left, dx, dy);
4258
244k
      break;
4259
397k
    default:
4260
397k
      dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
4261
397k
                                upsample_above, upsample_left, dx, dy);
4262
397k
      break;
4263
864k
  }
4264
864k
  return;
4265
864k
}
4266
4267
// z3 functions
4268
161k
static inline void transpose16x32_avx2(__m256i *x, __m256i *d) {
4269
161k
  __m256i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
4270
161k
  __m256i w10, w11, w12, w13, w14, w15;
4271
4272
161k
  w0 = _mm256_unpacklo_epi8(x[0], x[1]);
4273
161k
  w1 = _mm256_unpacklo_epi8(x[2], x[3]);
4274
161k
  w2 = _mm256_unpacklo_epi8(x[4], x[5]);
4275
161k
  w3 = _mm256_unpacklo_epi8(x[6], x[7]);
4276
4277
161k
  w8 = _mm256_unpacklo_epi8(x[8], x[9]);
4278
161k
  w9 = _mm256_unpacklo_epi8(x[10], x[11]);
4279
161k
  w10 = _mm256_unpacklo_epi8(x[12], x[13]);
4280
161k
  w11 = _mm256_unpacklo_epi8(x[14], x[15]);
4281
4282
161k
  w4 = _mm256_unpacklo_epi16(w0, w1);
4283
161k
  w5 = _mm256_unpacklo_epi16(w2, w3);
4284
161k
  w12 = _mm256_unpacklo_epi16(w8, w9);
4285
161k
  w13 = _mm256_unpacklo_epi16(w10, w11);
4286
4287
161k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4288
161k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4289
161k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4290
161k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4291
4292
  // Store first 4-line result
4293
161k
  d[0] = _mm256_unpacklo_epi64(w6, w14);
4294
161k
  d[1] = _mm256_unpackhi_epi64(w6, w14);
4295
161k
  d[2] = _mm256_unpacklo_epi64(w7, w15);
4296
161k
  d[3] = _mm256_unpackhi_epi64(w7, w15);
4297
4298
161k
  w4 = _mm256_unpackhi_epi16(w0, w1);
4299
161k
  w5 = _mm256_unpackhi_epi16(w2, w3);
4300
161k
  w12 = _mm256_unpackhi_epi16(w8, w9);
4301
161k
  w13 = _mm256_unpackhi_epi16(w10, w11);
4302
4303
161k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4304
161k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4305
161k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4306
161k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4307
4308
  // Store second 4-line result
4309
161k
  d[4] = _mm256_unpacklo_epi64(w6, w14);
4310
161k
  d[5] = _mm256_unpackhi_epi64(w6, w14);
4311
161k
  d[6] = _mm256_unpacklo_epi64(w7, w15);
4312
161k
  d[7] = _mm256_unpackhi_epi64(w7, w15);
4313
4314
  // upper half
4315
161k
  w0 = _mm256_unpackhi_epi8(x[0], x[1]);
4316
161k
  w1 = _mm256_unpackhi_epi8(x[2], x[3]);
4317
161k
  w2 = _mm256_unpackhi_epi8(x[4], x[5]);
4318
161k
  w3 = _mm256_unpackhi_epi8(x[6], x[7]);
4319
4320
161k
  w8 = _mm256_unpackhi_epi8(x[8], x[9]);
4321
161k
  w9 = _mm256_unpackhi_epi8(x[10], x[11]);
4322
161k
  w10 = _mm256_unpackhi_epi8(x[12], x[13]);
4323
161k
  w11 = _mm256_unpackhi_epi8(x[14], x[15]);
4324
4325
161k
  w4 = _mm256_unpacklo_epi16(w0, w1);
4326
161k
  w5 = _mm256_unpacklo_epi16(w2, w3);
4327
161k
  w12 = _mm256_unpacklo_epi16(w8, w9);
4328
161k
  w13 = _mm256_unpacklo_epi16(w10, w11);
4329
4330
161k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4331
161k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4332
161k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4333
161k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4334
4335
  // Store first 4-line result
4336
161k
  d[8] = _mm256_unpacklo_epi64(w6, w14);
4337
161k
  d[9] = _mm256_unpackhi_epi64(w6, w14);
4338
161k
  d[10] = _mm256_unpacklo_epi64(w7, w15);
4339
161k
  d[11] = _mm256_unpackhi_epi64(w7, w15);
4340
4341
161k
  w4 = _mm256_unpackhi_epi16(w0, w1);
4342
161k
  w5 = _mm256_unpackhi_epi16(w2, w3);
4343
161k
  w12 = _mm256_unpackhi_epi16(w8, w9);
4344
161k
  w13 = _mm256_unpackhi_epi16(w10, w11);
4345
4346
161k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4347
161k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4348
161k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4349
161k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4350
4351
  // Store second 4-line result
4352
161k
  d[12] = _mm256_unpacklo_epi64(w6, w14);
4353
161k
  d[13] = _mm256_unpackhi_epi64(w6, w14);
4354
161k
  d[14] = _mm256_unpacklo_epi64(w7, w15);
4355
161k
  d[15] = _mm256_unpackhi_epi64(w7, w15);
4356
161k
}
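Each __m256i in x[] holds one 32-byte input row, so the unpack cascade above implements a 16x32 byte transpose: d[j] packs transposed row j in its low 128-bit lane and transposed row j+16 in its high lane, which is exactly how the z3 callers below store the two row groups. Stated as a scalar model, illustration only:

#include <stdint.h>

// Scalar statement of transpose16x32_avx2: x is a 16x32 byte matrix with
// x[i] holding row i; d[j] packs transposed rows j and j + 16.
static void transpose16x32_model(const uint8_t x[16][32], uint8_t d[16][32]) {
  for (int j = 0; j < 16; ++j) {
    for (int i = 0; i < 16; ++i) {
      d[j][i] = x[i][j];            // low lane: transposed row j
      d[j][16 + i] = x[i][j + 16];  // high lane: transposed row j + 16
    }
  }
}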
4357
4358
static void dr_prediction_z3_4x4_avx2(uint8_t *dst, ptrdiff_t stride,
4359
                                      const uint8_t *left, int upsample_left,
4360
58.9k
                                      int dy) {
4361
58.9k
  __m128i dstvec[4], d[4];
4362
4363
58.9k
  dr_prediction_z1_HxW_internal_avx2(4, 4, dstvec, left, upsample_left, dy);
4364
58.9k
  transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
4365
58.9k
                            &d[0], &d[1], &d[2], &d[3]);
4366
4367
58.9k
  *(int *)(dst + stride * 0) = _mm_cvtsi128_si32(d[0]);
4368
58.9k
  *(int *)(dst + stride * 1) = _mm_cvtsi128_si32(d[1]);
4369
58.9k
  *(int *)(dst + stride * 2) = _mm_cvtsi128_si32(d[2]);
4370
58.9k
  *(int *)(dst + stride * 3) = _mm_cvtsi128_si32(d[3]);
4371
58.9k
  return;
4372
58.9k
}
4373
4374
static void dr_prediction_z3_8x8_avx2(uint8_t *dst, ptrdiff_t stride,
4375
                                      const uint8_t *left, int upsample_left,
4376
89.3k
                                      int dy) {
4377
89.3k
  __m128i dstvec[8], d[8];
4378
4379
89.3k
  dr_prediction_z1_HxW_internal_avx2(8, 8, dstvec, left, upsample_left, dy);
4380
89.3k
  transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4],
4381
89.3k
                    &dstvec[5], &dstvec[6], &dstvec[7], &d[0], &d[1], &d[2],
4382
89.3k
                    &d[3]);
4383
4384
89.3k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
4385
89.3k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), _mm_srli_si128(d[0], 8));
4386
89.3k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[1]);
4387
89.3k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), _mm_srli_si128(d[1], 8));
4388
89.3k
  _mm_storel_epi64((__m128i *)(dst + 4 * stride), d[2]);
4389
89.3k
  _mm_storel_epi64((__m128i *)(dst + 5 * stride), _mm_srli_si128(d[2], 8));
4390
89.3k
  _mm_storel_epi64((__m128i *)(dst + 6 * stride), d[3]);
4391
89.3k
  _mm_storel_epi64((__m128i *)(dst + 7 * stride), _mm_srli_si128(d[3], 8));
4392
89.3k
}
4393
4394
static void dr_prediction_z3_4x8_avx2(uint8_t *dst, ptrdiff_t stride,
4395
                                      const uint8_t *left, int upsample_left,
4396
22.2k
                                      int dy) {
4397
22.2k
  __m128i dstvec[4], d[8];
4398
4399
22.2k
  dr_prediction_z1_HxW_internal_avx2(8, 4, dstvec, left, upsample_left, dy);
4400
22.2k
  transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &d[0],
4401
22.2k
                        &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
4402
200k
  for (int i = 0; i < 8; i++) {
4403
177k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
4404
177k
  }
4405
22.2k
}
4406
4407
static void dr_prediction_z3_8x4_avx2(uint8_t *dst, ptrdiff_t stride,
4408
                                      const uint8_t *left, int upsample_left,
4409
33.8k
                                      int dy) {
4410
33.8k
  __m128i dstvec[8], d[4];
4411
4412
33.8k
  dr_prediction_z1_HxW_internal_avx2(4, 8, dstvec, left, upsample_left, dy);
4413
33.8k
  transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
4414
33.8k
                        &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7], &d[0],
4415
33.8k
                        &d[1], &d[2], &d[3]);
4416
33.8k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
4417
33.8k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
4418
33.8k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
4419
33.8k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
4420
33.8k
}
4421
4422
static void dr_prediction_z3_8x16_avx2(uint8_t *dst, ptrdiff_t stride,
4423
                                       const uint8_t *left, int upsample_left,
4424
22.8k
                                       int dy) {
4425
22.8k
  __m128i dstvec[8], d[8];
4426
4427
22.8k
  dr_prediction_z1_HxW_internal_avx2(16, 8, dstvec, left, upsample_left, dy);
4428
22.8k
  transpose8x16_16x8_sse2(dstvec, dstvec + 1, dstvec + 2, dstvec + 3,
4429
22.8k
                          dstvec + 4, dstvec + 5, dstvec + 6, dstvec + 7, d,
4430
22.8k
                          d + 1, d + 2, d + 3, d + 4, d + 5, d + 6, d + 7);
4431
205k
  for (int i = 0; i < 8; i++) {
4432
182k
    _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
4433
182k
    _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
4434
182k
                     _mm_srli_si128(d[i], 8));
4435
182k
  }
4436
22.8k
}
4437
4438
static void dr_prediction_z3_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
4439
                                       const uint8_t *left, int upsample_left,
4440
43.4k
                                       int dy) {
4441
43.4k
  __m128i dstvec[16], d[16];
4442
4443
43.4k
  dr_prediction_z1_HxW_internal_avx2(8, 16, dstvec, left, upsample_left, dy);
4444
43.4k
  transpose16x8_8x16_sse2(
4445
43.4k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4446
43.4k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4447
43.4k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4448
43.4k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4449
4450
390k
  for (int i = 0; i < 8; i++) {
4451
347k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4452
347k
  }
4453
43.4k
}
4454
4455
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4456
static void dr_prediction_z3_4x16_avx2(uint8_t *dst, ptrdiff_t stride,
4457
                                       const uint8_t *left, int upsample_left,
4458
16.7k
                                       int dy) {
4459
16.7k
  __m128i dstvec[4], d[16];
4460
4461
16.7k
  dr_prediction_z1_HxW_internal_avx2(16, 4, dstvec, left, upsample_left, dy);
4462
16.7k
  transpose4x16_sse2(dstvec, d);
4463
285k
  for (int i = 0; i < 16; i++) {
4464
268k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
4465
268k
  }
4466
16.7k
}
4467
4468
static void dr_prediction_z3_16x4_avx2(uint8_t *dst, ptrdiff_t stride,
4469
                                       const uint8_t *left, int upsample_left,
4470
48.8k
                                       int dy) {
4471
48.8k
  __m128i dstvec[16], d[8];
4472
4473
48.8k
  dr_prediction_z1_HxW_internal_avx2(4, 16, dstvec, left, upsample_left, dy);
4474
244k
  for (int i = 4; i < 8; i++) {
4475
195k
    d[i] = _mm_setzero_si128();
4476
195k
  }
4477
48.8k
  transpose16x8_8x16_sse2(
4478
48.8k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4479
48.8k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4480
48.8k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4481
48.8k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4482
4483
244k
  for (int i = 0; i < 4; i++) {
4484
195k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4485
195k
  }
4486
48.8k
}
4487
4488
static void dr_prediction_z3_8x32_avx2(uint8_t *dst, ptrdiff_t stride,
4489
                                       const uint8_t *left, int upsample_left,
4490
10.4k
                                       int dy) {
4491
10.4k
  __m256i dstvec[16], d[16];
4492
4493
10.4k
  dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left, dy);
4494
93.9k
  for (int i = 8; i < 16; i++) {
4495
83.5k
    dstvec[i] = _mm256_setzero_si256();
4496
83.5k
  }
4497
10.4k
  transpose16x32_avx2(dstvec, d);
4498
4499
177k
  for (int i = 0; i < 16; i++) {
4500
167k
    _mm_storel_epi64((__m128i *)(dst + i * stride),
4501
167k
                     _mm256_castsi256_si128(d[i]));
4502
167k
  }
4503
177k
  for (int i = 0; i < 16; i++) {
4504
167k
    _mm_storel_epi64((__m128i *)(dst + (i + 16) * stride),
4505
167k
                     _mm256_extracti128_si256(d[i], 1));
4506
167k
  }
4507
10.4k
}
4508
4509
static void dr_prediction_z3_32x8_avx2(uint8_t *dst, ptrdiff_t stride,
4510
                                       const uint8_t *left, int upsample_left,
4511
39.9k
                                       int dy) {
4512
39.9k
  __m128i dstvec[32], d[16];
4513
4514
39.9k
  dr_prediction_z1_HxW_internal_avx2(8, 32, dstvec, left, upsample_left, dy);
4515
4516
39.9k
  transpose16x8_8x16_sse2(
4517
39.9k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4518
39.9k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4519
39.9k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4520
39.9k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4521
39.9k
  transpose16x8_8x16_sse2(
4522
39.9k
      &dstvec[0 + 16], &dstvec[1 + 16], &dstvec[2 + 16], &dstvec[3 + 16],
4523
39.9k
      &dstvec[4 + 16], &dstvec[5 + 16], &dstvec[6 + 16], &dstvec[7 + 16],
4524
39.9k
      &dstvec[8 + 16], &dstvec[9 + 16], &dstvec[10 + 16], &dstvec[11 + 16],
4525
39.9k
      &dstvec[12 + 16], &dstvec[13 + 16], &dstvec[14 + 16], &dstvec[15 + 16],
4526
39.9k
      &d[0 + 8], &d[1 + 8], &d[2 + 8], &d[3 + 8], &d[4 + 8], &d[5 + 8],
4527
39.9k
      &d[6 + 8], &d[7 + 8]);
4528
4529
359k
  for (int i = 0; i < 8; i++) {
4530
319k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4531
319k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 8]);
4532
319k
  }
4533
39.9k
}
4534
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4535
4536
static void dr_prediction_z3_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
4537
                                        const uint8_t *left, int upsample_left,
4538
82.3k
                                        int dy) {
4539
82.3k
  __m128i dstvec[16], d[16];
4540
4541
82.3k
  dr_prediction_z1_HxW_internal_avx2(16, 16, dstvec, left, upsample_left, dy);
4542
82.3k
  transpose16x16_sse2(dstvec, d);
4543
4544
1.39M
  for (int i = 0; i < 16; i++) {
4545
1.31M
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4546
1.31M
  }
4547
82.3k
}
4548
4549
static void dr_prediction_z3_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
4550
                                        const uint8_t *left, int upsample_left,
4551
67.2k
                                        int dy) {
4552
67.2k
  __m256i dstvec[32], d[32];
4553
4554
67.2k
  dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left, dy);
4555
67.2k
  transpose16x32_avx2(dstvec, d);
4556
67.2k
  transpose16x32_avx2(dstvec + 16, d + 16);
4557
1.14M
  for (int j = 0; j < 16; j++) {
4558
1.07M
    _mm_storeu_si128((__m128i *)(dst + j * stride),
4559
1.07M
                     _mm256_castsi256_si128(d[j]));
4560
1.07M
    _mm_storeu_si128((__m128i *)(dst + j * stride + 16),
4561
1.07M
                     _mm256_castsi256_si128(d[j + 16]));
4562
1.07M
  }
4563
1.14M
  for (int j = 0; j < 16; j++) {
4564
1.07M
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
4565
1.07M
                     _mm256_extracti128_si256(d[j], 1));
4566
1.07M
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride + 16),
4567
1.07M
                     _mm256_extracti128_si256(d[j + 16], 1));
4568
1.07M
  }
4569
67.2k
}
4570
4571
static void dr_prediction_z3_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
4572
                                        const uint8_t *left, int upsample_left,
4573
20.5k
                                        int dy) {
4574
20.5k
  DECLARE_ALIGNED(16, uint8_t, dstT[64 * 64]);
4575
20.5k
  dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
4576
20.5k
  transpose(dstT, 64, dst, stride, 64, 64);
4577
20.5k
}
4578
4579
static void dr_prediction_z3_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
4580
                                        const uint8_t *left, int upsample_left,
4581
17.0k
                                        int dy) {
4582
17.0k
  __m256i dstvec[16], d[16];
4583
4584
17.0k
  dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left, dy);
4585
17.0k
  transpose16x32_avx2(dstvec, d);
4586
  // store
4587
289k
  for (int j = 0; j < 16; j++) {
4588
272k
    _mm_storeu_si128((__m128i *)(dst + j * stride),
4589
272k
                     _mm256_castsi256_si128(d[j]));
4590
272k
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
4591
272k
                     _mm256_extracti128_si256(d[j], 1));
4592
272k
  }
4593
17.0k
}
4594
4595
static void dr_prediction_z3_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
4596
                                        const uint8_t *left, int upsample_left,
4597
16.9k
                                        int dy) {
4598
16.9k
  __m128i dstvec[32], d[16];
4599
4600
16.9k
  dr_prediction_z1_HxW_internal_avx2(16, 32, dstvec, left, upsample_left, dy);
4601
50.7k
  for (int i = 0; i < 32; i += 16) {
4602
33.8k
    transpose16x16_sse2((dstvec + i), d);
4603
574k
    for (int j = 0; j < 16; j++) {
4604
541k
      _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
4605
541k
    }
4606
33.8k
  }
4607
16.9k
}
4608
4609
static void dr_prediction_z3_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
4610
                                        const uint8_t *left, int upsample_left,
4611
1.55k
                                        int dy) {
4612
1.55k
  uint8_t dstT[64 * 32];
4613
1.55k
  dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
4614
1.55k
  transpose(dstT, 64, dst, stride, 32, 64);
4615
1.55k
}
4616
4617
static void dr_prediction_z3_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
4618
                                        const uint8_t *left, int upsample_left,
4619
2.51k
                                        int dy) {
4620
2.51k
  uint8_t dstT[32 * 64];
4621
2.51k
  dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy);
4622
2.51k
  transpose(dstT, 32, dst, stride, 64, 32);
4623
2.51k
  return;
4624
2.51k
}
4625
4626
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4627
static void dr_prediction_z3_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
4628
                                        const uint8_t *left, int upsample_left,
4629
2.98k
                                        int dy) {
4630
2.98k
  uint8_t dstT[64 * 16];
4631
2.98k
  dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
4632
2.98k
  transpose(dstT, 64, dst, stride, 16, 64);
4633
2.98k
}
4634
4635
static void dr_prediction_z3_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
4636
                                        const uint8_t *left, int upsample_left,
4637
13.7k
                                        int dy) {
4638
13.7k
  __m128i dstvec[64], d[16];
4639
4640
13.7k
  dr_prediction_z1_HxW_internal_avx2(16, 64, dstvec, left, upsample_left, dy);
4641
68.6k
  for (int i = 0; i < 64; i += 16) {
4642
54.9k
    transpose16x16_sse2((dstvec + i), d);
4643
933k
    for (int j = 0; j < 16; j++) {
4644
878k
      _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
4645
878k
    }
4646
54.9k
  }
4647
13.7k
}
4648
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4649
4650
void av1_dr_prediction_z3_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
4651
                               const uint8_t *above, const uint8_t *left,
4652
611k
                               int upsample_left, int dx, int dy) {
4653
611k
  (void)above;
4654
611k
  (void)dx;
4655
611k
  assert(dx == 1);
4656
611k
  assert(dy > 0);
4657
4658
611k
  if (bw == bh) {
4659
318k
    switch (bw) {
4660
58.9k
      case 4:
4661
58.9k
        dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy);
4662
58.9k
        break;
4663
89.3k
      case 8:
4664
89.3k
        dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy);
4665
89.3k
        break;
4666
82.3k
      case 16:
4667
82.3k
        dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy);
4668
82.3k
        break;
4669
67.2k
      case 32:
4670
67.2k
        dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy);
4671
67.2k
        break;
4672
20.5k
      case 64:
4673
20.5k
        dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy);
4674
20.5k
        break;
4675
318k
    }
4676
318k
  } else {
4677
292k
    if (bw < bh) {
4678
93.8k
      if (bw + bw == bh) {
4679
63.6k
        switch (bw) {
4680
22.2k
          case 4:
4681
22.2k
            dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left, dy);
4682
22.2k
            break;
4683
22.8k
          case 8:
4684
22.8k
            dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left, dy);
4685
22.8k
            break;
4686
17.0k
          case 16:
4687
17.0k
            dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left, dy);
4688
17.0k
            break;
4689
1.55k
          case 32:
4690
1.55k
            dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left, dy);
4691
1.55k
            break;
4692
63.6k
        }
4693
63.6k
      } else {
4694
30.1k
        switch (bw) {
4695
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4696
16.7k
          case 4:
4697
16.7k
            dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left, dy);
4698
16.7k
            break;
4699
10.4k
          case 8:
4700
10.4k
            dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left, dy);
4701
10.4k
            break;
4702
2.98k
          case 16:
4703
2.98k
            dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left, dy);
4704
2.98k
            break;
4705
30.1k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4706
30.1k
        }
4707
30.1k
      }
4708
199k
    } else {
4709
199k
      if (bh + bh == bw) {
4710
96.6k
        switch (bh) {
4711
33.8k
          case 4:
4712
33.8k
            dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left, dy);
4713
33.8k
            break;
4714
43.4k
          case 8:
4715
43.4k
            dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left, dy);
4716
43.4k
            break;
4717
16.9k
          case 16:
4718
16.9k
            dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left, dy);
4719
16.9k
            break;
4720
2.51k
          case 32:
4721
2.51k
            dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left, dy);
4722
2.51k
            break;
4723
96.6k
        }
4724
102k
      } else {
4725
102k
        switch (bh) {
4726
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4727
48.8k
          case 4:
4728
48.8k
            dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left, dy);
4729
48.8k
            break;
4730
39.9k
          case 8:
4731
39.9k
            dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left, dy);
4732
39.9k
            break;
4733
13.7k
          case 16:
4734
13.7k
            dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left, dy);
4735
13.7k
            break;
4736
102k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4737
102k
        }
4738
102k
      }
4739
199k
    }
4740
292k
  }
4741
611k
}
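None of the zone 3 kernels interpolate into dst directly: every block size runs the matching zone 1 routine against left[] (with dy standing in for dx) and then transposes the result, either through the register transposes above or, for the 64-row shapes, through a stack buffer and the generic transpose(). A scalar statement of that relation; z1_predict is a hypothetical stand-in for the zone 1 kernels, illustration only:

#include <stddef.h>
#include <stdint.h>

// Relation implemented by the dr_prediction_z3_*_avx2 wrappers: predict the
// transposed block with zone 1 on `left`, then write it back transposed.
static void z3_via_z1(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
                      const uint8_t *left, int dy,
                      void (*z1_predict)(uint8_t *out, int out_stride, int w,
                                         int h, const uint8_t *ref, int dx)) {
  uint8_t tmp[64 * 64];  // transposed shape: bh wide, bw tall
  z1_predict(tmp, /*out_stride=*/bh, /*w=*/bh, /*h=*/bw, left, dy);
  for (int r = 0; r < bh; ++r)
    for (int c = 0; c < bw; ++c) dst[r * stride + c] = tmp[c * bh + r];
}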