Coverage Report

Created: 2025-12-31 06:49

/src/aom/aom_dsp/x86/intrapred_avx2.c
Line  Count  Source
1
/*
2
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
14
#include "config/av1_rtcd.h"
15
#include "aom_dsp/x86/intrapred_x86.h"
16
#include "aom_dsp/x86/intrapred_utils.h"
17
#include "aom_dsp/x86/lpf_common_sse2.h"
18
19
125k
static inline __m256i dc_sum_64(const uint8_t *ref) {
20
125k
  const __m256i x0 = _mm256_loadu_si256((const __m256i *)ref);
21
125k
  const __m256i x1 = _mm256_loadu_si256((const __m256i *)(ref + 32));
22
125k
  const __m256i zero = _mm256_setzero_si256();
23
125k
  __m256i y0 = _mm256_sad_epu8(x0, zero);
24
125k
  __m256i y1 = _mm256_sad_epu8(x1, zero);
25
125k
  y0 = _mm256_add_epi64(y0, y1);
26
125k
  __m256i u0 = _mm256_permute2x128_si256(y0, y0, 1);
27
125k
  y0 = _mm256_add_epi64(u0, y0);
28
125k
  u0 = _mm256_unpackhi_epi64(y0, y0);
29
125k
  return _mm256_add_epi16(y0, u0);
30
125k
}
31
32
772k
static inline __m256i dc_sum_32(const uint8_t *ref) {
33
772k
  const __m256i x = _mm256_loadu_si256((const __m256i *)ref);
34
772k
  const __m256i zero = _mm256_setzero_si256();
35
772k
  __m256i y = _mm256_sad_epu8(x, zero);
36
772k
  __m256i u = _mm256_permute2x128_si256(y, y, 1);
37
772k
  y = _mm256_add_epi64(u, y);
38
772k
  u = _mm256_unpackhi_epi64(y, y);
39
772k
  return _mm256_add_epi16(y, u);
40
772k
}
41
42
static inline void row_store_32xh(const __m256i *r, int height, uint8_t *dst,
43
512k
                                  ptrdiff_t stride) {
44
16.0M
  for (int i = 0; i < height; ++i) {
45
15.5M
    _mm256_storeu_si256((__m256i *)dst, *r);
46
15.5M
    dst += stride;
47
15.5M
  }
48
512k
}
49
50
static inline void row_store_32x2xh(const __m256i *r0, const __m256i *r1,
51
                                    int height, uint8_t *dst,
52
1.83k
                                    ptrdiff_t stride) {
53
85.0k
  for (int i = 0; i < height; ++i) {
54
83.2k
    _mm256_storeu_si256((__m256i *)dst, *r0);
55
83.2k
    _mm256_storeu_si256((__m256i *)(dst + 32), *r1);
56
83.2k
    dst += stride;
57
83.2k
  }
58
1.83k
}
59
60
static inline void row_store_64xh(const __m256i *r, int height, uint8_t *dst,
61
91.1k
                                  ptrdiff_t stride) {
62
4.30M
  for (int i = 0; i < height; ++i) {
63
4.21M
    _mm256_storeu_si256((__m256i *)dst, *r);
64
4.21M
    _mm256_storeu_si256((__m256i *)(dst + 32), *r);
65
4.21M
    dst += stride;
66
4.21M
  }
67
91.1k
}
68
69
#if CONFIG_AV1_HIGHBITDEPTH
70
static DECLARE_ALIGNED(16, uint8_t, HighbdLoadMaskx[8][16]) = {
71
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
72
  { 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
73
  { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
74
  { 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
75
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7 },
76
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5 },
77
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3 },
78
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
79
};
80
81
static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx4[4][16]) = {
82
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
83
  { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 4, 5, 8, 9, 12, 13 },
84
  { 0, 1, 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 6, 7, 10, 11 },
85
  { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 0, 1, 8, 9 }
86
};
87
88
static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx[8][32]) = {
89
  { 0, 1, 4, 5, 8,  9,  12, 13, 16, 17, 20, 21, 24, 25, 28, 29,
90
    2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 },
91
  { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27,
92
    0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 },
93
  { 0, 1, 0, 1, 4, 5, 8,  9,  12, 13, 16, 17, 20, 21, 24, 25,
94
    0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27 },
95
  { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23,
96
    0, 1, 0, 1, 0, 1, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25 },
97
  { 0, 1, 0, 1, 0, 1, 0, 1, 8,  9,  12, 13, 16, 17, 20, 21,
98
    0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19, 22, 23 },
99
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19,
100
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17, 20, 21 },
101
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17,
102
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15, 18, 19 },
103
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15,
104
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 16, 17 }
105
};
106
107
static DECLARE_ALIGNED(32, uint16_t, HighbdBaseMask[17][16]) = {
108
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
109
  { 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
110
  { 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
111
  { 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
112
  { 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
113
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
114
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115
    0 },
116
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0,
117
    0, 0 },
118
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0,
119
    0, 0, 0, 0 },
120
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0,
121
    0, 0, 0, 0, 0, 0 },
122
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
123
    0xffff, 0, 0, 0, 0, 0, 0 },
124
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
125
    0xffff, 0xffff, 0, 0, 0, 0, 0 },
126
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
127
    0xffff, 0xffff, 0xffff, 0, 0, 0, 0 },
128
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
129
    0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0 },
130
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
131
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0 },
132
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
133
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 },
134
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
135
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }
136
};
137
138
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
139
41.3k
static inline void highbd_transpose16x4_8x8_sse2(__m128i *x, __m128i *d) {
140
41.3k
  __m128i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
141
142
41.3k
  r0 = _mm_unpacklo_epi16(x[0], x[1]);
143
41.3k
  r1 = _mm_unpacklo_epi16(x[2], x[3]);
144
41.3k
  r2 = _mm_unpacklo_epi16(x[4], x[5]);
145
41.3k
  r3 = _mm_unpacklo_epi16(x[6], x[7]);
146
147
41.3k
  r4 = _mm_unpacklo_epi16(x[8], x[9]);
148
41.3k
  r5 = _mm_unpacklo_epi16(x[10], x[11]);
149
41.3k
  r6 = _mm_unpacklo_epi16(x[12], x[13]);
150
41.3k
  r7 = _mm_unpacklo_epi16(x[14], x[15]);
151
152
41.3k
  r8 = _mm_unpacklo_epi32(r0, r1);
153
41.3k
  r9 = _mm_unpackhi_epi32(r0, r1);
154
41.3k
  r10 = _mm_unpacklo_epi32(r2, r3);
155
41.3k
  r11 = _mm_unpackhi_epi32(r2, r3);
156
157
41.3k
  r12 = _mm_unpacklo_epi32(r4, r5);
158
41.3k
  r13 = _mm_unpackhi_epi32(r4, r5);
159
41.3k
  r14 = _mm_unpacklo_epi32(r6, r7);
160
41.3k
  r15 = _mm_unpackhi_epi32(r6, r7);
161
162
41.3k
  r0 = _mm_unpacklo_epi64(r8, r9);
163
41.3k
  r1 = _mm_unpackhi_epi64(r8, r9);
164
41.3k
  r2 = _mm_unpacklo_epi64(r10, r11);
165
41.3k
  r3 = _mm_unpackhi_epi64(r10, r11);
166
167
41.3k
  r4 = _mm_unpacklo_epi64(r12, r13);
168
41.3k
  r5 = _mm_unpackhi_epi64(r12, r13);
169
41.3k
  r6 = _mm_unpacklo_epi64(r14, r15);
170
41.3k
  r7 = _mm_unpackhi_epi64(r14, r15);
171
172
41.3k
  d[0] = _mm_unpacklo_epi64(r0, r2);
173
41.3k
  d[1] = _mm_unpacklo_epi64(r4, r6);
174
41.3k
  d[2] = _mm_unpacklo_epi64(r1, r3);
175
41.3k
  d[3] = _mm_unpacklo_epi64(r5, r7);
176
177
41.3k
  d[4] = _mm_unpackhi_epi64(r0, r2);
178
41.3k
  d[5] = _mm_unpackhi_epi64(r4, r6);
179
41.3k
  d[6] = _mm_unpackhi_epi64(r1, r3);
180
41.3k
  d[7] = _mm_unpackhi_epi64(r5, r7);
181
41.3k
}
182
183
13.6k
static inline void highbd_transpose4x16_avx2(__m256i *x, __m256i *d) {
184
13.6k
  __m256i w0, w1, w2, w3, ww0, ww1;
185
186
13.6k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);  // 00 10 01 11 02 12 03 13
187
13.6k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);  // 20 30 21 31 22 32 23 33
188
13.6k
  w2 = _mm256_unpackhi_epi16(x[0], x[1]);  // 40 50 41 51 42 52 43 53
189
13.6k
  w3 = _mm256_unpackhi_epi16(x[2], x[3]);  // 60 70 61 71 62 72 63 73
190
191
13.6k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 00 10 20 30 01 11 21 31
192
13.6k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 40 50 60 70 41 51 61 71
193
194
13.6k
  d[0] = _mm256_unpacklo_epi64(ww0, ww1);  // 00 10 20 30 40 50 60 70
195
13.6k
  d[1] = _mm256_unpackhi_epi64(ww0, ww1);  // 01 11 21 31 41 51 61 71
196
197
13.6k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 02 12 22 32 03 13 23 33
198
13.6k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 42 52 62 72 43 53 63 73
199
200
13.6k
  d[2] = _mm256_unpacklo_epi64(ww0, ww1);  // 02 12 22 32 42 52 62 72
201
13.6k
  d[3] = _mm256_unpackhi_epi64(ww0, ww1);  // 03 13 23 33 43 53 63 73
202
13.6k
}
203
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
204
205
92.2k
static inline void highbd_transpose8x16_16x8_avx2(__m256i *x, __m256i *d) {
206
92.2k
  __m256i w0, w1, w2, w3, ww0, ww1;
207
208
92.2k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);  // 00 10 01 11 02 12 03 13
209
92.2k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);  // 20 30 21 31 22 32 23 33
210
92.2k
  w2 = _mm256_unpacklo_epi16(x[4], x[5]);  // 40 50 41 51 42 52 43 53
211
92.2k
  w3 = _mm256_unpacklo_epi16(x[6], x[7]);  // 60 70 61 71 62 72 63 73
212
213
92.2k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 00 10 20 30 01 11 21 31
214
92.2k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 40 50 60 70 41 51 61 71
215
216
92.2k
  d[0] = _mm256_unpacklo_epi64(ww0, ww1);  // 00 10 20 30 40 50 60 70
217
92.2k
  d[1] = _mm256_unpackhi_epi64(ww0, ww1);  // 01 11 21 31 41 51 61 71
218
219
92.2k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 02 12 22 32 03 13 23 33
220
92.2k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 42 52 62 72 43 53 63 73
221
222
92.2k
  d[2] = _mm256_unpacklo_epi64(ww0, ww1);  // 02 12 22 32 42 52 62 72
223
92.2k
  d[3] = _mm256_unpackhi_epi64(ww0, ww1);  // 03 13 23 33 43 53 63 73
224
225
92.2k
  w0 = _mm256_unpackhi_epi16(x[0], x[1]);  // 04 14 05 15 06 16 07 17
226
92.2k
  w1 = _mm256_unpackhi_epi16(x[2], x[3]);  // 24 34 25 35 26 36 27 37
227
92.2k
  w2 = _mm256_unpackhi_epi16(x[4], x[5]);  // 44 54 45 55 46 56 47 57
228
92.2k
  w3 = _mm256_unpackhi_epi16(x[6], x[7]);  // 64 74 65 75 66 76 67 77
229
230
92.2k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 04 14 24 34 05 15 25 35
231
92.2k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 44 54 64 74 45 55 65 75
232
233
92.2k
  d[4] = _mm256_unpacklo_epi64(ww0, ww1);  // 04 14 24 34 44 54 64 74
234
92.2k
  d[5] = _mm256_unpackhi_epi64(ww0, ww1);  // 05 15 25 35 45 55 65 75
235
236
92.2k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 06 16 26 36 07 17 27 37
237
92.2k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 46 56 66 76 47 57 67 77
238
239
92.2k
  d[6] = _mm256_unpacklo_epi64(ww0, ww1);  // 06 16 26 36 46 56 66 76
240
92.2k
  d[7] = _mm256_unpackhi_epi64(ww0, ww1);  // 07 17 27 37 47 57 67 77
241
92.2k
}
242
243
640k
static inline void highbd_transpose16x16_avx2(__m256i *x, __m256i *d) {
244
640k
  __m256i w0, w1, w2, w3, ww0, ww1;
245
640k
  __m256i dd[16];
246
640k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);
247
640k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);
248
640k
  w2 = _mm256_unpacklo_epi16(x[4], x[5]);
249
640k
  w3 = _mm256_unpacklo_epi16(x[6], x[7]);
250
251
640k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  //
252
640k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  //
253
254
640k
  dd[0] = _mm256_unpacklo_epi64(ww0, ww1);
255
640k
  dd[1] = _mm256_unpackhi_epi64(ww0, ww1);
256
257
640k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  //
258
640k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  //
259
260
640k
  dd[2] = _mm256_unpacklo_epi64(ww0, ww1);
261
640k
  dd[3] = _mm256_unpackhi_epi64(ww0, ww1);
262
263
640k
  w0 = _mm256_unpackhi_epi16(x[0], x[1]);
264
640k
  w1 = _mm256_unpackhi_epi16(x[2], x[3]);
265
640k
  w2 = _mm256_unpackhi_epi16(x[4], x[5]);
266
640k
  w3 = _mm256_unpackhi_epi16(x[6], x[7]);
267
268
640k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  //
269
640k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  //
270
271
640k
  dd[4] = _mm256_unpacklo_epi64(ww0, ww1);
272
640k
  dd[5] = _mm256_unpackhi_epi64(ww0, ww1);
273
274
640k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  //
275
640k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  //
276
277
640k
  dd[6] = _mm256_unpacklo_epi64(ww0, ww1);
278
640k
  dd[7] = _mm256_unpackhi_epi64(ww0, ww1);
279
280
640k
  w0 = _mm256_unpacklo_epi16(x[8], x[9]);
281
640k
  w1 = _mm256_unpacklo_epi16(x[10], x[11]);
282
640k
  w2 = _mm256_unpacklo_epi16(x[12], x[13]);
283
640k
  w3 = _mm256_unpacklo_epi16(x[14], x[15]);
284
285
640k
  ww0 = _mm256_unpacklo_epi32(w0, w1);
286
640k
  ww1 = _mm256_unpacklo_epi32(w2, w3);
287
288
640k
  dd[8] = _mm256_unpacklo_epi64(ww0, ww1);
289
640k
  dd[9] = _mm256_unpackhi_epi64(ww0, ww1);
290
291
640k
  ww0 = _mm256_unpackhi_epi32(w0, w1);
292
640k
  ww1 = _mm256_unpackhi_epi32(w2, w3);
293
294
640k
  dd[10] = _mm256_unpacklo_epi64(ww0, ww1);
295
640k
  dd[11] = _mm256_unpackhi_epi64(ww0, ww1);
296
297
640k
  w0 = _mm256_unpackhi_epi16(x[8], x[9]);
298
640k
  w1 = _mm256_unpackhi_epi16(x[10], x[11]);
299
640k
  w2 = _mm256_unpackhi_epi16(x[12], x[13]);
300
640k
  w3 = _mm256_unpackhi_epi16(x[14], x[15]);
301
302
640k
  ww0 = _mm256_unpacklo_epi32(w0, w1);
303
640k
  ww1 = _mm256_unpacklo_epi32(w2, w3);
304
305
640k
  dd[12] = _mm256_unpacklo_epi64(ww0, ww1);
306
640k
  dd[13] = _mm256_unpackhi_epi64(ww0, ww1);
307
308
640k
  ww0 = _mm256_unpackhi_epi32(w0, w1);
309
640k
  ww1 = _mm256_unpackhi_epi32(w2, w3);
310
311
640k
  dd[14] = _mm256_unpacklo_epi64(ww0, ww1);
312
640k
  dd[15] = _mm256_unpackhi_epi64(ww0, ww1);
313
314
5.76M
  for (int i = 0; i < 8; i++) {
315
5.12M
    d[i] = _mm256_insertf128_si256(dd[i], _mm256_castsi256_si128(dd[i + 8]), 1);
316
5.12M
    d[i + 8] = _mm256_insertf128_si256(dd[i + 8],
317
5.12M
                                       _mm256_extracti128_si256(dd[i], 1), 0);
318
5.12M
  }
319
640k
}
320
#endif  // CONFIG_AV1_HIGHBITDEPTH
321
322
void aom_dc_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
323
340k
                                 const uint8_t *above, const uint8_t *left) {
324
340k
  const __m256i sum_above = dc_sum_32(above);
325
340k
  __m256i sum_left = dc_sum_32(left);
326
340k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
327
340k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
328
340k
  sum_left = _mm256_add_epi16(sum_left, thirtytwo);
329
340k
  sum_left = _mm256_srai_epi16(sum_left, 6);
330
340k
  const __m256i zero = _mm256_setzero_si256();
331
340k
  __m256i row = _mm256_shuffle_epi8(sum_left, zero);
332
340k
  row_store_32xh(&row, 32, dst, stride);
333
340k
}
334
335
void aom_dc_top_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
336
                                     const uint8_t *above,
337
29.2k
                                     const uint8_t *left) {
338
29.2k
  __m256i sum = dc_sum_32(above);
339
29.2k
  (void)left;
340
341
29.2k
  const __m256i sixteen = _mm256_set1_epi16(16);
342
29.2k
  sum = _mm256_add_epi16(sum, sixteen);
343
29.2k
  sum = _mm256_srai_epi16(sum, 5);
344
29.2k
  const __m256i zero = _mm256_setzero_si256();
345
29.2k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
346
29.2k
  row_store_32xh(&row, 32, dst, stride);
347
29.2k
}
348
349
void aom_dc_left_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
350
                                      const uint8_t *above,
351
49.2k
                                      const uint8_t *left) {
352
49.2k
  __m256i sum = dc_sum_32(left);
353
49.2k
  (void)above;
354
355
49.2k
  const __m256i sixteen = _mm256_set1_epi16(16);
356
49.2k
  sum = _mm256_add_epi16(sum, sixteen);
357
49.2k
  sum = _mm256_srai_epi16(sum, 5);
358
49.2k
  const __m256i zero = _mm256_setzero_si256();
359
49.2k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
360
49.2k
  row_store_32xh(&row, 32, dst, stride);
361
49.2k
}
362
363
void aom_dc_128_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
364
                                     const uint8_t *above,
365
11.1k
                                     const uint8_t *left) {
366
11.1k
  (void)above;
367
11.1k
  (void)left;
368
11.1k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
369
11.1k
  row_store_32xh(&row, 32, dst, stride);
370
11.1k
}
371
372
void aom_v_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
373
13.7k
                                const uint8_t *above, const uint8_t *left) {
374
13.7k
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
375
13.7k
  (void)left;
376
13.7k
  row_store_32xh(&row, 32, dst, stride);
377
13.7k
}
378
379
// There are 32 rows together. This function does lines
380
// 0,1,2,3 and 16,17,18,19. The next call does
381
// 4,5,6,7 and 20,21,22,23, so four calls
382
// cover all 32 rows.
383
static inline void h_predictor_32x8line(const __m256i *row, uint8_t *dst,
384
285k
                                        ptrdiff_t stride) {
385
285k
  __m256i t[4];
386
285k
  __m256i m = _mm256_setzero_si256();
387
285k
  const __m256i inc = _mm256_set1_epi8(4);
388
285k
  int i;
389
390
1.42M
  for (i = 0; i < 4; i++) {
391
1.14M
    t[i] = _mm256_shuffle_epi8(*row, m);
392
1.14M
    __m256i r0 = _mm256_permute2x128_si256(t[i], t[i], 0);
393
1.14M
    __m256i r1 = _mm256_permute2x128_si256(t[i], t[i], 0x11);
394
1.14M
    _mm256_storeu_si256((__m256i *)dst, r0);
395
1.14M
    _mm256_storeu_si256((__m256i *)(dst + (stride << 4)), r1);
396
1.14M
    dst += stride;
397
1.14M
    m = _mm256_add_epi8(m, inc);
398
1.14M
  }
399
285k
}
400
401
void aom_h_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
402
71.2k
                                const uint8_t *above, const uint8_t *left) {
403
71.2k
  (void)above;
404
71.2k
  const __m256i left_col = _mm256_loadu_si256((__m256i const *)left);
405
406
71.2k
  __m256i u = _mm256_unpacklo_epi8(left_col, left_col);
407
408
71.2k
  __m256i v = _mm256_unpacklo_epi8(u, u);
409
71.2k
  h_predictor_32x8line(&v, dst, stride);
410
71.2k
  dst += stride << 2;
411
412
71.2k
  v = _mm256_unpackhi_epi8(u, u);
413
71.2k
  h_predictor_32x8line(&v, dst, stride);
414
71.2k
  dst += stride << 2;
415
416
71.2k
  u = _mm256_unpackhi_epi8(left_col, left_col);
417
418
71.2k
  v = _mm256_unpacklo_epi8(u, u);
419
71.2k
  h_predictor_32x8line(&v, dst, stride);
420
71.2k
  dst += stride << 2;
421
422
71.2k
  v = _mm256_unpackhi_epi8(u, u);
423
71.2k
  h_predictor_32x8line(&v, dst, stride);
424
71.2k
}
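
The comment above boils down to the scalar H predictor: row r of the 32x32 block is left[r] replicated across all 32 columns, and the AVX2 kernel emits rows r..r+3 together with rows r+16..r+19 per h_predictor_32x8line call by also storing at dst + 16 * stride. A minimal scalar sketch of the equivalent output (illustrative names, not from libaom):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Scalar reference for what aom_h_predictor_32x32_avx2 produces: each row of
// the prediction block is a copy of the corresponding left-neighbour pixel.
static void h_predictor_32x32_scalar(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *left) {
  for (int r = 0; r < 32; ++r) {
    memset(dst + r * stride, left[r], 32);  // broadcast left[r] across the row
  }
}
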
425
426
// -----------------------------------------------------------------------------
427
// Rectangle
428
void aom_dc_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
429
54.2k
                                 const uint8_t *above, const uint8_t *left) {
430
54.2k
  const __m128i top_sum = dc_sum_32_sse2(above);
431
54.2k
  __m128i left_sum = dc_sum_16_sse2(left);
432
54.2k
  left_sum = _mm_add_epi16(top_sum, left_sum);
433
54.2k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(left_sum);
434
54.2k
  sum += 24;
435
54.2k
  sum /= 48;
436
54.2k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
437
54.2k
  row_store_32xh(&row, 16, dst, stride);
438
54.2k
}
439
440
void aom_dc_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
441
3.04k
                                 const uint8_t *above, const uint8_t *left) {
442
3.04k
  const __m256i sum_above = dc_sum_32(above);
443
3.04k
  __m256i sum_left = dc_sum_64(left);
444
3.04k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
445
3.04k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
446
3.04k
  sum += 48;
447
3.04k
  sum /= 96;
448
3.04k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
449
3.04k
  row_store_32xh(&row, 64, dst, stride);
450
3.04k
}
451
452
void aom_dc_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
453
36.1k
                                 const uint8_t *above, const uint8_t *left) {
454
36.1k
  const __m256i sum_above = dc_sum_64(above);
455
36.1k
  __m256i sum_left = dc_sum_64(left);
456
36.1k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
457
36.1k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
458
36.1k
  sum += 64;
459
36.1k
  sum /= 128;
460
36.1k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
461
36.1k
  row_store_64xh(&row, 64, dst, stride);
462
36.1k
}
463
464
void aom_dc_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
465
7.35k
                                 const uint8_t *above, const uint8_t *left) {
466
7.35k
  const __m256i sum_above = dc_sum_64(above);
467
7.35k
  __m256i sum_left = dc_sum_32(left);
468
7.35k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
469
7.35k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
470
7.35k
  sum += 48;
471
7.35k
  sum /= 96;
472
7.35k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
473
7.35k
  row_store_64xh(&row, 32, dst, stride);
474
7.35k
}
475
476
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
477
void aom_dc_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
478
26.5k
                                 const uint8_t *above, const uint8_t *left) {
479
26.5k
  const __m256i sum_above = dc_sum_64(above);
480
26.5k
  __m256i sum_left = _mm256_castsi128_si256(dc_sum_16_sse2(left));
481
26.5k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
482
26.5k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
483
26.5k
  sum += 40;
484
26.5k
  sum /= 80;
485
26.5k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
486
26.5k
  row_store_64xh(&row, 16, dst, stride);
487
26.5k
}
488
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
489
490
void aom_dc_top_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
491
                                     const uint8_t *above,
492
1.57k
                                     const uint8_t *left) {
493
1.57k
  __m256i sum = dc_sum_32(above);
494
1.57k
  (void)left;
495
496
1.57k
  const __m256i sixteen = _mm256_set1_epi16(16);
497
1.57k
  sum = _mm256_add_epi16(sum, sixteen);
498
1.57k
  sum = _mm256_srai_epi16(sum, 5);
499
1.57k
  const __m256i zero = _mm256_setzero_si256();
500
1.57k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
501
1.57k
  row_store_32xh(&row, 16, dst, stride);
502
1.57k
}
503
504
void aom_dc_top_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
505
                                     const uint8_t *above,
506
933
                                     const uint8_t *left) {
507
933
  __m256i sum = dc_sum_32(above);
508
933
  (void)left;
509
510
933
  const __m256i sixteen = _mm256_set1_epi16(16);
511
933
  sum = _mm256_add_epi16(sum, sixteen);
512
933
  sum = _mm256_srai_epi16(sum, 5);
513
933
  const __m256i zero = _mm256_setzero_si256();
514
933
  __m256i row = _mm256_shuffle_epi8(sum, zero);
515
933
  row_store_32xh(&row, 64, dst, stride);
516
933
}
517
518
void aom_dc_top_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
519
                                     const uint8_t *above,
520
6.28k
                                     const uint8_t *left) {
521
6.28k
  __m256i sum = dc_sum_64(above);
522
6.28k
  (void)left;
523
524
6.28k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
525
6.28k
  sum = _mm256_add_epi16(sum, thirtytwo);
526
6.28k
  sum = _mm256_srai_epi16(sum, 6);
527
6.28k
  const __m256i zero = _mm256_setzero_si256();
528
6.28k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
529
6.28k
  row_store_64xh(&row, 64, dst, stride);
530
6.28k
}
531
532
void aom_dc_top_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
533
                                     const uint8_t *above,
534
242
                                     const uint8_t *left) {
535
242
  __m256i sum = dc_sum_64(above);
536
242
  (void)left;
537
538
242
  const __m256i thirtytwo = _mm256_set1_epi16(32);
539
242
  sum = _mm256_add_epi16(sum, thirtytwo);
540
242
  sum = _mm256_srai_epi16(sum, 6);
541
242
  const __m256i zero = _mm256_setzero_si256();
542
242
  __m256i row = _mm256_shuffle_epi8(sum, zero);
543
242
  row_store_64xh(&row, 32, dst, stride);
544
242
}
545
546
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
547
void aom_dc_top_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
548
                                     const uint8_t *above,
549
936
                                     const uint8_t *left) {
550
936
  __m256i sum = dc_sum_64(above);
551
936
  (void)left;
552
553
936
  const __m256i thirtytwo = _mm256_set1_epi16(32);
554
936
  sum = _mm256_add_epi16(sum, thirtytwo);
555
936
  sum = _mm256_srai_epi16(sum, 6);
556
936
  const __m256i zero = _mm256_setzero_si256();
557
936
  __m256i row = _mm256_shuffle_epi8(sum, zero);
558
936
  row_store_64xh(&row, 16, dst, stride);
559
936
}
560
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
561
562
void aom_dc_left_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
563
                                      const uint8_t *above,
564
2.00k
                                      const uint8_t *left) {
565
2.00k
  __m128i sum = dc_sum_16_sse2(left);
566
2.00k
  (void)above;
567
568
2.00k
  const __m128i eight = _mm_set1_epi16(8);
569
2.00k
  sum = _mm_add_epi16(sum, eight);
570
2.00k
  sum = _mm_srai_epi16(sum, 4);
571
2.00k
  const __m128i zero = _mm_setzero_si128();
572
2.00k
  const __m128i r = _mm_shuffle_epi8(sum, zero);
573
2.00k
  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);
574
2.00k
  row_store_32xh(&row, 16, dst, stride);
575
2.00k
}
576
577
void aom_dc_left_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
578
                                      const uint8_t *above,
579
748
                                      const uint8_t *left) {
580
748
  __m256i sum = dc_sum_64(left);
581
748
  (void)above;
582
583
748
  const __m256i thirtytwo = _mm256_set1_epi16(32);
584
748
  sum = _mm256_add_epi16(sum, thirtytwo);
585
748
  sum = _mm256_srai_epi16(sum, 6);
586
748
  const __m256i zero = _mm256_setzero_si256();
587
748
  __m256i row = _mm256_shuffle_epi8(sum, zero);
588
748
  row_store_32xh(&row, 64, dst, stride);
589
748
}
590
591
void aom_dc_left_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
592
                                      const uint8_t *above,
593
8.36k
                                      const uint8_t *left) {
594
8.36k
  __m256i sum = dc_sum_64(left);
595
8.36k
  (void)above;
596
597
8.36k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
598
8.36k
  sum = _mm256_add_epi16(sum, thirtytwo);
599
8.36k
  sum = _mm256_srai_epi16(sum, 6);
600
8.36k
  const __m256i zero = _mm256_setzero_si256();
601
8.36k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
602
8.36k
  row_store_64xh(&row, 64, dst, stride);
603
8.36k
}
604
605
void aom_dc_left_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
606
                                      const uint8_t *above,
607
470
                                      const uint8_t *left) {
608
470
  __m256i sum = dc_sum_32(left);
609
470
  (void)above;
610
611
470
  const __m256i sixteen = _mm256_set1_epi16(16);
612
470
  sum = _mm256_add_epi16(sum, sixteen);
613
470
  sum = _mm256_srai_epi16(sum, 5);
614
470
  const __m256i zero = _mm256_setzero_si256();
615
470
  __m256i row = _mm256_shuffle_epi8(sum, zero);
616
470
  row_store_64xh(&row, 32, dst, stride);
617
470
}
618
619
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
620
void aom_dc_left_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
621
                                      const uint8_t *above,
622
200
                                      const uint8_t *left) {
623
200
  __m128i sum = dc_sum_16_sse2(left);
624
200
  (void)above;
625
626
200
  const __m128i eight = _mm_set1_epi16(8);
627
200
  sum = _mm_add_epi16(sum, eight);
628
200
  sum = _mm_srai_epi16(sum, 4);
629
200
  const __m128i zero = _mm_setzero_si128();
630
200
  const __m128i r = _mm_shuffle_epi8(sum, zero);
631
200
  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);
632
200
  row_store_64xh(&row, 16, dst, stride);
633
200
}
634
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
635
636
void aom_dc_128_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
637
                                     const uint8_t *above,
638
1.41k
                                     const uint8_t *left) {
639
1.41k
  (void)above;
640
1.41k
  (void)left;
641
1.41k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
642
1.41k
  row_store_32xh(&row, 16, dst, stride);
643
1.41k
}
644
645
void aom_dc_128_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
646
                                     const uint8_t *above,
647
399
                                     const uint8_t *left) {
648
399
  (void)above;
649
399
  (void)left;
650
399
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
651
399
  row_store_32xh(&row, 64, dst, stride);
652
399
}
653
654
void aom_dc_128_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
655
                                     const uint8_t *above,
656
3.85k
                                     const uint8_t *left) {
657
3.85k
  (void)above;
658
3.85k
  (void)left;
659
3.85k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
660
3.85k
  row_store_64xh(&row, 64, dst, stride);
661
3.85k
}
662
663
void aom_dc_128_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
664
                                     const uint8_t *above,
665
662
                                     const uint8_t *left) {
666
662
  (void)above;
667
662
  (void)left;
668
662
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
669
662
  row_store_64xh(&row, 32, dst, stride);
670
662
}
671
672
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
673
void aom_dc_128_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
674
                                     const uint8_t *above,
675
156
                                     const uint8_t *left) {
676
156
  (void)above;
677
156
  (void)left;
678
156
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
679
156
  row_store_64xh(&row, 16, dst, stride);
680
156
}
681
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
682
683
void aom_v_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
684
4.66k
                                const uint8_t *above, const uint8_t *left) {
685
4.66k
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
686
4.66k
  (void)left;
687
4.66k
  row_store_32xh(&row, 16, dst, stride);
688
4.66k
}
689
690
void aom_v_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
691
178
                                const uint8_t *above, const uint8_t *left) {
692
178
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
693
178
  (void)left;
694
178
  row_store_32xh(&row, 64, dst, stride);
695
178
}
696
697
void aom_v_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
698
1.01k
                                const uint8_t *above, const uint8_t *left) {
699
1.01k
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
700
1.01k
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
701
1.01k
  (void)left;
702
1.01k
  row_store_32x2xh(&row0, &row1, 64, dst, stride);
703
1.01k
}
704
705
void aom_v_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
706
316
                                const uint8_t *above, const uint8_t *left) {
707
316
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
708
316
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
709
316
  (void)left;
710
316
  row_store_32x2xh(&row0, &row1, 32, dst, stride);
711
316
}
712
713
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
714
void aom_v_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
715
497
                                const uint8_t *above, const uint8_t *left) {
716
497
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
717
497
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
718
497
  (void)left;
719
497
  row_store_32x2xh(&row0, &row1, 16, dst, stride);
720
497
}
721
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
722
723
// -----------------------------------------------------------------------------
724
// PAETH_PRED
725
726
// Return 16 16-bit pixels in one row (__m256i)
727
static inline __m256i paeth_pred(const __m256i *left, const __m256i *top,
728
24.9M
                                 const __m256i *topleft) {
729
24.9M
  const __m256i base =
730
24.9M
      _mm256_sub_epi16(_mm256_add_epi16(*top, *left), *topleft);
731
732
24.9M
  __m256i pl = _mm256_abs_epi16(_mm256_sub_epi16(base, *left));
733
24.9M
  __m256i pt = _mm256_abs_epi16(_mm256_sub_epi16(base, *top));
734
24.9M
  __m256i ptl = _mm256_abs_epi16(_mm256_sub_epi16(base, *topleft));
735
736
24.9M
  __m256i mask1 = _mm256_cmpgt_epi16(pl, pt);
737
24.9M
  mask1 = _mm256_or_si256(mask1, _mm256_cmpgt_epi16(pl, ptl));
738
24.9M
  __m256i mask2 = _mm256_cmpgt_epi16(pt, ptl);
739
740
24.9M
  pl = _mm256_andnot_si256(mask1, *left);
741
742
24.9M
  ptl = _mm256_and_si256(mask2, *topleft);
743
24.9M
  pt = _mm256_andnot_si256(mask2, *top);
744
24.9M
  pt = _mm256_or_si256(pt, ptl);
745
24.9M
  pt = _mm256_and_si256(mask1, pt);
746
747
24.9M
  return _mm256_or_si256(pt, pl);
748
24.9M
}
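
paeth_pred above is a 16-lane SIMD version of the Paeth selection rule: with base = left + top - topleft, return whichever of the three neighbours is closest to base, preferring left, then top, on ties (mask1 marks lanes where left loses; mask2 then picks between top and topleft). A scalar sketch of that rule, with illustrative names only:

#include <stdint.h>
#include <stdlib.h>

// Scalar per-pixel Paeth selection equivalent to the mask logic in paeth_pred.
static uint8_t paeth_scalar(uint8_t left, uint8_t top, uint8_t topleft) {
  const int base = (int)left + top - topleft;
  const int pl = abs(base - left);      // cost of predicting from the left
  const int pt = abs(base - top);       // cost of predicting from above
  const int ptl = abs(base - topleft);  // cost of predicting from the corner
  if (pl <= pt && pl <= ptl) return left;  // left wins ties
  if (pt <= ptl) return top;               // then top
  return topleft;
}
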
749
750
// Return 16 8-bit pixels in one row (__m128i)
751
static inline __m128i paeth_16x1_pred(const __m256i *left, const __m256i *top,
752
24.6M
                                      const __m256i *topleft) {
753
24.6M
  const __m256i p0 = paeth_pred(left, top, topleft);
754
24.6M
  const __m256i p1 = _mm256_permute4x64_epi64(p0, 0xe);
755
24.6M
  const __m256i p = _mm256_packus_epi16(p0, p1);
756
24.6M
  return _mm256_castsi256_si128(p);
757
24.6M
}
758
759
705k
static inline __m256i get_top_vector(const uint8_t *above) {
760
705k
  const __m128i x = _mm_load_si128((const __m128i *)above);
761
705k
  const __m128i zero = _mm_setzero_si128();
762
705k
  const __m128i t0 = _mm_unpacklo_epi8(x, zero);
763
705k
  const __m128i t1 = _mm_unpackhi_epi8(x, zero);
764
705k
  return _mm256_inserti128_si256(_mm256_castsi128_si256(t0), t1, 1);
765
705k
}
766
767
void aom_paeth_predictor_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
768
28.0k
                                   const uint8_t *above, const uint8_t *left) {
769
28.0k
  __m128i x = _mm_loadl_epi64((const __m128i *)left);
770
28.0k
  const __m256i l = _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
771
28.0k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
772
28.0k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
773
28.0k
  const __m256i one = _mm256_set1_epi16(1);
774
28.0k
  const __m256i top = get_top_vector(above);
775
776
28.0k
  int i;
777
252k
  for (i = 0; i < 8; ++i) {
778
224k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
779
224k
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
780
781
224k
    _mm_store_si128((__m128i *)dst, row);
782
224k
    dst += stride;
783
224k
    rep = _mm256_add_epi16(rep, one);
784
224k
  }
785
28.0k
}
786
787
1.17M
static inline __m256i get_left_vector(const uint8_t *left) {
788
1.17M
  const __m128i x = _mm_load_si128((const __m128i *)left);
789
1.17M
  return _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
790
1.17M
}
791
792
void aom_paeth_predictor_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
793
37.2k
                                    const uint8_t *above, const uint8_t *left) {
794
37.2k
  const __m256i l = get_left_vector(left);
795
37.2k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
796
37.2k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
797
37.2k
  const __m256i one = _mm256_set1_epi16(1);
798
37.2k
  const __m256i top = get_top_vector(above);
799
800
37.2k
  int i;
801
633k
  for (i = 0; i < 16; ++i) {
802
596k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
803
596k
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
804
805
596k
    _mm_store_si128((__m128i *)dst, row);
806
596k
    dst += stride;
807
596k
    rep = _mm256_add_epi16(rep, one);
808
596k
  }
809
37.2k
}
810
811
void aom_paeth_predictor_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
812
295k
                                    const uint8_t *above, const uint8_t *left) {
813
295k
  __m256i l = get_left_vector(left);
814
295k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
815
295k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
816
295k
  const __m256i one = _mm256_set1_epi16(1);
817
295k
  const __m256i top = get_top_vector(above);
818
819
295k
  int i;
820
5.02M
  for (i = 0; i < 16; ++i) {
821
4.72M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
822
4.72M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
823
824
4.72M
    _mm_store_si128((__m128i *)dst, row);
825
4.72M
    dst += stride;
826
4.72M
    rep = _mm256_add_epi16(rep, one);
827
4.72M
  }
828
829
295k
  l = get_left_vector(left + 16);
830
295k
  rep = _mm256_set1_epi16((short)0x8000);
831
5.02M
  for (i = 0; i < 16; ++i) {
832
4.72M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
833
4.72M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
834
835
4.72M
    _mm_store_si128((__m128i *)dst, row);
836
4.72M
    dst += stride;
837
4.72M
    rep = _mm256_add_epi16(rep, one);
838
4.72M
  }
839
295k
}
840
841
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
842
void aom_paeth_predictor_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
843
73.4k
                                    const uint8_t *above, const uint8_t *left) {
844
73.4k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
845
73.4k
  const __m256i one = _mm256_set1_epi16(1);
846
73.4k
  const __m256i top = get_top_vector(above);
847
848
367k
  for (int j = 0; j < 4; ++j) {
849
293k
    const __m256i l = get_left_vector(left + j * 16);
850
293k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
851
4.99M
    for (int i = 0; i < 16; ++i) {
852
4.70M
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
853
4.70M
      const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
854
855
4.70M
      _mm_store_si128((__m128i *)dst, row);
856
4.70M
      dst += stride;
857
4.70M
      rep = _mm256_add_epi16(rep, one);
858
4.70M
    }
859
293k
  }
860
73.4k
}
861
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
862
863
// Return 32 8-bit pixels in one row (__m256i)
864
static inline __m256i paeth_32x1_pred(const __m256i *left, const __m256i *top0,
865
                                      const __m256i *top1,
866
138k
                                      const __m256i *topleft) {
867
138k
  __m256i p0 = paeth_pred(left, top0, topleft);
868
138k
  __m256i p1 = _mm256_permute4x64_epi64(p0, 0xe);
869
138k
  const __m256i x0 = _mm256_packus_epi16(p0, p1);
870
871
138k
  p0 = paeth_pred(left, top1, topleft);
872
138k
  p1 = _mm256_permute4x64_epi64(p0, 0xe);
873
138k
  const __m256i x1 = _mm256_packus_epi16(p0, p1);
874
875
138k
  return _mm256_permute2x128_si256(x0, x1, 0x20);
876
138k
}
877
878
void aom_paeth_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
879
8.66k
                                    const uint8_t *above, const uint8_t *left) {
880
8.66k
  const __m256i l = get_left_vector(left);
881
8.66k
  const __m256i t0 = get_top_vector(above);
882
8.66k
  const __m256i t1 = get_top_vector(above + 16);
883
8.66k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
884
8.66k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
885
8.66k
  const __m256i one = _mm256_set1_epi16(1);
886
887
8.66k
  int i;
888
147k
  for (i = 0; i < 16; ++i) {
889
138k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
890
891
138k
    const __m256i r = paeth_32x1_pred(&l16, &t0, &t1, &tl);
892
893
138k
    _mm256_storeu_si256((__m256i *)dst, r);
894
895
138k
    dst += stride;
896
138k
    rep = _mm256_add_epi16(rep, one);
897
138k
  }
898
8.66k
}
899
900
void aom_paeth_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
901
86.3k
                                    const uint8_t *above, const uint8_t *left) {
902
86.3k
  __m256i l = get_left_vector(left);
903
86.3k
  const __m256i t0 = get_top_vector(above);
904
86.3k
  const __m256i t1 = get_top_vector(above + 16);
905
86.3k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
906
86.3k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
907
86.3k
  const __m256i one = _mm256_set1_epi16(1);
908
909
86.3k
  int i;
910
1.46M
  for (i = 0; i < 16; ++i) {
911
1.38M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
912
913
1.38M
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
914
1.38M
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
915
916
1.38M
    _mm_store_si128((__m128i *)dst, r0);
917
1.38M
    _mm_store_si128((__m128i *)(dst + 16), r1);
918
919
1.38M
    dst += stride;
920
1.38M
    rep = _mm256_add_epi16(rep, one);
921
1.38M
  }
922
923
86.3k
  l = get_left_vector(left + 16);
924
86.3k
  rep = _mm256_set1_epi16((short)0x8000);
925
1.46M
  for (i = 0; i < 16; ++i) {
926
1.38M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
927
928
1.38M
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
929
1.38M
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
930
931
1.38M
    _mm_store_si128((__m128i *)dst, r0);
932
1.38M
    _mm_store_si128((__m128i *)(dst + 16), r1);
933
934
1.38M
    dst += stride;
935
1.38M
    rep = _mm256_add_epi16(rep, one);
936
1.38M
  }
937
86.3k
}
938
939
void aom_paeth_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
940
1.49k
                                    const uint8_t *above, const uint8_t *left) {
941
1.49k
  const __m256i t0 = get_top_vector(above);
942
1.49k
  const __m256i t1 = get_top_vector(above + 16);
943
1.49k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
944
1.49k
  const __m256i one = _mm256_set1_epi16(1);
945
946
1.49k
  int i, j;
947
7.47k
  for (j = 0; j < 4; ++j) {
948
5.97k
    const __m256i l = get_left_vector(left + j * 16);
949
5.97k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
950
101k
    for (i = 0; i < 16; ++i) {
951
95.6k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
952
953
95.6k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
954
95.6k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
955
956
95.6k
      _mm_store_si128((__m128i *)dst, r0);
957
95.6k
      _mm_store_si128((__m128i *)(dst + 16), r1);
958
959
95.6k
      dst += stride;
960
95.6k
      rep = _mm256_add_epi16(rep, one);
961
95.6k
    }
962
5.97k
  }
963
1.49k
}
964
965
void aom_paeth_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
966
2.18k
                                    const uint8_t *above, const uint8_t *left) {
967
2.18k
  const __m256i t0 = get_top_vector(above);
968
2.18k
  const __m256i t1 = get_top_vector(above + 16);
969
2.18k
  const __m256i t2 = get_top_vector(above + 32);
970
2.18k
  const __m256i t3 = get_top_vector(above + 48);
971
2.18k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
972
2.18k
  const __m256i one = _mm256_set1_epi16(1);
973
974
2.18k
  int i, j;
975
6.56k
  for (j = 0; j < 2; ++j) {
976
4.37k
    const __m256i l = get_left_vector(left + j * 16);
977
4.37k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
978
74.3k
    for (i = 0; i < 16; ++i) {
979
70.0k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
980
981
70.0k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
982
70.0k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
983
70.0k
      const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
984
70.0k
      const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
985
986
70.0k
      _mm_store_si128((__m128i *)dst, r0);
987
70.0k
      _mm_store_si128((__m128i *)(dst + 16), r1);
988
70.0k
      _mm_store_si128((__m128i *)(dst + 32), r2);
989
70.0k
      _mm_store_si128((__m128i *)(dst + 48), r3);
990
991
70.0k
      dst += stride;
992
70.0k
      rep = _mm256_add_epi16(rep, one);
993
70.0k
    }
994
4.37k
  }
995
2.18k
}
996
997
void aom_paeth_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
998
13.5k
                                    const uint8_t *above, const uint8_t *left) {
999
13.5k
  const __m256i t0 = get_top_vector(above);
1000
13.5k
  const __m256i t1 = get_top_vector(above + 16);
1001
13.5k
  const __m256i t2 = get_top_vector(above + 32);
1002
13.5k
  const __m256i t3 = get_top_vector(above + 48);
1003
13.5k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
1004
13.5k
  const __m256i one = _mm256_set1_epi16(1);
1005
1006
13.5k
  int i, j;
1007
67.6k
  for (j = 0; j < 4; ++j) {
1008
54.0k
    const __m256i l = get_left_vector(left + j * 16);
1009
54.0k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
1010
919k
    for (i = 0; i < 16; ++i) {
1011
865k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
1012
1013
865k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
1014
865k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
1015
865k
      const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
1016
865k
      const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
1017
1018
865k
      _mm_store_si128((__m128i *)dst, r0);
1019
865k
      _mm_store_si128((__m128i *)(dst + 16), r1);
1020
865k
      _mm_store_si128((__m128i *)(dst + 32), r2);
1021
865k
      _mm_store_si128((__m128i *)(dst + 48), r3);
1022
1023
865k
      dst += stride;
1024
865k
      rep = _mm256_add_epi16(rep, one);
1025
865k
    }
1026
54.0k
  }
1027
13.5k
}
1028
1029
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1030
void aom_paeth_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
1031
3.89k
                                    const uint8_t *above, const uint8_t *left) {
1032
3.89k
  const __m256i t0 = get_top_vector(above);
1033
3.89k
  const __m256i t1 = get_top_vector(above + 16);
1034
3.89k
  const __m256i t2 = get_top_vector(above + 32);
1035
3.89k
  const __m256i t3 = get_top_vector(above + 48);
1036
3.89k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
1037
3.89k
  const __m256i one = _mm256_set1_epi16(1);
1038
1039
3.89k
  int i;
1040
3.89k
  const __m256i l = get_left_vector(left);
1041
3.89k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
1042
66.1k
  for (i = 0; i < 16; ++i) {
1043
62.2k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
1044
1045
62.2k
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
1046
62.2k
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
1047
62.2k
    const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
1048
62.2k
    const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
1049
1050
62.2k
    _mm_store_si128((__m128i *)dst, r0);
1051
62.2k
    _mm_store_si128((__m128i *)(dst + 16), r1);
1052
62.2k
    _mm_store_si128((__m128i *)(dst + 32), r2);
1053
62.2k
    _mm_store_si128((__m128i *)(dst + 48), r3);
1054
1055
62.2k
    dst += stride;
1056
62.2k
    rep = _mm256_add_epi16(rep, one);
1057
62.2k
  }
1058
3.89k
}
1059
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1060
1061
#if CONFIG_AV1_HIGHBITDEPTH
1062
1063
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_4xN_internal_avx2(
1064
174k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1065
174k
  const int frac_bits = 6 - upsample_above;
1066
174k
  const int max_base_x = ((N + 4) - 1) << upsample_above;
1067
1068
174k
  assert(dx > 0);
1069
  // pre-filter above pixels
1070
  // store in temp buffers:
1071
  //   above[x] * 32 + 16
1072
  //   above[x+1] - above[x]
1073
  // final pixels will be calculated as:
1074
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1075
174k
  __m256i a0, a1, a32, a16;
1076
174k
  __m256i diff, c3f;
1077
174k
  __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
1078
174k
  __m128i a0_128, a1_128;
1079
174k
  a16 = _mm256_set1_epi16(16);
1080
174k
  a_mbase_x = _mm_set1_epi16(above[max_base_x]);
1081
174k
  max_base_x128 = _mm_set1_epi16(max_base_x);
1082
174k
  c3f = _mm256_set1_epi16(0x3f);
1083
1084
174k
  int x = dx;
1085
1.53M
  for (int r = 0; r < N; r++) {
1086
1.36M
    __m256i b, res, shift;
1087
1.36M
    __m128i res1;
1088
1089
1.36M
    int base = x >> frac_bits;
1090
1.36M
    if (base >= max_base_x) {
1091
5.60k
      for (int i = r; i < N; ++i) {
1092
3.39k
        dst[i] = a_mbase_x;  // save 4 values
1093
3.39k
      }
1094
2.21k
      return;
1095
2.21k
    }
1096
1097
1.35M
    a0_128 = _mm_loadu_si128((__m128i *)(above + base));
1098
1.35M
    a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
1099
1100
1.35M
    if (upsample_above) {
1101
422k
      a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)HighbdEvenOddMaskx4[0]);
1102
422k
      a1_128 = _mm_srli_si128(a0_128, 8);
1103
1104
422k
      base_inc128 = _mm_setr_epi16(base, base + 2, base + 4, base + 6, base + 8,
1105
422k
                                   base + 10, base + 12, base + 14);
1106
422k
      shift = _mm256_srli_epi16(
1107
422k
          _mm256_and_si256(
1108
422k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above),
1109
422k
              _mm256_set1_epi16(0x3f)),
1110
422k
          1);
1111
936k
    } else {
1112
936k
      base_inc128 = _mm_setr_epi16(base, base + 1, base + 2, base + 3, base + 4,
1113
936k
                                   base + 5, base + 6, base + 7);
1114
936k
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1115
936k
    }
1116
1.35M
    a0 = _mm256_castsi128_si256(a0_128);
1117
1.35M
    a1 = _mm256_castsi128_si256(a1_128);
1118
1.35M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1119
1.35M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1120
1.35M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1121
1122
1.35M
    b = _mm256_mullo_epi16(diff, shift);
1123
1.35M
    res = _mm256_add_epi16(a32, b);
1124
1.35M
    res = _mm256_srli_epi16(res, 5);
1125
1.35M
    res1 = _mm256_castsi256_si128(res);
1126
1127
1.35M
    mask128 = _mm_cmpgt_epi16(max_base_x128, base_inc128);
1128
1.35M
    dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
1129
1.35M
    x += dx;
1130
1.35M
  }
1131
174k
}
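
The comment block inside the kernel above describes a 1/32-pel linear interpolation along the above row: shift is the 5-bit fraction derived from the 6-bit position x, and positions at or past max_base_x clamp to above[max_base_x]. A scalar sketch of one output pixel, assuming upsample_above == 0 (so frac_bits == 6); names are illustrative, not from libaom:

#include <stdint.h>

// Scalar form of the Z1 interpolation the AVX2 kernels vectorize:
//   (above[base] * 32 + 16 + (above[base + 1] - above[base]) * shift) >> 5
static uint16_t z1_pixel_scalar(const uint16_t *above, int max_base_x, int x) {
  const int base = x >> 6;  // integer part of the position
  if (base >= max_base_x) return above[max_base_x];  // clamp past the edge
  const int shift = (x & 0x3f) >> 1;  // 6-bit fraction folded to 5 bits
  const int v =
      above[base] * 32 + 16 + (above[base + 1] - above[base]) * shift;
  return (uint16_t)(v >> 5);
}
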
1132
1133
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_4xN_internal_avx2(
1134
47.3k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1135
47.3k
  const int frac_bits = 6 - upsample_above;
1136
47.3k
  const int max_base_x = ((N + 4) - 1) << upsample_above;
1137
1138
47.3k
  assert(dx > 0);
1139
  // pre-filter above pixels
1140
  // store in temp buffers:
1141
  //   above[x] * 32 + 16
1142
  //   above[x+1] - above[x]
1143
  // final pixels will be calculated as:
1144
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1145
47.3k
  __m256i a0, a1, a32, a16;
1146
47.3k
  __m256i diff;
1147
47.3k
  __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
1148
1149
47.3k
  a16 = _mm256_set1_epi32(16);
1150
47.3k
  a_mbase_x = _mm_set1_epi16(above[max_base_x]);
1151
47.3k
  max_base_x128 = _mm_set1_epi32(max_base_x);
1152
1153
47.3k
  int x = dx;
1154
402k
  for (int r = 0; r < N; r++) {
1155
356k
    __m256i b, res, shift;
1156
356k
    __m128i res1;
1157
1158
356k
    int base = x >> frac_bits;
1159
356k
    if (base >= max_base_x) {
1160
2.70k
      for (int i = r; i < N; ++i) {
1161
1.87k
        dst[i] = a_mbase_x;  // save 4 values
1162
1.87k
      }
1163
822
      return;
1164
822
    }
1165
1166
355k
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1167
355k
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1168
1169
355k
    if (upsample_above) {
1170
98.6k
      a0 = _mm256_permutevar8x32_epi32(
1171
98.6k
          a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1172
98.6k
      a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
1173
98.6k
      base_inc128 = _mm_setr_epi32(base, base + 2, base + 4, base + 6);
1174
98.6k
      shift = _mm256_srli_epi32(
1175
98.6k
          _mm256_and_si256(
1176
98.6k
              _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
1177
98.6k
              _mm256_set1_epi32(0x3f)),
1178
98.6k
          1);
1179
256k
    } else {
1180
256k
      base_inc128 = _mm_setr_epi32(base, base + 1, base + 2, base + 3);
1181
256k
      shift = _mm256_srli_epi32(
1182
256k
          _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1183
256k
    }
1184
1185
355k
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1186
355k
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1187
355k
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1188
1189
355k
    b = _mm256_mullo_epi32(diff, shift);
1190
355k
    res = _mm256_add_epi32(a32, b);
1191
355k
    res = _mm256_srli_epi32(res, 5);
1192
1193
355k
    res1 = _mm256_castsi256_si128(res);
1194
355k
    res1 = _mm_packus_epi32(res1, res1);
1195
1196
355k
    mask128 = _mm_cmpgt_epi32(max_base_x128, base_inc128);
1197
355k
    mask128 = _mm_packs_epi32(mask128, mask128);  // goto 16 bit
1198
355k
    dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
1199
355k
    x += dx;
1200
355k
  }
1201
47.3k
}
1202
1203
static void highbd_dr_prediction_z1_4xN_avx2(int N, uint16_t *dst,
1204
                                             ptrdiff_t stride,
1205
                                             const uint16_t *above,
1206
                                             int upsample_above, int dx,
1207
75.6k
                                             int bd) {
1208
75.6k
  __m128i dstvec[16];
1209
75.6k
  if (bd < 12) {
1210
49.0k
    highbd_dr_prediction_z1_4xN_internal_avx2(N, dstvec, above, upsample_above,
1211
49.0k
                                              dx);
1212
49.0k
  } else {
1213
26.6k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(N, dstvec, above,
1214
26.6k
                                                    upsample_above, dx);
1215
26.6k
  }
1216
608k
  for (int i = 0; i < N; i++) {
1217
533k
    _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
1218
533k
  }
1219
75.6k
}
1220
1221
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_8xN_internal_avx2(
1222
60.6k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1223
60.6k
  const int frac_bits = 6 - upsample_above;
1224
60.6k
  const int max_base_x = ((8 + N) - 1) << upsample_above;
1225
1226
60.6k
  assert(dx > 0);
1227
  // pre-filter above pixels
1228
  // store in temp buffers:
1229
  //   above[x] * 32 + 16
1230
  //   above[x+1] - above[x]
1231
  // final pixels will be calculated as:
1232
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1233
60.6k
  __m256i a0, a1, a0_1, a1_1, a32, a16;
1234
60.6k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1235
1236
60.6k
  a16 = _mm256_set1_epi32(16);
1237
60.6k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1238
60.6k
  max_base_x256 = _mm256_set1_epi32(max_base_x);
1239
1240
60.6k
  int x = dx;
1241
735k
  for (int r = 0; r < N; r++) {
1242
675k
    __m256i b, res, res1, shift;
1243
1244
675k
    int base = x >> frac_bits;
1245
675k
    if (base >= max_base_x) {
1246
2.08k
      for (int i = r; i < N; ++i) {
1247
1.55k
        dst[i] = _mm256_castsi256_si128(a_mbase_x);  // save 8 values
1248
1.55k
      }
1249
528
      return;
1250
528
    }
1251
1252
675k
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1253
675k
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1254
1255
675k
    if (upsample_above) {
1256
148k
      a0 = _mm256_permutevar8x32_epi32(
1257
148k
          a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1258
148k
      a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
1259
1260
148k
      a0_1 =
1261
148k
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
1262
148k
      a0_1 = _mm256_permutevar8x32_epi32(
1263
148k
          a0_1, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1264
148k
      a1_1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0_1, 1));
1265
1266
148k
      a0 = _mm256_inserti128_si256(a0, _mm256_castsi256_si128(a0_1), 1);
1267
148k
      a1 = _mm256_inserti128_si256(a1, _mm256_castsi256_si128(a1_1), 1);
1268
148k
      base_inc256 =
1269
148k
          _mm256_setr_epi32(base, base + 2, base + 4, base + 6, base + 8,
1270
148k
                            base + 10, base + 12, base + 14);
1271
148k
      shift = _mm256_srli_epi32(
1272
148k
          _mm256_and_si256(
1273
148k
              _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
1274
148k
              _mm256_set1_epi32(0x3f)),
1275
148k
          1);
1276
526k
    } else {
1277
526k
      base_inc256 = _mm256_setr_epi32(base, base + 1, base + 2, base + 3,
1278
526k
                                      base + 4, base + 5, base + 6, base + 7);
1279
526k
      shift = _mm256_srli_epi32(
1280
526k
          _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1281
526k
    }
1282
1283
675k
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1284
675k
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1285
675k
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1286
1287
675k
    b = _mm256_mullo_epi32(diff, shift);
1288
675k
    res = _mm256_add_epi32(a32, b);
1289
675k
    res = _mm256_srli_epi32(res, 5);
1290
1291
675k
    res1 = _mm256_packus_epi32(
1292
675k
        res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
1293
1294
675k
    mask256 = _mm256_cmpgt_epi32(max_base_x256, base_inc256);
1295
675k
    mask256 = _mm256_packs_epi32(
1296
675k
        mask256, _mm256_castsi128_si256(
1297
675k
                     _mm256_extracti128_si256(mask256, 1)));  // narrow to 16 bit
1298
675k
    res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1299
675k
    dst[r] = _mm256_castsi256_si128(res1);
1300
675k
    x += dx;
1301
675k
  }
1302
60.6k
}
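
The loop above computes eight output pixels per row in 32-bit lanes; as a point of reference, the per-row arithmetic the comments describe reduces to the scalar form below (a minimal sketch with a hypothetical helper name, assuming the same clamp to above[max_base_x] that the blend mask performs and a base step of 1 << upsample_above per column).

#include <stdint.h>

// Scalar sketch of one row of zone-1 prediction.  x is (r + 1) * dx, the
// accumulated position along the above row for row r.
static void dr_z1_row_sketch(uint16_t *dst, const uint16_t *above, int bw,
                             int x, int upsample_above, int max_base_x) {
  const int frac_bits = 6 - upsample_above;
  const int base_inc = 1 << upsample_above;
  const int shift = ((x << upsample_above) & 0x3f) >> 1;
  int base = x >> frac_bits;
  for (int c = 0; c < bw; ++c, base += base_inc) {
    if (base < max_base_x) {
      // (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
      dst[c] = (uint16_t)((above[base] * 32 + 16 +
                           (above[base + 1] - above[base]) * shift) >> 5);
    } else {
      dst[c] = above[max_base_x];  // past the edge: replicate the last sample
    }
  }
}
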
1303
1304
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_8xN_internal_avx2(
1305
235k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1306
235k
  const int frac_bits = 6 - upsample_above;
1307
235k
  const int max_base_x = ((8 + N) - 1) << upsample_above;
1308
1309
235k
  assert(dx > 0);
1310
  // pre-filter above pixels
1311
  // store in temp buffers:
1312
  //   above[x] * 32 + 16
1313
  //   above[x+1] - above[x]
1314
  // final pixels will be calculated as:
1315
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1316
235k
  __m256i a0, a1, a32, a16, c3f;
1317
235k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1318
235k
  __m128i a0_x128, a1_x128;
1319
1320
235k
  a16 = _mm256_set1_epi16(16);
1321
235k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1322
235k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1323
235k
  c3f = _mm256_set1_epi16(0x3f);
1324
1325
235k
  int x = dx;
1326
3.42M
  for (int r = 0; r < N; r++) {
1327
3.19M
    __m256i b, res, res1, shift;
1328
1329
3.19M
    int base = x >> frac_bits;
1330
3.19M
    if (base >= max_base_x) {
1331
5.14k
      for (int i = r; i < N; ++i) {
1332
3.72k
        dst[i] = _mm256_castsi256_si128(a_mbase_x);  // save 8 values
1333
3.72k
      }
1334
1.42k
      return;
1335
1.42k
    }
1336
1337
3.19M
    a0_x128 = _mm_loadu_si128((__m128i *)(above + base));
1338
3.19M
    if (upsample_above) {
1339
602k
      __m128i mask, atmp0, atmp1, atmp2, atmp3;
1340
602k
      a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 8));
1341
602k
      atmp0 = _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
1342
602k
      atmp1 = _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
1343
602k
      atmp2 =
1344
602k
          _mm_shuffle_epi8(a0_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
1345
602k
      atmp3 =
1346
602k
          _mm_shuffle_epi8(a1_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
1347
602k
      mask =
1348
602k
          _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[0], _mm_set1_epi8(15));
1349
602k
      a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
1350
602k
      mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[0] + 16),
1351
602k
                            _mm_set1_epi8(15));
1352
602k
      a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
1353
1354
602k
      base_inc256 = _mm256_setr_epi16(base, base + 2, base + 4, base + 6,
1355
602k
                                      base + 8, base + 10, base + 12, base + 14,
1356
602k
                                      0, 0, 0, 0, 0, 0, 0, 0);
1357
602k
      shift = _mm256_srli_epi16(
1358
602k
          _mm256_and_si256(
1359
602k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
1360
602k
          1);
1361
2.58M
    } else {
1362
2.58M
      a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 1));
1363
2.58M
      base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1364
2.58M
                                      base + 4, base + 5, base + 6, base + 7, 0,
1365
2.58M
                                      0, 0, 0, 0, 0, 0, 0);
1366
2.58M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1367
2.58M
    }
1368
3.19M
    a0 = _mm256_castsi128_si256(a0_x128);
1369
3.19M
    a1 = _mm256_castsi128_si256(a1_x128);
1370
1371
3.19M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1372
3.19M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1373
3.19M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1374
1375
3.19M
    b = _mm256_mullo_epi16(diff, shift);
1376
3.19M
    res = _mm256_add_epi16(a32, b);
1377
3.19M
    res = _mm256_srli_epi16(res, 5);
1378
1379
3.19M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1380
3.19M
    res1 = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1381
3.19M
    dst[r] = _mm256_castsi256_si128(res1);
1382
3.19M
    x += dx;
1383
3.19M
  }
1384
235k
}
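
When upsample_above is set, the reference edge holds interleaved half-sample positions, so the HighbdEvenOddMaskx shuffles here (and the _mm256_permutevar8x32_epi32 sequence in the 32-bit variant above) split one contiguous load into even and odd taps, and the fractional shift is taken from x doubled before masking. A scalar sketch of what those shuffles gather (hypothetical helper, for illustration only):

#include <stdint.h>

// For upsample_above == 1: lane i of a0/a1 holds the taps at base + 2*i and
// base + 2*i + 1, matching base_inc256 stepping by 2 per lane.
static void gather_upsampled_taps_sketch(const uint16_t *above, int base,
                                         int x, uint16_t a0[8], uint16_t a1[8],
                                         int *shift) {
  for (int i = 0; i < 8; ++i) {
    a0[i] = above[base + 2 * i];      // even positions
    a1[i] = above[base + 2 * i + 1];  // odd positions
  }
  *shift = ((x << 1) & 0x3f) >> 1;    // matches _mm256_slli_epi16(x, 1)
}
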
1385
1386
static void highbd_dr_prediction_z1_8xN_avx2(int N, uint16_t *dst,
1387
                                             ptrdiff_t stride,
1388
                                             const uint16_t *above,
1389
                                             int upsample_above, int dx,
1390
121k
                                             int bd) {
1391
121k
  __m128i dstvec[32];
1392
121k
  if (bd < 12) {
1393
93.0k
    highbd_dr_prediction_z1_8xN_internal_avx2(N, dstvec, above, upsample_above,
1394
93.0k
                                              dx);
1395
93.0k
  } else {
1396
28.0k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(N, dstvec, above,
1397
28.0k
                                                    upsample_above, dx);
1398
28.0k
  }
1399
1.32M
  for (int i = 0; i < N; i++) {
1400
1.20M
    _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
1401
1.20M
  }
1402
121k
}
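
Both the 4xN and 8xN wrappers switch between the 16-bit-lane and 32-bit-lane internals on bd < 12; a plausible reading is that the intermediate a[x] * 32 + 16 + (a[x+1] - a[x]) * shift fits an unsigned 16-bit lane for samples up to 10 bits but not for 12-bit samples. A small standalone check of that bound (an illustrative assumption, not taken from this file):

#include <assert.h>
#include <stdint.h>

int main(void) {
  const int max10 = (1 << 10) - 1;  // largest 10-bit sample value
  const int max12 = (1 << 12) - 1;  // largest 12-bit sample value
  const int max_shift = 31;         // shift = (x & 0x3f) >> 1 is at most 31
  // 10-bit worst case: 1023 * 32 + 16 + 1023 * 31 = 64465, fits in 16 bits.
  assert(max10 * 32 + 16 + max10 * max_shift <= UINT16_MAX);
  // 12-bit worst case: 4095 * 32 = 131040 already overflows 16 bits.
  assert(max12 * 32 > UINT16_MAX);
  return 0;
}
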
1403
1404
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_16xN_internal_avx2(
1405
43.8k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1406
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1407
43.8k
  (void)upsample_above;
1408
43.8k
  const int frac_bits = 6;
1409
43.8k
  const int max_base_x = ((16 + N) - 1);
1410
1411
  // pre-filter above pixels
1412
  // store in temp buffers:
1413
  //   above[x] * 32 + 16
1414
  //   above[x+1] - above[x]
1415
  // final pixels will be calculated as:
1416
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1417
43.8k
  __m256i a0, a0_1, a1, a1_1, a32, a16;
1418
43.8k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1419
1420
43.8k
  a16 = _mm256_set1_epi32(16);
1421
43.8k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1422
43.8k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1423
1424
43.8k
  int x = dx;
1425
601k
  for (int r = 0; r < N; r++) {
1426
557k
    __m256i b, res[2], res1;
1427
1428
557k
    int base = x >> frac_bits;
1429
557k
    if (base >= max_base_x) {
1430
838
      for (int i = r; i < N; ++i) {
1431
692
        dstvec[i] = a_mbase_x;  // save 16 values
1432
692
      }
1433
146
      return;
1434
146
    }
1435
557k
    __m256i shift = _mm256_srli_epi32(
1436
557k
        _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1437
1438
557k
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1439
557k
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1440
1441
557k
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1442
557k
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1443
557k
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1444
557k
    b = _mm256_mullo_epi32(diff, shift);
1445
1446
557k
    res[0] = _mm256_add_epi32(a32, b);
1447
557k
    res[0] = _mm256_srli_epi32(res[0], 5);
1448
557k
    res[0] = _mm256_packus_epi32(
1449
557k
        res[0], _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1450
1451
557k
    int mdif = max_base_x - base;
1452
557k
    if (mdif > 8) {
1453
555k
      a0_1 =
1454
555k
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
1455
555k
      a1_1 =
1456
555k
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 9)));
1457
1458
555k
      diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1459
555k
      a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1460
555k
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1461
555k
      b = _mm256_mullo_epi32(diff, shift);
1462
1463
555k
      res[1] = _mm256_add_epi32(a32, b);
1464
555k
      res[1] = _mm256_srli_epi32(res[1], 5);
1465
555k
      res[1] = _mm256_packus_epi32(
1466
555k
          res[1], _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1467
555k
    } else {
1468
1.50k
      res[1] = a_mbase_x;
1469
1.50k
    }
1470
557k
    res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1471
557k
                                   1);  // 16 16bit values
1472
1473
557k
    base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1474
557k
                                    base + 4, base + 5, base + 6, base + 7,
1475
557k
                                    base + 8, base + 9, base + 10, base + 11,
1476
557k
                                    base + 12, base + 13, base + 14, base + 15);
1477
557k
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1478
557k
    dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1479
557k
    x += dx;
1480
557k
  }
1481
43.8k
}
1482
1483
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_16xN_internal_avx2(
1484
194k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1485
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1486
194k
  (void)upsample_above;
1487
194k
  const int frac_bits = 6;
1488
194k
  const int max_base_x = ((16 + N) - 1);
1489
1490
  // pre-filter above pixels
1491
  // store in temp buffers:
1492
  //   above[x] * 32 + 16
1493
  //   above[x+1] - above[x]
1494
  // final pixels will be calculated as:
1495
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1496
194k
  __m256i a0, a1, a32, a16, c3f;
1497
194k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1498
1499
194k
  a16 = _mm256_set1_epi16(16);
1500
194k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1501
194k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1502
194k
  c3f = _mm256_set1_epi16(0x3f);
1503
1504
194k
  int x = dx;
1505
3.99M
  for (int r = 0; r < N; r++) {
1506
3.79M
    __m256i b, res;
1507
1508
3.79M
    int base = x >> frac_bits;
1509
3.79M
    if (base >= max_base_x) {
1510
1.17k
      for (int i = r; i < N; ++i) {
1511
908
        dstvec[i] = a_mbase_x;  // save 16 values
1512
908
      }
1513
264
      return;
1514
264
    }
1515
3.79M
    __m256i shift =
1516
3.79M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1517
1518
3.79M
    a0 = _mm256_loadu_si256((__m256i *)(above + base));
1519
3.79M
    a1 = _mm256_loadu_si256((__m256i *)(above + base + 1));
1520
1521
3.79M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1522
3.79M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1523
3.79M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1524
3.79M
    b = _mm256_mullo_epi16(diff, shift);
1525
1526
3.79M
    res = _mm256_add_epi16(a32, b);
1527
3.79M
    res = _mm256_srli_epi16(res, 5);  // 16 16bit values
1528
1529
3.79M
    base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1530
3.79M
                                    base + 4, base + 5, base + 6, base + 7,
1531
3.79M
                                    base + 8, base + 9, base + 10, base + 11,
1532
3.79M
                                    base + 12, base + 13, base + 14, base + 15);
1533
3.79M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1534
3.79M
    dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1535
3.79M
    x += dx;
1536
3.79M
  }
1537
194k
}
1538
1539
static void highbd_dr_prediction_z1_16xN_avx2(int N, uint16_t *dst,
1540
                                              ptrdiff_t stride,
1541
                                              const uint16_t *above,
1542
                                              int upsample_above, int dx,
1543
93.3k
                                              int bd) {
1544
93.3k
  __m256i dstvec[64];
1545
93.3k
  if (bd < 12) {
1546
69.8k
    highbd_dr_prediction_z1_16xN_internal_avx2(N, dstvec, above, upsample_above,
1547
69.8k
                                               dx);
1548
69.8k
  } else {
1549
23.5k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(N, dstvec, above,
1550
23.5k
                                                     upsample_above, dx);
1551
23.5k
  }
1552
1.36M
  for (int i = 0; i < N; i++) {
1553
1.26M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
1554
1.26M
  }
1555
93.3k
}
1556
1557
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_32xN_internal_avx2(
1558
12.9k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1559
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1560
12.9k
  (void)upsample_above;
1561
12.9k
  const int frac_bits = 6;
1562
12.9k
  const int max_base_x = ((32 + N) - 1);
1563
1564
  // pre-filter above pixels
1565
  // store in temp buffers:
1566
  //   above[x] * 32 + 16
1567
  //   above[x+1] - above[x]
1568
  // final pixels will be calculated as:
1569
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1570
12.9k
  __m256i a0, a0_1, a1, a1_1, a32, a16, c3f;
1571
12.9k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1572
1573
12.9k
  a16 = _mm256_set1_epi32(16);
1574
12.9k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1575
12.9k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1576
12.9k
  c3f = _mm256_set1_epi16(0x3f);
1577
1578
12.9k
  int x = dx;
1579
296k
  for (int r = 0; r < N; r++) {
1580
283k
    __m256i b, res[2], res1;
1581
1582
283k
    int base = x >> frac_bits;
1583
283k
    if (base >= max_base_x) {
1584
0
      for (int i = r; i < N; ++i) {
1585
0
        dstvec[i] = a_mbase_x;  // save 32 values
1586
0
        dstvec[i + N] = a_mbase_x;
1587
0
      }
1588
0
      return;
1589
0
    }
1590
1591
283k
    __m256i shift =
1592
283k
        _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
1593
1594
850k
    for (int j = 0; j < 32; j += 16) {
1595
567k
      int mdif = max_base_x - (base + j);
1596
567k
      if (mdif <= 0) {
1597
268
        res1 = a_mbase_x;
1598
566k
      } else {
1599
566k
        a0 = _mm256_cvtepu16_epi32(
1600
566k
            _mm_loadu_si128((__m128i *)(above + base + j)));
1601
566k
        a1 = _mm256_cvtepu16_epi32(
1602
566k
            _mm_loadu_si128((__m128i *)(above + base + 1 + j)));
1603
1604
566k
        diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1605
566k
        a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1606
566k
        a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1607
566k
        b = _mm256_mullo_epi32(diff, shift);
1608
1609
566k
        res[0] = _mm256_add_epi32(a32, b);
1610
566k
        res[0] = _mm256_srli_epi32(res[0], 5);
1611
566k
        res[0] = _mm256_packus_epi32(
1612
566k
            res[0],
1613
566k
            _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1614
566k
        if (mdif > 8) {
1615
565k
          a0_1 = _mm256_cvtepu16_epi32(
1616
565k
              _mm_loadu_si128((__m128i *)(above + base + 8 + j)));
1617
565k
          a1_1 = _mm256_cvtepu16_epi32(
1618
565k
              _mm_loadu_si128((__m128i *)(above + base + 9 + j)));
1619
1620
565k
          diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1621
565k
          a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1622
565k
          a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1623
565k
          b = _mm256_mullo_epi32(diff, shift);
1624
1625
565k
          res[1] = _mm256_add_epi32(a32, b);
1626
565k
          res[1] = _mm256_srli_epi32(res[1], 5);
1627
565k
          res[1] = _mm256_packus_epi32(
1628
565k
              res[1],
1629
565k
              _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1630
565k
        } else {
1631
1.72k
          res[1] = a_mbase_x;
1632
1.72k
        }
1633
566k
        res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1634
566k
                                       1);  // 16 16bit values
1635
566k
        base_inc256 = _mm256_setr_epi16(
1636
566k
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1637
566k
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1638
566k
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1639
566k
            base + j + 13, base + j + 14, base + j + 15);
1640
1641
566k
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1642
566k
        res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1643
566k
      }
1644
567k
      if (!j) {
1645
283k
        dstvec[r] = res1;
1646
283k
      } else {
1647
283k
        dstvec[r + N] = res1;
1648
283k
      }
1649
567k
    }
1650
283k
    x += dx;
1651
283k
  }
1652
12.9k
}
1653
1654
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_32xN_internal_avx2(
1655
112k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1656
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1657
112k
  (void)upsample_above;
1658
112k
  const int frac_bits = 6;
1659
112k
  const int max_base_x = ((32 + N) - 1);
1660
1661
  // pre-filter above pixels
1662
  // store in temp buffers:
1663
  //   above[x] * 32 + 16
1664
  //   above[x+1] - above[x]
1665
  // final pixels will be calculated as:
1666
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1667
112k
  __m256i a0, a1, a32, a16, c3f;
1668
112k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1669
1670
112k
  a16 = _mm256_set1_epi16(16);
1671
112k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1672
112k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1673
112k
  c3f = _mm256_set1_epi16(0x3f);
1674
1675
112k
  int x = dx;
1676
3.14M
  for (int r = 0; r < N; r++) {
1677
3.03M
    __m256i b, res;
1678
1679
3.03M
    int base = x >> frac_bits;
1680
3.03M
    if (base >= max_base_x) {
1681
0
      for (int i = r; i < N; ++i) {
1682
0
        dstvec[i] = a_mbase_x;  // save 32 values
1683
0
        dstvec[i + N] = a_mbase_x;
1684
0
      }
1685
0
      return;
1686
0
    }
1687
1688
3.03M
    __m256i shift =
1689
3.03M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1690
1691
9.09M
    for (int j = 0; j < 32; j += 16) {
1692
6.06M
      int mdif = max_base_x - (base + j);
1693
6.06M
      if (mdif <= 0) {
1694
515
        res = a_mbase_x;
1695
6.06M
      } else {
1696
6.06M
        a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
1697
6.06M
        a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
1698
1699
6.06M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1700
6.06M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1701
6.06M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1702
6.06M
        b = _mm256_mullo_epi16(diff, shift);
1703
1704
6.06M
        res = _mm256_add_epi16(a32, b);
1705
6.06M
        res = _mm256_srli_epi16(res, 5);
1706
1707
6.06M
        base_inc256 = _mm256_setr_epi16(
1708
6.06M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1709
6.06M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1710
6.06M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1711
6.06M
            base + j + 13, base + j + 14, base + j + 15);
1712
1713
6.06M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1714
6.06M
        res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1715
6.06M
      }
1716
6.06M
      if (!j) {
1717
3.03M
        dstvec[r] = res;
1718
3.03M
      } else {
1719
3.03M
        dstvec[r + N] = res;
1720
3.03M
      }
1721
6.06M
    }
1722
3.03M
    x += dx;
1723
3.03M
  }
1724
112k
}
1725
1726
static void highbd_dr_prediction_z1_32xN_avx2(int N, uint16_t *dst,
1727
                                              ptrdiff_t stride,
1728
                                              const uint16_t *above,
1729
                                              int upsample_above, int dx,
1730
48.2k
                                              int bd) {
1731
48.2k
  __m256i dstvec[128];
1732
48.2k
  if (bd < 12) {
1733
41.6k
    highbd_dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above,
1734
41.6k
                                               dx);
1735
41.6k
  } else {
1736
6.60k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(N, dstvec, above,
1737
6.60k
                                                     upsample_above, dx);
1738
6.60k
  }
1739
1.29M
  for (int i = 0; i < N; i++) {
1740
1.24M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
1741
1.24M
    _mm256_storeu_si256((__m256i *)(dst + stride * i + 16), dstvec[i + N]);
1742
1.24M
  }
1743
48.2k
}
1744
1745
static void highbd_dr_prediction_32bit_z1_64xN_avx2(int N, uint16_t *dst,
1746
                                                    ptrdiff_t stride,
1747
                                                    const uint16_t *above,
1748
                                                    int upsample_above,
1749
11.2k
                                                    int dx) {
1750
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1751
11.2k
  (void)upsample_above;
1752
11.2k
  const int frac_bits = 6;
1753
11.2k
  const int max_base_x = ((64 + N) - 1);
1754
1755
  // pre-filter above pixels
1756
  // store in temp buffers:
1757
  //   above[x] * 32 + 16
1758
  //   above[x+1] - above[x]
1759
  // final pixels will be calculated as:
1760
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1761
11.2k
  __m256i a0, a0_1, a1, a1_1, a32, a16;
1762
11.2k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1763
1764
11.2k
  a16 = _mm256_set1_epi32(16);
1765
11.2k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1766
11.2k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1767
1768
11.2k
  int x = dx;
1769
668k
  for (int r = 0; r < N; r++, dst += stride) {
1770
657k
    __m256i b, res[2], res1;
1771
1772
657k
    int base = x >> frac_bits;
1773
657k
    if (base >= max_base_x) {
1774
0
      for (int i = r; i < N; ++i) {
1775
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 64 values
1776
0
        _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
1777
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
1778
0
        _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
1779
0
        dst += stride;
1780
0
      }
1781
0
      return;
1782
0
    }
1783
1784
657k
    __m256i shift = _mm256_srli_epi32(
1785
657k
        _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1786
1787
657k
    __m128i a0_128, a0_1_128, a1_128, a1_1_128;
1788
3.28M
    for (int j = 0; j < 64; j += 16) {
1789
2.62M
      int mdif = max_base_x - (base + j);
1790
2.62M
      if (mdif <= 0) {
1791
2.38k
        _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
1792
2.62M
      } else {
1793
2.62M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
1794
2.62M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
1795
2.62M
        a0 = _mm256_cvtepu16_epi32(a0_128);
1796
2.62M
        a1 = _mm256_cvtepu16_epi32(a1_128);
1797
1798
2.62M
        diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1799
2.62M
        a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1800
2.62M
        a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1801
2.62M
        b = _mm256_mullo_epi32(diff, shift);
1802
1803
2.62M
        res[0] = _mm256_add_epi32(a32, b);
1804
2.62M
        res[0] = _mm256_srli_epi32(res[0], 5);
1805
2.62M
        res[0] = _mm256_packus_epi32(
1806
2.62M
            res[0],
1807
2.62M
            _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1808
2.62M
        if (mdif > 8) {
1809
2.62M
          a0_1_128 = _mm_loadu_si128((__m128i *)(above + base + 8 + j));
1810
2.62M
          a1_1_128 = _mm_loadu_si128((__m128i *)(above + base + 9 + j));
1811
2.62M
          a0_1 = _mm256_cvtepu16_epi32(a0_1_128);
1812
2.62M
          a1_1 = _mm256_cvtepu16_epi32(a1_1_128);
1813
1814
2.62M
          diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1815
2.62M
          a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1816
2.62M
          a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1817
2.62M
          b = _mm256_mullo_epi32(diff, shift);
1818
1819
2.62M
          res[1] = _mm256_add_epi32(a32, b);
1820
2.62M
          res[1] = _mm256_srli_epi32(res[1], 5);
1821
2.62M
          res[1] = _mm256_packus_epi32(
1822
2.62M
              res[1],
1823
2.62M
              _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1824
2.62M
        } else {
1825
4.22k
          res[1] = a_mbase_x;
1826
4.22k
        }
1827
2.62M
        res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1828
2.62M
                                       1);  // 16 16bit values
1829
2.62M
        base_inc256 = _mm256_setr_epi16(
1830
2.62M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1831
2.62M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1832
2.62M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1833
2.62M
            base + j + 13, base + j + 14, base + j + 15);
1834
1835
2.62M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1836
2.62M
        res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1837
2.62M
        _mm256_storeu_si256((__m256i *)(dst + j), res1);
1838
2.62M
      }
1839
2.62M
    }
1840
657k
    x += dx;
1841
657k
  }
1842
11.2k
}
1843
1844
static void highbd_dr_prediction_z1_64xN_avx2(int N, uint16_t *dst,
1845
                                              ptrdiff_t stride,
1846
                                              const uint16_t *above,
1847
19.4k
                                              int upsample_above, int dx) {
1848
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1849
19.4k
  (void)upsample_above;
1850
19.4k
  const int frac_bits = 6;
1851
19.4k
  const int max_base_x = ((64 + N) - 1);
1852
1853
  // pre-filter above pixels
1854
  // store in temp buffers:
1855
  //   above[x] * 32 + 16
1856
  //   above[x+1] - above[x]
1857
  // final pixels will be calculated as:
1858
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1859
19.4k
  __m256i a0, a1, a32, a16, c3f;
1860
19.4k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1861
1862
19.4k
  a16 = _mm256_set1_epi16(16);
1863
19.4k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1864
19.4k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1865
19.4k
  c3f = _mm256_set1_epi16(0x3f);
1866
1867
19.4k
  int x = dx;
1868
957k
  for (int r = 0; r < N; r++, dst += stride) {
1869
937k
    __m256i b, res;
1870
1871
937k
    int base = x >> frac_bits;
1872
937k
    if (base >= max_base_x) {
1873
0
      for (int i = r; i < N; ++i) {
1874
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 64 values
1875
0
        _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
1876
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
1877
0
        _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
1878
0
        dst += stride;
1879
0
      }
1880
0
      return;
1881
0
    }
1882
1883
937k
    __m256i shift =
1884
937k
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1885
1886
4.68M
    for (int j = 0; j < 64; j += 16) {
1887
3.75M
      int mdif = max_base_x - (base + j);
1888
3.75M
      if (mdif <= 0) {
1889
1.58k
        _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
1890
3.74M
      } else {
1891
3.74M
        a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
1892
3.74M
        a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
1893
1894
3.74M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1895
3.74M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1896
3.74M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1897
3.74M
        b = _mm256_mullo_epi16(diff, shift);
1898
1899
3.74M
        res = _mm256_add_epi16(a32, b);
1900
3.74M
        res = _mm256_srli_epi16(res, 5);
1901
1902
3.74M
        base_inc256 = _mm256_setr_epi16(
1903
3.74M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1904
3.74M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1905
3.74M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1906
3.74M
            base + j + 13, base + j + 14, base + j + 15);
1907
1908
3.74M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1909
3.74M
        res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1910
3.74M
        _mm256_storeu_si256((__m256i *)(dst + j), res);  // 16 16bit values
1911
3.74M
      }
1912
3.75M
    }
1913
937k
    x += dx;
1914
937k
  }
1915
19.4k
}
1916
1917
// Directional prediction, zone 1: 0 < angle < 90
1918
void av1_highbd_dr_prediction_z1_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
1919
                                      int bh, const uint16_t *above,
1920
                                      const uint16_t *left, int upsample_above,
1921
350k
                                      int dx, int dy, int bd) {
1922
350k
  (void)left;
1923
350k
  (void)dy;
1924
1925
350k
  switch (bw) {
1926
75.6k
    case 4:
1927
75.6k
      highbd_dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above,
1928
75.6k
                                       dx, bd);
1929
75.6k
      break;
1930
121k
    case 8:
1931
121k
      highbd_dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above,
1932
121k
                                       dx, bd);
1933
121k
      break;
1934
93.3k
    case 16:
1935
93.3k
      highbd_dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above,
1936
93.3k
                                        dx, bd);
1937
93.3k
      break;
1938
45.7k
    case 32:
1939
45.7k
      highbd_dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above,
1940
45.7k
                                        dx, bd);
1941
45.7k
      break;
1942
14.3k
    case 64:
1943
14.3k
      if (bd < 12) {
1944
6.71k
        highbd_dr_prediction_z1_64xN_avx2(bh, dst, stride, above,
1945
6.71k
                                          upsample_above, dx);
1946
7.65k
      } else {
1947
7.65k
        highbd_dr_prediction_32bit_z1_64xN_avx2(bh, dst, stride, above,
1948
7.65k
                                                upsample_above, dx);
1949
7.65k
      }
1950
14.3k
      break;
1951
0
    default: break;
1952
350k
  }
1953
350k
  return;
1954
350k
}
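
The dispatcher selects a kernel by block width and forwards the height as N; left and dy are unused in zone 1. A hypothetical call for a 16x8 block at 10-bit depth (the dx value is illustrative and would normally come from the prediction angle):

#include <stddef.h>
#include <stdint.h>
#include "config/av1_rtcd.h"  // declares the AVX2 rtcd prototypes, as at the top of this file

// Illustrative wrapper only; edge buffer setup is the caller's responsibility.
static void predict_16x8_zone1_example(uint16_t *dst, ptrdiff_t stride,
                                       const uint16_t *above,
                                       const uint16_t *left) {
  av1_highbd_dr_prediction_z1_avx2(dst, stride, /*bw=*/16, /*bh=*/8, above,
                                   left, /*upsample_above=*/0, /*dx=*/64,
                                   /*dy=*/0, /*bd=*/10);
}
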
1955
1956
static void highbd_transpose_TX_16X16(const uint16_t *src, ptrdiff_t pitchSrc,
1957
238k
                                      uint16_t *dst, ptrdiff_t pitchDst) {
1958
238k
  __m256i r[16];
1959
238k
  __m256i d[16];
1960
4.04M
  for (int j = 0; j < 16; j++) {
1961
3.81M
    r[j] = _mm256_loadu_si256((__m256i *)(src + j * pitchSrc));
1962
3.81M
  }
1963
238k
  highbd_transpose16x16_avx2(r, d);
1964
4.04M
  for (int j = 0; j < 16; j++) {
1965
3.81M
    _mm256_storeu_si256((__m256i *)(dst + j * pitchDst), d[j]);
1966
3.81M
  }
1967
238k
}
1968
1969
static void highbd_transpose(const uint16_t *src, ptrdiff_t pitchSrc,
1970
                             uint16_t *dst, ptrdiff_t pitchDst, int width,
1971
18.9k
                             int height) {
1972
89.4k
  for (int j = 0; j < height; j += 16)
1973
308k
    for (int i = 0; i < width; i += 16)
1974
238k
      highbd_transpose_TX_16X16(src + i * pitchSrc + j, pitchSrc,
1975
238k
                                dst + j * pitchDst + i, pitchDst);
1976
18.9k
}
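
highbd_transpose tiles the surface into 16x16 blocks and transposes each tile with the AVX2 kernel above; the net effect is a plain element transpose across pitched buffers. A scalar equivalent for reference (hypothetical helper name):

#include <stddef.h>
#include <stdint.h>

// Element (i, j) of the source, addressed with pitchSrc, lands at (j, i) of
// the destination, addressed with pitchDst.
static void highbd_transpose_scalar_sketch(const uint16_t *src,
                                           ptrdiff_t pitchSrc, uint16_t *dst,
                                           ptrdiff_t pitchDst, int width,
                                           int height) {
  for (int j = 0; j < height; ++j)
    for (int i = 0; i < width; ++i)
      dst[j * pitchDst + i] = src[i * pitchSrc + j];
}
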
1977
1978
static void highbd_dr_prediction_32bit_z2_Nx4_avx2(
1979
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
1980
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
1981
30.7k
    int dy) {
1982
30.7k
  const int min_base_x = -(1 << upsample_above);
1983
30.7k
  const int min_base_y = -(1 << upsample_left);
1984
30.7k
  const int frac_bits_x = 6 - upsample_above;
1985
30.7k
  const int frac_bits_y = 6 - upsample_left;
1986
1987
30.7k
  assert(dx > 0);
1988
  // pre-filter above pixels
1989
  // store in temp buffers:
1990
  //   above[x] * 32 + 16
1991
  //   above[x+1] - above[x]
1992
  // final pixels will be calculated as:
1993
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1994
30.7k
  __m256i a0_x, a1_x, a32, a16;
1995
30.7k
  __m256i diff;
1996
30.7k
  __m128i c3f, min_base_y128;
1997
1998
30.7k
  a16 = _mm256_set1_epi32(16);
1999
30.7k
  c3f = _mm_set1_epi32(0x3f);
2000
30.7k
  min_base_y128 = _mm_set1_epi32(min_base_y);
2001
2002
237k
  for (int r = 0; r < N; r++) {
2003
206k
    __m256i b, res, shift;
2004
206k
    __m128i resx, resy, resxy;
2005
206k
    __m128i a0_x128, a1_x128;
2006
206k
    int y = r + 1;
2007
206k
    int base_x = (-y * dx) >> frac_bits_x;
2008
206k
    int base_shift = 0;
2009
206k
    if (base_x < (min_base_x - 1)) {
2010
157k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2011
157k
    }
2012
206k
    int base_min_diff =
2013
206k
        (min_base_x - base_x + upsample_above) >> upsample_above;
2014
206k
    if (base_min_diff > 4) {
2015
118k
      base_min_diff = 4;
2016
118k
    } else {
2017
88.4k
      if (base_min_diff < 0) base_min_diff = 0;
2018
88.4k
    }
2019
2020
206k
    if (base_shift > 3) {
2021
118k
      a0_x = _mm256_setzero_si256();
2022
118k
      a1_x = _mm256_setzero_si256();
2023
118k
      shift = _mm256_setzero_si256();
2024
118k
    } else {
2025
88.4k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2026
88.4k
      if (upsample_above) {
2027
23.0k
        a0_x128 = _mm_shuffle_epi8(a0_x128,
2028
23.0k
                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
2029
23.0k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
2030
2031
23.0k
        shift = _mm256_castsi128_si256(_mm_srli_epi32(
2032
23.0k
            _mm_and_si128(
2033
23.0k
                _mm_slli_epi32(
2034
23.0k
                    _mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
2035
23.0k
                                   (2 << 6) - y * dx, (3 << 6) - y * dx),
2036
23.0k
                    upsample_above),
2037
23.0k
                c3f),
2038
23.0k
            1));
2039
65.4k
      } else {
2040
65.4k
        a0_x128 =
2041
65.4k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2042
65.4k
        a1_x128 = _mm_srli_si128(a0_x128, 2);
2043
2044
65.4k
        shift = _mm256_castsi128_si256(_mm_srli_epi32(
2045
65.4k
            _mm_and_si128(_mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
2046
65.4k
                                         (2 << 6) - y * dx, (3 << 6) - y * dx),
2047
65.4k
                          c3f),
2048
65.4k
            1));
2049
65.4k
      }
2050
88.4k
      a0_x = _mm256_cvtepu16_epi32(a0_x128);
2051
88.4k
      a1_x = _mm256_cvtepu16_epi32(a1_x128);
2052
88.4k
    }
2053
    // y calc
2054
206k
    __m128i a0_y, a1_y, shifty;
2055
206k
    if (base_x < min_base_x) {
2056
174k
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2057
174k
      DECLARE_ALIGNED(32, int, base_y_c[4]);
2058
174k
      r6 = _mm_set1_epi32(r << 6);
2059
174k
      dy128 = _mm_set1_epi32(dy);
2060
174k
      c1234 = _mm_setr_epi32(1, 2, 3, 4);
2061
174k
      y_c128 = _mm_sub_epi32(r6, _mm_mullo_epi32(c1234, dy128));
2062
174k
      base_y_c128 = _mm_srai_epi32(y_c128, frac_bits_y);
2063
174k
      mask128 = _mm_cmpgt_epi32(min_base_y128, base_y_c128);
2064
174k
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2065
174k
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2066
2067
174k
      a0_y = _mm_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
2068
174k
                            left[base_y_c[2]], left[base_y_c[3]]);
2069
174k
      a1_y = _mm_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2070
174k
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1]);
2071
2072
174k
      if (upsample_left) {
2073
34.6k
        shifty = _mm_srli_epi32(
2074
34.6k
            _mm_and_si128(_mm_slli_epi32(y_c128, upsample_left), c3f), 1);
2075
139k
      } else {
2076
139k
        shifty = _mm_srli_epi32(_mm_and_si128(y_c128, c3f), 1);
2077
139k
      }
2078
174k
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2079
174k
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2080
174k
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2081
174k
    }
2082
2083
206k
    diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2084
206k
    a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2085
206k
    a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2086
2087
206k
    b = _mm256_mullo_epi32(diff, shift);
2088
206k
    res = _mm256_add_epi32(a32, b);
2089
206k
    res = _mm256_srli_epi32(res, 5);
2090
2091
206k
    resx = _mm256_castsi256_si128(res);
2092
206k
    resx = _mm_packus_epi32(resx, resx);
2093
2094
206k
    resy = _mm256_extracti128_si256(res, 1);
2095
206k
    resy = _mm_packus_epi32(resy, resy);
2096
2097
206k
    resxy =
2098
206k
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2099
206k
    _mm_storel_epi64((__m128i *)(dst), resxy);
2100
206k
    dst += stride;
2101
206k
  }
2102
30.7k
}
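
The zone-2 kernels compute both candidates per row and then blend: columns whose projected position falls to the left of the block take the left-edge path, the rest take the above-edge path, which is what the HighbdBaseMask[base_min_diff] blend selects. A scalar sketch of one row (hypothetical helper; assumes no edge upsampling for brevity and that above/left are addressable at index -1, as in the intra edge layout):

#include <stdint.h>

// One row of zone-2 prediction for row r (0-based), block width bw.
static void dr_z2_row_sketch(uint16_t *dst, const uint16_t *above,
                             const uint16_t *left, int bw, int r, int dx,
                             int dy) {
  const int min_base_x = -1;
  for (int c = 0; c < bw; ++c) {
    int p0, p1, shift;
    const int xpos = (c << 6) - (r + 1) * dx;  // position along the above row
    const int base_x = xpos >> 6;
    if (base_x >= min_base_x) {
      p0 = above[base_x];
      p1 = above[base_x + 1];
      shift = (xpos & 0x3f) >> 1;
    } else {
      const int ypos = (r << 6) - (c + 1) * dy;  // position along the left col
      const int base_y = ypos >> 6;
      p0 = left[base_y];
      p1 = left[base_y + 1];
      shift = (ypos & 0x3f) >> 1;
    }
    dst[c] = (uint16_t)((p0 * 32 + 16 + (p1 - p0) * shift) >> 5);
  }
}
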
2103
2104
static void highbd_dr_prediction_z2_Nx4_avx2(
2105
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2106
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2107
83.3k
    int dy) {
2108
83.3k
  const int min_base_x = -(1 << upsample_above);
2109
83.3k
  const int min_base_y = -(1 << upsample_left);
2110
83.3k
  const int frac_bits_x = 6 - upsample_above;
2111
83.3k
  const int frac_bits_y = 6 - upsample_left;
2112
2113
83.3k
  assert(dx > 0);
2114
  // pre-filter above pixels
2115
  // store in temp buffers:
2116
  //   above[x] * 32 + 16
2117
  //   above[x+1] - above[x]
2118
  // final pixels will be calculated as:
2119
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2120
83.3k
  __m256i a0_x, a1_x, a32, a16;
2121
83.3k
  __m256i diff;
2122
83.3k
  __m128i c3f, min_base_y128;
2123
2124
83.3k
  a16 = _mm256_set1_epi16(16);
2125
83.3k
  c3f = _mm_set1_epi16(0x3f);
2126
83.3k
  min_base_y128 = _mm_set1_epi16(min_base_y);
2127
2128
683k
  for (int r = 0; r < N; r++) {
2129
599k
    __m256i b, res, shift;
2130
599k
    __m128i resx, resy, resxy;
2131
599k
    __m128i a0_x128, a1_x128;
2132
599k
    int y = r + 1;
2133
599k
    int base_x = (-y * dx) >> frac_bits_x;
2134
599k
    int base_shift = 0;
2135
599k
    if (base_x < (min_base_x - 1)) {
2136
439k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2137
439k
    }
2138
599k
    int base_min_diff =
2139
599k
        (min_base_x - base_x + upsample_above) >> upsample_above;
2140
599k
    if (base_min_diff > 4) {
2141
292k
      base_min_diff = 4;
2142
306k
    } else {
2143
306k
      if (base_min_diff < 0) base_min_diff = 0;
2144
306k
    }
2145
2146
599k
    if (base_shift > 3) {
2147
292k
      a0_x = _mm256_setzero_si256();
2148
292k
      a1_x = _mm256_setzero_si256();
2149
292k
      shift = _mm256_setzero_si256();
2150
306k
    } else {
2151
306k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2152
306k
      if (upsample_above) {
2153
102k
        a0_x128 = _mm_shuffle_epi8(a0_x128,
2154
102k
                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
2155
102k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
2156
2157
102k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2158
102k
            _mm_and_si128(
2159
102k
                _mm_slli_epi16(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2160
102k
                                              (2 << 6) - y * dx,
2161
102k
                                              (3 << 6) - y * dx, 0, 0, 0, 0),
2162
102k
                               upsample_above),
2163
102k
                c3f),
2164
102k
            1));
2165
204k
      } else {
2166
204k
        a0_x128 =
2167
204k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2168
204k
        a1_x128 = _mm_srli_si128(a0_x128, 2);
2169
2170
204k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2171
204k
            _mm_and_si128(
2172
204k
                _mm_setr_epi16(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
2173
204k
                               (3 << 6) - y * dx, 0, 0, 0, 0),
2174
204k
                c3f),
2175
204k
            1));
2176
204k
      }
2177
306k
      a0_x = _mm256_castsi128_si256(a0_x128);
2178
306k
      a1_x = _mm256_castsi128_si256(a1_x128);
2179
306k
    }
2180
    // y calc
2181
599k
    __m128i a0_y, a1_y, shifty;
2182
599k
    if (base_x < min_base_x) {
2183
499k
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2184
499k
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
2185
499k
      r6 = _mm_set1_epi16(r << 6);
2186
499k
      dy128 = _mm_set1_epi16(dy);
2187
499k
      c1234 = _mm_setr_epi16(1, 2, 3, 4, 0, 0, 0, 0);
2188
499k
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
2189
499k
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
2190
499k
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
2191
499k
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2192
499k
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2193
2194
499k
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
2195
499k
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
2196
499k
      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2197
499k
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1], 0, 0,
2198
499k
                            0, 0);
2199
2200
499k
      if (upsample_left) {
2201
149k
        shifty = _mm_srli_epi16(
2202
149k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
2203
349k
      } else {
2204
349k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
2205
349k
      }
2206
499k
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2207
499k
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2208
499k
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2209
499k
    }
2210
2211
599k
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2212
599k
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2213
599k
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2214
2215
599k
    b = _mm256_mullo_epi16(diff, shift);
2216
599k
    res = _mm256_add_epi16(a32, b);
2217
599k
    res = _mm256_srli_epi16(res, 5);
2218
2219
599k
    resx = _mm256_castsi256_si128(res);
2220
599k
    resy = _mm256_extracti128_si256(res, 1);
2221
599k
    resxy =
2222
599k
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2223
599k
    _mm_storel_epi64((__m128i *)(dst), resxy);
2224
599k
    dst += stride;
2225
599k
  }
2226
83.3k
}
2227
2228
static void highbd_dr_prediction_32bit_z2_Nx8_avx2(
2229
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2230
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2231
41.3k
    int dy) {
2232
41.3k
  const int min_base_x = -(1 << upsample_above);
2233
41.3k
  const int min_base_y = -(1 << upsample_left);
2234
41.3k
  const int frac_bits_x = 6 - upsample_above;
2235
41.3k
  const int frac_bits_y = 6 - upsample_left;
2236
2237
  // pre-filter above pixels
2238
  // store in temp buffers:
2239
  //   above[x] * 32 + 16
2240
  //   above[x+1] - above[x]
2241
  // final pixels will be calculated as:
2242
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2243
41.3k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c3f, min_base_y256;
2244
41.3k
  __m256i diff;
2245
41.3k
  __m128i a0_x128, a1_x128;
2246
2247
41.3k
  a16 = _mm256_set1_epi32(16);
2248
41.3k
  c3f = _mm256_set1_epi32(0x3f);
2249
41.3k
  min_base_y256 = _mm256_set1_epi32(min_base_y);
2250
2251
481k
  for (int r = 0; r < N; r++) {
2252
439k
    __m256i b, res, shift;
2253
439k
    __m128i resx, resy, resxy;
2254
439k
    int y = r + 1;
2255
439k
    int base_x = (-y * dx) >> frac_bits_x;
2256
439k
    int base_shift = 0;
2257
439k
    if (base_x < (min_base_x - 1)) {
2258
335k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2259
335k
    }
2260
439k
    int base_min_diff =
2261
439k
        (min_base_x - base_x + upsample_above) >> upsample_above;
2262
439k
    if (base_min_diff > 8) {
2263
202k
      base_min_diff = 8;
2264
237k
    } else {
2265
237k
      if (base_min_diff < 0) base_min_diff = 0;
2266
237k
    }
2267
2268
439k
    if (base_shift > 7) {
2269
202k
      resx = _mm_setzero_si128();
2270
237k
    } else {
2271
237k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2272
237k
      if (upsample_above) {
2273
44.5k
        __m128i mask, atmp0, atmp1, atmp2, atmp3;
2274
44.5k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
2275
44.5k
        atmp0 = _mm_shuffle_epi8(a0_x128,
2276
44.5k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2277
44.5k
        atmp1 = _mm_shuffle_epi8(a1_x128,
2278
44.5k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2279
44.5k
        atmp2 = _mm_shuffle_epi8(
2280
44.5k
            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2281
44.5k
        atmp3 = _mm_shuffle_epi8(
2282
44.5k
            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2283
44.5k
        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
2284
44.5k
                              _mm_set1_epi8(15));
2285
44.5k
        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
2286
44.5k
        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
2287
44.5k
                              _mm_set1_epi8(15));
2288
44.5k
        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
2289
44.5k
        shift = _mm256_srli_epi32(
2290
44.5k
            _mm256_and_si256(
2291
44.5k
                _mm256_slli_epi32(
2292
44.5k
                    _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx,
2293
44.5k
                                      (2 << 6) - y * dx, (3 << 6) - y * dx,
2294
44.5k
                                      (4 << 6) - y * dx, (5 << 6) - y * dx,
2295
44.5k
                                      (6 << 6) - y * dx, (7 << 6) - y * dx),
2296
44.5k
                    upsample_above),
2297
44.5k
                c3f),
2298
44.5k
            1);
2299
192k
      } else {
2300
192k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
2301
192k
        a0_x128 =
2302
192k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2303
192k
        a1_x128 =
2304
192k
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2305
2306
192k
        shift = _mm256_srli_epi32(
2307
192k
            _mm256_and_si256(
2308
192k
                _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
2309
192k
                                  (3 << 6) - y * dx, (4 << 6) - y * dx,
2310
192k
                                  (5 << 6) - y * dx, (6 << 6) - y * dx,
2311
192k
                                  (7 << 6) - y * dx),
2312
192k
                c3f),
2313
192k
            1);
2314
192k
      }
2315
237k
      a0_x = _mm256_cvtepu16_epi32(a0_x128);
2316
237k
      a1_x = _mm256_cvtepu16_epi32(a1_x128);
2317
2318
237k
      diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2319
237k
      a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2320
237k
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2321
2322
237k
      b = _mm256_mullo_epi32(diff, shift);
2323
237k
      res = _mm256_add_epi32(a32, b);
2324
237k
      res = _mm256_srli_epi32(res, 5);
2325
2326
237k
      resx = _mm256_castsi256_si128(_mm256_packus_epi32(
2327
237k
          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
2328
237k
    }
2329
    // y calc
2330
439k
    if (base_x < min_base_x) {
2331
373k
      DECLARE_ALIGNED(32, int, base_y_c[8]);
2332
373k
      __m256i r6, c256, dy256, y_c256, base_y_c256, mask256;
2333
373k
      r6 = _mm256_set1_epi32(r << 6);
2334
373k
      dy256 = _mm256_set1_epi32(dy);
2335
373k
      c256 = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
2336
373k
      y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2337
373k
      base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
2338
373k
      mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2339
373k
      base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2340
373k
      _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2341
2342
373k
      a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2343
373k
          left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2344
373k
          left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2345
373k
          left[base_y_c[6]], left[base_y_c[7]]));
2346
373k
      a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2347
373k
          left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
2348
373k
          left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2349
373k
          left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
2350
2351
373k
      if (upsample_left) {
2352
55.5k
        shift = _mm256_srli_epi32(
2353
55.5k
            _mm256_and_si256(_mm256_slli_epi32((y_c256), upsample_left), c3f),
2354
55.5k
            1);
2355
317k
      } else {
2356
317k
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
2357
317k
      }
2358
373k
      diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2359
373k
      a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2360
373k
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2361
2362
373k
      b = _mm256_mullo_epi32(diff, shift);
2363
373k
      res = _mm256_add_epi32(a32, b);
2364
373k
      res = _mm256_srli_epi32(res, 5);
2365
2366
373k
      resy = _mm256_castsi256_si128(_mm256_packus_epi32(
2367
373k
          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
2368
373k
    } else {
2369
66.7k
      resy = resx;
2370
66.7k
    }
2371
439k
    resxy =
2372
439k
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2373
439k
    _mm_storeu_si128((__m128i *)(dst), resxy);
2374
439k
    dst += stride;
2375
439k
  }
2376
41.3k
}
2377
2378
static void highbd_dr_prediction_z2_Nx8_avx2(
2379
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2380
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2381
152k
    int dy) {
2382
152k
  const int min_base_x = -(1 << upsample_above);
2383
152k
  const int min_base_y = -(1 << upsample_left);
2384
152k
  const int frac_bits_x = 6 - upsample_above;
2385
152k
  const int frac_bits_y = 6 - upsample_left;
2386
2387
  // pre-filter above pixels
2388
  // store in temp buffers:
2389
  //   above[x] * 32 + 16
2390
  //   above[x+1] - above[x]
2391
  // final pixels will be calculated as:
2392
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2393
152k
  __m128i c3f, min_base_y128;
2394
152k
  __m256i a0_x, a1_x, diff, a32, a16;
2395
152k
  __m128i a0_x128, a1_x128;
2396
2397
152k
  a16 = _mm256_set1_epi16(16);
2398
152k
  c3f = _mm_set1_epi16(0x3f);
2399
152k
  min_base_y128 = _mm_set1_epi16(min_base_y);
2400
2401
1.61M
  for (int r = 0; r < N; r++) {
2402
1.46M
    __m256i b, res, shift;
2403
1.46M
    __m128i resx, resy, resxy;
2404
1.46M
    int y = r + 1;
2405
1.46M
    int base_x = (-y * dx) >> frac_bits_x;
2406
1.46M
    int base_shift = 0;
2407
1.46M
    if (base_x < (min_base_x - 1)) {
2408
1.10M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2409
1.10M
    }
2410
1.46M
    int base_min_diff =
2411
1.46M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2412
1.46M
    if (base_min_diff > 8) {
2413
669k
      base_min_diff = 8;
2414
796k
    } else {
2415
796k
      if (base_min_diff < 0) base_min_diff = 0;
2416
796k
    }
2417
2418
1.46M
    if (base_shift > 7) {
2419
669k
      a0_x = _mm256_setzero_si256();
2420
669k
      a1_x = _mm256_setzero_si256();
2421
669k
      shift = _mm256_setzero_si256();
2422
796k
    } else {
2423
796k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2424
796k
      if (upsample_above) {
2425
259k
        __m128i mask, atmp0, atmp1, atmp2, atmp3;
2426
259k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
2427
259k
        atmp0 = _mm_shuffle_epi8(a0_x128,
2428
259k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2429
259k
        atmp1 = _mm_shuffle_epi8(a1_x128,
2430
259k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2431
259k
        atmp2 = _mm_shuffle_epi8(
2432
259k
            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2433
259k
        atmp3 = _mm_shuffle_epi8(
2434
259k
            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2435
259k
        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
2436
259k
                              _mm_set1_epi8(15));
2437
259k
        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
2438
259k
        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
2439
259k
                              _mm_set1_epi8(15));
2440
259k
        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
2441
2442
259k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2443
259k
            _mm_and_si128(
2444
259k
                _mm_slli_epi16(
2445
259k
                    _mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2446
259k
                                   (2 << 6) - y * dx, (3 << 6) - y * dx,
2447
259k
                                   (4 << 6) - y * dx, (5 << 6) - y * dx,
2448
259k
                                   (6 << 6) - y * dx, (7 << 6) - y * dx),
2449
259k
                    upsample_above),
2450
259k
                c3f),
2451
259k
            1));
2452
537k
      } else {
2453
537k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
2454
537k
        a0_x128 =
2455
537k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2456
537k
        a1_x128 =
2457
537k
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2458
2459
537k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2460
537k
            _mm_and_si128(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2461
537k
                                         (2 << 6) - y * dx, (3 << 6) - y * dx,
2462
537k
                                         (4 << 6) - y * dx, (5 << 6) - y * dx,
2463
537k
                                         (6 << 6) - y * dx, (7 << 6) - y * dx),
2464
537k
                          c3f),
2465
537k
            1));
2466
537k
      }
2467
796k
      a0_x = _mm256_castsi128_si256(a0_x128);
2468
796k
      a1_x = _mm256_castsi128_si256(a1_x128);
2469
796k
    }
2470
2471
    // y calc
2472
1.46M
    __m128i a0_y, a1_y, shifty;
2473
1.46M
    if (base_x < min_base_x) {
2474
1.22M
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
2475
1.22M
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2476
1.22M
      r6 = _mm_set1_epi16(r << 6);
2477
1.22M
      dy128 = _mm_set1_epi16(dy);
2478
1.22M
      c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
2479
1.22M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
2480
1.22M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
2481
1.22M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
2482
1.22M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2483
1.22M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2484
2485
1.22M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
2486
1.22M
                            left[base_y_c[2]], left[base_y_c[3]],
2487
1.22M
                            left[base_y_c[4]], left[base_y_c[5]],
2488
1.22M
                            left[base_y_c[6]], left[base_y_c[7]]);
2489
1.22M
      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2490
1.22M
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1],
2491
1.22M
                            left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2492
1.22M
                            left[base_y_c[6] + 1], left[base_y_c[7] + 1]);
2493
2494
1.22M
      if (upsample_left) {
2495
325k
        shifty = _mm_srli_epi16(
2496
325k
            _mm_and_si128(_mm_slli_epi16((y_c128), upsample_left), c3f), 1);
2497
900k
      } else {
2498
900k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
2499
900k
      }
2500
1.22M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2501
1.22M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2502
1.22M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2503
1.22M
    }
2504
2505
1.46M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2506
1.46M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2507
1.46M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2508
2509
1.46M
    b = _mm256_mullo_epi16(diff, shift);
2510
1.46M
    res = _mm256_add_epi16(a32, b);
2511
1.46M
    res = _mm256_srli_epi16(res, 5);
2512
2513
1.46M
    resx = _mm256_castsi256_si128(res);
2514
1.46M
    resy = _mm256_extracti128_si256(res, 1);
2515
2516
1.46M
    resxy =
2517
1.46M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2518
1.46M
    _mm_storeu_si128((__m128i *)(dst), resxy);
2519
1.46M
    dst += stride;
2520
1.46M
  }
2521
152k
}
2522
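The kernel above builds every output sample as a two-tap blend of neighbouring reference pixels, weighted by a 6-bit fraction that has been halved into the 0..31 range. A minimal scalar sketch of that arithmetic (illustrative helper name, not part of the library):

  /* shift is the halved 6-bit fraction, 0..31 */
  static uint16_t interp_sample(uint16_t a0, uint16_t a1, int shift) {
    /* (a0 * 32 + 16 + (a1 - a0) * shift) >> 5 : rounded blend of a0 and a1 */
    return (uint16_t)(((a0 << 5) + 16 + (a1 - a0) * shift) >> 5);
  }

The same expression appears in both the x ("above") and y ("left") halves of the loop; only the source of a0/a1 and the fraction differ.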
2523
static void highbd_dr_prediction_32bit_z2_HxW_avx2(
2524
    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2525
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2526
48.3k
    int dy) {
2527
  // here upsample_above and upsample_left are 0 by design of
2528
  // av1_use_intra_edge_upsample
2529
48.3k
  const int min_base_x = -1;
2530
48.3k
  const int min_base_y = -1;
2531
48.3k
  (void)upsample_above;
2532
48.3k
  (void)upsample_left;
2533
48.3k
  const int frac_bits_x = 6;
2534
48.3k
  const int frac_bits_y = 6;
2535
2536
  // pre-filter above pixels
2537
  // store in temp buffers:
2538
  //   above[x] * 32 + 16
2539
  //   above[x+1] - above[x]
2540
  // final pixels will be calculated as:
2541
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2542
48.3k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a0_1_x, a1_1_x, a16, c1;
2543
48.3k
  __m256i diff, min_base_y256, c3f, dy256, c1234, c0123, c8;
2544
48.3k
  __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
2545
48.3k
  DECLARE_ALIGNED(32, int, base_y_c[16]);
2546
2547
48.3k
  a16 = _mm256_set1_epi32(16);
2548
48.3k
  c1 = _mm256_srli_epi32(a16, 4);
2549
48.3k
  c8 = _mm256_srli_epi32(a16, 1);
2550
48.3k
  min_base_y256 = _mm256_set1_epi32(min_base_y);
2551
48.3k
  c3f = _mm256_set1_epi32(0x3f);
2552
48.3k
  dy256 = _mm256_set1_epi32(dy);
2553
48.3k
  c0123 = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
2554
48.3k
  c1234 = _mm256_add_epi32(c0123, c1);
2555
2556
1.05M
  for (int r = 0; r < H; r++) {
2557
1.00M
    __m256i b, res, shift, ydx;
2558
1.00M
    __m256i resx[2], resy[2];
2559
1.00M
    __m256i resxy, j256, r6;
2560
3.57M
    for (int j = 0; j < W; j += 16) {
2561
2.56M
      j256 = _mm256_set1_epi32(j);
2562
2.56M
      int y = r + 1;
2563
2.56M
      ydx = _mm256_set1_epi32(y * dx);
2564
2565
2.56M
      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
2566
2.56M
      int base_shift = 0;
2567
2.56M
      if ((base_x) < (min_base_x - 1)) {
2568
1.65M
        base_shift = (min_base_x - base_x - 1);
2569
1.65M
      }
2570
2.56M
      int base_min_diff = (min_base_x - base_x);
2571
2.56M
      if (base_min_diff > 16) {
2572
1.12M
        base_min_diff = 16;
2573
1.43M
      } else {
2574
1.43M
        if (base_min_diff < 0) base_min_diff = 0;
2575
1.43M
      }
2576
2577
2.56M
      if (base_shift > 7) {
2578
1.34M
        resx[0] = _mm256_setzero_si256();
2579
1.34M
      } else {
2580
1.22M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2581
1.22M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
2582
1.22M
        a0_x128 =
2583
1.22M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2584
1.22M
        a1_x128 =
2585
1.22M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2586
2587
1.22M
        a0_x = _mm256_cvtepu16_epi32(a0_x128);
2588
1.22M
        a1_x = _mm256_cvtepu16_epi32(a1_x128);
2589
2590
1.22M
        r6 = _mm256_slli_epi32(_mm256_add_epi32(c0123, j256), 6);
2591
1.22M
        shift = _mm256_srli_epi32(
2592
1.22M
            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
2593
2594
1.22M
        diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2595
1.22M
        a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2596
1.22M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2597
2598
1.22M
        b = _mm256_mullo_epi32(diff, shift);
2599
1.22M
        res = _mm256_add_epi32(a32, b);
2600
1.22M
        res = _mm256_srli_epi32(res, 5);
2601
2602
1.22M
        resx[0] = _mm256_packus_epi32(
2603
1.22M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2604
1.22M
      }
2605
2.56M
      int base_shift8 = 0;
2606
2.56M
      if ((base_x + 8) < (min_base_x - 1)) {
2607
1.31M
        base_shift8 = (min_base_x - (base_x + 8) - 1);
2608
1.31M
      }
2609
2.56M
      if (base_shift8 > 7) {
2610
1.12M
        resx[1] = _mm256_setzero_si256();
2611
1.43M
      } else {
2612
1.43M
        a0_1_x128 =
2613
1.43M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 8));
2614
1.43M
        a1_1_x128 =
2615
1.43M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 9));
2616
1.43M
        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
2617
1.43M
                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
2618
1.43M
        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
2619
1.43M
                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
2620
2621
1.43M
        a0_1_x = _mm256_cvtepu16_epi32(a0_1_x128);
2622
1.43M
        a1_1_x = _mm256_cvtepu16_epi32(a1_1_x128);
2623
2624
1.43M
        r6 = _mm256_slli_epi32(
2625
1.43M
            _mm256_add_epi32(c0123, _mm256_add_epi32(j256, c8)), 6);
2626
1.43M
        shift = _mm256_srli_epi32(
2627
1.43M
            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
2628
2629
1.43M
        diff = _mm256_sub_epi32(a1_1_x, a0_1_x);  // a[x+1] - a[x]
2630
1.43M
        a32 = _mm256_slli_epi32(a0_1_x, 5);       // a[x] * 32
2631
1.43M
        a32 = _mm256_add_epi32(a32, a16);         // a[x] * 32 + 16
2632
1.43M
        b = _mm256_mullo_epi32(diff, shift);
2633
2634
1.43M
        resx[1] = _mm256_add_epi32(a32, b);
2635
1.43M
        resx[1] = _mm256_srli_epi32(resx[1], 5);
2636
1.43M
        resx[1] = _mm256_packus_epi32(
2637
1.43M
            resx[1],
2638
1.43M
            _mm256_castsi128_si256(_mm256_extracti128_si256(resx[1], 1)));
2639
1.43M
      }
2640
2.56M
      resx[0] =
2641
2.56M
          _mm256_inserti128_si256(resx[0], _mm256_castsi256_si128(resx[1]),
2642
2.56M
                                  1);  // 16 16bit values
2643
2644
      // y calc
2645
2.56M
      resy[0] = _mm256_setzero_si256();
2646
2.56M
      if ((base_x < min_base_x)) {
2647
1.73M
        __m256i c256, y_c256, y_c_1_256, base_y_c256, mask256;
2648
1.73M
        r6 = _mm256_set1_epi32(r << 6);
2649
1.73M
        c256 = _mm256_add_epi32(j256, c1234);
2650
1.73M
        y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2651
1.73M
        base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
2652
1.73M
        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2653
1.73M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2654
1.73M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2655
1.73M
        c256 = _mm256_add_epi32(c256, c8);
2656
1.73M
        y_c_1_256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2657
1.73M
        base_y_c256 = _mm256_srai_epi32(y_c_1_256, frac_bits_y);
2658
1.73M
        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2659
1.73M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2660
1.73M
        _mm256_store_si256((__m256i *)(base_y_c + 8), base_y_c256);
2661
2662
1.73M
        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2663
1.73M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2664
1.73M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2665
1.73M
            left[base_y_c[6]], left[base_y_c[7]]));
2666
1.73M
        a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2667
1.73M
            left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
2668
1.73M
            left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2669
1.73M
            left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
2670
2671
1.73M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
2672
2673
1.73M
        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2674
1.73M
        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2675
1.73M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2676
2677
1.73M
        b = _mm256_mullo_epi32(diff, shift);
2678
1.73M
        res = _mm256_add_epi32(a32, b);
2679
1.73M
        res = _mm256_srli_epi32(res, 5);
2680
2681
1.73M
        resy[0] = _mm256_packus_epi32(
2682
1.73M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2683
2684
1.73M
        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2685
1.73M
            left[base_y_c[8]], left[base_y_c[9]], left[base_y_c[10]],
2686
1.73M
            left[base_y_c[11]], left[base_y_c[12]], left[base_y_c[13]],
2687
1.73M
            left[base_y_c[14]], left[base_y_c[15]]));
2688
1.73M
        a1_y = _mm256_cvtepu16_epi32(
2689
1.73M
            _mm_setr_epi16(left[base_y_c[8] + 1], left[base_y_c[9] + 1],
2690
1.73M
                           left[base_y_c[10] + 1], left[base_y_c[11] + 1],
2691
1.73M
                           left[base_y_c[12] + 1], left[base_y_c[13] + 1],
2692
1.73M
                           left[base_y_c[14] + 1], left[base_y_c[15] + 1]));
2693
1.73M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c_1_256, c3f), 1);
2694
2695
1.73M
        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2696
1.73M
        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2697
1.73M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2698
2699
1.73M
        b = _mm256_mullo_epi32(diff, shift);
2700
1.73M
        res = _mm256_add_epi32(a32, b);
2701
1.73M
        res = _mm256_srli_epi32(res, 5);
2702
2703
1.73M
        resy[1] = _mm256_packus_epi32(
2704
1.73M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2705
2706
1.73M
        resy[0] =
2707
1.73M
            _mm256_inserti128_si256(resy[0], _mm256_castsi256_si128(resy[1]),
2708
1.73M
                                    1);  // 16 16bit values
2709
1.73M
      }
2710
2711
2.56M
      resxy = _mm256_blendv_epi8(resx[0], resy[0],
2712
2.56M
                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
2713
2.56M
      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
2714
2.56M
    }  // for j
2715
1.00M
    dst += stride;
2716
1.00M
  }
2717
48.3k
}
2718
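In this HxW variant each lane's position along the above row is the 6-bit fixed-point value ((j + lane) << 6) - (r + 1) * dx; its integer part selects the base sample and its fraction, halved, becomes the interpolation weight. A scalar sketch of that coordinate split, with illustrative names only:

  static void z2_x_coord(int c, int r, int dx, int *base_x, int *shift) {
    const int pos = (c << 6) - (r + 1) * dx;  /* 6-bit fixed point along 'above' */
    *base_x = pos >> 6;                       /* integer sample index */
    *shift = (pos & 0x3f) >> 1;               /* fraction halved to 0..31 */
  }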
2719
static void highbd_dr_prediction_z2_HxW_avx2(
2720
    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2721
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2722
289k
    int dy) {
2723
  // here upsample_above and upsample_left are 0 by design of
2724
  // av1_use_intra_edge_upsample
2725
289k
  const int min_base_x = -1;
2726
289k
  const int min_base_y = -1;
2727
289k
  (void)upsample_above;
2728
289k
  (void)upsample_left;
2729
289k
  const int frac_bits_x = 6;
2730
289k
  const int frac_bits_y = 6;
2731
2732
  // pre-filter above pixels
2733
  // store in temp buffers:
2734
  //   above[x] * 32 + 16
2735
  //   above[x+1] - above[x]
2736
  // final pixels will be calculated as:
2737
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2738
289k
  __m256i a0_x, a1_x, a32, a16, c3f, c1;
2739
289k
  __m256i diff, min_base_y256, dy256, c1234, c0123;
2740
289k
  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
2741
2742
289k
  a16 = _mm256_set1_epi16(16);
2743
289k
  c1 = _mm256_srli_epi16(a16, 4);
2744
289k
  min_base_y256 = _mm256_set1_epi16(min_base_y);
2745
289k
  c3f = _mm256_set1_epi16(0x3f);
2746
289k
  dy256 = _mm256_set1_epi16(dy);
2747
289k
  c0123 =
2748
289k
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
2749
289k
  c1234 = _mm256_add_epi16(c0123, c1);
2750
2751
5.38M
  for (int r = 0; r < H; r++) {
2752
5.09M
    __m256i b, res, shift;
2753
5.09M
    __m256i resx, resy, ydx;
2754
5.09M
    __m256i resxy, j256, r6;
2755
5.09M
    __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
2756
5.09M
    int y = r + 1;
2757
5.09M
    ydx = _mm256_set1_epi16((short)(y * dx));
2758
2759
14.0M
    for (int j = 0; j < W; j += 16) {
2760
8.98M
      j256 = _mm256_set1_epi16(j);
2761
8.98M
      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
2762
8.98M
      int base_shift = 0;
2763
8.98M
      if ((base_x) < (min_base_x - 1)) {
2764
6.90M
        base_shift = (min_base_x - (base_x)-1);
2765
6.90M
      }
2766
8.98M
      int base_min_diff = (min_base_x - base_x);
2767
8.98M
      if (base_min_diff > 16) {
2768
5.30M
        base_min_diff = 16;
2769
5.30M
      } else {
2770
3.67M
        if (base_min_diff < 0) base_min_diff = 0;
2771
3.67M
      }
2772
2773
8.98M
      if (base_shift < 8) {
2774
3.02M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2775
3.02M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
2776
3.02M
        a0_x128 =
2777
3.02M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2778
3.02M
        a1_x128 =
2779
3.02M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2780
2781
3.02M
        a0_x = _mm256_castsi128_si256(a0_x128);
2782
3.02M
        a1_x = _mm256_castsi128_si256(a1_x128);
2783
5.96M
      } else {
2784
5.96M
        a0_x = _mm256_setzero_si256();
2785
5.96M
        a1_x = _mm256_setzero_si256();
2786
5.96M
      }
2787
2788
8.98M
      int base_shift1 = 0;
2789
8.98M
      if (base_shift > 8) {
2790
5.87M
        base_shift1 = base_shift - 8;
2791
5.87M
      }
2792
8.98M
      if (base_shift1 < 8) {
2793
3.68M
        a0_1_x128 =
2794
3.68M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 8));
2795
3.68M
        a1_1_x128 =
2796
3.68M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 9));
2797
3.68M
        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
2798
3.68M
                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
2799
3.68M
        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
2800
3.68M
                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
2801
2802
3.68M
        a0_x = _mm256_inserti128_si256(a0_x, a0_1_x128, 1);
2803
3.68M
        a1_x = _mm256_inserti128_si256(a1_x, a1_1_x128, 1);
2804
3.68M
      }
2805
8.98M
      r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
2806
8.98M
      shift = _mm256_srli_epi16(
2807
8.98M
          _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
2808
2809
8.98M
      diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2810
8.98M
      a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2811
8.98M
      a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2812
2813
8.98M
      b = _mm256_mullo_epi16(diff, shift);
2814
8.98M
      res = _mm256_add_epi16(a32, b);
2815
8.98M
      resx = _mm256_srli_epi16(res, 5);  // 16 16-bit values
2816
2817
      // y calc
2818
8.98M
      resy = _mm256_setzero_si256();
2819
8.98M
      __m256i a0_y, a1_y, shifty;
2820
8.98M
      if ((base_x < min_base_x)) {
2821
7.21M
        __m256i c256, y_c256, base_y_c256, mask256, mul16;
2822
7.21M
        r6 = _mm256_set1_epi16(r << 6);
2823
7.21M
        c256 = _mm256_add_epi16(j256, c1234);
2824
7.21M
        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
2825
7.21M
                                 _mm256_srli_epi16(min_base_y256, 1));
2826
7.21M
        y_c256 = _mm256_sub_epi16(r6, mul16);
2827
7.21M
        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
2828
7.21M
        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
2829
7.21M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2830
7.21M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2831
2832
7.21M
        a0_y = _mm256_setr_epi16(
2833
7.21M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2834
7.21M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2835
7.21M
            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
2836
7.21M
            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
2837
7.21M
            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
2838
7.21M
            left[base_y_c[15]]);
2839
7.21M
        base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
2840
7.21M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2841
2842
7.21M
        a1_y = _mm256_setr_epi16(
2843
7.21M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2844
7.21M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2845
7.21M
            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
2846
7.21M
            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
2847
7.21M
            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
2848
7.21M
            left[base_y_c[15]]);
2849
2850
7.21M
        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
2851
2852
7.21M
        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
2853
7.21M
        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
2854
7.21M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2855
2856
7.21M
        b = _mm256_mullo_epi16(diff, shifty);
2857
7.21M
        res = _mm256_add_epi16(a32, b);
2858
7.21M
        resy = _mm256_srli_epi16(res, 5);
2859
7.21M
      }
2860
2861
8.98M
      resxy = _mm256_blendv_epi8(resx, resy,
2862
8.98M
                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
2863
8.98M
      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
2864
8.98M
    }  // for j
2865
5.09M
    dst += stride;
2866
5.09M
  }
2867
289k
}
2868
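For columns whose base_x falls left of min_base_x the kernel substitutes the left-edge ("y calc") result; base_min_diff is exactly the number of leading lanes that need that substitution, and HighbdBaseMask[base_min_diff] applies it with a single blend. A scalar sketch of the final selection, where resx and resy are hypothetical 16-entry arrays holding the above-path and left-path results:

  #include <stdint.h>
  static void z2_blend_row(uint16_t *dst, const uint16_t *resx,
                           const uint16_t *resy, int base_min_diff) {
    for (int c = 0; c < 16; ++c)
      dst[c] = (c < base_min_diff) ? resy[c] : resx[c];
  }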
2869
// Directional prediction, zone 2: 90 < angle < 180
2870
void av1_highbd_dr_prediction_z2_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
2871
                                      int bh, const uint16_t *above,
2872
                                      const uint16_t *left, int upsample_above,
2873
                                      int upsample_left, int dx, int dy,
2874
646k
                                      int bd) {
2875
646k
  (void)bd;
2876
646k
  assert(dx > 0);
2877
646k
  assert(dy > 0);
2878
646k
  switch (bw) {
2879
114k
    case 4:
2880
114k
      if (bd < 12) {
2881
83.3k
        highbd_dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left,
2882
83.3k
                                         upsample_above, upsample_left, dx, dy);
2883
83.3k
      } else {
2884
30.7k
        highbd_dr_prediction_32bit_z2_Nx4_avx2(bh, dst, stride, above, left,
2885
30.7k
                                               upsample_above, upsample_left,
2886
30.7k
                                               dx, dy);
2887
30.7k
      }
2888
114k
      break;
2889
193k
    case 8:
2890
193k
      if (bd < 12) {
2891
152k
        highbd_dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left,
2892
152k
                                         upsample_above, upsample_left, dx, dy);
2893
152k
      } else {
2894
41.3k
        highbd_dr_prediction_32bit_z2_Nx8_avx2(bh, dst, stride, above, left,
2895
41.3k
                                               upsample_above, upsample_left,
2896
41.3k
                                               dx, dy);
2897
41.3k
      }
2898
193k
      break;
2899
338k
    default:
2900
338k
      if (bd < 12) {
2901
289k
        highbd_dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
2902
289k
                                         upsample_above, upsample_left, dx, dy);
2903
289k
      } else {
2904
48.3k
        highbd_dr_prediction_32bit_z2_HxW_avx2(bh, bw, dst, stride, above, left,
2905
48.3k
                                               upsample_above, upsample_left,
2906
48.3k
                                               dx, dy);
2907
48.3k
      }
2908
338k
      break;
2909
646k
  }
2910
646k
}
2911
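The bd < 12 split in the dispatcher matches the intermediate precision of the kernels: the 16-bit-lane variants keep a[x] * 32 + 16 in an unsigned 16-bit lane, which is safe for 8- and 10-bit samples but overflows for 12-bit samples, so the 32-bit-lane variants take over there. A small illustrative check of that bound (not part of the library):

  #include <assert.h>
  static void lane_width_check(void) {
    assert(1023 * 32 + 16 <= 65535);  /* 10-bit samples fit in a 16-bit lane */
    assert(4095 * 32 + 16 > 65535);   /* 12-bit samples do not               */
  }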
2912
//  Directional prediction, zone 3 functions
2913
static void highbd_dr_prediction_z3_4x4_avx2(uint16_t *dst, ptrdiff_t stride,
2914
                                             const uint16_t *left,
2915
                                             int upsample_left, int dy,
2916
78.5k
                                             int bd) {
2917
78.5k
  __m128i dstvec[4], d[4];
2918
78.5k
  if (bd < 12) {
2919
69.2k
    highbd_dr_prediction_z1_4xN_internal_avx2(4, dstvec, left, upsample_left,
2920
69.2k
                                              dy);
2921
69.2k
  } else {
2922
9.32k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(4, dstvec, left,
2923
9.32k
                                                    upsample_left, dy);
2924
9.32k
  }
2925
78.5k
  highbd_transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2],
2926
78.5k
                                   &dstvec[3], &d[0], &d[1], &d[2], &d[3]);
2927
78.5k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
2928
78.5k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
2929
78.5k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
2930
78.5k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
2931
78.5k
  return;
2932
78.5k
}
2933
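All of the zone-3 kernels that follow use the pattern visible in the 4x4 case above: run a zone-1 predictor over the left edge (treating it as if it were the top edge) into a temporary block, then transpose that block into dst. A scalar sketch of the transpose step, with illustrative names:

  #include <stddef.h>
  #include <stdint.h>
  /* tmp is the n x n zone-1 output in row-major order. */
  static void z3_store_transposed(uint16_t *dst, ptrdiff_t stride,
                                  const uint16_t *tmp, int n) {
    for (int r = 0; r < n; ++r)
      for (int c = 0; c < n; ++c) dst[r * stride + c] = tmp[c * n + r];
  }

The SIMD versions do the same thing with the highbd_transpose*() helpers instead of scalar loops.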
2934
static void highbd_dr_prediction_z3_8x8_avx2(uint16_t *dst, ptrdiff_t stride,
2935
                                             const uint16_t *left,
2936
                                             int upsample_left, int dy,
2937
82.3k
                                             int bd) {
2938
82.3k
  __m128i dstvec[8], d[8];
2939
82.3k
  if (bd < 12) {
2940
63.6k
    highbd_dr_prediction_z1_8xN_internal_avx2(8, dstvec, left, upsample_left,
2941
63.6k
                                              dy);
2942
63.6k
  } else {
2943
18.6k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(8, dstvec, left,
2944
18.6k
                                                    upsample_left, dy);
2945
18.6k
  }
2946
82.3k
  highbd_transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2947
82.3k
                           &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
2948
82.3k
                           &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
2949
82.3k
                           &d[7]);
2950
740k
  for (int i = 0; i < 8; i++) {
2951
658k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
2952
658k
  }
2953
82.3k
}
2954
2955
static void highbd_dr_prediction_z3_4x8_avx2(uint16_t *dst, ptrdiff_t stride,
2956
                                             const uint16_t *left,
2957
                                             int upsample_left, int dy,
2958
13.4k
                                             int bd) {
2959
13.4k
  __m128i dstvec[4], d[8];
2960
13.4k
  if (bd < 12) {
2961
10.9k
    highbd_dr_prediction_z1_8xN_internal_avx2(4, dstvec, left, upsample_left,
2962
10.9k
                                              dy);
2963
10.9k
  } else {
2964
2.48k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(4, dstvec, left,
2965
2.48k
                                                    upsample_left, dy);
2966
2.48k
  }
2967
2968
13.4k
  highbd_transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2969
13.4k
                               &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
2970
13.4k
                               &d[7]);
2971
121k
  for (int i = 0; i < 8; i++) {
2972
107k
    _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
2973
107k
  }
2974
13.4k
}
2975
2976
static void highbd_dr_prediction_z3_8x4_avx2(uint16_t *dst, ptrdiff_t stride,
2977
                                             const uint16_t *left,
2978
                                             int upsample_left, int dy,
2979
26.4k
                                             int bd) {
2980
26.4k
  __m128i dstvec[8], d[4];
2981
26.4k
  if (bd < 12) {
2982
22.0k
    highbd_dr_prediction_z1_4xN_internal_avx2(8, dstvec, left, upsample_left,
2983
22.0k
                                              dy);
2984
22.0k
  } else {
2985
4.35k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(8, dstvec, left,
2986
4.35k
                                                    upsample_left, dy);
2987
4.35k
  }
2988
2989
26.4k
  highbd_transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2990
26.4k
                               &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
2991
26.4k
                               &d[0], &d[1], &d[2], &d[3]);
2992
26.4k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
2993
26.4k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride), d[1]);
2994
26.4k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride), d[2]);
2995
26.4k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride), d[3]);
2996
26.4k
}
2997
2998
static void highbd_dr_prediction_z3_8x16_avx2(uint16_t *dst, ptrdiff_t stride,
2999
                                              const uint16_t *left,
3000
                                              int upsample_left, int dy,
3001
19.7k
                                              int bd) {
3002
19.7k
  __m256i dstvec[8], d[8];
3003
19.7k
  if (bd < 12) {
3004
14.1k
    highbd_dr_prediction_z1_16xN_internal_avx2(8, dstvec, left, upsample_left,
3005
14.1k
                                               dy);
3006
14.1k
  } else {
3007
5.59k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(8, dstvec, left,
3008
5.59k
                                                     upsample_left, dy);
3009
5.59k
  }
3010
19.7k
  highbd_transpose8x16_16x8_avx2(dstvec, d);
3011
177k
  for (int i = 0; i < 8; i++) {
3012
158k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3013
158k
                     _mm256_castsi256_si128(d[i]));
3014
158k
  }
3015
177k
  for (int i = 8; i < 16; i++) {
3016
158k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3017
158k
                     _mm256_extracti128_si256(d[i - 8], 1));
3018
158k
  }
3019
19.7k
}
3020
3021
static void highbd_dr_prediction_z3_16x8_avx2(uint16_t *dst, ptrdiff_t stride,
3022
                                              const uint16_t *left,
3023
                                              int upsample_left, int dy,
3024
36.4k
                                              int bd) {
3025
36.4k
  __m128i dstvec[16], d[16];
3026
36.4k
  if (bd < 12) {
3027
28.0k
    highbd_dr_prediction_z1_8xN_internal_avx2(16, dstvec, left, upsample_left,
3028
28.0k
                                              dy);
3029
28.0k
  } else {
3030
8.40k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(16, dstvec, left,
3031
8.40k
                                                    upsample_left, dy);
3032
8.40k
  }
3033
109k
  for (int i = 0; i < 16; i += 8) {
3034
72.9k
    highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
3035
72.9k
                             &dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
3036
72.9k
                             &dstvec[6 + i], &dstvec[7 + i], &d[0 + i],
3037
72.9k
                             &d[1 + i], &d[2 + i], &d[3 + i], &d[4 + i],
3038
72.9k
                             &d[5 + i], &d[6 + i], &d[7 + i]);
3039
72.9k
  }
3040
328k
  for (int i = 0; i < 8; i++) {
3041
291k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
3042
291k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 8), d[i + 8]);
3043
291k
  }
3044
36.4k
}
3045
3046
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3047
static void highbd_dr_prediction_z3_4x16_avx2(uint16_t *dst, ptrdiff_t stride,
3048
                                              const uint16_t *left,
3049
                                              int upsample_left, int dy,
3050
13.6k
                                              int bd) {
3051
13.6k
  __m256i dstvec[4], d[4], d1;
3052
13.6k
  if (bd < 12) {
3053
8.80k
    highbd_dr_prediction_z1_16xN_internal_avx2(4, dstvec, left, upsample_left,
3054
8.80k
                                               dy);
3055
8.80k
  } else {
3056
4.83k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(4, dstvec, left,
3057
4.83k
                                                     upsample_left, dy);
3058
4.83k
  }
3059
13.6k
  highbd_transpose4x16_avx2(dstvec, d);
3060
68.2k
  for (int i = 0; i < 4; i++) {
3061
54.5k
    _mm_storel_epi64((__m128i *)(dst + i * stride),
3062
54.5k
                     _mm256_castsi256_si128(d[i]));
3063
54.5k
    d1 = _mm256_bsrli_epi128(d[i], 8);
3064
54.5k
    _mm_storel_epi64((__m128i *)(dst + (i + 4) * stride),
3065
54.5k
                     _mm256_castsi256_si128(d1));
3066
54.5k
    _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
3067
54.5k
                     _mm256_extracti128_si256(d[i], 1));
3068
54.5k
    _mm_storel_epi64((__m128i *)(dst + (i + 12) * stride),
3069
54.5k
                     _mm256_extracti128_si256(d1, 1));
3070
54.5k
  }
3071
13.6k
}
3072
3073
static void highbd_dr_prediction_z3_16x4_avx2(uint16_t *dst, ptrdiff_t stride,
3074
                                              const uint16_t *left,
3075
                                              int upsample_left, int dy,
3076
41.3k
                                              int bd) {
3077
41.3k
  __m128i dstvec[16], d[8];
3078
41.3k
  if (bd < 12) {
3079
34.2k
    highbd_dr_prediction_z1_4xN_internal_avx2(16, dstvec, left, upsample_left,
3080
34.2k
                                              dy);
3081
34.2k
  } else {
3082
7.02k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(16, dstvec, left,
3083
7.02k
                                                    upsample_left, dy);
3084
7.02k
  }
3085
41.3k
  highbd_transpose16x4_8x8_sse2(dstvec, d);
3086
3087
41.3k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
3088
41.3k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride + 8), d[1]);
3089
41.3k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride), d[2]);
3090
41.3k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride + 8), d[3]);
3091
41.3k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride), d[4]);
3092
41.3k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride + 8), d[5]);
3093
41.3k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride), d[6]);
3094
41.3k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride + 8), d[7]);
3095
41.3k
}
3096
3097
static void highbd_dr_prediction_z3_8x32_avx2(uint16_t *dst, ptrdiff_t stride,
3098
                                              const uint16_t *left,
3099
                                              int upsample_left, int dy,
3100
7.45k
                                              int bd) {
3101
7.45k
  __m256i dstvec[16], d[16];
3102
7.45k
  if (bd < 12) {
3103
5.42k
    highbd_dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left,
3104
5.42k
                                               dy);
3105
5.42k
  } else {
3106
2.03k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(8, dstvec, left,
3107
2.03k
                                                     upsample_left, dy);
3108
2.03k
  }
3109
3110
22.3k
  for (int i = 0; i < 16; i += 8) {
3111
14.9k
    highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
3112
14.9k
  }
3113
3114
67.1k
  for (int i = 0; i < 8; i++) {
3115
59.6k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3116
59.6k
                     _mm256_castsi256_si128(d[i]));
3117
59.6k
  }
3118
67.1k
  for (int i = 0; i < 8; i++) {
3119
59.6k
    _mm_storeu_si128((__m128i *)(dst + (i + 8) * stride),
3120
59.6k
                     _mm256_extracti128_si256(d[i], 1));
3121
59.6k
  }
3122
67.1k
  for (int i = 8; i < 16; i++) {
3123
59.6k
    _mm_storeu_si128((__m128i *)(dst + (i + 8) * stride),
3124
59.6k
                     _mm256_castsi256_si128(d[i]));
3125
59.6k
  }
3126
67.1k
  for (int i = 8; i < 16; i++) {
3127
59.6k
    _mm_storeu_si128((__m128i *)(dst + (i + 16) * stride),
3128
59.6k
                     _mm256_extracti128_si256(d[i], 1));
3129
59.6k
  }
3130
7.45k
}
3131
3132
static void highbd_dr_prediction_z3_32x8_avx2(uint16_t *dst, ptrdiff_t stride,
3133
                                              const uint16_t *left,
3134
                                              int upsample_left, int dy,
3135
42.8k
                                              int bd) {
3136
42.8k
  __m128i dstvec[32], d[32];
3137
42.8k
  if (bd < 12) {
3138
39.7k
    highbd_dr_prediction_z1_8xN_internal_avx2(32, dstvec, left, upsample_left,
3139
39.7k
                                              dy);
3140
39.7k
  } else {
3141
3.05k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(32, dstvec, left,
3142
3.05k
                                                    upsample_left, dy);
3143
3.05k
  }
3144
3145
214k
  for (int i = 0; i < 32; i += 8) {
3146
171k
    highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
3147
171k
                             &dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
3148
171k
                             &dstvec[6 + i], &dstvec[7 + i], &d[0 + i],
3149
171k
                             &d[1 + i], &d[2 + i], &d[3 + i], &d[4 + i],
3150
171k
                             &d[5 + i], &d[6 + i], &d[7 + i]);
3151
171k
  }
3152
385k
  for (int i = 0; i < 8; i++) {
3153
342k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
3154
342k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 8), d[i + 8]);
3155
342k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 16]);
3156
342k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 24), d[i + 24]);
3157
342k
  }
3158
42.8k
}
3159
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3160
3161
static void highbd_dr_prediction_z3_16x16_avx2(uint16_t *dst, ptrdiff_t stride,
3162
                                               const uint16_t *left,
3163
                                               int upsample_left, int dy,
3164
77.1k
                                               int bd) {
3165
77.1k
  __m256i dstvec[16], d[16];
3166
77.1k
  if (bd < 12) {
3167
69.4k
    highbd_dr_prediction_z1_16xN_internal_avx2(16, dstvec, left, upsample_left,
3168
69.4k
                                               dy);
3169
69.4k
  } else {
3170
7.73k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(16, dstvec, left,
3171
7.73k
                                                     upsample_left, dy);
3172
7.73k
  }
3173
3174
77.1k
  highbd_transpose16x16_avx2(dstvec, d);
3175
3176
1.31M
  for (int i = 0; i < 16; i++) {
3177
1.23M
    _mm256_storeu_si256((__m256i *)(dst + i * stride), d[i]);
3178
1.23M
  }
3179
77.1k
}
3180
3181
static void highbd_dr_prediction_z3_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
3182
                                               const uint16_t *left,
3183
                                               int upsample_left, int dy,
3184
55.7k
                                               int bd) {
3185
55.7k
  __m256i dstvec[64], d[16];
3186
55.7k
  if (bd < 12) {
3187
52.7k
    highbd_dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left,
3188
52.7k
                                               dy);
3189
52.7k
  } else {
3190
3.00k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(32, dstvec, left,
3191
3.00k
                                                     upsample_left, dy);
3192
3.00k
  }
3193
55.7k
  highbd_transpose16x16_avx2(dstvec, d);
3194
948k
  for (int j = 0; j < 16; j++) {
3195
892k
    _mm256_storeu_si256((__m256i *)(dst + j * stride), d[j]);
3196
892k
  }
3197
55.7k
  highbd_transpose16x16_avx2(dstvec + 16, d);
3198
947k
  for (int j = 0; j < 16; j++) {
3199
892k
    _mm256_storeu_si256((__m256i *)(dst + j * stride + 16), d[j]);
3200
892k
  }
3201
55.7k
  highbd_transpose16x16_avx2(dstvec + 32, d);
3202
948k
  for (int j = 0; j < 16; j++) {
3203
892k
    _mm256_storeu_si256((__m256i *)(dst + (j + 16) * stride), d[j]);
3204
892k
  }
3205
55.7k
  highbd_transpose16x16_avx2(dstvec + 48, d);
3206
948k
  for (int j = 0; j < 16; j++) {
3207
892k
    _mm256_storeu_si256((__m256i *)(dst + (j + 16) * stride + 16), d[j]);
3208
892k
  }
3209
55.7k
}
3210
3211
static void highbd_dr_prediction_z3_64x64_avx2(uint16_t *dst, ptrdiff_t stride,
3212
                                               const uint16_t *left,
3213
                                               int upsample_left, int dy,
3214
12.1k
                                               int bd) {
3215
12.1k
  DECLARE_ALIGNED(16, uint16_t, dstT[64 * 64]);
3216
12.1k
  if (bd < 12) {
3217
9.49k
    highbd_dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
3218
9.49k
  } else {
3219
2.69k
    highbd_dr_prediction_32bit_z1_64xN_avx2(64, dstT, 64, left, upsample_left,
3220
2.69k
                                            dy);
3221
2.69k
  }
3222
12.1k
  highbd_transpose(dstT, 64, dst, stride, 64, 64);
3223
12.1k
}
3224
3225
static void highbd_dr_prediction_z3_16x32_avx2(uint16_t *dst, ptrdiff_t stride,
3226
                                               const uint16_t *left,
3227
                                               int upsample_left, int dy,
3228
14.3k
                                               int bd) {
3229
14.3k
  __m256i dstvec[32], d[32];
3230
14.3k
  if (bd < 12) {
3231
13.0k
    highbd_dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left,
3232
13.0k
                                               dy);
3233
13.0k
  } else {
3234
1.34k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(16, dstvec, left,
3235
1.34k
                                                     upsample_left, dy);
3236
1.34k
  }
3237
71.9k
  for (int i = 0; i < 32; i += 8) {
3238
57.5k
    highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
3239
57.5k
  }
3240
  // store
3241
43.1k
  for (int j = 0; j < 32; j += 16) {
3242
258k
    for (int i = 0; i < 8; i++) {
3243
230k
      _mm_storeu_si128((__m128i *)(dst + (i + j) * stride),
3244
230k
                       _mm256_castsi256_si128(d[(i + j)]));
3245
230k
    }
3246
258k
    for (int i = 0; i < 8; i++) {
3247
230k
      _mm_storeu_si128((__m128i *)(dst + (i + j) * stride + 8),
3248
230k
                       _mm256_castsi256_si128(d[(i + j) + 8]));
3249
230k
    }
3250
258k
    for (int i = 8; i < 16; i++) {
3251
230k
      _mm256_storeu_si256(
3252
230k
          (__m256i *)(dst + (i + j) * stride),
3253
230k
          _mm256_inserti128_si256(
3254
230k
              d[(i + j)], _mm256_extracti128_si256(d[(i + j) - 8], 1), 0));
3255
230k
    }
3256
28.7k
  }
3257
14.3k
}
3258
3259
static void highbd_dr_prediction_z3_32x16_avx2(uint16_t *dst, ptrdiff_t stride,
3260
                                               const uint16_t *left,
3261
                                               int upsample_left, int dy,
3262
18.2k
                                               int bd) {
3263
18.2k
  __m256i dstvec[32], d[16];
3264
18.2k
  if (bd < 12) {
3265
16.6k
    highbd_dr_prediction_z1_16xN_internal_avx2(32, dstvec, left, upsample_left,
3266
16.6k
                                               dy);
3267
16.6k
  } else {
3268
1.58k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(32, dstvec, left,
3269
1.58k
                                                     upsample_left, dy);
3270
1.58k
  }
3271
54.6k
  for (int i = 0; i < 32; i += 16) {
3272
36.4k
    highbd_transpose16x16_avx2((dstvec + i), d);
3273
619k
    for (int j = 0; j < 16; j++) {
3274
582k
      _mm256_storeu_si256((__m256i *)(dst + j * stride + i), d[j]);
3275
582k
    }
3276
36.4k
  }
3277
18.2k
}
3278
3279
static void highbd_dr_prediction_z3_32x64_avx2(uint16_t *dst, ptrdiff_t stride,
3280
                                               const uint16_t *left,
3281
                                               int upsample_left, int dy,
3282
1.50k
                                               int bd) {
3283
1.50k
  uint16_t dstT[64 * 32];
3284
1.50k
  if (bd < 12) {
3285
1.24k
    highbd_dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
3286
1.24k
  } else {
3287
254
    highbd_dr_prediction_32bit_z1_64xN_avx2(32, dstT, 64, left, upsample_left,
3288
254
                                            dy);
3289
254
  }
3290
1.50k
  highbd_transpose(dstT, 64, dst, stride, 32, 64);
3291
1.50k
}
3292
3293
static void highbd_dr_prediction_z3_64x32_avx2(uint16_t *dst, ptrdiff_t stride,
3294
                                               const uint16_t *left,
3295
                                               int upsample_left, int dy,
3296
2.56k
                                               int bd) {
3297
2.56k
  DECLARE_ALIGNED(16, uint16_t, dstT[32 * 64]);
3298
2.56k
  highbd_dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy, bd);
3299
2.56k
  highbd_transpose(dstT, 32, dst, stride, 64, 32);
3300
2.56k
  return;
3301
2.56k
}
3302
3303
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3304
static void highbd_dr_prediction_z3_16x64_avx2(uint16_t *dst, ptrdiff_t stride,
3305
                                               const uint16_t *left,
3306
                                               int upsample_left, int dy,
3307
2.66k
                                               int bd) {
3308
2.66k
  DECLARE_ALIGNED(16, uint16_t, dstT[64 * 16]);
3309
2.66k
  if (bd < 12) {
3310
1.97k
    highbd_dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
3311
1.97k
  } else {
3312
689
    highbd_dr_prediction_32bit_z1_64xN_avx2(16, dstT, 64, left, upsample_left,
3313
689
                                            dy);
3314
689
  }
3315
2.66k
  highbd_transpose(dstT, 64, dst, stride, 16, 64);
3316
2.66k
}
3317
3318
static void highbd_dr_prediction_z3_64x16_avx2(uint16_t *dst, ptrdiff_t stride,
3319
                                               const uint16_t *left,
3320
                                               int upsample_left, int dy,
3321
16.5k
                                               int bd) {
3322
16.5k
  __m256i dstvec[64], d[16];
3323
16.5k
  if (bd < 12) {
3324
16.0k
    highbd_dr_prediction_z1_16xN_internal_avx2(64, dstvec, left, upsample_left,
3325
16.0k
                                               dy);
3326
16.0k
  } else {
3327
500
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(64, dstvec, left,
3328
500
                                                     upsample_left, dy);
3329
500
  }
3330
82.7k
  for (int i = 0; i < 64; i += 16) {
3331
66.2k
    highbd_transpose16x16_avx2((dstvec + i), d);
3332
1.12M
    for (int j = 0; j < 16; j++) {
3333
1.05M
      _mm256_storeu_si256((__m256i *)(dst + j * stride + i), d[j]);
3334
1.05M
    }
3335
66.2k
  }
3336
16.5k
}
3337
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3338
3339
void av1_highbd_dr_prediction_z3_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
3340
                                      int bh, const uint16_t *above,
3341
                                      const uint16_t *left, int upsample_left,
3342
563k
                                      int dx, int dy, int bd) {
3343
563k
  (void)above;
3344
563k
  (void)dx;
3345
3346
563k
  assert(dx == 1);
3347
563k
  assert(dy > 0);
3348
563k
  if (bw == bh) {
3349
305k
    switch (bw) {
3350
78.5k
      case 4:
3351
78.5k
        highbd_dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy,
3352
78.5k
                                         bd);
3353
78.5k
        break;
3354
82.3k
      case 8:
3355
82.3k
        highbd_dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy,
3356
82.3k
                                         bd);
3357
82.3k
        break;
3358
77.1k
      case 16:
3359
77.1k
        highbd_dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy,
3360
77.1k
                                           bd);
3361
77.1k
        break;
3362
55.7k
      case 32:
3363
55.7k
        highbd_dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy,
3364
55.7k
                                           bd);
3365
55.7k
        break;
3366
12.1k
      case 64:
3367
12.1k
        highbd_dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy,
3368
12.1k
                                           bd);
3369
12.1k
        break;
3370
305k
    }
3371
305k
  } else {
3372
257k
    if (bw < bh) {
3373
72.8k
      if (bw + bw == bh) {
3374
49.1k
        switch (bw) {
3375
13.4k
          case 4:
3376
13.4k
            highbd_dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left,
3377
13.4k
                                             dy, bd);
3378
13.4k
            break;
3379
19.7k
          case 8:
3380
19.7k
            highbd_dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left,
3381
19.7k
                                              dy, bd);
3382
19.7k
            break;
3383
14.3k
          case 16:
3384
14.3k
            highbd_dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left,
3385
14.3k
                                               dy, bd);
3386
14.3k
            break;
3387
1.50k
          case 32:
3388
1.50k
            highbd_dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left,
3389
1.50k
                                               dy, bd);
3390
1.50k
            break;
3391
49.1k
        }
3392
49.1k
      } else {
3393
23.7k
        switch (bw) {
3394
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3395
13.6k
          case 4:
3396
13.6k
            highbd_dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left,
3397
13.6k
                                              dy, bd);
3398
13.6k
            break;
3399
7.45k
          case 8:
3400
7.45k
            highbd_dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left,
3401
7.45k
                                              dy, bd);
3402
7.45k
            break;
3403
2.66k
          case 16:
3404
2.66k
            highbd_dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left,
3405
2.66k
                                               dy, bd);
3406
2.66k
            break;
3407
23.7k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3408
23.7k
        }
3409
23.7k
      }
3410
184k
    } else {
3411
184k
      if (bh + bh == bw) {
3412
83.6k
        switch (bh) {
3413
26.4k
          case 4:
3414
26.4k
            highbd_dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left,
3415
26.4k
                                             dy, bd);
3416
26.4k
            break;
3417
36.4k
          case 8:
3418
36.4k
            highbd_dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left,
3419
36.4k
                                              dy, bd);
3420
36.4k
            break;
3421
18.2k
          case 16:
3422
18.2k
            highbd_dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left,
3423
18.2k
                                               dy, bd);
3424
18.2k
            break;
3425
2.56k
          case 32:
3426
2.56k
            highbd_dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left,
3427
2.56k
                                               dy, bd);
3428
2.56k
            break;
3429
83.6k
        }
3430
100k
      } else {
3431
100k
        switch (bh) {
3432
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3433
41.3k
          case 4:
3434
41.3k
            highbd_dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left,
3435
41.3k
                                              dy, bd);
3436
41.3k
            break;
3437
42.8k
          case 8:
3438
42.8k
            highbd_dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left,
3439
42.8k
                                              dy, bd);
3440
42.8k
            break;
3441
16.5k
          case 16:
3442
16.5k
            highbd_dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left,
3443
16.5k
                                               dy, bd);
3444
16.5k
            break;
3445
100k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3446
100k
        }
3447
100k
      }
3448
184k
    }
3449
257k
  }
3450
563k
  return;
3451
563k
}
3452
#endif  // CONFIG_AV1_HIGHBITDEPTH
3453
3454
// Low bit depth functions
3455
static DECLARE_ALIGNED(32, uint8_t, BaseMask[33][32]) = {
3456
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3457
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3458
  { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3459
    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3460
  { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3461
    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3462
  { 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3463
    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3464
  { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3465
    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3466
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3467
    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3468
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3469
    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3470
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3471
    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0 },
3472
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0,
3473
    0,    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0 },
3474
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0,
3475
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0 },
3476
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
3477
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3478
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3479
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3480
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3481
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3482
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3483
    0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3484
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3485
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3486
    0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0,
3487
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3488
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3489
    0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0,
3490
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3491
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3492
    0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,
3493
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3494
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3495
    0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,
3496
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3497
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3498
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,
3499
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3500
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3501
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,
3502
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3503
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3504
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,
3505
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3506
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3507
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,
3508
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3509
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3510
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
3511
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3512
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3513
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3514
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3515
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3516
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3517
    0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0 },
3518
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3519
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3520
    0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0 },
3521
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3522
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3523
    0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0 },
3524
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3525
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3526
    0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0 },
3527
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3528
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3529
    0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0 },
3530
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3531
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3532
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0 },
3533
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3534
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3535
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0 },
3536
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3537
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3538
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0 },
3539
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3540
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3541
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0 },
3542
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3543
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3544
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
3545
};
3546
3547
/* clang-format on */
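BaseMask[k] has 0xff in its first k bytes and zero elsewhere; combined with _mm_blendv_epi8 it keeps the first k interpolated pixels of a row and fills the remainder with the replicated above[max_base_x] sample. A scalar sketch of that selection (illustrative names only):

  #include <stdint.h>
  static void blend_with_mbase(uint8_t *row, const uint8_t *res,
                               uint8_t mbase_pixel, int valid, int width) {
    /* valid corresponds to base_max_diff in the kernels below */
    for (int i = 0; i < width; ++i)
      row[i] = (i < valid) ? res[i] : mbase_pixel;
  }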
3548
static AOM_FORCE_INLINE void dr_prediction_z1_HxW_internal_avx2(
3549
    int H, int W, __m128i *dst, const uint8_t *above, int upsample_above,
3550
436k
    int dx) {
3551
436k
  const int frac_bits = 6 - upsample_above;
3552
436k
  const int max_base_x = ((W + H) - 1) << upsample_above;
3553
3554
436k
  assert(dx > 0);
3555
  // pre-filter above pixels
3556
  // store in temp buffers:
3557
  //   above[x] * 32 + 16
3558
  //   above[x+1] - above[x]
3559
  // final pixels will be calculated as:
3560
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3561
436k
  __m256i a0, a1, a32, a16;
3562
436k
  __m256i diff, c3f;
3563
436k
  __m128i a_mbase_x;
3564
3565
436k
  a16 = _mm256_set1_epi16(16);
3566
436k
  a_mbase_x = _mm_set1_epi8((int8_t)above[max_base_x]);
3567
436k
  c3f = _mm256_set1_epi16(0x3f);
3568
3569
436k
  int x = dx;
3570
5.92M
  for (int r = 0; r < W; r++) {
3571
5.48M
    __m256i b, res, shift;
3572
5.48M
    __m128i res1, a0_128, a1_128;
3573
3574
5.48M
    int base = x >> frac_bits;
3575
5.48M
    int base_max_diff = (max_base_x - base) >> upsample_above;
3576
5.48M
    if (base_max_diff <= 0) {
3577
7.29k
      for (int i = r; i < W; ++i) {
3578
4.85k
        dst[i] = a_mbase_x;  // save 4 values
3579
4.85k
      }
3580
2.43k
      return;
3581
2.43k
    }
3582
5.48M
    if (base_max_diff > H) base_max_diff = H;
3583
5.48M
    a0_128 = _mm_loadu_si128((__m128i *)(above + base));
3584
5.48M
    a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
3585
3586
5.48M
    if (upsample_above) {
3587
843k
      a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)EvenOddMaskx[0]);
3588
843k
      a1_128 = _mm_srli_si128(a0_128, 8);
3589
3590
843k
      shift = _mm256_srli_epi16(
3591
843k
          _mm256_and_si256(
3592
843k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
3593
843k
          1);
3594
4.64M
    } else {
3595
4.64M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3596
4.64M
    }
3597
5.48M
    a0 = _mm256_cvtepu8_epi16(a0_128);
3598
5.48M
    a1 = _mm256_cvtepu8_epi16(a1_128);
3599
3600
5.48M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3601
5.48M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3602
5.48M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3603
3604
5.48M
    b = _mm256_mullo_epi16(diff, shift);
3605
5.48M
    res = _mm256_add_epi16(a32, b);
3606
5.48M
    res = _mm256_srli_epi16(res, 5);
3607
3608
5.48M
    res = _mm256_packus_epi16(
3609
5.48M
        res, _mm256_castsi128_si256(
3610
5.48M
                 _mm256_extracti128_si256(res, 1)));  // narrow to 8 bit
3611
5.48M
    res1 = _mm256_castsi256_si128(res);               // 16 8bit values
3612
3613
5.48M
    dst[r] =
3614
5.48M
        _mm_blendv_epi8(a_mbase_x, res1, *(__m128i *)BaseMask[base_max_diff]);
3615
5.48M
    x += dx;
3616
5.48M
  }
3617
436k
}
3618
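A minimal scalar sketch of the arithmetic the loop above vectorizes, assuming upsample_above == 0 so frac_bits == 6; z1_pixel_scalar is an illustrative name, not a libaom function. In the kernel x starts at dx and advances by dx per row, so row r samples the above row around position (r + 1) * dx in 1/64-pel units.

#include <stdint.h>

/* Illustrative only: one zone-1 output sample at fixed-point position x,
   matching the "above[x] * 32 + 16" / "above[x+1] - above[x]" temporaries
   described in the comments of dr_prediction_z1_HxW_internal_avx2. */
static uint8_t z1_pixel_scalar(const uint8_t *above, int x) {
  const int base = x >> 6;            /* integer part of the projection     */
  const int shift = (x & 0x3f) >> 1;  /* 5-bit sub-pixel weight, 0..31      */
  const int val = above[base] * 32 + 16 +
                  (above[base + 1] - above[base]) * shift;
  return (uint8_t)(val >> 5);         /* rounded blend of the two neighbors */
}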
3619
static void dr_prediction_z1_4xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3620
                                      const uint8_t *above, int upsample_above,
3621
55.9k
                                      int dx) {
3622
55.9k
  __m128i dstvec[16];
3623
3624
55.9k
  dr_prediction_z1_HxW_internal_avx2(4, N, dstvec, above, upsample_above, dx);
3625
411k
  for (int i = 0; i < N; i++) {
3626
355k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(dstvec[i]);
3627
355k
  }
3628
55.9k
}
3629
3630
static void dr_prediction_z1_8xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3631
                                      const uint8_t *above, int upsample_above,
3632
67.9k
                                      int dx) {
3633
67.9k
  __m128i dstvec[32];
3634
3635
67.9k
  dr_prediction_z1_HxW_internal_avx2(8, N, dstvec, above, upsample_above, dx);
3636
738k
  for (int i = 0; i < N; i++) {
3637
670k
    _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
3638
670k
  }
3639
67.9k
}
3640
3641
static void dr_prediction_z1_16xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3642
                                       const uint8_t *above, int upsample_above,
3643
55.3k
                                       int dx) {
3644
55.3k
  __m128i dstvec[64];
3645
3646
55.3k
  dr_prediction_z1_HxW_internal_avx2(16, N, dstvec, above, upsample_above, dx);
3647
812k
  for (int i = 0; i < N; i++) {
3648
757k
    _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
3649
757k
  }
3650
55.3k
}
3651
3652
static AOM_FORCE_INLINE void dr_prediction_z1_32xN_internal_avx2(
3653
80.2k
    int N, __m256i *dstvec, const uint8_t *above, int upsample_above, int dx) {
3654
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
3655
80.2k
  (void)upsample_above;
3656
80.2k
  const int frac_bits = 6;
3657
80.2k
  const int max_base_x = ((32 + N) - 1);
3658
3659
  // pre-filter above pixels
3660
  // store in temp buffers:
3661
  //   above[x] * 32 + 16
3662
  //   above[x+1] - above[x]
3663
  // final pixels will be calculated as:
3664
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3665
80.2k
  __m256i a0, a1, a32, a16;
3666
80.2k
  __m256i a_mbase_x, diff, c3f;
3667
3668
80.2k
  a16 = _mm256_set1_epi16(16);
3669
80.2k
  a_mbase_x = _mm256_set1_epi8((int8_t)above[max_base_x]);
3670
80.2k
  c3f = _mm256_set1_epi16(0x3f);
3671
3672
80.2k
  int x = dx;
3673
2.25M
  for (int r = 0; r < N; r++) {
3674
2.17M
    __m256i b, res, res16[2];
3675
2.17M
    __m128i a0_128, a1_128;
3676
3677
2.17M
    int base = x >> frac_bits;
3678
2.17M
    int base_max_diff = (max_base_x - base);
3679
2.17M
    if (base_max_diff <= 0) {
3680
0
      for (int i = r; i < N; ++i) {
3681
0
        dstvec[i] = a_mbase_x;  // save 32 values
3682
0
      }
3683
0
      return;
3684
0
    }
3685
2.17M
    if (base_max_diff > 32) base_max_diff = 32;
3686
2.17M
    __m256i shift =
3687
2.17M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3688
3689
6.52M
    for (int j = 0, jj = 0; j < 32; j += 16, jj++) {
3690
4.35M
      int mdiff = base_max_diff - j;
3691
4.35M
      if (mdiff <= 0) {
3692
405
        res16[jj] = a_mbase_x;
3693
4.34M
      } else {
3694
4.34M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
3695
4.34M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + j + 1));
3696
4.34M
        a0 = _mm256_cvtepu8_epi16(a0_128);
3697
4.34M
        a1 = _mm256_cvtepu8_epi16(a1_128);
3698
3699
4.34M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3700
4.34M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3701
4.34M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3702
4.34M
        b = _mm256_mullo_epi16(diff, shift);
3703
3704
4.34M
        res = _mm256_add_epi16(a32, b);
3705
4.34M
        res = _mm256_srli_epi16(res, 5);
3706
4.34M
        res16[jj] = _mm256_packus_epi16(
3707
4.34M
            res, _mm256_castsi128_si256(
3708
4.34M
                     _mm256_extracti128_si256(res, 1)));  // 16 8bit values
3709
4.34M
      }
3710
4.35M
    }
3711
2.17M
    res16[1] =
3712
2.17M
        _mm256_inserti128_si256(res16[0], _mm256_castsi256_si128(res16[1]),
3713
2.17M
                                1);  // 32 8bit values
3714
3715
2.17M
    dstvec[r] = _mm256_blendv_epi8(
3716
2.17M
        a_mbase_x, res16[1],
3717
2.17M
        *(__m256i *)BaseMask[base_max_diff]);  // 32 8bit values
3718
2.17M
    x += dx;
3719
2.17M
  }
3720
80.2k
}
3721
3722
static void dr_prediction_z1_32xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3723
                                       const uint8_t *above, int upsample_above,
3724
31.0k
                                       int dx) {
3725
31.0k
  __m256i dstvec[64];
3726
31.0k
  dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above, dx);
3727
874k
  for (int i = 0; i < N; i++) {
3728
843k
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
3729
843k
  }
3730
31.0k
}
3731
3732
static void dr_prediction_z1_64xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3733
                                       const uint8_t *above, int upsample_above,
3734
18.9k
                                       int dx) {
3735
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
3736
18.9k
  (void)upsample_above;
3737
18.9k
  const int frac_bits = 6;
3738
18.9k
  const int max_base_x = ((64 + N) - 1);
3739
3740
  // pre-filter above pixels
3741
  // store in temp buffers:
3742
  //   above[x] * 32 + 16
3743
  //   above[x+1] - above[x]
3744
  // final pixels will be calculated as:
3745
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3746
18.9k
  __m256i a0, a1, a32, a16;
3747
18.9k
  __m256i a_mbase_x, diff, c3f;
3748
18.9k
  __m128i max_base_x128, base_inc128, mask128;
3749
3750
18.9k
  a16 = _mm256_set1_epi16(16);
3751
18.9k
  a_mbase_x = _mm256_set1_epi8((int8_t)above[max_base_x]);
3752
18.9k
  max_base_x128 = _mm_set1_epi8(max_base_x);
3753
18.9k
  c3f = _mm256_set1_epi16(0x3f);
3754
3755
18.9k
  int x = dx;
3756
1.02M
  for (int r = 0; r < N; r++, dst += stride) {
3757
1.01M
    __m256i b, res;
3758
1.01M
    int base = x >> frac_bits;
3759
1.01M
    if (base >= max_base_x) {
3760
0
      for (int i = r; i < N; ++i) {
3761
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 32 values
3762
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
3763
0
        dst += stride;
3764
0
      }
3765
0
      return;
3766
0
    }
3767
3768
1.01M
    __m256i shift =
3769
1.01M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3770
3771
1.01M
    __m128i a0_128, a1_128, res128;
3772
5.05M
    for (int j = 0; j < 64; j += 16) {
3773
4.04M
      int mdif = max_base_x - (base + j);
3774
4.04M
      if (mdif <= 0) {
3775
1.54k
        _mm_storeu_si128((__m128i *)(dst + j),
3776
1.54k
                         _mm256_castsi256_si128(a_mbase_x));
3777
4.04M
      } else {
3778
4.04M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
3779
4.04M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
3780
4.04M
        a0 = _mm256_cvtepu8_epi16(a0_128);
3781
4.04M
        a1 = _mm256_cvtepu8_epi16(a1_128);
3782
3783
4.04M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3784
4.04M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3785
4.04M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3786
4.04M
        b = _mm256_mullo_epi16(diff, shift);
3787
3788
4.04M
        res = _mm256_add_epi16(a32, b);
3789
4.04M
        res = _mm256_srli_epi16(res, 5);
3790
4.04M
        res = _mm256_packus_epi16(
3791
4.04M
            res, _mm256_castsi128_si256(
3792
4.04M
                     _mm256_extracti128_si256(res, 1)));  // 16 8bit values
3793
3794
4.04M
        base_inc128 =
3795
4.04M
            _mm_setr_epi8((int8_t)(base + j), (int8_t)(base + j + 1),
3796
4.04M
                          (int8_t)(base + j + 2), (int8_t)(base + j + 3),
3797
4.04M
                          (int8_t)(base + j + 4), (int8_t)(base + j + 5),
3798
4.04M
                          (int8_t)(base + j + 6), (int8_t)(base + j + 7),
3799
4.04M
                          (int8_t)(base + j + 8), (int8_t)(base + j + 9),
3800
4.04M
                          (int8_t)(base + j + 10), (int8_t)(base + j + 11),
3801
4.04M
                          (int8_t)(base + j + 12), (int8_t)(base + j + 13),
3802
4.04M
                          (int8_t)(base + j + 14), (int8_t)(base + j + 15));
3803
3804
4.04M
        mask128 = _mm_cmpgt_epi8(_mm_subs_epu8(max_base_x128, base_inc128),
3805
4.04M
                                 _mm_setzero_si128());
3806
4.04M
        res128 = _mm_blendv_epi8(_mm256_castsi256_si128(a_mbase_x),
3807
4.04M
                                 _mm256_castsi256_si128(res), mask128);
3808
4.04M
        _mm_storeu_si128((__m128i *)(dst + j), res128);
3809
4.04M
      }
3810
4.04M
    }
3811
1.01M
    x += dx;
3812
1.01M
  }
3813
18.9k
}
3814
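The 64-wide kernel cannot index the BaseMask table directly, so it rebuilds the right-edge mask for every 16-pixel chunk with base_inc128 / _mm_subs_epu8 / _mm_blendv_epi8. A hedged scalar restatement of what that selection does (z1_store_chunk_scalar is illustrative, not library code; same <stdint.h> assumption as the earlier sketch):

/* Lanes whose source index has reached max_base_x take the last valid
   above[] sample instead of the interpolated value. */
static void z1_store_chunk_scalar(uint8_t *dst, const uint8_t *pred16,
                                  const uint8_t *above, int base, int j,
                                  int max_base_x) {
  for (int k = 0; k < 16; ++k) {
    dst[j + k] = (base + j + k < max_base_x) ? pred16[k] : above[max_base_x];
  }
}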
3815
// Directional prediction, zone 1: 0 < angle < 90
3816
void av1_dr_prediction_z1_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
3817
                               const uint8_t *above, const uint8_t *left,
3818
215k
                               int upsample_above, int dx, int dy) {
3819
215k
  (void)left;
3820
215k
  (void)dy;
3821
215k
  switch (bw) {
3822
55.9k
    case 4:
3823
55.9k
      dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above, dx);
3824
55.9k
      break;
3825
67.9k
    case 8:
3826
67.9k
      dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above, dx);
3827
67.9k
      break;
3828
55.3k
    case 16:
3829
55.3k
      dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above, dx);
3830
55.3k
      break;
3831
29.5k
    case 32:
3832
29.5k
      dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above, dx);
3833
29.5k
      break;
3834
6.93k
    case 64:
3835
6.93k
      dr_prediction_z1_64xN_avx2(bh, dst, stride, above, upsample_above, dx);
3836
6.93k
      break;
3837
0
    default: break;
3838
215k
  }
3839
215k
  return;
3840
215k
}
3841
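A hypothetical call site, only to show how the arguments line up; the dx value and buffer sizes are assumptions, not taken from this report. Zone 1 ignores left and dy, and above[] must cover the projected range (bw + bh samples plus whatever padding the caller normally provides for the 16-byte loads).

/* Hypothetical example: an 8x8 zone-1 prediction at a 45-degree angle,
   where dx == 64 advances one full pixel per row. */
static void example_z1_8x8(uint8_t *pred, const uint8_t above[32],
                           const uint8_t left[16]) {
  av1_dr_prediction_z1_avx2(pred, /*stride=*/8, /*bw=*/8, /*bh=*/8,
                            above, left, /*upsample_above=*/0,
                            /*dx=*/64, /*dy=*/0);
}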
3842
static void dr_prediction_z2_Nx4_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3843
                                      const uint8_t *above, const uint8_t *left,
3844
                                      int upsample_above, int upsample_left,
3845
178k
                                      int dx, int dy) {
3846
178k
  const int min_base_x = -(1 << upsample_above);
3847
178k
  const int min_base_y = -(1 << upsample_left);
3848
178k
  const int frac_bits_x = 6 - upsample_above;
3849
178k
  const int frac_bits_y = 6 - upsample_left;
3850
3851
178k
  assert(dx > 0);
3852
  // pre-filter above pixels
3853
  // store in temp buffers:
3854
  //   above[x] * 32 + 16
3855
  //   above[x+1] - above[x]
3856
  // final pixels will be calculated as:
3857
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3858
178k
  __m128i a0_x, a1_x, a32, a16, diff;
3859
178k
  __m128i c3f, min_base_y128, c1234, dy128;
3860
3861
178k
  a16 = _mm_set1_epi16(16);
3862
178k
  c3f = _mm_set1_epi16(0x3f);
3863
178k
  min_base_y128 = _mm_set1_epi16(min_base_y);
3864
178k
  c1234 = _mm_setr_epi16(0, 1, 2, 3, 4, 0, 0, 0);
3865
178k
  dy128 = _mm_set1_epi16(dy);
3866
3867
1.16M
  for (int r = 0; r < N; r++) {
3868
981k
    __m128i b, res, shift, r6, ydx;
3869
981k
    __m128i resx, resy, resxy;
3870
981k
    __m128i a0_x128, a1_x128;
3871
981k
    int y = r + 1;
3872
981k
    int base_x = (-y * dx) >> frac_bits_x;
3873
981k
    int base_shift = 0;
3874
981k
    if (base_x < (min_base_x - 1)) {
3875
765k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
3876
765k
    }
3877
981k
    int base_min_diff =
3878
981k
        (min_base_x - base_x + upsample_above) >> upsample_above;
3879
981k
    if (base_min_diff > 4) {
3880
526k
      base_min_diff = 4;
3881
526k
    } else {
3882
455k
      if (base_min_diff < 0) base_min_diff = 0;
3883
455k
    }
3884
3885
981k
    if (base_shift > 3) {
3886
526k
      a0_x = _mm_setzero_si128();
3887
526k
      a1_x = _mm_setzero_si128();
3888
526k
      shift = _mm_setzero_si128();
3889
526k
    } else {
3890
455k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
3891
455k
      ydx = _mm_set1_epi16(y * dx);
3892
455k
      r6 = _mm_slli_epi16(c1234, 6);
3893
3894
455k
      if (upsample_above) {
3895
144k
        a0_x128 =
3896
144k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
3897
144k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
3898
3899
144k
        shift = _mm_srli_epi16(
3900
144k
            _mm_and_si128(
3901
144k
                _mm_slli_epi16(_mm_sub_epi16(r6, ydx), upsample_above), c3f),
3902
144k
            1);
3903
311k
      } else {
3904
311k
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
3905
311k
        a1_x128 = _mm_srli_si128(a0_x128, 1);
3906
3907
311k
        shift = _mm_srli_epi16(_mm_and_si128(_mm_sub_epi16(r6, ydx), c3f), 1);
3908
311k
      }
3909
455k
      a0_x = _mm_cvtepu8_epi16(a0_x128);
3910
455k
      a1_x = _mm_cvtepu8_epi16(a1_x128);
3911
455k
    }
3912
    // y calc
3913
981k
    __m128i a0_y, a1_y, shifty;
3914
981k
    if (base_x < min_base_x) {
3915
854k
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
3916
854k
      __m128i y_c128, base_y_c128, mask128, c1234_;
3917
854k
      c1234_ = _mm_srli_si128(c1234, 2);
3918
854k
      r6 = _mm_set1_epi16(r << 6);
3919
854k
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234_, dy128));
3920
854k
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
3921
854k
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
3922
854k
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
3923
854k
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
3924
3925
854k
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
3926
854k
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
3927
854k
      base_y_c128 = _mm_add_epi16(base_y_c128, _mm_srli_epi16(a16, 4));
3928
854k
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
3929
854k
      a1_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
3930
854k
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
3931
3932
854k
      if (upsample_left) {
3933
511k
        shifty = _mm_srli_epi16(
3934
511k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
3935
511k
      } else {
3936
343k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
3937
343k
      }
3938
854k
      a0_x = _mm_unpacklo_epi64(a0_x, a0_y);
3939
854k
      a1_x = _mm_unpacklo_epi64(a1_x, a1_y);
3940
854k
      shift = _mm_unpacklo_epi64(shift, shifty);
3941
854k
    }
3942
3943
981k
    diff = _mm_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
3944
981k
    a32 = _mm_slli_epi16(a0_x, 5);     // a[x] * 32
3945
981k
    a32 = _mm_add_epi16(a32, a16);     // a[x] * 32 + 16
3946
3947
981k
    b = _mm_mullo_epi16(diff, shift);
3948
981k
    res = _mm_add_epi16(a32, b);
3949
981k
    res = _mm_srli_epi16(res, 5);
3950
3951
981k
    resx = _mm_packus_epi16(res, res);
3952
981k
    resy = _mm_srli_si128(resx, 4);
3953
3954
981k
    resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
3955
981k
    *(int *)(dst) = _mm_cvtsi128_si32(resxy);
3956
981k
    dst += stride;
3957
981k
  }
3958
178k
}
3959
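A hedged scalar restatement of what one zone-2 sample works out to, derived from the vector code above and assuming upsample_above == upsample_left == 0 (both frac_bits equal to 6); z2_pixel_scalar is illustrative, not a libaom function. Column c of row r projects onto the above row at c*64 - (r+1)*dx; once that projection falls left of above[-1] (normally the top-left reference sample), the pixel is taken from the left column at r*64 - (c+1)*dy instead, which is the split the BaseMask[base_min_diff] blend performs.

static uint8_t z2_pixel_scalar(const uint8_t *above, const uint8_t *left,
                               int r, int c, int dx, int dy) {
  const int x = (c << 6) - (r + 1) * dx;  /* projection onto the above row */
  if ((x >> 6) >= -1) {                   /* still covered by above[]      */
    const int bx = x >> 6;
    const int sx = (x & 0x3f) >> 1;
    return (uint8_t)((above[bx] * 32 + 16 +
                      (above[bx + 1] - above[bx]) * sx) >> 5);
  }
  const int y = (r << 6) - (c + 1) * dy;  /* projection onto the left col  */
  const int by = y >> 6;
  const int sy = (y & 0x3f) >> 1;
  return (uint8_t)((left[by] * 32 + 16 +
                    (left[by + 1] - left[by]) * sy) >> 5);
}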
3960
static void dr_prediction_z2_Nx8_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3961
                                      const uint8_t *above, const uint8_t *left,
3962
                                      int upsample_above, int upsample_left,
3963
137k
                                      int dx, int dy) {
3964
137k
  const int min_base_x = -(1 << upsample_above);
3965
137k
  const int min_base_y = -(1 << upsample_left);
3966
137k
  const int frac_bits_x = 6 - upsample_above;
3967
137k
  const int frac_bits_y = 6 - upsample_left;
3968
3969
  // pre-filter above pixels
3970
  // store in temp buffers:
3971
  //   above[x] * 32 + 16
3972
  //   above[x+1] - above[x]
3973
  // final pixels will be calculated as:
3974
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3975
137k
  __m256i diff, a32, a16;
3976
137k
  __m256i a0_x, a1_x;
3977
137k
  __m128i a0_x128, a1_x128, min_base_y128, c3f;
3978
137k
  __m128i c1234, dy128;
3979
3980
137k
  a16 = _mm256_set1_epi16(16);
3981
137k
  c3f = _mm_set1_epi16(0x3f);
3982
137k
  min_base_y128 = _mm_set1_epi16(min_base_y);
3983
137k
  dy128 = _mm_set1_epi16(dy);
3984
137k
  c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
3985
3986
1.42M
  for (int r = 0; r < N; r++) {
3987
1.28M
    __m256i b, res, shift;
3988
1.28M
    __m128i resx, resy, resxy, r6, ydx;
3989
3990
1.28M
    int y = r + 1;
3991
1.28M
    int base_x = (-y * dx) >> frac_bits_x;
3992
1.28M
    int base_shift = 0;
3993
1.28M
    if (base_x < (min_base_x - 1)) {
3994
971k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
3995
971k
    }
3996
1.28M
    int base_min_diff =
3997
1.28M
        (min_base_x - base_x + upsample_above) >> upsample_above;
3998
1.28M
    if (base_min_diff > 8) {
3999
571k
      base_min_diff = 8;
4000
714k
    } else {
4001
714k
      if (base_min_diff < 0) base_min_diff = 0;
4002
714k
    }
4003
4004
1.28M
    if (base_shift > 7) {
4005
571k
      a0_x = _mm256_setzero_si256();
4006
571k
      a1_x = _mm256_setzero_si256();
4007
571k
      shift = _mm256_setzero_si256();
4008
714k
    } else {
4009
714k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
4010
714k
      ydx = _mm_set1_epi16(y * dx);
4011
714k
      r6 = _mm_slli_epi16(_mm_srli_si128(c1234, 2), 6);
4012
714k
      if (upsample_above) {
4013
228k
        a0_x128 =
4014
228k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
4015
228k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
4016
4017
228k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
4018
228k
            _mm_and_si128(
4019
228k
                _mm_slli_epi16(_mm_sub_epi16(r6, ydx), upsample_above), c3f),
4020
228k
            1));
4021
485k
      } else {
4022
485k
        a1_x128 = _mm_srli_si128(a0_x128, 1);
4023
485k
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
4024
485k
        a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
4025
4026
485k
        shift = _mm256_castsi128_si256(
4027
485k
            _mm_srli_epi16(_mm_and_si128(_mm_sub_epi16(r6, ydx), c3f), 1));
4028
485k
      }
4029
714k
      a0_x = _mm256_castsi128_si256(_mm_cvtepu8_epi16(a0_x128));
4030
714k
      a1_x = _mm256_castsi128_si256(_mm_cvtepu8_epi16(a1_x128));
4031
714k
    }
4032
4033
    // y calc
4034
1.28M
    __m128i a0_y, a1_y, shifty;
4035
1.28M
    if (base_x < min_base_x) {
4036
1.08M
      DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
4037
1.08M
      __m128i y_c128, base_y_c128, mask128;
4038
1.08M
      r6 = _mm_set1_epi16(r << 6);
4039
1.08M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
4040
1.08M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
4041
1.08M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
4042
1.08M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
4043
1.08M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
4044
4045
1.08M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
4046
1.08M
                            left[base_y_c[2]], left[base_y_c[3]],
4047
1.08M
                            left[base_y_c[4]], left[base_y_c[5]],
4048
1.08M
                            left[base_y_c[6]], left[base_y_c[7]]);
4049
1.08M
      base_y_c128 = _mm_add_epi16(
4050
1.08M
          base_y_c128, _mm_srli_epi16(_mm256_castsi256_si128(a16), 4));
4051
1.08M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
4052
4053
1.08M
      a1_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
4054
1.08M
                            left[base_y_c[2]], left[base_y_c[3]],
4055
1.08M
                            left[base_y_c[4]], left[base_y_c[5]],
4056
1.08M
                            left[base_y_c[6]], left[base_y_c[7]]);
4057
4058
1.08M
      if (upsample_left) {
4059
297k
        shifty = _mm_srli_epi16(
4060
297k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
4061
783k
      } else {
4062
783k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
4063
783k
      }
4064
4065
1.08M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
4066
1.08M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
4067
1.08M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
4068
1.08M
    }
4069
4070
1.28M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
4071
1.28M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
4072
1.28M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4073
4074
1.28M
    b = _mm256_mullo_epi16(diff, shift);
4075
1.28M
    res = _mm256_add_epi16(a32, b);
4076
1.28M
    res = _mm256_srli_epi16(res, 5);
4077
4078
1.28M
    resx = _mm_packus_epi16(_mm256_castsi256_si128(res),
4079
1.28M
                            _mm256_castsi256_si128(res));
4080
1.28M
    resy = _mm256_extracti128_si256(res, 1);
4081
1.28M
    resy = _mm_packus_epi16(resy, resy);
4082
4083
1.28M
    resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
4084
1.28M
    _mm_storel_epi64((__m128i *)(dst), resxy);
4085
1.28M
    dst += stride;
4086
1.28M
  }
4087
137k
}
4088
4089
static void dr_prediction_z2_HxW_avx2(int H, int W, uint8_t *dst,
4090
                                      ptrdiff_t stride, const uint8_t *above,
4091
                                      const uint8_t *left, int upsample_above,
4092
202k
                                      int upsample_left, int dx, int dy) {
4093
  // here upsample_above and upsample_left are 0 by design of
4094
  // av1_use_intra_edge_upsample
4095
202k
  const int min_base_x = -1;
4096
202k
  const int min_base_y = -1;
4097
202k
  (void)upsample_above;
4098
202k
  (void)upsample_left;
4099
202k
  const int frac_bits_x = 6;
4100
202k
  const int frac_bits_y = 6;
4101
4102
202k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c1234, c0123;
4103
202k
  __m256i diff, min_base_y256, c3f, shifty, dy256, c1;
4104
202k
  __m128i a0_x128, a1_x128;
4105
4106
202k
  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
4107
202k
  a16 = _mm256_set1_epi16(16);
4108
202k
  c1 = _mm256_srli_epi16(a16, 4);
4109
202k
  min_base_y256 = _mm256_set1_epi16(min_base_y);
4110
202k
  c3f = _mm256_set1_epi16(0x3f);
4111
202k
  dy256 = _mm256_set1_epi16(dy);
4112
202k
  c0123 =
4113
202k
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4114
202k
  c1234 = _mm256_add_epi16(c0123, c1);
4115
4116
3.88M
  for (int r = 0; r < H; r++) {
4117
3.67M
    __m256i b, res, shift, j256, r6, ydx;
4118
3.67M
    __m128i resx, resy;
4119
3.67M
    __m128i resxy;
4120
3.67M
    int y = r + 1;
4121
3.67M
    ydx = _mm256_set1_epi16((int16_t)(y * dx));
4122
4123
3.67M
    int base_x = (-y * dx) >> frac_bits_x;
4124
10.5M
    for (int j = 0; j < W; j += 16) {
4125
6.89M
      j256 = _mm256_set1_epi16(j);
4126
6.89M
      int base_shift = 0;
4127
6.89M
      if ((base_x + j) < (min_base_x - 1)) {
4128
5.04M
        base_shift = (min_base_x - (base_x + j) - 1);
4129
5.04M
      }
4130
6.89M
      int base_min_diff = (min_base_x - base_x - j);
4131
6.89M
      if (base_min_diff > 16) {
4132
3.67M
        base_min_diff = 16;
4133
3.67M
      } else {
4134
3.21M
        if (base_min_diff < 0) base_min_diff = 0;
4135
3.21M
      }
4136
4137
6.89M
      if (base_shift < 16) {
4138
3.21M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + j));
4139
3.21M
        a1_x128 =
4140
3.21M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1 + j));
4141
3.21M
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
4142
3.21M
        a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
4143
4144
3.21M
        a0_x = _mm256_cvtepu8_epi16(a0_x128);
4145
3.21M
        a1_x = _mm256_cvtepu8_epi16(a1_x128);
4146
4147
3.21M
        r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
4148
3.21M
        shift = _mm256_srli_epi16(
4149
3.21M
            _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
4150
4151
3.21M
        diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
4152
3.21M
        a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
4153
3.21M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4154
4155
3.21M
        b = _mm256_mullo_epi16(diff, shift);
4156
3.21M
        res = _mm256_add_epi16(a32, b);
4157
3.21M
        res = _mm256_srli_epi16(res, 5);  // 16 16-bit values
4158
3.21M
        resx = _mm256_castsi256_si128(_mm256_packus_epi16(
4159
3.21M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
4160
3.67M
      } else {
4161
3.67M
        resx = _mm_setzero_si128();
4162
3.67M
      }
4163
4164
      // y calc
4165
6.89M
      if (base_x < min_base_x) {
4166
6.40M
        __m256i c256, y_c256, base_y_c256, mask256, mul16;
4167
6.40M
        r6 = _mm256_set1_epi16(r << 6);
4168
6.40M
        c256 = _mm256_add_epi16(j256, c1234);
4169
6.40M
        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
4170
6.40M
                                 _mm256_srli_epi16(min_base_y256, 1));
4171
6.40M
        y_c256 = _mm256_sub_epi16(r6, mul16);
4172
4173
6.40M
        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
4174
6.40M
        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
4175
4176
6.40M
        base_y_c256 = _mm256_blendv_epi8(base_y_c256, min_base_y256, mask256);
4177
6.40M
        int16_t min_y = (int16_t)_mm_extract_epi16(
4178
6.40M
            _mm256_extracti128_si256(base_y_c256, 1), 7);
4179
6.40M
        int16_t max_y =
4180
6.40M
            (int16_t)_mm_extract_epi16(_mm256_castsi256_si128(base_y_c256), 0);
4181
6.40M
        int16_t offset_diff = max_y - min_y;
4182
4183
6.40M
        if (offset_diff < 16) {
4184
6.05M
          __m256i min_y256 = _mm256_set1_epi16(min_y);
4185
4186
6.05M
          __m256i base_y_offset = _mm256_sub_epi16(base_y_c256, min_y256);
4187
6.05M
          __m128i base_y_offset128 =
4188
6.05M
              _mm_packs_epi16(_mm256_extracti128_si256(base_y_offset, 0),
4189
6.05M
                              _mm256_extracti128_si256(base_y_offset, 1));
4190
4191
6.05M
          __m128i a0_y128 = _mm_maskload_epi32(
4192
6.05M
              (int *)(left + min_y), *(__m128i *)LoadMaskz2[offset_diff / 4]);
4193
6.05M
          __m128i a1_y128 =
4194
6.05M
              _mm_maskload_epi32((int *)(left + min_y + 1),
4195
6.05M
                                 *(__m128i *)LoadMaskz2[offset_diff / 4]);
4196
6.05M
          a0_y128 = _mm_shuffle_epi8(a0_y128, base_y_offset128);
4197
6.05M
          a1_y128 = _mm_shuffle_epi8(a1_y128, base_y_offset128);
4198
6.05M
          a0_y = _mm256_cvtepu8_epi16(a0_y128);
4199
6.05M
          a1_y = _mm256_cvtepu8_epi16(a1_y128);
4200
6.05M
        } else {
4201
351k
          base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
4202
351k
          _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
4203
4204
351k
          a0_y = _mm256_setr_epi16(
4205
351k
              left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
4206
351k
              left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
4207
351k
              left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
4208
351k
              left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
4209
351k
              left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
4210
351k
              left[base_y_c[15]]);
4211
351k
          base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
4212
351k
          _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
4213
4214
351k
          a1_y = _mm256_setr_epi16(
4215
351k
              left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
4216
351k
              left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
4217
351k
              left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
4218
351k
              left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
4219
351k
              left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
4220
351k
              left[base_y_c[15]]);
4221
351k
        }
4222
6.40M
        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
4223
4224
6.40M
        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
4225
6.40M
        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
4226
6.40M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4227
4228
6.40M
        b = _mm256_mullo_epi16(diff, shifty);
4229
6.40M
        res = _mm256_add_epi16(a32, b);
4230
6.40M
        res = _mm256_srli_epi16(res, 5);  // 16 16-bit values
4231
6.40M
        resy = _mm256_castsi256_si128(_mm256_packus_epi16(
4232
6.40M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
4233
6.40M
      } else {
4234
487k
        resy = _mm_setzero_si128();
4235
487k
      }
4236
6.89M
      resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
4237
6.89M
      _mm_storeu_si128((__m128i *)(dst + j), resxy);
4238
6.89M
    }  // for j
4239
3.67M
    dst += stride;
4240
3.67M
  }
4241
202k
}
4242
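One detail worth calling out in the HxW zone-2 kernel above: when the 16 left-column indices of a chunk span fewer than 16 entries (offset_diff < 16), the kernel loads one contiguous run starting at left[min_y] with _mm_maskload_epi32 and permutes it into lane order with _mm_shuffle_epi8, relying on left[min_y + (base_y_c[k] - min_y)] == left[base_y_c[k]]; only the wide-span case falls back to the element-by-element _mm256_setr_epi16 gather. The counts above (6.05M vs 351k) show the contiguous path dominating.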
4243
// Directional prediction, zone 2: 90 < angle < 180
4244
void av1_dr_prediction_z2_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
4245
                               const uint8_t *above, const uint8_t *left,
4246
                               int upsample_above, int upsample_left, int dx,
4247
519k
                               int dy) {
4248
519k
  assert(dx > 0);
4249
519k
  assert(dy > 0);
4250
519k
  switch (bw) {
4251
178k
    case 4:
4252
178k
      dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left, upsample_above,
4253
178k
                                upsample_left, dx, dy);
4254
178k
      break;
4255
137k
    case 8:
4256
137k
      dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left, upsample_above,
4257
137k
                                upsample_left, dx, dy);
4258
137k
      break;
4259
202k
    default:
4260
202k
      dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
4261
202k
                                upsample_above, upsample_left, dx, dy);
4262
202k
      break;
4263
519k
  }
4264
519k
  return;
4265
519k
}
4266
4267
// z3 functions
4268
85.5k
static inline void transpose16x32_avx2(__m256i *x, __m256i *d) {
4269
85.5k
  __m256i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
4270
85.5k
  __m256i w10, w11, w12, w13, w14, w15;
4271
4272
85.5k
  w0 = _mm256_unpacklo_epi8(x[0], x[1]);
4273
85.5k
  w1 = _mm256_unpacklo_epi8(x[2], x[3]);
4274
85.5k
  w2 = _mm256_unpacklo_epi8(x[4], x[5]);
4275
85.5k
  w3 = _mm256_unpacklo_epi8(x[6], x[7]);
4276
4277
85.5k
  w8 = _mm256_unpacklo_epi8(x[8], x[9]);
4278
85.5k
  w9 = _mm256_unpacklo_epi8(x[10], x[11]);
4279
85.5k
  w10 = _mm256_unpacklo_epi8(x[12], x[13]);
4280
85.5k
  w11 = _mm256_unpacklo_epi8(x[14], x[15]);
4281
4282
85.5k
  w4 = _mm256_unpacklo_epi16(w0, w1);
4283
85.5k
  w5 = _mm256_unpacklo_epi16(w2, w3);
4284
85.5k
  w12 = _mm256_unpacklo_epi16(w8, w9);
4285
85.5k
  w13 = _mm256_unpacklo_epi16(w10, w11);
4286
4287
85.5k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4288
85.5k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4289
85.5k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4290
85.5k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4291
4292
  // Store first 4-line result
4293
85.5k
  d[0] = _mm256_unpacklo_epi64(w6, w14);
4294
85.5k
  d[1] = _mm256_unpackhi_epi64(w6, w14);
4295
85.5k
  d[2] = _mm256_unpacklo_epi64(w7, w15);
4296
85.5k
  d[3] = _mm256_unpackhi_epi64(w7, w15);
4297
4298
85.5k
  w4 = _mm256_unpackhi_epi16(w0, w1);
4299
85.5k
  w5 = _mm256_unpackhi_epi16(w2, w3);
4300
85.5k
  w12 = _mm256_unpackhi_epi16(w8, w9);
4301
85.5k
  w13 = _mm256_unpackhi_epi16(w10, w11);
4302
4303
85.5k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4304
85.5k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4305
85.5k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4306
85.5k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4307
4308
  // Store second 4-line result
4309
85.5k
  d[4] = _mm256_unpacklo_epi64(w6, w14);
4310
85.5k
  d[5] = _mm256_unpackhi_epi64(w6, w14);
4311
85.5k
  d[6] = _mm256_unpacklo_epi64(w7, w15);
4312
85.5k
  d[7] = _mm256_unpackhi_epi64(w7, w15);
4313
4314
  // upper half
4315
85.5k
  w0 = _mm256_unpackhi_epi8(x[0], x[1]);
4316
85.5k
  w1 = _mm256_unpackhi_epi8(x[2], x[3]);
4317
85.5k
  w2 = _mm256_unpackhi_epi8(x[4], x[5]);
4318
85.5k
  w3 = _mm256_unpackhi_epi8(x[6], x[7]);
4319
4320
85.5k
  w8 = _mm256_unpackhi_epi8(x[8], x[9]);
4321
85.5k
  w9 = _mm256_unpackhi_epi8(x[10], x[11]);
4322
85.5k
  w10 = _mm256_unpackhi_epi8(x[12], x[13]);
4323
85.5k
  w11 = _mm256_unpackhi_epi8(x[14], x[15]);
4324
4325
85.5k
  w4 = _mm256_unpacklo_epi16(w0, w1);
4326
85.5k
  w5 = _mm256_unpacklo_epi16(w2, w3);
4327
85.5k
  w12 = _mm256_unpacklo_epi16(w8, w9);
4328
85.5k
  w13 = _mm256_unpacklo_epi16(w10, w11);
4329
4330
85.5k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4331
85.5k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4332
85.5k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4333
85.5k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4334
4335
  // Store first 4-line result
4336
85.5k
  d[8] = _mm256_unpacklo_epi64(w6, w14);
4337
85.5k
  d[9] = _mm256_unpackhi_epi64(w6, w14);
4338
85.5k
  d[10] = _mm256_unpacklo_epi64(w7, w15);
4339
85.5k
  d[11] = _mm256_unpackhi_epi64(w7, w15);
4340
4341
85.5k
  w4 = _mm256_unpackhi_epi16(w0, w1);
4342
85.5k
  w5 = _mm256_unpackhi_epi16(w2, w3);
4343
85.5k
  w12 = _mm256_unpackhi_epi16(w8, w9);
4344
85.5k
  w13 = _mm256_unpackhi_epi16(w10, w11);
4345
4346
85.5k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4347
85.5k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4348
85.5k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4349
85.5k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4350
4351
  // Store second 4-line result
4352
85.5k
  d[12] = _mm256_unpacklo_epi64(w6, w14);
4353
85.5k
  d[13] = _mm256_unpackhi_epi64(w6, w14);
4354
85.5k
  d[14] = _mm256_unpacklo_epi64(w7, w15);
4355
85.5k
  d[15] = _mm256_unpackhi_epi64(w7, w15);
4356
85.5k
}
4357
4358
static void dr_prediction_z3_4x4_avx2(uint8_t *dst, ptrdiff_t stride,
4359
                                      const uint8_t *left, int upsample_left,
4360
31.2k
                                      int dy) {
4361
31.2k
  __m128i dstvec[4], d[4];
4362
4363
31.2k
  dr_prediction_z1_HxW_internal_avx2(4, 4, dstvec, left, upsample_left, dy);
4364
31.2k
  transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
4365
31.2k
                            &d[0], &d[1], &d[2], &d[3]);
4366
4367
31.2k
  *(int *)(dst + stride * 0) = _mm_cvtsi128_si32(d[0]);
4368
31.2k
  *(int *)(dst + stride * 1) = _mm_cvtsi128_si32(d[1]);
4369
31.2k
  *(int *)(dst + stride * 2) = _mm_cvtsi128_si32(d[2]);
4370
31.2k
  *(int *)(dst + stride * 3) = _mm_cvtsi128_si32(d[3]);
4371
31.2k
  return;
4372
31.2k
}
4373
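The zone-3 kernels in this block all follow the same pattern: run the zone-1 interpolation along left[] (which walks down the block's columns) into temporary registers or a scratch buffer, then transpose so those columns land as rows of dst. dr_prediction_z3_4x4_avx2 above does this with dr_prediction_z1_HxW_internal_avx2 plus transpose4x8_8x4_low_sse2; the larger variants below do the same with transpose16x32_avx2 or the scratch-buffer transpose() calls.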
4374
static void dr_prediction_z3_8x8_avx2(uint8_t *dst, ptrdiff_t stride,
4375
                                      const uint8_t *left, int upsample_left,
4376
44.9k
                                      int dy) {
4377
44.9k
  __m128i dstvec[8], d[8];
4378
4379
44.9k
  dr_prediction_z1_HxW_internal_avx2(8, 8, dstvec, left, upsample_left, dy);
4380
44.9k
  transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4],
4381
44.9k
                    &dstvec[5], &dstvec[6], &dstvec[7], &d[0], &d[1], &d[2],
4382
44.9k
                    &d[3]);
4383
4384
44.9k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
4385
44.9k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), _mm_srli_si128(d[0], 8));
4386
44.9k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[1]);
4387
44.9k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), _mm_srli_si128(d[1], 8));
4388
44.9k
  _mm_storel_epi64((__m128i *)(dst + 4 * stride), d[2]);
4389
44.9k
  _mm_storel_epi64((__m128i *)(dst + 5 * stride), _mm_srli_si128(d[2], 8));
4390
44.9k
  _mm_storel_epi64((__m128i *)(dst + 6 * stride), d[3]);
4391
44.9k
  _mm_storel_epi64((__m128i *)(dst + 7 * stride), _mm_srli_si128(d[3], 8));
4392
44.9k
}
4393
4394
static void dr_prediction_z3_4x8_avx2(uint8_t *dst, ptrdiff_t stride,
4395
                                      const uint8_t *left, int upsample_left,
4396
13.4k
                                      int dy) {
4397
13.4k
  __m128i dstvec[4], d[8];
4398
4399
13.4k
  dr_prediction_z1_HxW_internal_avx2(8, 4, dstvec, left, upsample_left, dy);
4400
13.4k
  transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &d[0],
4401
13.4k
                        &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
4402
120k
  for (int i = 0; i < 8; i++) {
4403
107k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
4404
107k
  }
4405
13.4k
}
4406
4407
static void dr_prediction_z3_8x4_avx2(uint8_t *dst, ptrdiff_t stride,
4408
                                      const uint8_t *left, int upsample_left,
4409
21.2k
                                      int dy) {
4410
21.2k
  __m128i dstvec[8], d[4];
4411
4412
21.2k
  dr_prediction_z1_HxW_internal_avx2(4, 8, dstvec, left, upsample_left, dy);
4413
21.2k
  transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
4414
21.2k
                        &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7], &d[0],
4415
21.2k
                        &d[1], &d[2], &d[3]);
4416
21.2k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
4417
21.2k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
4418
21.2k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
4419
21.2k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
4420
21.2k
}
4421
4422
static void dr_prediction_z3_8x16_avx2(uint8_t *dst, ptrdiff_t stride,
4423
                                       const uint8_t *left, int upsample_left,
4424
11.3k
                                       int dy) {
4425
11.3k
  __m128i dstvec[8], d[8];
4426
4427
11.3k
  dr_prediction_z1_HxW_internal_avx2(16, 8, dstvec, left, upsample_left, dy);
4428
11.3k
  transpose8x16_16x8_sse2(dstvec, dstvec + 1, dstvec + 2, dstvec + 3,
4429
11.3k
                          dstvec + 4, dstvec + 5, dstvec + 6, dstvec + 7, d,
4430
11.3k
                          d + 1, d + 2, d + 3, d + 4, d + 5, d + 6, d + 7);
4431
102k
  for (int i = 0; i < 8; i++) {
4432
91.0k
    _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
4433
91.0k
    _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
4434
91.0k
                     _mm_srli_si128(d[i], 8));
4435
91.0k
  }
4436
11.3k
}
4437
4438
static void dr_prediction_z3_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
4439
                                       const uint8_t *left, int upsample_left,
4440
22.2k
                                       int dy) {
4441
22.2k
  __m128i dstvec[16], d[16];
4442
4443
22.2k
  dr_prediction_z1_HxW_internal_avx2(8, 16, dstvec, left, upsample_left, dy);
4444
22.2k
  transpose16x8_8x16_sse2(
4445
22.2k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4446
22.2k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4447
22.2k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4448
22.2k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4449
4450
200k
  for (int i = 0; i < 8; i++) {
4451
178k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4452
178k
  }
4453
22.2k
}
4454
4455
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4456
static void dr_prediction_z3_4x16_avx2(uint8_t *dst, ptrdiff_t stride,
4457
                                       const uint8_t *left, int upsample_left,
4458
8.50k
                                       int dy) {
4459
8.50k
  __m128i dstvec[4], d[16];
4460
4461
8.50k
  dr_prediction_z1_HxW_internal_avx2(16, 4, dstvec, left, upsample_left, dy);
4462
8.50k
  transpose4x16_sse2(dstvec, d);
4463
144k
  for (int i = 0; i < 16; i++) {
4464
136k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
4465
136k
  }
4466
8.50k
}
4467
4468
static void dr_prediction_z3_16x4_avx2(uint8_t *dst, ptrdiff_t stride,
4469
                                       const uint8_t *left, int upsample_left,
4470
28.2k
                                       int dy) {
4471
28.2k
  __m128i dstvec[16], d[8];
4472
4473
28.2k
  dr_prediction_z1_HxW_internal_avx2(4, 16, dstvec, left, upsample_left, dy);
4474
141k
  for (int i = 4; i < 8; i++) {
4475
112k
    d[i] = _mm_setzero_si128();
4476
112k
  }
4477
28.2k
  transpose16x8_8x16_sse2(
4478
28.2k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4479
28.2k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4480
28.2k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4481
28.2k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4482
4483
141k
  for (int i = 0; i < 4; i++) {
4484
112k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4485
112k
  }
4486
28.2k
}
4487
4488
static void dr_prediction_z3_8x32_avx2(uint8_t *dst, ptrdiff_t stride,
4489
                                       const uint8_t *left, int upsample_left,
4490
4.58k
                                       int dy) {
4491
4.58k
  __m256i dstvec[16], d[16];
4492
4493
4.58k
  dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left, dy);
4494
41.2k
  for (int i = 8; i < 16; i++) {
4495
36.6k
    dstvec[i] = _mm256_setzero_si256();
4496
36.6k
  }
4497
4.58k
  transpose16x32_avx2(dstvec, d);
4498
4499
77.8k
  for (int i = 0; i < 16; i++) {
4500
73.2k
    _mm_storel_epi64((__m128i *)(dst + i * stride),
4501
73.2k
                     _mm256_castsi256_si128(d[i]));
4502
73.2k
  }
4503
77.8k
  for (int i = 0; i < 16; i++) {
4504
73.2k
    _mm_storel_epi64((__m128i *)(dst + (i + 16) * stride),
4505
73.2k
                     _mm256_extracti128_si256(d[i], 1));
4506
73.2k
  }
4507
4.58k
}
4508
4509
static void dr_prediction_z3_32x8_avx2(uint8_t *dst, ptrdiff_t stride,
4510
                                       const uint8_t *left, int upsample_left,
4511
20.8k
                                       int dy) {
4512
20.8k
  __m128i dstvec[32], d[16];
4513
4514
20.8k
  dr_prediction_z1_HxW_internal_avx2(8, 32, dstvec, left, upsample_left, dy);
4515
4516
20.8k
  transpose16x8_8x16_sse2(
4517
20.8k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4518
20.8k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4519
20.8k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4520
20.8k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4521
20.8k
  transpose16x8_8x16_sse2(
4522
20.8k
      &dstvec[0 + 16], &dstvec[1 + 16], &dstvec[2 + 16], &dstvec[3 + 16],
4523
20.8k
      &dstvec[4 + 16], &dstvec[5 + 16], &dstvec[6 + 16], &dstvec[7 + 16],
4524
20.8k
      &dstvec[8 + 16], &dstvec[9 + 16], &dstvec[10 + 16], &dstvec[11 + 16],
4525
20.8k
      &dstvec[12 + 16], &dstvec[13 + 16], &dstvec[14 + 16], &dstvec[15 + 16],
4526
20.8k
      &d[0 + 8], &d[1 + 8], &d[2 + 8], &d[3 + 8], &d[4 + 8], &d[5 + 8],
4527
20.8k
      &d[6 + 8], &d[7 + 8]);
4528
4529
187k
  for (int i = 0; i < 8; i++) {
4530
166k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4531
166k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 8]);
4532
166k
  }
4533
20.8k
}
4534
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4535
4536
static void dr_prediction_z3_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
4537
                                        const uint8_t *left, int upsample_left,
4538
39.4k
                                        int dy) {
4539
39.4k
  __m128i dstvec[16], d[16];
4540
4541
39.4k
  dr_prediction_z1_HxW_internal_avx2(16, 16, dstvec, left, upsample_left, dy);
4542
39.4k
  transpose16x16_sse2(dstvec, d);
4543
4544
669k
  for (int i = 0; i < 16; i++) {
4545
630k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4546
630k
  }
4547
39.4k
}
4548
4549
static void dr_prediction_z3_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
4550
                                        const uint8_t *left, int upsample_left,
4551
36.3k
                                        int dy) {
4552
36.3k
  __m256i dstvec[32], d[32];
4553
4554
36.3k
  dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left, dy);
4555
36.3k
  transpose16x32_avx2(dstvec, d);
4556
36.3k
  transpose16x32_avx2(dstvec + 16, d + 16);
4557
618k
  for (int j = 0; j < 16; j++) {
4558
581k
    _mm_storeu_si128((__m128i *)(dst + j * stride),
4559
581k
                     _mm256_castsi256_si128(d[j]));
4560
581k
    _mm_storeu_si128((__m128i *)(dst + j * stride + 16),
4561
581k
                     _mm256_castsi256_si128(d[j + 16]));
4562
581k
  }
4563
618k
  for (int j = 0; j < 16; j++) {
4564
581k
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
4565
581k
                     _mm256_extracti128_si256(d[j], 1));
4566
581k
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride + 16),
4567
581k
                     _mm256_extracti128_si256(d[j + 16], 1));
4568
581k
  }
4569
36.3k
}
4570
4571
static void dr_prediction_z3_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
4572
                                        const uint8_t *left, int upsample_left,
4573
9.74k
                                        int dy) {
4574
9.74k
  DECLARE_ALIGNED(16, uint8_t, dstT[64 * 64]);
4575
9.74k
  dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
4576
9.74k
  transpose(dstT, 64, dst, stride, 64, 64);
4577
9.74k
}
4578
4579
static void dr_prediction_z3_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
4580
                                        const uint8_t *left, int upsample_left,
4581
8.18k
                                        int dy) {
4582
8.18k
  __m256i dstvec[16], d[16];
4583
4584
8.18k
  dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left, dy);
4585
8.18k
  transpose16x32_avx2(dstvec, d);
4586
  // store
4587
139k
  for (int j = 0; j < 16; j++) {
4588
131k
    _mm_storeu_si128((__m128i *)(dst + j * stride),
4589
131k
                     _mm256_castsi256_si128(d[j]));
4590
131k
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
4591
131k
                     _mm256_extracti128_si256(d[j], 1));
4592
131k
  }
4593
8.18k
}
4594
4595
static void dr_prediction_z3_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
4596
                                        const uint8_t *left, int upsample_left,
4597
8.45k
                                        int dy) {
4598
8.45k
  __m128i dstvec[32], d[16];
4599
4600
8.45k
  dr_prediction_z1_HxW_internal_avx2(16, 32, dstvec, left, upsample_left, dy);
4601
25.3k
  for (int i = 0; i < 32; i += 16) {
4602
16.9k
    transpose16x16_sse2((dstvec + i), d);
4603
287k
    for (int j = 0; j < 16; j++) {
4604
270k
      _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
4605
270k
    }
4606
16.9k
  }
4607
8.45k
}
4608
4609
static void dr_prediction_z3_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
4610
                                        const uint8_t *left, int upsample_left,
4611
748
                                        int dy) {
4612
748
  uint8_t dstT[64 * 32];
4613
748
  dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
4614
748
  transpose(dstT, 64, dst, stride, 32, 64);
4615
748
}
4616
4617
static void dr_prediction_z3_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
4618
                                        const uint8_t *left, int upsample_left,
4619
1.57k
                                        int dy) {
4620
1.57k
  uint8_t dstT[32 * 64];
4621
1.57k
  dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy);
4622
1.57k
  transpose(dstT, 32, dst, stride, 64, 32);
4623
1.57k
  return;
4624
1.57k
}
4625
4626
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4627
static void dr_prediction_z3_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
4628
                                        const uint8_t *left, int upsample_left,
4629
1.49k
                                        int dy) {
4630
1.49k
  uint8_t dstT[64 * 16];
4631
1.49k
  dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
4632
1.49k
  transpose(dstT, 64, dst, stride, 16, 64);
4633
1.49k
}
4634
4635
static void dr_prediction_z3_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
4636
                                        const uint8_t *left, int upsample_left,
4637
7.76k
                                        int dy) {
4638
7.76k
  __m128i dstvec[64], d[16];
4639
4640
7.76k
  dr_prediction_z1_HxW_internal_avx2(16, 64, dstvec, left, upsample_left, dy);
4641
38.8k
  for (int i = 0; i < 64; i += 16) {
4642
31.0k
    transpose16x16_sse2((dstvec + i), d);
4643
527k
    for (int j = 0; j < 16; j++) {
4644
496k
      _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
4645
496k
    }
4646
31.0k
  }
4647
7.76k
}
4648
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4649
4650
void av1_dr_prediction_z3_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
4651
                               const uint8_t *above, const uint8_t *left,
4652
320k
                               int upsample_left, int dx, int dy) {
4653
320k
  (void)above;
4654
320k
  (void)dx;
4655
320k
  assert(dx == 1);
4656
320k
  assert(dy > 0);
4657
4658
320k
  if (bw == bh) {
4659
161k
    switch (bw) {
4660
31.2k
      case 4:
4661
31.2k
        dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy);
4662
31.2k
        break;
4663
44.9k
      case 8:
4664
44.9k
        dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy);
4665
44.9k
        break;
4666
39.4k
      case 16:
4667
39.4k
        dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy);
4668
39.4k
        break;
4669
36.3k
      case 32:
4670
36.3k
        dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy);
4671
36.3k
        break;
4672
9.74k
      case 64:
4673
9.74k
        dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy);
4674
9.74k
        break;
4675
161k
    }
4676
161k
  } else {
4677
158k
    if (bw < bh) {
4678
48.3k
      if (bw + bw == bh) {
4679
33.7k
        switch (bw) {
4680
13.4k
          case 4:
4681
13.4k
            dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left, dy);
4682
13.4k
            break;
4683
11.3k
          case 8:
4684
11.3k
            dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left, dy);
4685
11.3k
            break;
4686
8.18k
          case 16:
4687
8.18k
            dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left, dy);
4688
8.18k
            break;
4689
748
          case 32:
4690
748
            dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left, dy);
4691
748
            break;
4692
33.7k
        }
4693
33.7k
      } else {
4694
14.5k
        switch (bw) {
4695
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4696
8.50k
          case 4:
4697
8.50k
            dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left, dy);
4698
8.50k
            break;
4699
4.58k
          case 8:
4700
4.58k
            dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left, dy);
4701
4.58k
            break;
4702
1.49k
          case 16:
4703
1.49k
            dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left, dy);
4704
1.49k
            break;
4705
14.5k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4706
14.5k
        }
4707
14.5k
      }
4708
110k
    } else {
4709
110k
      if (bh + bh == bw) {
4710
53.5k
        switch (bh) {
4711
21.2k
          case 4:
4712
21.2k
            dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left, dy);
4713
21.2k
            break;
4714
22.2k
          case 8:
4715
22.2k
            dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left, dy);
4716
22.2k
            break;
4717
8.45k
          case 16:
4718
8.45k
            dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left, dy);
4719
8.45k
            break;
4720
1.57k
          case 32:
4721
1.57k
            dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left, dy);
4722
1.57k
            break;
4723
53.5k
        }
4724
56.8k
      } else {
4725
56.8k
        switch (bh) {
4726
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4727
28.2k
          case 4:
4728
28.2k
            dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left, dy);
4729
28.2k
            break;
4730
20.8k
          case 8:
4731
20.8k
            dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left, dy);
4732
20.8k
            break;
4733
7.76k
          case 16:
4734
7.76k
            dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left, dy);
4735
7.76k
            break;
4736
56.8k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4737
56.8k
        }
4738
56.8k
      }
4739
110k
    }
4740
158k
  }
4741
320k
}
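The dispatch above splits on aspect ratio rather than on a flat (bw, bh) table: square blocks switch on bw, 1:2 / 2:1 blocks take the bw + bw == bh or bh + bh == bw branches, and the remaining 1:4 / 4:1 shapes (decoder or non-realtime builds only) fall through to the second switch in each branch. For example, a 16x64 block (bw = 16, bh = 64) fails bw + bw == bh and dispatches to dr_prediction_z3_16x64_avx2, while 32x16 satisfies bh + bh == bw and goes to dr_prediction_z3_32x16_avx2.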