Coverage Report

Created: 2025-11-16 07:09

/src/aom/aom_dsp/x86/intrapred_avx2.c
Line | Count | Source
1
/*
2
 * Copyright (c) 2017, Alliance for Open Media. All rights reserved.
3
 *
4
 * This source code is subject to the terms of the BSD 2 Clause License and
5
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6
 * was not distributed with this source code in the LICENSE file, you can
7
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8
 * Media Patent License 1.0 was not distributed with this source code in the
9
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10
 */
11
12
#include <immintrin.h>
13
14
#include "config/av1_rtcd.h"
15
#include "aom_dsp/x86/intrapred_x86.h"
16
#include "aom_dsp/x86/intrapred_utils.h"
17
#include "aom_dsp/x86/lpf_common_sse2.h"
18
19
324k
static inline __m256i dc_sum_64(const uint8_t *ref) {
20
324k
  const __m256i x0 = _mm256_loadu_si256((const __m256i *)ref);
21
324k
  const __m256i x1 = _mm256_loadu_si256((const __m256i *)(ref + 32));
22
324k
  const __m256i zero = _mm256_setzero_si256();
23
324k
  __m256i y0 = _mm256_sad_epu8(x0, zero);
24
324k
  __m256i y1 = _mm256_sad_epu8(x1, zero);
25
324k
  y0 = _mm256_add_epi64(y0, y1);
26
324k
  __m256i u0 = _mm256_permute2x128_si256(y0, y0, 1);
27
324k
  y0 = _mm256_add_epi64(u0, y0);
28
324k
  u0 = _mm256_unpackhi_epi64(y0, y0);
29
324k
  return _mm256_add_epi16(y0, u0);
30
324k
}
31
32
2.09M
static inline __m256i dc_sum_32(const uint8_t *ref) {
33
2.09M
  const __m256i x = _mm256_loadu_si256((const __m256i *)ref);
34
2.09M
  const __m256i zero = _mm256_setzero_si256();
35
2.09M
  __m256i y = _mm256_sad_epu8(x, zero);
36
2.09M
  __m256i u = _mm256_permute2x128_si256(y, y, 1);
37
2.09M
  y = _mm256_add_epi64(u, y);
38
2.09M
  u = _mm256_unpackhi_epi64(y, y);
39
2.09M
  return _mm256_add_epi16(y, u);
40
2.09M
}
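A minimal scalar sketch (illustrative only, not part of the measured source) of what dc_sum_64() and dc_sum_32() compute: _mm256_sad_epu8 against zero sums each 8-byte group, and the add/permute/unpack steps reduce those partial sums to a single total in the low lanes.

// Hypothetical scalar equivalent of dc_sum_32()/dc_sum_64(): sum n bytes.
static unsigned int dc_sum_ref(const uint8_t *ref, int n) {
  unsigned int sum = 0;
  for (int i = 0; i < n; ++i) sum += ref[i];
  return sum;  // the AVX2 versions hold this value in the low 16-bit lane
}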
41
42
static inline void row_store_32xh(const __m256i *r, int height, uint8_t *dst,
43
1.33M
                                  ptrdiff_t stride) {
44
41.8M
  for (int i = 0; i < height; ++i) {
45
40.5M
    _mm256_storeu_si256((__m256i *)dst, *r);
46
40.5M
    dst += stride;
47
40.5M
  }
48
1.33M
}
49
50
static inline void row_store_32x2xh(const __m256i *r0, const __m256i *r1,
51
                                    int height, uint8_t *dst,
52
3.74k
                                    ptrdiff_t stride) {
53
174k
  for (int i = 0; i < height; ++i) {
54
170k
    _mm256_storeu_si256((__m256i *)dst, *r0);
55
170k
    _mm256_storeu_si256((__m256i *)(dst + 32), *r1);
56
170k
    dst += stride;
57
170k
  }
58
3.74k
}
59
60
static inline void row_store_64xh(const __m256i *r, int height, uint8_t *dst,
61
222k
                                  ptrdiff_t stride) {
62
11.1M
  for (int i = 0; i < height; ++i) {
63
10.9M
    _mm256_storeu_si256((__m256i *)dst, *r);
64
10.9M
    _mm256_storeu_si256((__m256i *)(dst + 32), *r);
65
10.9M
    dst += stride;
66
10.9M
  }
67
222k
}
68
69
#if CONFIG_AV1_HIGHBITDEPTH
70
static DECLARE_ALIGNED(16, uint8_t, HighbdLoadMaskx[8][16]) = {
71
  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
72
  { 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 },
73
  { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
74
  { 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
75
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7 },
76
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 4, 5 },
77
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3 },
78
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
79
};
80
81
static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx4[4][16]) = {
82
  { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 },
83
  { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 4, 5, 8, 9, 12, 13 },
84
  { 0, 1, 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 6, 7, 10, 11 },
85
  { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 0, 1, 8, 9 }
86
};
87
88
static DECLARE_ALIGNED(16, uint8_t, HighbdEvenOddMaskx[8][32]) = {
89
  { 0, 1, 4, 5, 8,  9,  12, 13, 16, 17, 20, 21, 24, 25, 28, 29,
90
    2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 },
91
  { 0, 1, 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27,
92
    0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 },
93
  { 0, 1, 0, 1, 4, 5, 8,  9,  12, 13, 16, 17, 20, 21, 24, 25,
94
    0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27 },
95
  { 0, 1, 0, 1, 0, 1, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23,
96
    0, 1, 0, 1, 0, 1, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25 },
97
  { 0, 1, 0, 1, 0, 1, 0, 1, 8,  9,  12, 13, 16, 17, 20, 21,
98
    0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19, 22, 23 },
99
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 10, 11, 14, 15, 18, 19,
100
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17, 20, 21 },
101
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 12, 13, 16, 17,
102
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15, 18, 19 },
103
  { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 14, 15,
104
    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 16, 17 }
105
};
106
107
static DECLARE_ALIGNED(32, uint16_t, HighbdBaseMask[17][16]) = {
108
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
109
  { 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
110
  { 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
111
  { 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
112
  { 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
113
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
114
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115
    0 },
116
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0, 0, 0, 0,
117
    0, 0 },
118
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0, 0,
119
    0, 0, 0, 0 },
120
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0,
121
    0, 0, 0, 0, 0, 0 },
122
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
123
    0xffff, 0, 0, 0, 0, 0, 0 },
124
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
125
    0xffff, 0xffff, 0, 0, 0, 0, 0 },
126
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
127
    0xffff, 0xffff, 0xffff, 0, 0, 0, 0 },
128
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
129
    0xffff, 0xffff, 0xffff, 0xffff, 0, 0, 0 },
130
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
131
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0, 0 },
132
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
133
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0 },
134
  { 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
135
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff }
136
};
137
138
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
139
64.3k
static inline void highbd_transpose16x4_8x8_sse2(__m128i *x, __m128i *d) {
140
64.3k
  __m128i r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
141
142
64.3k
  r0 = _mm_unpacklo_epi16(x[0], x[1]);
143
64.3k
  r1 = _mm_unpacklo_epi16(x[2], x[3]);
144
64.3k
  r2 = _mm_unpacklo_epi16(x[4], x[5]);
145
64.3k
  r3 = _mm_unpacklo_epi16(x[6], x[7]);
146
147
64.3k
  r4 = _mm_unpacklo_epi16(x[8], x[9]);
148
64.3k
  r5 = _mm_unpacklo_epi16(x[10], x[11]);
149
64.3k
  r6 = _mm_unpacklo_epi16(x[12], x[13]);
150
64.3k
  r7 = _mm_unpacklo_epi16(x[14], x[15]);
151
152
64.3k
  r8 = _mm_unpacklo_epi32(r0, r1);
153
64.3k
  r9 = _mm_unpackhi_epi32(r0, r1);
154
64.3k
  r10 = _mm_unpacklo_epi32(r2, r3);
155
64.3k
  r11 = _mm_unpackhi_epi32(r2, r3);
156
157
64.3k
  r12 = _mm_unpacklo_epi32(r4, r5);
158
64.3k
  r13 = _mm_unpackhi_epi32(r4, r5);
159
64.3k
  r14 = _mm_unpacklo_epi32(r6, r7);
160
64.3k
  r15 = _mm_unpackhi_epi32(r6, r7);
161
162
64.3k
  r0 = _mm_unpacklo_epi64(r8, r9);
163
64.3k
  r1 = _mm_unpackhi_epi64(r8, r9);
164
64.3k
  r2 = _mm_unpacklo_epi64(r10, r11);
165
64.3k
  r3 = _mm_unpackhi_epi64(r10, r11);
166
167
64.3k
  r4 = _mm_unpacklo_epi64(r12, r13);
168
64.3k
  r5 = _mm_unpackhi_epi64(r12, r13);
169
64.3k
  r6 = _mm_unpacklo_epi64(r14, r15);
170
64.3k
  r7 = _mm_unpackhi_epi64(r14, r15);
171
172
64.3k
  d[0] = _mm_unpacklo_epi64(r0, r2);
173
64.3k
  d[1] = _mm_unpacklo_epi64(r4, r6);
174
64.3k
  d[2] = _mm_unpacklo_epi64(r1, r3);
175
64.3k
  d[3] = _mm_unpacklo_epi64(r5, r7);
176
177
64.3k
  d[4] = _mm_unpackhi_epi64(r0, r2);
178
64.3k
  d[5] = _mm_unpackhi_epi64(r4, r6);
179
64.3k
  d[6] = _mm_unpackhi_epi64(r1, r3);
180
64.3k
  d[7] = _mm_unpackhi_epi64(r5, r7);
181
64.3k
}
182
183
32.1k
static inline void highbd_transpose4x16_avx2(__m256i *x, __m256i *d) {
184
32.1k
  __m256i w0, w1, w2, w3, ww0, ww1;
185
186
32.1k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);  // 00 10 01 11 02 12 03 13
187
32.1k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);  // 20 30 21 31 22 32 23 33
188
32.1k
  w2 = _mm256_unpackhi_epi16(x[0], x[1]);  // 40 50 41 51 42 52 43 53
189
32.1k
  w3 = _mm256_unpackhi_epi16(x[2], x[3]);  // 60 70 61 71 62 72 63 73
190
191
32.1k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 00 10 20 30 01 11 21 31
192
32.1k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 40 50 60 70 41 51 61 71
193
194
32.1k
  d[0] = _mm256_unpacklo_epi64(ww0, ww1);  // 00 10 20 30 40 50 60 70
195
32.1k
  d[1] = _mm256_unpackhi_epi64(ww0, ww1);  // 01 11 21 31 41 51 61 71
196
197
32.1k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 02 12 22 32 03 13 23 33
198
32.1k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 42 52 62 72 43 53 63 73
199
200
32.1k
  d[2] = _mm256_unpacklo_epi64(ww0, ww1);  // 02 12 22 32 42 52 62 72
201
32.1k
  d[3] = _mm256_unpackhi_epi64(ww0, ww1);  // 03 13 23 33 43 53 63 73
202
32.1k
}
203
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
204
205
163k
static inline void highbd_transpose8x16_16x8_avx2(__m256i *x, __m256i *d) {
206
163k
  __m256i w0, w1, w2, w3, ww0, ww1;
207
208
163k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);  // 00 10 01 11 02 12 03 13
209
163k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);  // 20 30 21 31 22 32 23 33
210
163k
  w2 = _mm256_unpacklo_epi16(x[4], x[5]);  // 40 50 41 51 42 52 43 53
211
163k
  w3 = _mm256_unpacklo_epi16(x[6], x[7]);  // 60 70 61 71 62 72 63 73
212
213
163k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 00 10 20 30 01 11 21 31
214
163k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 40 50 60 70 41 51 61 71
215
216
163k
  d[0] = _mm256_unpacklo_epi64(ww0, ww1);  // 00 10 20 30 40 50 60 70
217
163k
  d[1] = _mm256_unpackhi_epi64(ww0, ww1);  // 01 11 21 31 41 51 61 71
218
219
163k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 02 12 22 32 03 13 23 33
220
163k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 42 52 62 72 43 53 63 73
221
222
163k
  d[2] = _mm256_unpacklo_epi64(ww0, ww1);  // 02 12 22 32 42 52 62 72
223
163k
  d[3] = _mm256_unpackhi_epi64(ww0, ww1);  // 03 13 23 33 43 53 63 73
224
225
163k
  w0 = _mm256_unpackhi_epi16(x[0], x[1]);  // 04 14 05 15 06 16 07 17
226
163k
  w1 = _mm256_unpackhi_epi16(x[2], x[3]);  // 24 34 25 35 26 36 27 37
227
163k
  w2 = _mm256_unpackhi_epi16(x[4], x[5]);  // 44 54 45 55 46 56 47 57
228
163k
  w3 = _mm256_unpackhi_epi16(x[6], x[7]);  // 64 74 65 75 66 76 67 77
229
230
163k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  // 04 14 24 34 05 15 25 35
231
163k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  // 44 54 64 74 45 55 65 75
232
233
163k
  d[4] = _mm256_unpacklo_epi64(ww0, ww1);  // 04 14 24 34 44 54 64 74
234
163k
  d[5] = _mm256_unpackhi_epi64(ww0, ww1);  // 05 15 25 35 45 55 65 75
235
236
163k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  // 06 16 26 36 07 17 27 37
237
163k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  // 46 56 66 76 47 57 67 77
238
239
163k
  d[6] = _mm256_unpacklo_epi64(ww0, ww1);  // 06 16 26 36 46 56 66 76
240
163k
  d[7] = _mm256_unpackhi_epi64(ww0, ww1);  // 07 17 27 37 47 57 67 77
241
163k
}
242
243
975k
static inline void highbd_transpose16x16_avx2(__m256i *x, __m256i *d) {
244
975k
  __m256i w0, w1, w2, w3, ww0, ww1;
245
975k
  __m256i dd[16];
246
975k
  w0 = _mm256_unpacklo_epi16(x[0], x[1]);
247
975k
  w1 = _mm256_unpacklo_epi16(x[2], x[3]);
248
975k
  w2 = _mm256_unpacklo_epi16(x[4], x[5]);
249
975k
  w3 = _mm256_unpacklo_epi16(x[6], x[7]);
250
251
975k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  //
252
975k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  //
253
254
975k
  dd[0] = _mm256_unpacklo_epi64(ww0, ww1);
255
975k
  dd[1] = _mm256_unpackhi_epi64(ww0, ww1);
256
257
975k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  //
258
975k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  //
259
260
975k
  dd[2] = _mm256_unpacklo_epi64(ww0, ww1);
261
975k
  dd[3] = _mm256_unpackhi_epi64(ww0, ww1);
262
263
975k
  w0 = _mm256_unpackhi_epi16(x[0], x[1]);
264
975k
  w1 = _mm256_unpackhi_epi16(x[2], x[3]);
265
975k
  w2 = _mm256_unpackhi_epi16(x[4], x[5]);
266
975k
  w3 = _mm256_unpackhi_epi16(x[6], x[7]);
267
268
975k
  ww0 = _mm256_unpacklo_epi32(w0, w1);  //
269
975k
  ww1 = _mm256_unpacklo_epi32(w2, w3);  //
270
271
975k
  dd[4] = _mm256_unpacklo_epi64(ww0, ww1);
272
975k
  dd[5] = _mm256_unpackhi_epi64(ww0, ww1);
273
274
975k
  ww0 = _mm256_unpackhi_epi32(w0, w1);  //
275
975k
  ww1 = _mm256_unpackhi_epi32(w2, w3);  //
276
277
975k
  dd[6] = _mm256_unpacklo_epi64(ww0, ww1);
278
975k
  dd[7] = _mm256_unpackhi_epi64(ww0, ww1);
279
280
975k
  w0 = _mm256_unpacklo_epi16(x[8], x[9]);
281
975k
  w1 = _mm256_unpacklo_epi16(x[10], x[11]);
282
975k
  w2 = _mm256_unpacklo_epi16(x[12], x[13]);
283
975k
  w3 = _mm256_unpacklo_epi16(x[14], x[15]);
284
285
975k
  ww0 = _mm256_unpacklo_epi32(w0, w1);
286
975k
  ww1 = _mm256_unpacklo_epi32(w2, w3);
287
288
975k
  dd[8] = _mm256_unpacklo_epi64(ww0, ww1);
289
975k
  dd[9] = _mm256_unpackhi_epi64(ww0, ww1);
290
291
975k
  ww0 = _mm256_unpackhi_epi32(w0, w1);
292
975k
  ww1 = _mm256_unpackhi_epi32(w2, w3);
293
294
975k
  dd[10] = _mm256_unpacklo_epi64(ww0, ww1);
295
975k
  dd[11] = _mm256_unpackhi_epi64(ww0, ww1);
296
297
975k
  w0 = _mm256_unpackhi_epi16(x[8], x[9]);
298
975k
  w1 = _mm256_unpackhi_epi16(x[10], x[11]);
299
975k
  w2 = _mm256_unpackhi_epi16(x[12], x[13]);
300
975k
  w3 = _mm256_unpackhi_epi16(x[14], x[15]);
301
302
975k
  ww0 = _mm256_unpacklo_epi32(w0, w1);
303
975k
  ww1 = _mm256_unpacklo_epi32(w2, w3);
304
305
975k
  dd[12] = _mm256_unpacklo_epi64(ww0, ww1);
306
975k
  dd[13] = _mm256_unpackhi_epi64(ww0, ww1);
307
308
975k
  ww0 = _mm256_unpackhi_epi32(w0, w1);
309
975k
  ww1 = _mm256_unpackhi_epi32(w2, w3);
310
311
975k
  dd[14] = _mm256_unpacklo_epi64(ww0, ww1);
312
975k
  dd[15] = _mm256_unpackhi_epi64(ww0, ww1);
313
314
8.78M
  for (int i = 0; i < 8; i++) {
315
7.80M
    d[i] = _mm256_insertf128_si256(dd[i], _mm256_castsi256_si128(dd[i + 8]), 1);
316
7.80M
    d[i + 8] = _mm256_insertf128_si256(dd[i + 8],
317
7.80M
                                       _mm256_extracti128_si256(dd[i], 1), 0);
318
7.80M
  }
319
975k
}
320
#endif  // CONFIG_AV1_HIGHBITDEPTH
321
322
void aom_dc_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
323
946k
                                 const uint8_t *above, const uint8_t *left) {
324
946k
  const __m256i sum_above = dc_sum_32(above);
325
946k
  __m256i sum_left = dc_sum_32(left);
326
946k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
327
946k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
328
946k
  sum_left = _mm256_add_epi16(sum_left, thirtytwo);
329
946k
  sum_left = _mm256_srai_epi16(sum_left, 6);
330
946k
  const __m256i zero = _mm256_setzero_si256();
331
946k
  __m256i row = _mm256_shuffle_epi8(sum_left, zero);
332
946k
  row_store_32xh(&row, 32, dst, stride);
333
946k
}
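As a hedged reference (plain C, assuming the surrounding file's headers for uint8_t/ptrdiff_t; not taken from the measured source), the 32x32 DC predictor above is equivalent to averaging the 64 edge pixels with rounding, (sum + 32) >> 6, and broadcasting that value across the block:

static void dc_predictor_32x32_ref(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *above, const uint8_t *left) {
  int sum = 0;
  for (int i = 0; i < 32; ++i) sum += above[i] + left[i];
  const uint8_t dc = (uint8_t)((sum + 32) >> 6);  // rounded mean of 64 pixels
  for (int r = 0; r < 32; ++r, dst += stride)
    for (int c = 0; c < 32; ++c) dst[c] = dc;
}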
334
335
void aom_dc_top_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
336
                                     const uint8_t *above,
337
65.2k
                                     const uint8_t *left) {
338
65.2k
  __m256i sum = dc_sum_32(above);
339
65.2k
  (void)left;
340
341
65.2k
  const __m256i sixteen = _mm256_set1_epi16(16);
342
65.2k
  sum = _mm256_add_epi16(sum, sixteen);
343
65.2k
  sum = _mm256_srai_epi16(sum, 5);
344
65.2k
  const __m256i zero = _mm256_setzero_si256();
345
65.2k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
346
65.2k
  row_store_32xh(&row, 32, dst, stride);
347
65.2k
}
348
349
void aom_dc_left_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
350
                                      const uint8_t *above,
351
106k
                                      const uint8_t *left) {
352
106k
  __m256i sum = dc_sum_32(left);
353
106k
  (void)above;
354
355
106k
  const __m256i sixteen = _mm256_set1_epi16(16);
356
106k
  sum = _mm256_add_epi16(sum, sixteen);
357
106k
  sum = _mm256_srai_epi16(sum, 5);
358
106k
  const __m256i zero = _mm256_setzero_si256();
359
106k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
360
106k
  row_store_32xh(&row, 32, dst, stride);
361
106k
}
362
363
void aom_dc_128_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
364
                                     const uint8_t *above,
365
21.3k
                                     const uint8_t *left) {
366
21.3k
  (void)above;
367
21.3k
  (void)left;
368
21.3k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
369
21.3k
  row_store_32xh(&row, 32, dst, stride);
370
21.3k
}
371
372
void aom_v_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
373
27.1k
                                const uint8_t *above, const uint8_t *left) {
374
27.1k
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
375
27.1k
  (void)left;
376
27.1k
  row_store_32xh(&row, 32, dst, stride);
377
27.1k
}
378
379
// There are 32 rows together. This function handles lines
380
// 0,1,2,3 and 16,17,18,19; the next call handles
381
// 4,5,6,7 and 20,21,22,23, so four calls in total
382
// cover all 32 rows.
383
static inline void h_predictor_32x8line(const __m256i *row, uint8_t *dst,
384
590k
                                        ptrdiff_t stride) {
385
590k
  __m256i t[4];
386
590k
  __m256i m = _mm256_setzero_si256();
387
590k
  const __m256i inc = _mm256_set1_epi8(4);
388
590k
  int i;
389
390
2.95M
  for (i = 0; i < 4; i++) {
391
2.36M
    t[i] = _mm256_shuffle_epi8(*row, m);
392
2.36M
    __m256i r0 = _mm256_permute2x128_si256(t[i], t[i], 0);
393
2.36M
    __m256i r1 = _mm256_permute2x128_si256(t[i], t[i], 0x11);
394
2.36M
    _mm256_storeu_si256((__m256i *)dst, r0);
395
2.36M
    _mm256_storeu_si256((__m256i *)(dst + (stride << 4)), r1);
396
2.36M
    dst += stride;
397
2.36M
    m = _mm256_add_epi8(m, inc);
398
2.36M
  }
399
590k
}
400
401
void aom_h_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
402
147k
                                const uint8_t *above, const uint8_t *left) {
403
147k
  (void)above;
404
147k
  const __m256i left_col = _mm256_loadu_si256((__m256i const *)left);
405
406
147k
  __m256i u = _mm256_unpacklo_epi8(left_col, left_col);
407
408
147k
  __m256i v = _mm256_unpacklo_epi8(u, u);
409
147k
  h_predictor_32x8line(&v, dst, stride);
410
147k
  dst += stride << 2;
411
412
147k
  v = _mm256_unpackhi_epi8(u, u);
413
147k
  h_predictor_32x8line(&v, dst, stride);
414
147k
  dst += stride << 2;
415
416
147k
  u = _mm256_unpackhi_epi8(left_col, left_col);
417
418
147k
  v = _mm256_unpacklo_epi8(u, u);
419
147k
  h_predictor_32x8line(&v, dst, stride);
420
147k
  dst += stride << 2;
421
422
147k
  v = _mm256_unpackhi_epi8(u, u);
423
147k
  h_predictor_32x8line(&v, dst, stride);
424
147k
}
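For reference, a scalar sketch of the H predictor that the two routines above implement (plain C, illustrative only, not part of the measured source): row r of the block is left[r] replicated, and the AVX2 path builds eight such rows per h_predictor_32x8line() call by shuffling the broadcast left column.

static void h_predictor_32x32_ref(uint8_t *dst, ptrdiff_t stride,
                                  const uint8_t *left) {
  for (int r = 0; r < 32; ++r, dst += stride)
    for (int c = 0; c < 32; ++c) dst[c] = left[r];  // replicate left pixel
}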
425
426
// -----------------------------------------------------------------------------
427
// Rectangle
428
void aom_dc_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
429
139k
                                 const uint8_t *above, const uint8_t *left) {
430
139k
  const __m128i top_sum = dc_sum_32_sse2(above);
431
139k
  __m128i left_sum = dc_sum_16_sse2(left);
432
139k
  left_sum = _mm_add_epi16(top_sum, left_sum);
433
139k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(left_sum);
434
139k
  sum += 24;
435
139k
  sum /= 48;
436
139k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
437
139k
  row_store_32xh(&row, 16, dst, stride);
438
139k
}
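A hedged note on the rounding above: the 32x16 block has 48 edge pixels (32 above + 16 left), which is not a power of two, so the mean is taken as (sum + 24) / 48. A scalar sketch (illustrative only, not part of the measured source):

static uint8_t dc_32x16_value_ref(const uint8_t *above, const uint8_t *left) {
  unsigned int sum = 0;
  for (int i = 0; i < 32; ++i) sum += above[i];
  for (int i = 0; i < 16; ++i) sum += left[i];
  return (uint8_t)((sum + 24) / 48);  // round-to-nearest division by 48
}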
439
440
void aom_dc_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
441
7.43k
                                 const uint8_t *above, const uint8_t *left) {
442
7.43k
  const __m256i sum_above = dc_sum_32(above);
443
7.43k
  __m256i sum_left = dc_sum_64(left);
444
7.43k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
445
7.43k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
446
7.43k
  sum += 48;
447
7.43k
  sum /= 96;
448
7.43k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
449
7.43k
  row_store_32xh(&row, 64, dst, stride);
450
7.43k
}
451
452
void aom_dc_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
453
104k
                                 const uint8_t *above, const uint8_t *left) {
454
104k
  const __m256i sum_above = dc_sum_64(above);
455
104k
  __m256i sum_left = dc_sum_64(left);
456
104k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
457
104k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
458
104k
  sum += 64;
459
104k
  sum /= 128;
460
104k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
461
104k
  row_store_64xh(&row, 64, dst, stride);
462
104k
}
463
464
void aom_dc_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
465
13.9k
                                 const uint8_t *above, const uint8_t *left) {
466
13.9k
  const __m256i sum_above = dc_sum_64(above);
467
13.9k
  __m256i sum_left = dc_sum_32(left);
468
13.9k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
469
13.9k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
470
13.9k
  sum += 48;
471
13.9k
  sum /= 96;
472
13.9k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
473
13.9k
  row_store_64xh(&row, 32, dst, stride);
474
13.9k
}
475
476
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
477
void aom_dc_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
478
54.1k
                                 const uint8_t *above, const uint8_t *left) {
479
54.1k
  const __m256i sum_above = dc_sum_64(above);
480
54.1k
  __m256i sum_left = _mm256_castsi128_si256(dc_sum_16_sse2(left));
481
54.1k
  sum_left = _mm256_add_epi16(sum_left, sum_above);
482
54.1k
  uint16_t sum = (uint16_t)_mm_cvtsi128_si32(_mm256_castsi256_si128(sum_left));
483
54.1k
  sum += 40;
484
54.1k
  sum /= 80;
485
54.1k
  const __m256i row = _mm256_set1_epi8((int8_t)sum);
486
54.1k
  row_store_64xh(&row, 16, dst, stride);
487
54.1k
}
488
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
489
490
void aom_dc_top_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
491
                                     const uint8_t *above,
492
4.08k
                                     const uint8_t *left) {
493
4.08k
  __m256i sum = dc_sum_32(above);
494
4.08k
  (void)left;
495
496
4.08k
  const __m256i sixteen = _mm256_set1_epi16(16);
497
4.08k
  sum = _mm256_add_epi16(sum, sixteen);
498
4.08k
  sum = _mm256_srai_epi16(sum, 5);
499
4.08k
  const __m256i zero = _mm256_setzero_si256();
500
4.08k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
501
4.08k
  row_store_32xh(&row, 16, dst, stride);
502
4.08k
}
503
504
void aom_dc_top_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
505
                                     const uint8_t *above,
506
1.41k
                                     const uint8_t *left) {
507
1.41k
  __m256i sum = dc_sum_32(above);
508
1.41k
  (void)left;
509
510
1.41k
  const __m256i sixteen = _mm256_set1_epi16(16);
511
1.41k
  sum = _mm256_add_epi16(sum, sixteen);
512
1.41k
  sum = _mm256_srai_epi16(sum, 5);
513
1.41k
  const __m256i zero = _mm256_setzero_si256();
514
1.41k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
515
1.41k
  row_store_32xh(&row, 64, dst, stride);
516
1.41k
}
517
518
void aom_dc_top_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
519
                                     const uint8_t *above,
520
15.1k
                                     const uint8_t *left) {
521
15.1k
  __m256i sum = dc_sum_64(above);
522
15.1k
  (void)left;
523
524
15.1k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
525
15.1k
  sum = _mm256_add_epi16(sum, thirtytwo);
526
15.1k
  sum = _mm256_srai_epi16(sum, 6);
527
15.1k
  const __m256i zero = _mm256_setzero_si256();
528
15.1k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
529
15.1k
  row_store_64xh(&row, 64, dst, stride);
530
15.1k
}
531
532
void aom_dc_top_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
533
                                     const uint8_t *above,
534
462
                                     const uint8_t *left) {
535
462
  __m256i sum = dc_sum_64(above);
536
462
  (void)left;
537
538
462
  const __m256i thirtytwo = _mm256_set1_epi16(32);
539
462
  sum = _mm256_add_epi16(sum, thirtytwo);
540
462
  sum = _mm256_srai_epi16(sum, 6);
541
462
  const __m256i zero = _mm256_setzero_si256();
542
462
  __m256i row = _mm256_shuffle_epi8(sum, zero);
543
462
  row_store_64xh(&row, 32, dst, stride);
544
462
}
545
546
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
547
void aom_dc_top_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
548
                                     const uint8_t *above,
549
2.13k
                                     const uint8_t *left) {
550
2.13k
  __m256i sum = dc_sum_64(above);
551
2.13k
  (void)left;
552
553
2.13k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
554
2.13k
  sum = _mm256_add_epi16(sum, thirtytwo);
555
2.13k
  sum = _mm256_srai_epi16(sum, 6);
556
2.13k
  const __m256i zero = _mm256_setzero_si256();
557
2.13k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
558
2.13k
  row_store_64xh(&row, 16, dst, stride);
559
2.13k
}
560
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
561
562
void aom_dc_left_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
563
                                      const uint8_t *above,
564
4.01k
                                      const uint8_t *left) {
565
4.01k
  __m128i sum = dc_sum_16_sse2(left);
566
4.01k
  (void)above;
567
568
4.01k
  const __m128i eight = _mm_set1_epi16(8);
569
4.01k
  sum = _mm_add_epi16(sum, eight);
570
4.01k
  sum = _mm_srai_epi16(sum, 4);
571
4.01k
  const __m128i zero = _mm_setzero_si128();
572
4.01k
  const __m128i r = _mm_shuffle_epi8(sum, zero);
573
4.01k
  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);
574
4.01k
  row_store_32xh(&row, 16, dst, stride);
575
4.01k
}
576
577
void aom_dc_left_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
578
                                      const uint8_t *above,
579
1.69k
                                      const uint8_t *left) {
580
1.69k
  __m256i sum = dc_sum_64(left);
581
1.69k
  (void)above;
582
583
1.69k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
584
1.69k
  sum = _mm256_add_epi16(sum, thirtytwo);
585
1.69k
  sum = _mm256_srai_epi16(sum, 6);
586
1.69k
  const __m256i zero = _mm256_setzero_si256();
587
1.69k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
588
1.69k
  row_store_32xh(&row, 64, dst, stride);
589
1.69k
}
590
591
void aom_dc_left_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
592
                                      const uint8_t *above,
593
20.5k
                                      const uint8_t *left) {
594
20.5k
  __m256i sum = dc_sum_64(left);
595
20.5k
  (void)above;
596
597
20.5k
  const __m256i thirtytwo = _mm256_set1_epi16(32);
598
20.5k
  sum = _mm256_add_epi16(sum, thirtytwo);
599
20.5k
  sum = _mm256_srai_epi16(sum, 6);
600
20.5k
  const __m256i zero = _mm256_setzero_si256();
601
20.5k
  __m256i row = _mm256_shuffle_epi8(sum, zero);
602
20.5k
  row_store_64xh(&row, 64, dst, stride);
603
20.5k
}
604
605
void aom_dc_left_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
606
                                      const uint8_t *above,
607
957
                                      const uint8_t *left) {
608
957
  __m256i sum = dc_sum_32(left);
609
957
  (void)above;
610
611
957
  const __m256i sixteen = _mm256_set1_epi16(16);
612
957
  sum = _mm256_add_epi16(sum, sixteen);
613
957
  sum = _mm256_srai_epi16(sum, 5);
614
957
  const __m256i zero = _mm256_setzero_si256();
615
957
  __m256i row = _mm256_shuffle_epi8(sum, zero);
616
957
  row_store_64xh(&row, 32, dst, stride);
617
957
}
618
619
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
620
void aom_dc_left_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
621
                                      const uint8_t *above,
622
281
                                      const uint8_t *left) {
623
281
  __m128i sum = dc_sum_16_sse2(left);
624
281
  (void)above;
625
626
281
  const __m128i eight = _mm_set1_epi16(8);
627
281
  sum = _mm_add_epi16(sum, eight);
628
281
  sum = _mm_srai_epi16(sum, 4);
629
281
  const __m128i zero = _mm_setzero_si128();
630
281
  const __m128i r = _mm_shuffle_epi8(sum, zero);
631
281
  const __m256i row = _mm256_inserti128_si256(_mm256_castsi128_si256(r), r, 1);
632
281
  row_store_64xh(&row, 16, dst, stride);
633
281
}
634
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
635
636
void aom_dc_128_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
637
                                     const uint8_t *above,
638
3.54k
                                     const uint8_t *left) {
639
3.54k
  (void)above;
640
3.54k
  (void)left;
641
3.54k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
642
3.54k
  row_store_32xh(&row, 16, dst, stride);
643
3.54k
}
644
645
void aom_dc_128_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
646
                                     const uint8_t *above,
647
548
                                     const uint8_t *left) {
648
548
  (void)above;
649
548
  (void)left;
650
548
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
651
548
  row_store_32xh(&row, 64, dst, stride);
652
548
}
653
654
void aom_dc_128_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
655
                                     const uint8_t *above,
656
8.38k
                                     const uint8_t *left) {
657
8.38k
  (void)above;
658
8.38k
  (void)left;
659
8.38k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
660
8.38k
  row_store_64xh(&row, 64, dst, stride);
661
8.38k
}
662
663
void aom_dc_128_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
664
                                     const uint8_t *above,
665
1.48k
                                     const uint8_t *left) {
666
1.48k
  (void)above;
667
1.48k
  (void)left;
668
1.48k
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
669
1.48k
  row_store_64xh(&row, 32, dst, stride);
670
1.48k
}
671
672
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
673
void aom_dc_128_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
674
                                     const uint8_t *above,
675
282
                                     const uint8_t *left) {
676
282
  (void)above;
677
282
  (void)left;
678
282
  const __m256i row = _mm256_set1_epi8((int8_t)0x80);
679
282
  row_store_64xh(&row, 16, dst, stride);
680
282
}
681
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
682
683
void aom_v_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
684
9.84k
                                const uint8_t *above, const uint8_t *left) {
685
9.84k
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
686
9.84k
  (void)left;
687
9.84k
  row_store_32xh(&row, 16, dst, stride);
688
9.84k
}
689
690
void aom_v_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
691
554
                                const uint8_t *above, const uint8_t *left) {
692
554
  const __m256i row = _mm256_loadu_si256((const __m256i *)above);
693
554
  (void)left;
694
554
  row_store_32xh(&row, 64, dst, stride);
695
554
}
696
697
void aom_v_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
698
2.12k
                                const uint8_t *above, const uint8_t *left) {
699
2.12k
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
700
2.12k
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
701
2.12k
  (void)left;
702
2.12k
  row_store_32x2xh(&row0, &row1, 64, dst, stride);
703
2.12k
}
704
705
void aom_v_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
706
537
                                const uint8_t *above, const uint8_t *left) {
707
537
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
708
537
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
709
537
  (void)left;
710
537
  row_store_32x2xh(&row0, &row1, 32, dst, stride);
711
537
}
712
713
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
714
void aom_v_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
715
1.09k
                                const uint8_t *above, const uint8_t *left) {
716
1.09k
  const __m256i row0 = _mm256_loadu_si256((const __m256i *)above);
717
1.09k
  const __m256i row1 = _mm256_loadu_si256((const __m256i *)(above + 32));
718
1.09k
  (void)left;
719
1.09k
  row_store_32x2xh(&row0, &row1, 16, dst, stride);
720
1.09k
}
721
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
722
723
// -----------------------------------------------------------------------------
724
// PAETH_PRED
725
726
// Return 16 16-bit pixels in one row (__m256i)
727
static inline __m256i paeth_pred(const __m256i *left, const __m256i *top,
728
78.7M
                                 const __m256i *topleft) {
729
78.7M
  const __m256i base =
730
78.7M
      _mm256_sub_epi16(_mm256_add_epi16(*top, *left), *topleft);
731
732
78.7M
  __m256i pl = _mm256_abs_epi16(_mm256_sub_epi16(base, *left));
733
78.7M
  __m256i pt = _mm256_abs_epi16(_mm256_sub_epi16(base, *top));
734
78.7M
  __m256i ptl = _mm256_abs_epi16(_mm256_sub_epi16(base, *topleft));
735
736
78.7M
  __m256i mask1 = _mm256_cmpgt_epi16(pl, pt);
737
78.7M
  mask1 = _mm256_or_si256(mask1, _mm256_cmpgt_epi16(pl, ptl));
738
78.7M
  __m256i mask2 = _mm256_cmpgt_epi16(pt, ptl);
739
740
78.7M
  pl = _mm256_andnot_si256(mask1, *left);
741
742
78.7M
  ptl = _mm256_and_si256(mask2, *topleft);
743
78.7M
  pt = _mm256_andnot_si256(mask2, *top);
744
78.7M
  pt = _mm256_or_si256(pt, ptl);
745
78.7M
  pt = _mm256_and_si256(mask1, pt);
746
747
78.7M
  return _mm256_or_si256(pt, pl);
748
78.7M
}
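A scalar Paeth reference matching the branchless selection above (a sketch assuming <stdlib.h> for abs(); not part of the measured source): the predictor picks whichever of left, top, topleft is closest to base = left + top - topleft, preferring left, then top.

static uint8_t paeth_pred_ref(uint8_t left, uint8_t top, uint8_t topleft) {
  const int base = left + top - topleft;
  const int pl = abs(base - left);
  const int pt = abs(base - top);
  const int ptl = abs(base - topleft);
  if (pl <= pt && pl <= ptl) return left;  // mask1 clear: keep left
  return (pt <= ptl) ? top : topleft;      // mask2 selects topleft over top
}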
749
750
// Return 16 8-bit pixels in one row (__m128i)
751
static inline __m128i paeth_16x1_pred(const __m256i *left, const __m256i *top,
752
77.8M
                                      const __m256i *topleft) {
753
77.8M
  const __m256i p0 = paeth_pred(left, top, topleft);
754
77.8M
  const __m256i p1 = _mm256_permute4x64_epi64(p0, 0xe);
755
77.8M
  const __m256i p = _mm256_packus_epi16(p0, p1);
756
77.8M
  return _mm256_castsi256_si128(p);
757
77.8M
}
758
759
2.23M
static inline __m256i get_top_vector(const uint8_t *above) {
760
2.23M
  const __m128i x = _mm_load_si128((const __m128i *)above);
761
2.23M
  const __m128i zero = _mm_setzero_si128();
762
2.23M
  const __m128i t0 = _mm_unpacklo_epi8(x, zero);
763
2.23M
  const __m128i t1 = _mm_unpackhi_epi8(x, zero);
764
2.23M
  return _mm256_inserti128_si256(_mm256_castsi128_si256(t0), t1, 1);
765
2.23M
}
766
767
void aom_paeth_predictor_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
768
78.2k
                                   const uint8_t *above, const uint8_t *left) {
769
78.2k
  __m128i x = _mm_loadl_epi64((const __m128i *)left);
770
78.2k
  const __m256i l = _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
771
78.2k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
772
78.2k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
773
78.2k
  const __m256i one = _mm256_set1_epi16(1);
774
78.2k
  const __m256i top = get_top_vector(above);
775
776
78.2k
  int i;
777
704k
  for (i = 0; i < 8; ++i) {
778
625k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
779
625k
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
780
781
625k
    _mm_store_si128((__m128i *)dst, row);
782
625k
    dst += stride;
783
625k
    rep = _mm256_add_epi16(rep, one);
784
625k
  }
785
78.2k
}
786
787
3.78M
static inline __m256i get_left_vector(const uint8_t *left) {
788
3.78M
  const __m128i x = _mm_load_si128((const __m128i *)left);
789
3.78M
  return _mm256_inserti128_si256(_mm256_castsi128_si256(x), x, 1);
790
3.78M
}
791
792
void aom_paeth_predictor_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
793
129k
                                    const uint8_t *above, const uint8_t *left) {
794
129k
  const __m256i l = get_left_vector(left);
795
129k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
796
129k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
797
129k
  const __m256i one = _mm256_set1_epi16(1);
798
129k
  const __m256i top = get_top_vector(above);
799
800
129k
  int i;
801
2.19M
  for (i = 0; i < 16; ++i) {
802
2.06M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
803
2.06M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
804
805
2.06M
    _mm_store_si128((__m128i *)dst, row);
806
2.06M
    dst += stride;
807
2.06M
    rep = _mm256_add_epi16(rep, one);
808
2.06M
  }
809
129k
}
810
811
void aom_paeth_predictor_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
812
973k
                                    const uint8_t *above, const uint8_t *left) {
813
973k
  __m256i l = get_left_vector(left);
814
973k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
815
973k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
816
973k
  const __m256i one = _mm256_set1_epi16(1);
817
973k
  const __m256i top = get_top_vector(above);
818
819
973k
  int i;
820
16.5M
  for (i = 0; i < 16; ++i) {
821
15.5M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
822
15.5M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
823
824
15.5M
    _mm_store_si128((__m128i *)dst, row);
825
15.5M
    dst += stride;
826
15.5M
    rep = _mm256_add_epi16(rep, one);
827
15.5M
  }
828
829
973k
  l = get_left_vector(left + 16);
830
973k
  rep = _mm256_set1_epi16((short)0x8000);
831
16.5M
  for (i = 0; i < 16; ++i) {
832
15.5M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
833
15.5M
    const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
834
835
15.5M
    _mm_store_si128((__m128i *)dst, row);
836
15.5M
    dst += stride;
837
15.5M
    rep = _mm256_add_epi16(rep, one);
838
15.5M
  }
839
973k
}
840
841
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
842
void aom_paeth_predictor_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
843
237k
                                    const uint8_t *above, const uint8_t *left) {
844
237k
  const __m256i tl16 = _mm256_set1_epi16((int16_t)above[-1]);
845
237k
  const __m256i one = _mm256_set1_epi16(1);
846
237k
  const __m256i top = get_top_vector(above);
847
848
1.18M
  for (int j = 0; j < 4; ++j) {
849
951k
    const __m256i l = get_left_vector(left + j * 16);
850
951k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
851
16.1M
    for (int i = 0; i < 16; ++i) {
852
15.2M
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
853
15.2M
      const __m128i row = paeth_16x1_pred(&l16, &top, &tl16);
854
855
15.2M
      _mm_store_si128((__m128i *)dst, row);
856
15.2M
      dst += stride;
857
15.2M
      rep = _mm256_add_epi16(rep, one);
858
15.2M
    }
859
951k
  }
860
237k
}
861
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
862
863
// Return 32 8-bit pixels in one row (__m256i)
864
static inline __m256i paeth_32x1_pred(const __m256i *left, const __m256i *top0,
865
                                      const __m256i *top1,
866
465k
                                      const __m256i *topleft) {
867
465k
  __m256i p0 = paeth_pred(left, top0, topleft);
868
465k
  __m256i p1 = _mm256_permute4x64_epi64(p0, 0xe);
869
465k
  const __m256i x0 = _mm256_packus_epi16(p0, p1);
870
871
465k
  p0 = paeth_pred(left, top1, topleft);
872
465k
  p1 = _mm256_permute4x64_epi64(p0, 0xe);
873
465k
  const __m256i x1 = _mm256_packus_epi16(p0, p1);
874
875
465k
  return _mm256_permute2x128_si256(x0, x1, 0x20);
876
465k
}
877
878
void aom_paeth_predictor_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
879
29.1k
                                    const uint8_t *above, const uint8_t *left) {
880
29.1k
  const __m256i l = get_left_vector(left);
881
29.1k
  const __m256i t0 = get_top_vector(above);
882
29.1k
  const __m256i t1 = get_top_vector(above + 16);
883
29.1k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
884
29.1k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
885
29.1k
  const __m256i one = _mm256_set1_epi16(1);
886
887
29.1k
  int i;
888
494k
  for (i = 0; i < 16; ++i) {
889
465k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
890
891
465k
    const __m256i r = paeth_32x1_pred(&l16, &t0, &t1, &tl);
892
893
465k
    _mm256_storeu_si256((__m256i *)dst, r);
894
895
465k
    dst += stride;
896
465k
    rep = _mm256_add_epi16(rep, one);
897
465k
  }
898
29.1k
}
899
900
void aom_paeth_predictor_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
901
274k
                                    const uint8_t *above, const uint8_t *left) {
902
274k
  __m256i l = get_left_vector(left);
903
274k
  const __m256i t0 = get_top_vector(above);
904
274k
  const __m256i t1 = get_top_vector(above + 16);
905
274k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
906
274k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
907
274k
  const __m256i one = _mm256_set1_epi16(1);
908
909
274k
  int i;
910
4.66M
  for (i = 0; i < 16; ++i) {
911
4.38M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
912
913
4.38M
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
914
4.38M
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
915
916
4.38M
    _mm_store_si128((__m128i *)dst, r0);
917
4.38M
    _mm_store_si128((__m128i *)(dst + 16), r1);
918
919
4.38M
    dst += stride;
920
4.38M
    rep = _mm256_add_epi16(rep, one);
921
4.38M
  }
922
923
274k
  l = get_left_vector(left + 16);
924
274k
  rep = _mm256_set1_epi16((short)0x8000);
925
4.66M
  for (i = 0; i < 16; ++i) {
926
4.38M
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
927
928
4.38M
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
929
4.38M
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
930
931
4.38M
    _mm_store_si128((__m128i *)dst, r0);
932
4.38M
    _mm_store_si128((__m128i *)(dst + 16), r1);
933
934
4.38M
    dst += stride;
935
4.38M
    rep = _mm256_add_epi16(rep, one);
936
4.38M
  }
937
274k
}
938
939
void aom_paeth_predictor_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
940
4.30k
                                    const uint8_t *above, const uint8_t *left) {
941
4.30k
  const __m256i t0 = get_top_vector(above);
942
4.30k
  const __m256i t1 = get_top_vector(above + 16);
943
4.30k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
944
4.30k
  const __m256i one = _mm256_set1_epi16(1);
945
946
4.30k
  int i, j;
947
21.5k
  for (j = 0; j < 4; ++j) {
948
17.2k
    const __m256i l = get_left_vector(left + j * 16);
949
17.2k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
950
292k
    for (i = 0; i < 16; ++i) {
951
275k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
952
953
275k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
954
275k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
955
956
275k
      _mm_store_si128((__m128i *)dst, r0);
957
275k
      _mm_store_si128((__m128i *)(dst + 16), r1);
958
959
275k
      dst += stride;
960
275k
      rep = _mm256_add_epi16(rep, one);
961
275k
    }
962
17.2k
  }
963
4.30k
}
964
965
void aom_paeth_predictor_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
966
4.43k
                                    const uint8_t *above, const uint8_t *left) {
967
4.43k
  const __m256i t0 = get_top_vector(above);
968
4.43k
  const __m256i t1 = get_top_vector(above + 16);
969
4.43k
  const __m256i t2 = get_top_vector(above + 32);
970
4.43k
  const __m256i t3 = get_top_vector(above + 48);
971
4.43k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
972
4.43k
  const __m256i one = _mm256_set1_epi16(1);
973
974
4.43k
  int i, j;
975
13.3k
  for (j = 0; j < 2; ++j) {
976
8.86k
    const __m256i l = get_left_vector(left + j * 16);
977
8.86k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
978
150k
    for (i = 0; i < 16; ++i) {
979
141k
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
980
981
141k
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
982
141k
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
983
141k
      const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
984
141k
      const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
985
986
141k
      _mm_store_si128((__m128i *)dst, r0);
987
141k
      _mm_store_si128((__m128i *)(dst + 16), r1);
988
141k
      _mm_store_si128((__m128i *)(dst + 32), r2);
989
141k
      _mm_store_si128((__m128i *)(dst + 48), r3);
990
991
141k
      dst += stride;
992
141k
      rep = _mm256_add_epi16(rep, one);
993
141k
    }
994
8.86k
  }
995
4.43k
}
996
997
void aom_paeth_predictor_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
998
37.0k
                                    const uint8_t *above, const uint8_t *left) {
999
37.0k
  const __m256i t0 = get_top_vector(above);
1000
37.0k
  const __m256i t1 = get_top_vector(above + 16);
1001
37.0k
  const __m256i t2 = get_top_vector(above + 32);
1002
37.0k
  const __m256i t3 = get_top_vector(above + 48);
1003
37.0k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
1004
37.0k
  const __m256i one = _mm256_set1_epi16(1);
1005
1006
37.0k
  int i, j;
1007
185k
  for (j = 0; j < 4; ++j) {
1008
148k
    const __m256i l = get_left_vector(left + j * 16);
1009
148k
    __m256i rep = _mm256_set1_epi16((short)0x8000);
1010
2.52M
    for (i = 0; i < 16; ++i) {
1011
2.37M
      const __m256i l16 = _mm256_shuffle_epi8(l, rep);
1012
1013
2.37M
      const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
1014
2.37M
      const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
1015
2.37M
      const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
1016
2.37M
      const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
1017
1018
2.37M
      _mm_store_si128((__m128i *)dst, r0);
1019
2.37M
      _mm_store_si128((__m128i *)(dst + 16), r1);
1020
2.37M
      _mm_store_si128((__m128i *)(dst + 32), r2);
1021
2.37M
      _mm_store_si128((__m128i *)(dst + 48), r3);
1022
1023
2.37M
      dst += stride;
1024
2.37M
      rep = _mm256_add_epi16(rep, one);
1025
2.37M
    }
1026
148k
  }
1027
37.0k
}
1028
1029
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1030
void aom_paeth_predictor_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
1031
9.17k
                                    const uint8_t *above, const uint8_t *left) {
1032
9.17k
  const __m256i t0 = get_top_vector(above);
1033
9.17k
  const __m256i t1 = get_top_vector(above + 16);
1034
9.17k
  const __m256i t2 = get_top_vector(above + 32);
1035
9.17k
  const __m256i t3 = get_top_vector(above + 48);
1036
9.17k
  const __m256i tl = _mm256_set1_epi16((int16_t)above[-1]);
1037
9.17k
  const __m256i one = _mm256_set1_epi16(1);
1038
1039
9.17k
  int i;
1040
9.17k
  const __m256i l = get_left_vector(left);
1041
9.17k
  __m256i rep = _mm256_set1_epi16((short)0x8000);
1042
156k
  for (i = 0; i < 16; ++i) {
1043
146k
    const __m256i l16 = _mm256_shuffle_epi8(l, rep);
1044
1045
146k
    const __m128i r0 = paeth_16x1_pred(&l16, &t0, &tl);
1046
146k
    const __m128i r1 = paeth_16x1_pred(&l16, &t1, &tl);
1047
146k
    const __m128i r2 = paeth_16x1_pred(&l16, &t2, &tl);
1048
146k
    const __m128i r3 = paeth_16x1_pred(&l16, &t3, &tl);
1049
1050
146k
    _mm_store_si128((__m128i *)dst, r0);
1051
146k
    _mm_store_si128((__m128i *)(dst + 16), r1);
1052
146k
    _mm_store_si128((__m128i *)(dst + 32), r2);
1053
146k
    _mm_store_si128((__m128i *)(dst + 48), r3);
1054
1055
146k
    dst += stride;
1056
146k
    rep = _mm256_add_epi16(rep, one);
1057
146k
  }
1058
9.17k
}
1059
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
1060
1061
#if CONFIG_AV1_HIGHBITDEPTH
1062
1063
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_4xN_internal_avx2(
1064
333k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1065
333k
  const int frac_bits = 6 - upsample_above;
1066
333k
  const int max_base_x = ((N + 4) - 1) << upsample_above;
1067
1068
333k
  assert(dx > 0);
1069
  // pre-filter above pixels
1070
  // store in temp buffers:
1071
  //   above[x] * 32 + 16
1072
  //   above[x+1] - above[x]
1073
  // final pixels will be calculated as:
1074
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1075
333k
  __m256i a0, a1, a32, a16;
1076
333k
  __m256i diff, c3f;
1077
333k
  __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
1078
333k
  __m128i a0_128, a1_128;
1079
333k
  a16 = _mm256_set1_epi16(16);
1080
333k
  a_mbase_x = _mm_set1_epi16(above[max_base_x]);
1081
333k
  max_base_x128 = _mm_set1_epi16(max_base_x);
1082
333k
  c3f = _mm256_set1_epi16(0x3f);
1083
1084
333k
  int x = dx;
1085
2.68M
  for (int r = 0; r < N; r++) {
1086
2.35M
    __m256i b, res, shift;
1087
2.35M
    __m128i res1;
1088
1089
2.35M
    int base = x >> frac_bits;
1090
2.35M
    if (base >= max_base_x) {
1091
8.40k
      for (int i = r; i < N; ++i) {
1092
4.98k
        dst[i] = a_mbase_x;  // save 4 values
1093
4.98k
      }
1094
3.42k
      return;
1095
3.42k
    }
1096
1097
2.34M
    a0_128 = _mm_loadu_si128((__m128i *)(above + base));
1098
2.34M
    a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
1099
1100
2.34M
    if (upsample_above) {
1101
806k
      a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)HighbdEvenOddMaskx4[0]);
1102
806k
      a1_128 = _mm_srli_si128(a0_128, 8);
1103
1104
806k
      base_inc128 = _mm_setr_epi16(base, base + 2, base + 4, base + 6, base + 8,
1105
806k
                                   base + 10, base + 12, base + 14);
1106
806k
      shift = _mm256_srli_epi16(
1107
806k
          _mm256_and_si256(
1108
806k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above),
1109
806k
              _mm256_set1_epi16(0x3f)),
1110
806k
          1);
1111
1.54M
    } else {
1112
1.54M
      base_inc128 = _mm_setr_epi16(base, base + 1, base + 2, base + 3, base + 4,
1113
1.54M
                                   base + 5, base + 6, base + 7);
1114
1.54M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1115
1.54M
    }
1116
2.34M
    a0 = _mm256_castsi128_si256(a0_128);
1117
2.34M
    a1 = _mm256_castsi128_si256(a1_128);
1118
2.34M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1119
2.34M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1120
2.34M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1121
1122
2.34M
    b = _mm256_mullo_epi16(diff, shift);
1123
2.34M
    res = _mm256_add_epi16(a32, b);
1124
2.34M
    res = _mm256_srli_epi16(res, 5);
1125
2.34M
    res1 = _mm256_castsi256_si128(res);
1126
1127
2.34M
    mask128 = _mm_cmpgt_epi16(max_base_x128, base_inc128);
1128
2.34M
    dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
1129
2.34M
    x += dx;
1130
2.34M
  }
1131
333k
}
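A scalar sketch of the per-pixel interpolation described in the comment at the top of this function (illustrative only; not part of the measured source): shift is the 5-bit fractional position, so the result is a rounded linear blend of two neighboring above[] samples.

static uint16_t z1_interp_ref(const uint16_t *above, int base, int shift) {
  // shift in [0, 31]: equivalent to
  // round((above[base] * (32 - shift) + above[base + 1] * shift) / 32)
  return (uint16_t)((above[base] * 32 + 16 +
                     (above[base + 1] - above[base]) * shift) >> 5);
}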
1132
1133
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_4xN_internal_avx2(
1134
131k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1135
131k
  const int frac_bits = 6 - upsample_above;
1136
131k
  const int max_base_x = ((N + 4) - 1) << upsample_above;
1137
1138
131k
  assert(dx > 0);
1139
  // pre-filter above pixels
1140
  // store in temp buffers:
1141
  //   above[x] * 32 + 16
1142
  //   above[x+1] - above[x]
1143
  // final pixels will be calculated as:
1144
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1145
131k
  __m256i a0, a1, a32, a16;
1146
131k
  __m256i diff;
1147
131k
  __m128i a_mbase_x, max_base_x128, base_inc128, mask128;
1148
1149
131k
  a16 = _mm256_set1_epi32(16);
1150
131k
  a_mbase_x = _mm_set1_epi16(above[max_base_x]);
1151
131k
  max_base_x128 = _mm_set1_epi32(max_base_x);
1152
1153
131k
  int x = dx;
1154
1.07M
  for (int r = 0; r < N; r++) {
1155
944k
    __m256i b, res, shift;
1156
944k
    __m128i res1;
1157
1158
944k
    int base = x >> frac_bits;
1159
944k
    if (base >= max_base_x) {
1160
4.21k
      for (int i = r; i < N; ++i) {
1161
2.90k
        dst[i] = a_mbase_x;  // save 4 values
1162
2.90k
      }
1163
1.31k
      return;
1164
1.31k
    }
1165
1166
943k
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1167
943k
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1168
1169
943k
    if (upsample_above) {
1170
265k
      a0 = _mm256_permutevar8x32_epi32(
1171
265k
          a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1172
265k
      a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
1173
265k
      base_inc128 = _mm_setr_epi32(base, base + 2, base + 4, base + 6);
1174
265k
      shift = _mm256_srli_epi32(
1175
265k
          _mm256_and_si256(
1176
265k
              _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
1177
265k
              _mm256_set1_epi32(0x3f)),
1178
265k
          1);
1179
678k
    } else {
1180
678k
      base_inc128 = _mm_setr_epi32(base, base + 1, base + 2, base + 3);
1181
678k
      shift = _mm256_srli_epi32(
1182
678k
          _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1183
678k
    }
1184
1185
943k
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1186
943k
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1187
943k
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1188
1189
943k
    b = _mm256_mullo_epi32(diff, shift);
1190
943k
    res = _mm256_add_epi32(a32, b);
1191
943k
    res = _mm256_srli_epi32(res, 5);
1192
1193
943k
    res1 = _mm256_castsi256_si128(res);
1194
943k
    res1 = _mm_packus_epi32(res1, res1);
1195
1196
943k
    mask128 = _mm_cmpgt_epi32(max_base_x128, base_inc128);
1197
943k
    mask128 = _mm_packs_epi32(mask128, mask128);  // narrow to 16 bit
1198
943k
    dst[r] = _mm_blendv_epi8(a_mbase_x, res1, mask128);
1199
943k
    x += dx;
1200
943k
  }
1201
131k
}
1202
1203
static void highbd_dr_prediction_z1_4xN_avx2(int N, uint16_t *dst,
1204
                                             ptrdiff_t stride,
1205
                                             const uint16_t *above,
1206
                                             int upsample_above, int dx,
1207
145k
                                             int bd) {
1208
145k
  __m128i dstvec[16];
1209
145k
  if (bd < 12) {
1210
90.2k
    highbd_dr_prediction_z1_4xN_internal_avx2(N, dstvec, above, upsample_above,
1211
90.2k
                                              dx);
1212
90.2k
  } else {
1213
55.4k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(N, dstvec, above,
1214
55.4k
                                                    upsample_above, dx);
1215
55.4k
  }
1216
1.19M
  for (int i = 0; i < N; i++) {
1217
1.04M
    _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
1218
1.04M
  }
1219
145k
}
1220
1221
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_8xN_internal_avx2(
1222
151k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1223
151k
  const int frac_bits = 6 - upsample_above;
1224
151k
  const int max_base_x = ((8 + N) - 1) << upsample_above;
1225
1226
151k
  assert(dx > 0);
1227
  // pre-filter above pixels
1228
  // store in temp buffers:
1229
  //   above[x] * 32 + 16
1230
  //   above[x+1] - above[x]
1231
  // final pixels will be calculated as:
1232
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1233
151k
  __m256i a0, a1, a0_1, a1_1, a32, a16;
1234
151k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1235
1236
151k
  a16 = _mm256_set1_epi32(16);
1237
151k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1238
151k
  max_base_x256 = _mm256_set1_epi32(max_base_x);
1239
1240
151k
  int x = dx;
1241
1.71M
  for (int r = 0; r < N; r++) {
1242
1.56M
    __m256i b, res, res1, shift;
1243
1244
1.56M
    int base = x >> frac_bits;
1245
1.56M
    if (base >= max_base_x) {
1246
3.23k
      for (int i = r; i < N; ++i) {
1247
2.33k
        dst[i] = _mm256_castsi256_si128(a_mbase_x);  // save 8 values
1248
2.33k
      }
1249
895
      return;
1250
895
    }
1251
1252
1.56M
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1253
1.56M
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1254
1255
1.56M
    if (upsample_above) {
1256
307k
      a0 = _mm256_permutevar8x32_epi32(
1257
307k
          a0, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1258
307k
      a1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0, 1));
1259
1260
307k
      a0_1 =
1261
307k
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
1262
307k
      a0_1 = _mm256_permutevar8x32_epi32(
1263
307k
          a0_1, _mm256_set_epi32(7, 5, 3, 1, 6, 4, 2, 0));
1264
307k
      a1_1 = _mm256_castsi128_si256(_mm256_extracti128_si256(a0_1, 1));
1265
1266
307k
      a0 = _mm256_inserti128_si256(a0, _mm256_castsi256_si128(a0_1), 1);
1267
307k
      a1 = _mm256_inserti128_si256(a1, _mm256_castsi256_si128(a1_1), 1);
1268
307k
      base_inc256 =
1269
307k
          _mm256_setr_epi32(base, base + 2, base + 4, base + 6, base + 8,
1270
307k
                            base + 10, base + 12, base + 14);
1271
307k
      shift = _mm256_srli_epi32(
1272
307k
          _mm256_and_si256(
1273
307k
              _mm256_slli_epi32(_mm256_set1_epi32(x), upsample_above),
1274
307k
              _mm256_set1_epi32(0x3f)),
1275
307k
          1);
1276
1.26M
    } else {
1277
1.26M
      base_inc256 = _mm256_setr_epi32(base, base + 1, base + 2, base + 3,
1278
1.26M
                                      base + 4, base + 5, base + 6, base + 7);
1279
1.26M
      shift = _mm256_srli_epi32(
1280
1.26M
          _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1281
1.26M
    }
1282
1283
1.56M
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1284
1.56M
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1285
1.56M
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1286
1287
1.56M
    b = _mm256_mullo_epi32(diff, shift);
1288
1.56M
    res = _mm256_add_epi32(a32, b);
1289
1.56M
    res = _mm256_srli_epi32(res, 5);
1290
1291
1.56M
    res1 = _mm256_packus_epi32(
1292
1.56M
        res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
1293
1294
1.56M
    mask256 = _mm256_cmpgt_epi32(max_base_x256, base_inc256);
1295
1.56M
    mask256 = _mm256_packs_epi32(
1296
1.56M
        mask256, _mm256_castsi128_si256(
1297
1.56M
                     _mm256_extracti128_si256(mask256, 1)));  // convert to 16 bit
1298
1.56M
    res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1299
1.56M
    dst[r] = _mm256_castsi256_si128(res1);
1300
1.56M
    x += dx;
1301
1.56M
  }
1302
151k
}
1303
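// For reference, a minimal scalar sketch of the interpolation that the z1
// kernels above and below vectorize (the helper name is hypothetical and not
// part of libaom; it assumes the same frac_bits / max_base_x conventions as
// the AVX2 code, with W the block width and N the block height).
static void highbd_dr_z1_scalar_sketch(int W, int N, uint16_t *dst,
                                       ptrdiff_t stride, const uint16_t *above,
                                       int upsample_above, int dx) {
  const int frac_bits = 6 - upsample_above;
  const int max_base_x = ((W + N) - 1) << upsample_above;
  int x = dx;
  for (int r = 0; r < N; r++, dst += stride, x += dx) {
    const int base = x >> frac_bits;
    const int shift = ((x * (1 << upsample_above)) & 0x3f) >> 1;
    for (int c = 0; c < W; c++) {
      const int b = base + (c << upsample_above);
      if (b >= max_base_x) {
        dst[c] = above[max_base_x];  // replicate the last edge sample
      } else {
        dst[c] = (uint16_t)((above[b] * 32 + 16 +
                             (above[b + 1] - above[b]) * shift) >>
                            5);
      }
    }
  }
}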
1304
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_8xN_internal_avx2(
1305
355k
    int N, __m128i *dst, const uint16_t *above, int upsample_above, int dx) {
1306
355k
  const int frac_bits = 6 - upsample_above;
1307
355k
  const int max_base_x = ((8 + N) - 1) << upsample_above;
1308
1309
355k
  assert(dx > 0);
1310
  // pre-filter above pixels
1311
  // store in temp buffers:
1312
  //   above[x] * 32 + 16
1313
  //   above[x+1] - above[x]
1314
  // final pixels will be calculated as:
1315
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1316
355k
  __m256i a0, a1, a32, a16, c3f;
1317
355k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1318
355k
  __m128i a0_x128, a1_x128;
1319
1320
355k
  a16 = _mm256_set1_epi16(16);
1321
355k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1322
355k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1323
355k
  c3f = _mm256_set1_epi16(0x3f);
1324
1325
355k
  int x = dx;
1326
4.87M
  for (int r = 0; r < N; r++) {
1327
4.52M
    __m256i b, res, res1, shift;
1328
1329
4.52M
    int base = x >> frac_bits;
1330
4.52M
    if (base >= max_base_x) {
1331
6.81k
      for (int i = r; i < N; ++i) {
1332
4.86k
        dst[i] = _mm256_castsi256_si128(a_mbase_x);  // save 8 values
1333
4.86k
      }
1334
1.95k
      return;
1335
1.95k
    }
1336
1337
4.51M
    a0_x128 = _mm_loadu_si128((__m128i *)(above + base));
1338
4.51M
    if (upsample_above) {
1339
898k
      __m128i mask, atmp0, atmp1, atmp2, atmp3;
1340
898k
      a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 8));
1341
898k
      atmp0 = _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
1342
898k
      atmp1 = _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdEvenOddMaskx[0]);
1343
898k
      atmp2 =
1344
898k
          _mm_shuffle_epi8(a0_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
1345
898k
      atmp3 =
1346
898k
          _mm_shuffle_epi8(a1_x128, *(__m128i *)(HighbdEvenOddMaskx[0] + 16));
1347
898k
      mask =
1348
898k
          _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[0], _mm_set1_epi8(15));
1349
898k
      a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
1350
898k
      mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[0] + 16),
1351
898k
                            _mm_set1_epi8(15));
1352
898k
      a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
1353
1354
898k
      base_inc256 = _mm256_setr_epi16(base, base + 2, base + 4, base + 6,
1355
898k
                                      base + 8, base + 10, base + 12, base + 14,
1356
898k
                                      0, 0, 0, 0, 0, 0, 0, 0);
1357
898k
      shift = _mm256_srli_epi16(
1358
898k
          _mm256_and_si256(
1359
898k
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
1360
898k
          1);
1361
3.61M
    } else {
1362
3.61M
      a1_x128 = _mm_loadu_si128((__m128i *)(above + base + 1));
1363
3.61M
      base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1364
3.61M
                                      base + 4, base + 5, base + 6, base + 7, 0,
1365
3.61M
                                      0, 0, 0, 0, 0, 0, 0);
1366
3.61M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1367
3.61M
    }
1368
4.51M
    a0 = _mm256_castsi128_si256(a0_x128);
1369
4.51M
    a1 = _mm256_castsi128_si256(a1_x128);
1370
1371
4.51M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1372
4.51M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1373
4.51M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1374
1375
4.51M
    b = _mm256_mullo_epi16(diff, shift);
1376
4.51M
    res = _mm256_add_epi16(a32, b);
1377
4.51M
    res = _mm256_srli_epi16(res, 5);
1378
1379
4.51M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1380
4.51M
    res1 = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1381
4.51M
    dst[r] = _mm256_castsi256_si128(res1);
1382
4.51M
    x += dx;
1383
4.51M
  }
1384
355k
}
1385
1386
static void highbd_dr_prediction_z1_8xN_avx2(int N, uint16_t *dst,
1387
                                             ptrdiff_t stride,
1388
                                             const uint16_t *above,
1389
                                             int upsample_above, int dx,
1390
218k
                                             int bd) {
1391
218k
  __m128i dstvec[32];
1392
218k
  if (bd < 12) {
1393
147k
    highbd_dr_prediction_z1_8xN_internal_avx2(N, dstvec, above, upsample_above,
1394
147k
                                              dx);
1395
147k
  } else {
1396
71.4k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(N, dstvec, above,
1397
71.4k
                                                    upsample_above, dx);
1398
71.4k
  }
1399
2.36M
  for (int i = 0; i < N; i++) {
1400
2.14M
    _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
1401
2.14M
  }
1402
218k
}
1403
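// Note on the upsample_above handling in the 4xN/8xN kernels above: when the
// edge has been 2x upsampled, the prediction needs samples at offsets base,
// base + 2, base + 4, ... (for a0) and base + 1, base + 3, ... (for a1). The
// HighbdEvenOddMaskx / HighbdEvenOddMaskx4 shuffles gather the even positions
// of the loaded vector(s) into a0_x128 and the odd positions into a1_x128, so
// the contiguous loads feed both interpolation operands without a second,
// strided gather.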
1404
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_16xN_internal_avx2(
1405
103k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1406
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1407
103k
  (void)upsample_above;
1408
103k
  const int frac_bits = 6;
1409
103k
  const int max_base_x = ((16 + N) - 1);
1410
1411
  // pre-filter above pixels
1412
  // store in temp buffers:
1413
  //   above[x] * 32 + 16
1414
  //   above[x+1] - above[x]
1415
  // final pixels will be calculated as:
1416
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1417
103k
  __m256i a0, a0_1, a1, a1_1, a32, a16;
1418
103k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1419
1420
103k
  a16 = _mm256_set1_epi32(16);
1421
103k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1422
103k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1423
1424
103k
  int x = dx;
1425
1.23M
  for (int r = 0; r < N; r++) {
1426
1.12M
    __m256i b, res[2], res1;
1427
1428
1.12M
    int base = x >> frac_bits;
1429
1.12M
    if (base >= max_base_x) {
1430
969
      for (int i = r; i < N; ++i) {
1431
782
        dstvec[i] = a_mbase_x;  // save 16 values
1432
782
      }
1433
187
      return;
1434
187
    }
1435
1.12M
    __m256i shift = _mm256_srli_epi32(
1436
1.12M
        _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1437
1438
1.12M
    a0 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base)));
1439
1.12M
    a1 = _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 1)));
1440
1441
1.12M
    diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1442
1.12M
    a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1443
1.12M
    a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1444
1.12M
    b = _mm256_mullo_epi32(diff, shift);
1445
1446
1.12M
    res[0] = _mm256_add_epi32(a32, b);
1447
1.12M
    res[0] = _mm256_srli_epi32(res[0], 5);
1448
1.12M
    res[0] = _mm256_packus_epi32(
1449
1.12M
        res[0], _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1450
1451
1.12M
    int mdif = max_base_x - base;
1452
1.12M
    if (mdif > 8) {
1453
1.12M
      a0_1 =
1454
1.12M
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 8)));
1455
1.12M
      a1_1 =
1456
1.12M
          _mm256_cvtepu16_epi32(_mm_loadu_si128((__m128i *)(above + base + 9)));
1457
1458
1.12M
      diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1459
1.12M
      a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1460
1.12M
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1461
1.12M
      b = _mm256_mullo_epi32(diff, shift);
1462
1463
1.12M
      res[1] = _mm256_add_epi32(a32, b);
1464
1.12M
      res[1] = _mm256_srli_epi32(res[1], 5);
1465
1.12M
      res[1] = _mm256_packus_epi32(
1466
1.12M
          res[1], _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1467
1.12M
    } else {
1468
2.29k
      res[1] = a_mbase_x;
1469
2.29k
    }
1470
1.12M
    res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1471
1.12M
                                   1);  // 16 16bit values
1472
1473
1.12M
    base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1474
1.12M
                                    base + 4, base + 5, base + 6, base + 7,
1475
1.12M
                                    base + 8, base + 9, base + 10, base + 11,
1476
1.12M
                                    base + 12, base + 13, base + 14, base + 15);
1477
1.12M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1478
1.12M
    dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1479
1.12M
    x += dx;
1480
1.12M
  }
1481
103k
}
1482
1483
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_16xN_internal_avx2(
1484
285k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1485
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1486
285k
  (void)upsample_above;
1487
285k
  const int frac_bits = 6;
1488
285k
  const int max_base_x = ((16 + N) - 1);
1489
1490
  // pre-filter above pixels
1491
  // store in temp buffers:
1492
  //   above[x] * 32 + 16
1493
  //   above[x+1] - above[x]
1494
  // final pixels will be calculated as:
1495
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1496
285k
  __m256i a0, a1, a32, a16, c3f;
1497
285k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1498
1499
285k
  a16 = _mm256_set1_epi16(16);
1500
285k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1501
285k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1502
285k
  c3f = _mm256_set1_epi16(0x3f);
1503
1504
285k
  int x = dx;
1505
5.38M
  for (int r = 0; r < N; r++) {
1506
5.10M
    __m256i b, res;
1507
1508
5.10M
    int base = x >> frac_bits;
1509
5.10M
    if (base >= max_base_x) {
1510
2.46k
      for (int i = r; i < N; ++i) {
1511
1.92k
        dstvec[i] = a_mbase_x;  // save 16 values
1512
1.92k
      }
1513
537
      return;
1514
537
    }
1515
5.09M
    __m256i shift =
1516
5.09M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1517
1518
5.09M
    a0 = _mm256_loadu_si256((__m256i *)(above + base));
1519
5.09M
    a1 = _mm256_loadu_si256((__m256i *)(above + base + 1));
1520
1521
5.09M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1522
5.09M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1523
5.09M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1524
5.09M
    b = _mm256_mullo_epi16(diff, shift);
1525
1526
5.09M
    res = _mm256_add_epi16(a32, b);
1527
5.09M
    res = _mm256_srli_epi16(res, 5);  // 16 16bit values
1528
1529
5.09M
    base_inc256 = _mm256_setr_epi16(base, base + 1, base + 2, base + 3,
1530
5.09M
                                    base + 4, base + 5, base + 6, base + 7,
1531
5.09M
                                    base + 8, base + 9, base + 10, base + 11,
1532
5.09M
                                    base + 12, base + 13, base + 14, base + 15);
1533
5.09M
    mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1534
5.09M
    dstvec[r] = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1535
5.09M
    x += dx;
1536
5.09M
  }
1537
285k
}
1538
1539
static void highbd_dr_prediction_z1_16xN_avx2(int N, uint16_t *dst,
1540
                                              ptrdiff_t stride,
1541
                                              const uint16_t *above,
1542
                                              int upsample_above, int dx,
1543
180k
                                              int bd) {
1544
180k
  __m256i dstvec[64];
1545
180k
  if (bd < 12) {
1546
120k
    highbd_dr_prediction_z1_16xN_internal_avx2(N, dstvec, above, upsample_above,
1547
120k
                                               dx);
1548
120k
  } else {
1549
59.6k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(N, dstvec, above,
1550
59.6k
                                                     upsample_above, dx);
1551
59.6k
  }
1552
2.57M
  for (int i = 0; i < N; i++) {
1553
2.39M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
1554
2.39M
  }
1555
180k
}
1556
1557
static AOM_FORCE_INLINE void highbd_dr_prediction_32bit_z1_32xN_internal_avx2(
1558
22.0k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1559
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1560
22.0k
  (void)upsample_above;
1561
22.0k
  const int frac_bits = 6;
1562
22.0k
  const int max_base_x = ((32 + N) - 1);
1563
1564
  // pre-filter above pixels
1565
  // store in temp buffers:
1566
  //   above[x] * 32 + 16
1567
  //   above[x+1] - above[x]
1568
  // final pixels will be calculated as:
1569
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1570
22.0k
  __m256i a0, a0_1, a1, a1_1, a32, a16, c3f;
1571
22.0k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1572
1573
22.0k
  a16 = _mm256_set1_epi32(16);
1574
22.0k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1575
22.0k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1576
22.0k
  c3f = _mm256_set1_epi16(0x3f);
1577
1578
22.0k
  int x = dx;
1579
490k
  for (int r = 0; r < N; r++) {
1580
467k
    __m256i b, res[2], res1;
1581
1582
467k
    int base = x >> frac_bits;
1583
467k
    if (base >= max_base_x) {
1584
0
      for (int i = r; i < N; ++i) {
1585
0
        dstvec[i] = a_mbase_x;  // save 32 values
1586
0
        dstvec[i + N] = a_mbase_x;
1587
0
      }
1588
0
      return;
1589
0
    }
1590
1591
467k
    __m256i shift =
1592
467k
        _mm256_srli_epi32(_mm256_and_si256(_mm256_set1_epi32(x), c3f), 1);
1593
1594
1.40M
    for (int j = 0; j < 32; j += 16) {
1595
935k
      int mdif = max_base_x - (base + j);
1596
935k
      if (mdif <= 0) {
1597
586
        res1 = a_mbase_x;
1598
935k
      } else {
1599
935k
        a0 = _mm256_cvtepu16_epi32(
1600
935k
            _mm_loadu_si128((__m128i *)(above + base + j)));
1601
935k
        a1 = _mm256_cvtepu16_epi32(
1602
935k
            _mm_loadu_si128((__m128i *)(above + base + 1 + j)));
1603
1604
935k
        diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1605
935k
        a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1606
935k
        a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1607
935k
        b = _mm256_mullo_epi32(diff, shift);
1608
1609
935k
        res[0] = _mm256_add_epi32(a32, b);
1610
935k
        res[0] = _mm256_srli_epi32(res[0], 5);
1611
935k
        res[0] = _mm256_packus_epi32(
1612
935k
            res[0],
1613
935k
            _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1614
935k
        if (mdif > 8) {
1615
932k
          a0_1 = _mm256_cvtepu16_epi32(
1616
932k
              _mm_loadu_si128((__m128i *)(above + base + 8 + j)));
1617
932k
          a1_1 = _mm256_cvtepu16_epi32(
1618
932k
              _mm_loadu_si128((__m128i *)(above + base + 9 + j)));
1619
1620
932k
          diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1621
932k
          a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1622
932k
          a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1623
932k
          b = _mm256_mullo_epi32(diff, shift);
1624
1625
932k
          res[1] = _mm256_add_epi32(a32, b);
1626
932k
          res[1] = _mm256_srli_epi32(res[1], 5);
1627
932k
          res[1] = _mm256_packus_epi32(
1628
932k
              res[1],
1629
932k
              _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1630
932k
        } else {
1631
3.15k
          res[1] = a_mbase_x;
1632
3.15k
        }
1633
935k
        res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1634
935k
                                       1);  // 16 16bit values
1635
935k
        base_inc256 = _mm256_setr_epi16(
1636
935k
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1637
935k
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1638
935k
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1639
935k
            base + j + 13, base + j + 14, base + j + 15);
1640
1641
935k
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1642
935k
        res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1643
935k
      }
1644
935k
      if (!j) {
1645
467k
        dstvec[r] = res1;
1646
467k
      } else {
1647
467k
        dstvec[r + N] = res1;
1648
467k
      }
1649
935k
    }
1650
467k
    x += dx;
1651
467k
  }
1652
22.0k
}
1653
1654
static AOM_FORCE_INLINE void highbd_dr_prediction_z1_32xN_internal_avx2(
1655
192k
    int N, __m256i *dstvec, const uint16_t *above, int upsample_above, int dx) {
1656
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1657
192k
  (void)upsample_above;
1658
192k
  const int frac_bits = 6;
1659
192k
  const int max_base_x = ((32 + N) - 1);
1660
1661
  // pre-filter above pixels
1662
  // store in temp buffers:
1663
  //   above[x] * 32 + 16
1664
  //   above[x+1] - above[x]
1665
  // final pixels will be calculated as:
1666
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1667
192k
  __m256i a0, a1, a32, a16, c3f;
1668
192k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1669
1670
192k
  a16 = _mm256_set1_epi16(16);
1671
192k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1672
192k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1673
192k
  c3f = _mm256_set1_epi16(0x3f);
1674
1675
192k
  int x = dx;
1676
5.35M
  for (int r = 0; r < N; r++) {
1677
5.16M
    __m256i b, res;
1678
1679
5.16M
    int base = x >> frac_bits;
1680
5.16M
    if (base >= max_base_x) {
1681
0
      for (int i = r; i < N; ++i) {
1682
0
        dstvec[i] = a_mbase_x;  // save 32 values
1683
0
        dstvec[i + N] = a_mbase_x;
1684
0
      }
1685
0
      return;
1686
0
    }
1687
1688
5.16M
    __m256i shift =
1689
5.16M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1690
1691
15.4M
    for (int j = 0; j < 32; j += 16) {
1692
10.3M
      int mdif = max_base_x - (base + j);
1693
10.3M
      if (mdif <= 0) {
1694
773
        res = a_mbase_x;
1695
10.3M
      } else {
1696
10.3M
        a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
1697
10.3M
        a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
1698
1699
10.3M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1700
10.3M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1701
10.3M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1702
10.3M
        b = _mm256_mullo_epi16(diff, shift);
1703
1704
10.3M
        res = _mm256_add_epi16(a32, b);
1705
10.3M
        res = _mm256_srli_epi16(res, 5);
1706
1707
10.3M
        base_inc256 = _mm256_setr_epi16(
1708
10.3M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1709
10.3M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1710
10.3M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1711
10.3M
            base + j + 13, base + j + 14, base + j + 15);
1712
1713
10.3M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1714
10.3M
        res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1715
10.3M
      }
1716
10.3M
      if (!j) {
1717
5.16M
        dstvec[r] = res;
1718
5.16M
      } else {
1719
5.16M
        dstvec[r + N] = res;
1720
5.16M
      }
1721
10.3M
    }
1722
5.16M
    x += dx;
1723
5.16M
  }
1724
192k
}
1725
1726
static void highbd_dr_prediction_z1_32xN_avx2(int N, uint16_t *dst,
1727
                                              ptrdiff_t stride,
1728
                                              const uint16_t *above,
1729
                                              int upsample_above, int dx,
1730
89.2k
                                              int bd) {
1731
89.2k
  __m256i dstvec[128];
1732
89.2k
  if (bd < 12) {
1733
77.0k
    highbd_dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above,
1734
77.0k
                                               dx);
1735
77.0k
  } else {
1736
12.1k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(N, dstvec, above,
1737
12.1k
                                                     upsample_above, dx);
1738
12.1k
  }
1739
2.44M
  for (int i = 0; i < N; i++) {
1740
2.35M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
1741
2.35M
    _mm256_storeu_si256((__m256i *)(dst + stride * i + 16), dstvec[i + N]);
1742
2.35M
  }
1743
89.2k
}
1744
1745
static void highbd_dr_prediction_32bit_z1_64xN_avx2(int N, uint16_t *dst,
1746
                                                    ptrdiff_t stride,
1747
                                                    const uint16_t *above,
1748
                                                    int upsample_above,
1749
19.7k
                                                    int dx) {
1750
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1751
19.7k
  (void)upsample_above;
1752
19.7k
  const int frac_bits = 6;
1753
19.7k
  const int max_base_x = ((64 + N) - 1);
1754
1755
  // pre-filter above pixels
1756
  // store in temp buffers:
1757
  //   above[x] * 32 + 16
1758
  //   above[x+1] - above[x]
1759
  // final pixels will be calculated as:
1760
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1761
19.7k
  __m256i a0, a0_1, a1, a1_1, a32, a16;
1762
19.7k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1763
1764
19.7k
  a16 = _mm256_set1_epi32(16);
1765
19.7k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1766
19.7k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1767
1768
19.7k
  int x = dx;
1769
1.20M
  for (int r = 0; r < N; r++, dst += stride) {
1770
1.18M
    __m256i b, res[2], res1;
1771
1772
1.18M
    int base = x >> frac_bits;
1773
1.18M
    if (base >= max_base_x) {
1774
0
      for (int i = r; i < N; ++i) {
1775
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 64 values
1776
0
        _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
1777
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
1778
0
        _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
1779
0
        dst += stride;
1780
0
      }
1781
0
      return;
1782
0
    }
1783
1784
1.18M
    __m256i shift = _mm256_srli_epi32(
1785
1.18M
        _mm256_and_si256(_mm256_set1_epi32(x), _mm256_set1_epi32(0x3f)), 1);
1786
1787
1.18M
    __m128i a0_128, a0_1_128, a1_128, a1_1_128;
1788
5.94M
    for (int j = 0; j < 64; j += 16) {
1789
4.75M
      int mdif = max_base_x - (base + j);
1790
4.75M
      if (mdif <= 0) {
1791
3.33k
        _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
1792
4.75M
      } else {
1793
4.75M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
1794
4.75M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
1795
4.75M
        a0 = _mm256_cvtepu16_epi32(a0_128);
1796
4.75M
        a1 = _mm256_cvtepu16_epi32(a1_128);
1797
1798
4.75M
        diff = _mm256_sub_epi32(a1, a0);   // a[x+1] - a[x]
1799
4.75M
        a32 = _mm256_slli_epi32(a0, 5);    // a[x] * 32
1800
4.75M
        a32 = _mm256_add_epi32(a32, a16);  // a[x] * 32 + 16
1801
4.75M
        b = _mm256_mullo_epi32(diff, shift);
1802
1803
4.75M
        res[0] = _mm256_add_epi32(a32, b);
1804
4.75M
        res[0] = _mm256_srli_epi32(res[0], 5);
1805
4.75M
        res[0] = _mm256_packus_epi32(
1806
4.75M
            res[0],
1807
4.75M
            _mm256_castsi128_si256(_mm256_extracti128_si256(res[0], 1)));
1808
4.75M
        if (mdif > 8) {
1809
4.74M
          a0_1_128 = _mm_loadu_si128((__m128i *)(above + base + 8 + j));
1810
4.74M
          a1_1_128 = _mm_loadu_si128((__m128i *)(above + base + 9 + j));
1811
4.74M
          a0_1 = _mm256_cvtepu16_epi32(a0_1_128);
1812
4.74M
          a1_1 = _mm256_cvtepu16_epi32(a1_1_128);
1813
1814
4.74M
          diff = _mm256_sub_epi32(a1_1, a0_1);  // a[x+1] - a[x]
1815
4.74M
          a32 = _mm256_slli_epi32(a0_1, 5);     // a[x] * 32
1816
4.74M
          a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
1817
4.74M
          b = _mm256_mullo_epi32(diff, shift);
1818
1819
4.74M
          res[1] = _mm256_add_epi32(a32, b);
1820
4.74M
          res[1] = _mm256_srli_epi32(res[1], 5);
1821
4.74M
          res[1] = _mm256_packus_epi32(
1822
4.74M
              res[1],
1823
4.74M
              _mm256_castsi128_si256(_mm256_extracti128_si256(res[1], 1)));
1824
4.74M
        } else {
1825
5.65k
          res[1] = a_mbase_x;
1826
5.65k
        }
1827
4.75M
        res1 = _mm256_inserti128_si256(res[0], _mm256_castsi256_si128(res[1]),
1828
4.75M
                                       1);  // 16 16bit values
1829
4.75M
        base_inc256 = _mm256_setr_epi16(
1830
4.75M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1831
4.75M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1832
4.75M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1833
4.75M
            base + j + 13, base + j + 14, base + j + 15);
1834
1835
4.75M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1836
4.75M
        res1 = _mm256_blendv_epi8(a_mbase_x, res1, mask256);
1837
4.75M
        _mm256_storeu_si256((__m256i *)(dst + j), res1);
1838
4.75M
      }
1839
4.75M
    }
1840
1.18M
    x += dx;
1841
1.18M
  }
1842
19.7k
}
1843
1844
static void highbd_dr_prediction_z1_64xN_avx2(int N, uint16_t *dst,
1845
                                              ptrdiff_t stride,
1846
                                              const uint16_t *above,
1847
36.4k
                                              int upsample_above, int dx) {
1848
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
1849
36.4k
  (void)upsample_above;
1850
36.4k
  const int frac_bits = 6;
1851
36.4k
  const int max_base_x = ((64 + N) - 1);
1852
1853
  // pre-filter above pixels
1854
  // store in temp buffers:
1855
  //   above[x] * 32 + 16
1856
  //   above[x+1] - above[x]
1857
  // final pixels will be calculated as:
1858
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1859
36.4k
  __m256i a0, a1, a32, a16, c3f;
1860
36.4k
  __m256i a_mbase_x, diff, max_base_x256, base_inc256, mask256;
1861
1862
36.4k
  a16 = _mm256_set1_epi16(16);
1863
36.4k
  a_mbase_x = _mm256_set1_epi16(above[max_base_x]);
1864
36.4k
  max_base_x256 = _mm256_set1_epi16(max_base_x);
1865
36.4k
  c3f = _mm256_set1_epi16(0x3f);
1866
1867
36.4k
  int x = dx;
1868
1.89M
  for (int r = 0; r < N; r++, dst += stride) {
1869
1.85M
    __m256i b, res;
1870
1871
1.85M
    int base = x >> frac_bits;
1872
1.85M
    if (base >= max_base_x) {
1873
0
      for (int i = r; i < N; ++i) {
1874
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 64 values
1875
0
        _mm256_storeu_si256((__m256i *)(dst + 16), a_mbase_x);
1876
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
1877
0
        _mm256_storeu_si256((__m256i *)(dst + 48), a_mbase_x);
1878
0
        dst += stride;
1879
0
      }
1880
0
      return;
1881
0
    }
1882
1883
1.85M
    __m256i shift =
1884
1.85M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
1885
1886
9.28M
    for (int j = 0; j < 64; j += 16) {
1887
7.42M
      int mdif = max_base_x - (base + j);
1888
7.42M
      if (mdif <= 0) {
1889
3.11k
        _mm256_storeu_si256((__m256i *)(dst + j), a_mbase_x);
1890
7.42M
      } else {
1891
7.42M
        a0 = _mm256_loadu_si256((__m256i *)(above + base + j));
1892
7.42M
        a1 = _mm256_loadu_si256((__m256i *)(above + base + 1 + j));
1893
1894
7.42M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
1895
7.42M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
1896
7.42M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
1897
7.42M
        b = _mm256_mullo_epi16(diff, shift);
1898
1899
7.42M
        res = _mm256_add_epi16(a32, b);
1900
7.42M
        res = _mm256_srli_epi16(res, 5);
1901
1902
7.42M
        base_inc256 = _mm256_setr_epi16(
1903
7.42M
            base + j, base + j + 1, base + j + 2, base + j + 3, base + j + 4,
1904
7.42M
            base + j + 5, base + j + 6, base + j + 7, base + j + 8,
1905
7.42M
            base + j + 9, base + j + 10, base + j + 11, base + j + 12,
1906
7.42M
            base + j + 13, base + j + 14, base + j + 15);
1907
1908
7.42M
        mask256 = _mm256_cmpgt_epi16(max_base_x256, base_inc256);
1909
7.42M
        res = _mm256_blendv_epi8(a_mbase_x, res, mask256);
1910
7.42M
        _mm256_storeu_si256((__m256i *)(dst + j), res);  // 16 16bit values
1911
7.42M
      }
1912
7.42M
    }
1913
1.85M
    x += dx;
1914
1.85M
  }
1915
36.4k
}
1916
1917
// Directional prediction, zone 1: 0 < angle < 90
1918
void av1_highbd_dr_prediction_z1_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
1919
                                      int bh, const uint16_t *above,
1920
                                      const uint16_t *left, int upsample_above,
1921
657k
                                      int dx, int dy, int bd) {
1922
657k
  (void)left;
1923
657k
  (void)dy;
1924
1925
657k
  switch (bw) {
1926
145k
    case 4:
1927
145k
      highbd_dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above,
1928
145k
                                       dx, bd);
1929
145k
      break;
1930
218k
    case 8:
1931
218k
      highbd_dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above,
1932
218k
                                       dx, bd);
1933
218k
      break;
1934
180k
    case 16:
1935
180k
      highbd_dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above,
1936
180k
                                        dx, bd);
1937
180k
      break;
1938
86.2k
    case 32:
1939
86.2k
      highbd_dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above,
1940
86.2k
                                        dx, bd);
1941
86.2k
      break;
1942
26.9k
    case 64:
1943
26.9k
      if (bd < 12) {
1944
12.6k
        highbd_dr_prediction_z1_64xN_avx2(bh, dst, stride, above,
1945
12.6k
                                          upsample_above, dx);
1946
14.3k
      } else {
1947
14.3k
        highbd_dr_prediction_32bit_z1_64xN_avx2(bh, dst, stride, above,
1948
14.3k
                                                upsample_above, dx);
1949
14.3k
      }
1950
26.9k
      break;
1951
0
    default: break;
1952
657k
  }
1953
657k
  return;
1954
657k
}
1955
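// A hedged usage sketch of the zone-1 dispatcher above (illustrative values,
// not taken from this file; the wrapper name is hypothetical). Zone 1 reads
// only the above edge, so `left` is unused here (the dispatcher casts it to
// void) and `dy` is ignored; dx = 64 roughly corresponds to a 45-degree
// prediction angle in AV1's 1/64-pel step convention.
static void example_call_z1_16x16(const uint16_t *above_edge) {
  // above_edge must be readable up to index max_base_x = bw + bh - 1 = 31,
  // plus the tail padding the libaom callers provide for SIMD over-reads.
  uint16_t pred[16 * 16];
  av1_highbd_dr_prediction_z1_avx2(pred, /*stride=*/16, /*bw=*/16, /*bh=*/16,
                                   above_edge, /*left=*/NULL,
                                   /*upsample_above=*/0, /*dx=*/64, /*dy=*/0,
                                   /*bd=*/10);
  (void)pred;  // the prediction would normally be consumed by the encoder
}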
1956
static void highbd_transpose_TX_16X16(const uint16_t *src, ptrdiff_t pitchSrc,
1957
416k
                                      uint16_t *dst, ptrdiff_t pitchDst) {
1958
416k
  __m256i r[16];
1959
416k
  __m256i d[16];
1960
7.08M
  for (int j = 0; j < 16; j++) {
1961
6.67M
    r[j] = _mm256_loadu_si256((__m256i *)(src + j * pitchSrc));
1962
6.67M
  }
1963
416k
  highbd_transpose16x16_avx2(r, d);
1964
7.08M
  for (int j = 0; j < 16; j++) {
1965
6.67M
    _mm256_storeu_si256((__m256i *)(dst + j * pitchDst), d[j]);
1966
6.67M
  }
1967
416k
}
1968
1969
static void highbd_transpose(const uint16_t *src, ptrdiff_t pitchSrc,
1970
                             uint16_t *dst, ptrdiff_t pitchDst, int width,
1971
32.2k
                             int height) {
1972
155k
  for (int j = 0; j < height; j += 16)
1973
539k
    for (int i = 0; i < width; i += 16)
1974
416k
      highbd_transpose_TX_16X16(src + i * pitchSrc + j, pitchSrc,
1975
416k
                                dst + j * pitchDst + i, pitchDst);
1976
32.2k
}
1977
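// Note on highbd_transpose above: it walks the region in 16x16 tiles, so both
// width and height are assumed to be multiples of 16. The tile whose top-left
// element is src[i * pitchSrc + j] is written, transposed, to
// dst[j * pitchDst + i]; this is the usual way libaom's zone-3 path reuses the
// zone-1 kernels on a transposed intermediate buffer.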
1978
static void highbd_dr_prediction_32bit_z2_Nx4_avx2(
1979
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
1980
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
1981
137k
    int dy) {
1982
137k
  const int min_base_x = -(1 << upsample_above);
1983
137k
  const int min_base_y = -(1 << upsample_left);
1984
137k
  const int frac_bits_x = 6 - upsample_above;
1985
137k
  const int frac_bits_y = 6 - upsample_left;
1986
1987
137k
  assert(dx > 0);
1988
  // pre-filter above pixels
1989
  // store in temp buffers:
1990
  //   above[x] * 32 + 16
1991
  //   above[x+1] - above[x]
1992
  // final pixels will be calculated as:
1993
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
1994
137k
  __m256i a0_x, a1_x, a32, a16;
1995
137k
  __m256i diff;
1996
137k
  __m128i c3f, min_base_y128;
1997
1998
137k
  a16 = _mm256_set1_epi32(16);
1999
137k
  c3f = _mm_set1_epi32(0x3f);
2000
137k
  min_base_y128 = _mm_set1_epi32(min_base_y);
2001
2002
927k
  for (int r = 0; r < N; r++) {
2003
790k
    __m256i b, res, shift;
2004
790k
    __m128i resx, resy, resxy;
2005
790k
    __m128i a0_x128, a1_x128;
2006
790k
    int y = r + 1;
2007
790k
    int base_x = (-y * dx) >> frac_bits_x;
2008
790k
    int base_shift = 0;
2009
790k
    if (base_x < (min_base_x - 1)) {
2010
584k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2011
584k
    }
2012
790k
    int base_min_diff =
2013
790k
        (min_base_x - base_x + upsample_above) >> upsample_above;
2014
790k
    if (base_min_diff > 4) {
2015
460k
      base_min_diff = 4;
2016
460k
    } else {
2017
330k
      if (base_min_diff < 0) base_min_diff = 0;
2018
330k
    }
2019
2020
790k
    if (base_shift > 3) {
2021
460k
      a0_x = _mm256_setzero_si256();
2022
460k
      a1_x = _mm256_setzero_si256();
2023
460k
      shift = _mm256_setzero_si256();
2024
460k
    } else {
2025
330k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2026
330k
      if (upsample_above) {
2027
127k
        a0_x128 = _mm_shuffle_epi8(a0_x128,
2028
127k
                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
2029
127k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
2030
2031
127k
        shift = _mm256_castsi128_si256(_mm_srli_epi32(
2032
127k
            _mm_and_si128(
2033
127k
                _mm_slli_epi32(
2034
127k
                    _mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
2035
127k
                                   (2 << 6) - y * dx, (3 << 6) - y * dx),
2036
127k
                    upsample_above),
2037
127k
                c3f),
2038
127k
            1));
2039
202k
      } else {
2040
202k
        a0_x128 =
2041
202k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2042
202k
        a1_x128 = _mm_srli_si128(a0_x128, 2);
2043
2044
202k
        shift = _mm256_castsi128_si256(_mm_srli_epi32(
2045
202k
            _mm_and_si128(_mm_setr_epi32(-y * dx, (1 << 6) - y * dx,
2046
202k
                                         (2 << 6) - y * dx, (3 << 6) - y * dx),
2047
202k
                          c3f),
2048
202k
            1));
2049
202k
      }
2050
330k
      a0_x = _mm256_cvtepu16_epi32(a0_x128);
2051
330k
      a1_x = _mm256_cvtepu16_epi32(a1_x128);
2052
330k
    }
2053
    // y calc
2054
790k
    __m128i a0_y, a1_y, shifty;
2055
790k
    if (base_x < min_base_x) {
2056
635k
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2057
635k
      DECLARE_ALIGNED(32, int, base_y_c[4]);
2058
635k
      r6 = _mm_set1_epi32(r << 6);
2059
635k
      dy128 = _mm_set1_epi32(dy);
2060
635k
      c1234 = _mm_setr_epi32(1, 2, 3, 4);
2061
635k
      y_c128 = _mm_sub_epi32(r6, _mm_mullo_epi32(c1234, dy128));
2062
635k
      base_y_c128 = _mm_srai_epi32(y_c128, frac_bits_y);
2063
635k
      mask128 = _mm_cmpgt_epi32(min_base_y128, base_y_c128);
2064
635k
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2065
635k
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2066
2067
635k
      a0_y = _mm_setr_epi32(left[base_y_c[0]], left[base_y_c[1]],
2068
635k
                            left[base_y_c[2]], left[base_y_c[3]]);
2069
635k
      a1_y = _mm_setr_epi32(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2070
635k
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1]);
2071
2072
635k
      if (upsample_left) {
2073
147k
        shifty = _mm_srli_epi32(
2074
147k
            _mm_and_si128(_mm_slli_epi32(y_c128, upsample_left), c3f), 1);
2075
487k
      } else {
2076
487k
        shifty = _mm_srli_epi32(_mm_and_si128(y_c128, c3f), 1);
2077
487k
      }
2078
635k
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2079
635k
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2080
635k
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2081
635k
    }
2082
2083
790k
    diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2084
790k
    a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2085
790k
    a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2086
2087
790k
    b = _mm256_mullo_epi32(diff, shift);
2088
790k
    res = _mm256_add_epi32(a32, b);
2089
790k
    res = _mm256_srli_epi32(res, 5);
2090
2091
790k
    resx = _mm256_castsi256_si128(res);
2092
790k
    resx = _mm_packus_epi32(resx, resx);
2093
2094
790k
    resy = _mm256_extracti128_si256(res, 1);
2095
790k
    resy = _mm_packus_epi32(resy, resy);
2096
2097
790k
    resxy =
2098
790k
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2099
790k
    _mm_storel_epi64((__m128i *)(dst), resxy);
2100
790k
    dst += stride;
2101
790k
  }
2102
137k
}
2103
2104
static void highbd_dr_prediction_z2_Nx4_avx2(
2105
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2106
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2107
154k
    int dy) {
2108
154k
  const int min_base_x = -(1 << upsample_above);
2109
154k
  const int min_base_y = -(1 << upsample_left);
2110
154k
  const int frac_bits_x = 6 - upsample_above;
2111
154k
  const int frac_bits_y = 6 - upsample_left;
2112
2113
154k
  assert(dx > 0);
2114
  // pre-filter above pixels
2115
  // store in temp buffers:
2116
  //   above[x] * 32 + 16
2117
  //   above[x+1] - above[x]
2118
  // final pixels will be calculated as:
2119
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2120
154k
  __m256i a0_x, a1_x, a32, a16;
2121
154k
  __m256i diff;
2122
154k
  __m128i c3f, min_base_y128;
2123
2124
154k
  a16 = _mm256_set1_epi16(16);
2125
154k
  c3f = _mm_set1_epi16(0x3f);
2126
154k
  min_base_y128 = _mm_set1_epi16(min_base_y);
2127
2128
1.26M
  for (int r = 0; r < N; r++) {
2129
1.10M
    __m256i b, res, shift;
2130
1.10M
    __m128i resx, resy, resxy;
2131
1.10M
    __m128i a0_x128, a1_x128;
2132
1.10M
    int y = r + 1;
2133
1.10M
    int base_x = (-y * dx) >> frac_bits_x;
2134
1.10M
    int base_shift = 0;
2135
1.10M
    if (base_x < (min_base_x - 1)) {
2136
810k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2137
810k
    }
2138
1.10M
    int base_min_diff =
2139
1.10M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2140
1.10M
    if (base_min_diff > 4) {
2141
527k
      base_min_diff = 4;
2142
578k
    } else {
2143
578k
      if (base_min_diff < 0) base_min_diff = 0;
2144
578k
    }
2145
2146
1.10M
    if (base_shift > 3) {
2147
527k
      a0_x = _mm256_setzero_si256();
2148
527k
      a1_x = _mm256_setzero_si256();
2149
527k
      shift = _mm256_setzero_si256();
2150
578k
    } else {
2151
578k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2152
578k
      if (upsample_above) {
2153
173k
        a0_x128 = _mm_shuffle_epi8(a0_x128,
2154
173k
                                   *(__m128i *)HighbdEvenOddMaskx4[base_shift]);
2155
173k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
2156
2157
173k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2158
173k
            _mm_and_si128(
2159
173k
                _mm_slli_epi16(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2160
173k
                                              (2 << 6) - y * dx,
2161
173k
                                              (3 << 6) - y * dx, 0, 0, 0, 0),
2162
173k
                               upsample_above),
2163
173k
                c3f),
2164
173k
            1));
2165
404k
      } else {
2166
404k
        a0_x128 =
2167
404k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2168
404k
        a1_x128 = _mm_srli_si128(a0_x128, 2);
2169
2170
404k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2171
404k
            _mm_and_si128(
2172
404k
                _mm_setr_epi16(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
2173
404k
                               (3 << 6) - y * dx, 0, 0, 0, 0),
2174
404k
                c3f),
2175
404k
            1));
2176
404k
      }
2177
578k
      a0_x = _mm256_castsi128_si256(a0_x128);
2178
578k
      a1_x = _mm256_castsi128_si256(a1_x128);
2179
578k
    }
2180
    // y calc
2181
1.10M
    __m128i a0_y, a1_y, shifty;
2182
1.10M
    if (base_x < min_base_x) {
2183
928k
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2184
928k
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
2185
928k
      r6 = _mm_set1_epi16(r << 6);
2186
928k
      dy128 = _mm_set1_epi16(dy);
2187
928k
      c1234 = _mm_setr_epi16(1, 2, 3, 4, 0, 0, 0, 0);
2188
928k
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
2189
928k
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
2190
928k
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
2191
928k
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2192
928k
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2193
2194
928k
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
2195
928k
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
2196
928k
      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2197
928k
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1], 0, 0,
2198
928k
                            0, 0);
2199
2200
928k
      if (upsample_left) {
2201
258k
        shifty = _mm_srli_epi16(
2202
258k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
2203
669k
      } else {
2204
669k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
2205
669k
      }
2206
928k
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2207
928k
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2208
928k
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2209
928k
    }
2210
2211
1.10M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2212
1.10M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2213
1.10M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2214
2215
1.10M
    b = _mm256_mullo_epi16(diff, shift);
2216
1.10M
    res = _mm256_add_epi16(a32, b);
2217
1.10M
    res = _mm256_srli_epi16(res, 5);
2218
2219
1.10M
    resx = _mm256_castsi256_si128(res);
2220
1.10M
    resy = _mm256_extracti128_si256(res, 1);
2221
1.10M
    resxy =
2222
1.10M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2223
1.10M
    _mm_storel_epi64((__m128i *)(dst), resxy);
2224
1.10M
    dst += stride;
2225
1.10M
  }
2226
154k
}
2227
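// A minimal scalar sketch of the zone-2 blend that the z2 kernels above and
// below vectorize (the helper name is hypothetical and not part of libaom).
// Columns whose projection onto the top edge falls left of min_base_x are
// predicted from left[]; the rest are predicted from above[], which is the
// per-row split the HighbdBaseMask blend implements.
static void highbd_dr_z2_scalar_sketch(int W, int N, uint16_t *dst,
                                       ptrdiff_t stride, const uint16_t *above,
                                       const uint16_t *left,
                                       int upsample_above, int upsample_left,
                                       int dx, int dy) {
  const int min_base_x = -(1 << upsample_above);
  const int frac_bits_x = 6 - upsample_above;
  const int frac_bits_y = 6 - upsample_left;
  for (int r = 0; r < N; r++, dst += stride) {
    for (int c = 0; c < W; c++) {
      const int x = (c << 6) - (r + 1) * dx;  // projection onto the top edge
      const int base_x = x >> frac_bits_x;
      int val;
      if (base_x >= min_base_x) {
        const int shift = ((x * (1 << upsample_above)) & 0x3f) >> 1;
        val = (above[base_x] * 32 + 16 +
               (above[base_x + 1] - above[base_x]) * shift) >> 5;
      } else {
        const int y = (r << 6) - (c + 1) * dy;  // projection onto the left edge
        // The AVX2 kernels additionally clamp base_y below min_base_y before
        // indexing left[]; the sketch assumes in-range inputs.
        const int base_y = y >> frac_bits_y;
        const int shift = ((y * (1 << upsample_left)) & 0x3f) >> 1;
        val = (left[base_y] * 32 + 16 +
               (left[base_y + 1] - left[base_y]) * shift) >> 5;
      }
      dst[c] = (uint16_t)val;
    }
  }
}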
2228
static void highbd_dr_prediction_32bit_z2_Nx8_avx2(
2229
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2230
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2231
113k
    int dy) {
2232
113k
  const int min_base_x = -(1 << upsample_above);
2233
113k
  const int min_base_y = -(1 << upsample_left);
2234
113k
  const int frac_bits_x = 6 - upsample_above;
2235
113k
  const int frac_bits_y = 6 - upsample_left;
2236
2237
  // pre-filter above pixels
2238
  // store in temp buffers:
2239
  //   above[x] * 32 + 16
2240
  //   above[x+1] - above[x]
2241
  // final pixels will be calculated as:
2242
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2243
113k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c3f, min_base_y256;
2244
113k
  __m256i diff;
2245
113k
  __m128i a0_x128, a1_x128;
2246
2247
113k
  a16 = _mm256_set1_epi32(16);
2248
113k
  c3f = _mm256_set1_epi32(0x3f);
2249
113k
  min_base_y256 = _mm256_set1_epi32(min_base_y);
2250
2251
1.24M
  for (int r = 0; r < N; r++) {
2252
1.13M
    __m256i b, res, shift;
2253
1.13M
    __m128i resx, resy, resxy;
2254
1.13M
    int y = r + 1;
2255
1.13M
    int base_x = (-y * dx) >> frac_bits_x;
2256
1.13M
    int base_shift = 0;
2257
1.13M
    if (base_x < (min_base_x - 1)) {
2258
875k
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2259
875k
    }
2260
1.13M
    int base_min_diff =
2261
1.13M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2262
1.13M
    if (base_min_diff > 8) {
2263
538k
      base_min_diff = 8;
2264
592k
    } else {
2265
592k
      if (base_min_diff < 0) base_min_diff = 0;
2266
592k
    }
2267
2268
1.13M
    if (base_shift > 7) {
2269
538k
      resx = _mm_setzero_si128();
2270
592k
    } else {
2271
592k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2272
592k
      if (upsample_above) {
2273
80.4k
        __m128i mask, atmp0, atmp1, atmp2, atmp3;
2274
80.4k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
2275
80.4k
        atmp0 = _mm_shuffle_epi8(a0_x128,
2276
80.4k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2277
80.4k
        atmp1 = _mm_shuffle_epi8(a1_x128,
2278
80.4k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2279
80.4k
        atmp2 = _mm_shuffle_epi8(
2280
80.4k
            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2281
80.4k
        atmp3 = _mm_shuffle_epi8(
2282
80.4k
            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2283
80.4k
        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
2284
80.4k
                              _mm_set1_epi8(15));
2285
80.4k
        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
2286
80.4k
        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
2287
80.4k
                              _mm_set1_epi8(15));
2288
80.4k
        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
2289
80.4k
        shift = _mm256_srli_epi32(
2290
80.4k
            _mm256_and_si256(
2291
80.4k
                _mm256_slli_epi32(
2292
80.4k
                    _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx,
2293
80.4k
                                      (2 << 6) - y * dx, (3 << 6) - y * dx,
2294
80.4k
                                      (4 << 6) - y * dx, (5 << 6) - y * dx,
2295
80.4k
                                      (6 << 6) - y * dx, (7 << 6) - y * dx),
2296
80.4k
                    upsample_above),
2297
80.4k
                c3f),
2298
80.4k
            1);
2299
512k
      } else {
2300
512k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
2301
512k
        a0_x128 =
2302
512k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2303
512k
        a1_x128 =
2304
512k
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2305
2306
512k
        shift = _mm256_srli_epi32(
2307
512k
            _mm256_and_si256(
2308
512k
                _mm256_setr_epi32(-y * dx, (1 << 6) - y * dx, (2 << 6) - y * dx,
2309
512k
                                  (3 << 6) - y * dx, (4 << 6) - y * dx,
2310
512k
                                  (5 << 6) - y * dx, (6 << 6) - y * dx,
2311
512k
                                  (7 << 6) - y * dx),
2312
512k
                c3f),
2313
512k
            1);
2314
512k
      }
2315
592k
      a0_x = _mm256_cvtepu16_epi32(a0_x128);
2316
592k
      a1_x = _mm256_cvtepu16_epi32(a1_x128);
2317
2318
592k
      diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2319
592k
      a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2320
592k
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2321
2322
592k
      b = _mm256_mullo_epi32(diff, shift);
2323
592k
      res = _mm256_add_epi32(a32, b);
2324
592k
      res = _mm256_srli_epi32(res, 5);
2325
2326
592k
      resx = _mm256_castsi256_si128(_mm256_packus_epi32(
2327
592k
          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
2328
592k
    }
2329
    // y calc
2330
1.13M
    if (base_x < min_base_x) {
2331
966k
      DECLARE_ALIGNED(32, int, base_y_c[8]);
2332
966k
      __m256i r6, c256, dy256, y_c256, base_y_c256, mask256;
2333
966k
      r6 = _mm256_set1_epi32(r << 6);
2334
966k
      dy256 = _mm256_set1_epi32(dy);
2335
966k
      c256 = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
2336
966k
      y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2337
966k
      base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
2338
966k
      mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2339
966k
      base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2340
966k
      _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2341
2342
966k
      a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2343
966k
          left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2344
966k
          left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2345
966k
          left[base_y_c[6]], left[base_y_c[7]]));
2346
966k
      a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2347
966k
          left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
2348
966k
          left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2349
966k
          left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
2350
2351
966k
      if (upsample_left) {
2352
106k
        shift = _mm256_srli_epi32(
2353
106k
            _mm256_and_si256(_mm256_slli_epi32((y_c256), upsample_left), c3f),
2354
106k
            1);
2355
860k
      } else {
2356
860k
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
2357
860k
      }
2358
966k
      diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2359
966k
      a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2360
966k
      a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2361
2362
966k
      b = _mm256_mullo_epi32(diff, shift);
2363
966k
      res = _mm256_add_epi32(a32, b);
2364
966k
      res = _mm256_srli_epi32(res, 5);
2365
2366
966k
      resy = _mm256_castsi256_si128(_mm256_packus_epi32(
2367
966k
          res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
2368
966k
    } else {
2369
164k
      resy = resx;
2370
164k
    }
2371
1.13M
    resxy =
2372
1.13M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2373
1.13M
    _mm_storeu_si128((__m128i *)(dst), resxy);
2374
1.13M
    dst += stride;
2375
1.13M
  }
2376
113k
}
2377
2378
static void highbd_dr_prediction_z2_Nx8_avx2(
2379
    int N, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2380
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2381
256k
    int dy) {
2382
256k
  const int min_base_x = -(1 << upsample_above);
2383
256k
  const int min_base_y = -(1 << upsample_left);
2384
256k
  const int frac_bits_x = 6 - upsample_above;
2385
256k
  const int frac_bits_y = 6 - upsample_left;
2386
2387
  // pre-filter above pixels
2388
  // store in temp buffers:
2389
  //   above[x] * 32 + 16
2390
  //   above[x+1] - above[x]
2391
  // final pixels will be calculated as:
2392
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2393
256k
  __m128i c3f, min_base_y128;
2394
256k
  __m256i a0_x, a1_x, diff, a32, a16;
2395
256k
  __m128i a0_x128, a1_x128;
2396
2397
256k
  a16 = _mm256_set1_epi16(16);
2398
256k
  c3f = _mm_set1_epi16(0x3f);
2399
256k
  min_base_y128 = _mm_set1_epi16(min_base_y);
2400
2401
2.82M
  for (int r = 0; r < N; r++) {
2402
2.56M
    __m256i b, res, shift;
2403
2.56M
    __m128i resx, resy, resxy;
2404
2.56M
    int y = r + 1;
2405
2.56M
    int base_x = (-y * dx) >> frac_bits_x;
2406
2.56M
    int base_shift = 0;
2407
2.56M
    if (base_x < (min_base_x - 1)) {
2408
1.90M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
2409
1.90M
    }
2410
2.56M
    int base_min_diff =
2411
2.56M
        (min_base_x - base_x + upsample_above) >> upsample_above;
2412
2.56M
    if (base_min_diff > 8) {
2413
1.12M
      base_min_diff = 8;
2414
1.44M
    } else {
2415
1.44M
      if (base_min_diff < 0) base_min_diff = 0;
2416
1.44M
    }
2417
2418
2.56M
    if (base_shift > 7) {
2419
1.12M
      a0_x = _mm256_setzero_si256();
2420
1.12M
      a1_x = _mm256_setzero_si256();
2421
1.12M
      shift = _mm256_setzero_si256();
2422
1.44M
    } else {
2423
1.44M
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2424
1.44M
      if (upsample_above) {
2425
381k
        __m128i mask, atmp0, atmp1, atmp2, atmp3;
2426
381k
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 8 + base_shift));
2427
381k
        atmp0 = _mm_shuffle_epi8(a0_x128,
2428
381k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2429
381k
        atmp1 = _mm_shuffle_epi8(a1_x128,
2430
381k
                                 *(__m128i *)HighbdEvenOddMaskx[base_shift]);
2431
381k
        atmp2 = _mm_shuffle_epi8(
2432
381k
            a0_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2433
381k
        atmp3 = _mm_shuffle_epi8(
2434
381k
            a1_x128, *(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16));
2435
381k
        mask = _mm_cmpgt_epi8(*(__m128i *)HighbdEvenOddMaskx[base_shift],
2436
381k
                              _mm_set1_epi8(15));
2437
381k
        a0_x128 = _mm_blendv_epi8(atmp0, atmp1, mask);
2438
381k
        mask = _mm_cmpgt_epi8(*(__m128i *)(HighbdEvenOddMaskx[base_shift] + 16),
2439
381k
                              _mm_set1_epi8(15));
2440
381k
        a1_x128 = _mm_blendv_epi8(atmp2, atmp3, mask);
2441
2442
381k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2443
381k
            _mm_and_si128(
2444
381k
                _mm_slli_epi16(
2445
381k
                    _mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2446
381k
                                   (2 << 6) - y * dx, (3 << 6) - y * dx,
2447
381k
                                   (4 << 6) - y * dx, (5 << 6) - y * dx,
2448
381k
                                   (6 << 6) - y * dx, (7 << 6) - y * dx),
2449
381k
                    upsample_above),
2450
381k
                c3f),
2451
381k
            1));
2452
1.06M
      } else {
2453
1.06M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + 1 + base_shift));
2454
1.06M
        a0_x128 =
2455
1.06M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2456
1.06M
        a1_x128 =
2457
1.06M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2458
2459
1.06M
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
2460
1.06M
            _mm_and_si128(_mm_setr_epi16(-y * dx, (1 << 6) - y * dx,
2461
1.06M
                                         (2 << 6) - y * dx, (3 << 6) - y * dx,
2462
1.06M
                                         (4 << 6) - y * dx, (5 << 6) - y * dx,
2463
1.06M
                                         (6 << 6) - y * dx, (7 << 6) - y * dx),
2464
1.06M
                          c3f),
2465
1.06M
            1));
2466
1.06M
      }
2467
1.44M
      a0_x = _mm256_castsi128_si256(a0_x128);
2468
1.44M
      a1_x = _mm256_castsi128_si256(a1_x128);
2469
1.44M
    }
2470
2471
    // y calc
2472
2.56M
    __m128i a0_y, a1_y, shifty;
2473
2.56M
    if (base_x < min_base_x) {
2474
2.13M
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
2475
2.13M
      __m128i r6, c1234, dy128, y_c128, base_y_c128, mask128;
2476
2.13M
      r6 = _mm_set1_epi16(r << 6);
2477
2.13M
      dy128 = _mm_set1_epi16(dy);
2478
2.13M
      c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
2479
2.13M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
2480
2.13M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
2481
2.13M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
2482
2.13M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
2483
2.13M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
2484
2485
2.13M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
2486
2.13M
                            left[base_y_c[2]], left[base_y_c[3]],
2487
2.13M
                            left[base_y_c[4]], left[base_y_c[5]],
2488
2.13M
                            left[base_y_c[6]], left[base_y_c[7]]);
2489
2.13M
      a1_y = _mm_setr_epi16(left[base_y_c[0] + 1], left[base_y_c[1] + 1],
2490
2.13M
                            left[base_y_c[2] + 1], left[base_y_c[3] + 1],
2491
2.13M
                            left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2492
2.13M
                            left[base_y_c[6] + 1], left[base_y_c[7] + 1]);
2493
2494
2.13M
      if (upsample_left) {
2495
471k
        shifty = _mm_srli_epi16(
2496
471k
            _mm_and_si128(_mm_slli_epi16((y_c128), upsample_left), c3f), 1);
2497
1.66M
      } else {
2498
1.66M
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
2499
1.66M
      }
2500
2.13M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
2501
2.13M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
2502
2.13M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
2503
2.13M
    }
2504
2505
2.56M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2506
2.56M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2507
2.56M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2508
2509
2.56M
    b = _mm256_mullo_epi16(diff, shift);
2510
2.56M
    res = _mm256_add_epi16(a32, b);
2511
2.56M
    res = _mm256_srli_epi16(res, 5);
2512
2513
2.56M
    resx = _mm256_castsi256_si128(res);
2514
2.56M
    resy = _mm256_extracti128_si256(res, 1);
2515
2516
2.56M
    resxy =
2517
2.56M
        _mm_blendv_epi8(resx, resy, *(__m128i *)HighbdBaseMask[base_min_diff]);
2518
2.56M
    _mm_storeu_si128((__m128i *)(dst), resxy);
2519
2.56M
    dst += stride;
2520
2.56M
  }
2521
256k
}
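The zone-2 kernels in this file all vectorize the same per-pixel rule. Below is a minimal scalar restatement of that rule, assuming upsample_above == upsample_left == 0, an arithmetic right shift for negative positions (matching the srai intrinsics), and that above[-1] and left[-1] are the valid top-left corner sample of AV1's edge layout. The names are hypothetical reading aids, not the library's C reference.

#include <stddef.h>
#include <stdint.h>

static void z2_scalar_sketch(uint16_t *dst, ptrdiff_t stride, int bw, int bh,
                             const uint16_t *above, const uint16_t *left,
                             int dx, int dy) {
  const int min_base_x = -1, min_base_y = -1;
  for (int r = 0; r < bh; ++r, dst += stride) {
    const int y = r + 1;
    for (int c = 0; c < bw; ++c) {
      // Position along the above edge, with 6 fractional bits.
      const int px = (c << 6) - y * dx;
      const int base_x = px >> 6;
      int v0, v1, shift;
      if (base_x >= min_base_x) {  // sample the above edge
        shift = (px & 0x3f) >> 1;
        v0 = above[base_x];
        v1 = above[base_x + 1];
      } else {  // reference fell left of the block: sample the left edge
        const int py = (r << 6) - (c + 1) * dy;
        int base_y = py >> 6;
        if (base_y < min_base_y) base_y = min_base_y;
        shift = (py & 0x3f) >> 1;
        v0 = left[base_y];
        v1 = left[base_y + 1];
      }
      // Same blend as the comments in the kernels: (v0*32 + 16 + diff*shift) >> 5,
      // i.e. v0*(32 - shift) + v1*shift rounded to 5 fractional bits.
      dst[c] = (uint16_t)((v0 * 32 + 16 + (v1 - v0) * shift) >> 5);
    }
  }
}

The per-column choice between the two branches is what the HighbdBaseMask blend at the end of each vector row implements: the first base_min_diff lanes take the left-edge result, the remaining lanes take the above-edge result.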
2522
2523
static void highbd_dr_prediction_32bit_z2_HxW_avx2(
2524
    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2525
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2526
109k
    int dy) {
2527
  // here upsample_above and upsample_left are 0 by design of
2528
  // av1_use_intra_edge_upsample
2529
109k
  const int min_base_x = -1;
2530
109k
  const int min_base_y = -1;
2531
109k
  (void)upsample_above;
2532
109k
  (void)upsample_left;
2533
109k
  const int frac_bits_x = 6;
2534
109k
  const int frac_bits_y = 6;
2535
2536
  // pre-filter above pixels
2537
  // store in temp buffers:
2538
  //   above[x] * 32 + 16
2539
  //   above[x+1] - above[x]
2540
  // final pixels will be calculated as:
2541
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2542
109k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a0_1_x, a1_1_x, a16, c1;
2543
109k
  __m256i diff, min_base_y256, c3f, dy256, c1234, c0123, c8;
2544
109k
  __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
2545
109k
  DECLARE_ALIGNED(32, int, base_y_c[16]);
2546
2547
109k
  a16 = _mm256_set1_epi32(16);
2548
109k
  c1 = _mm256_srli_epi32(a16, 4);
2549
109k
  c8 = _mm256_srli_epi32(a16, 1);
2550
109k
  min_base_y256 = _mm256_set1_epi32(min_base_y);
2551
109k
  c3f = _mm256_set1_epi32(0x3f);
2552
109k
  dy256 = _mm256_set1_epi32(dy);
2553
109k
  c0123 = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
2554
109k
  c1234 = _mm256_add_epi32(c0123, c1);
2555
2556
2.03M
  for (int r = 0; r < H; r++) {
2557
1.92M
    __m256i b, res, shift, ydx;
2558
1.92M
    __m256i resx[2], resy[2];
2559
1.92M
    __m256i resxy, j256, r6;
2560
6.43M
    for (int j = 0; j < W; j += 16) {
2561
4.50M
      j256 = _mm256_set1_epi32(j);
2562
4.50M
      int y = r + 1;
2563
4.50M
      ydx = _mm256_set1_epi32(y * dx);
2564
2565
4.50M
      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
2566
4.50M
      int base_shift = 0;
2567
4.50M
      if ((base_x) < (min_base_x - 1)) {
2568
2.86M
        base_shift = (min_base_x - base_x - 1);
2569
2.86M
      }
2570
4.50M
      int base_min_diff = (min_base_x - base_x);
2571
4.50M
      if (base_min_diff > 16) {
2572
1.88M
        base_min_diff = 16;
2573
2.62M
      } else {
2574
2.62M
        if (base_min_diff < 0) base_min_diff = 0;
2575
2.62M
      }
2576
2577
4.50M
      if (base_shift > 7) {
2578
2.27M
        resx[0] = _mm256_setzero_si256();
2579
2.27M
      } else {
2580
2.23M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2581
2.23M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
2582
2.23M
        a0_x128 =
2583
2.23M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2584
2.23M
        a1_x128 =
2585
2.23M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2586
2587
2.23M
        a0_x = _mm256_cvtepu16_epi32(a0_x128);
2588
2.23M
        a1_x = _mm256_cvtepu16_epi32(a1_x128);
2589
2590
2.23M
        r6 = _mm256_slli_epi32(_mm256_add_epi32(c0123, j256), 6);
2591
2.23M
        shift = _mm256_srli_epi32(
2592
2.23M
            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
2593
2594
2.23M
        diff = _mm256_sub_epi32(a1_x, a0_x);  // a[x+1] - a[x]
2595
2.23M
        a32 = _mm256_slli_epi32(a0_x, 5);     // a[x] * 32
2596
2.23M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2597
2598
2.23M
        b = _mm256_mullo_epi32(diff, shift);
2599
2.23M
        res = _mm256_add_epi32(a32, b);
2600
2.23M
        res = _mm256_srli_epi32(res, 5);
2601
2602
2.23M
        resx[0] = _mm256_packus_epi32(
2603
2.23M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2604
2.23M
      }
2605
4.50M
      int base_shift8 = 0;
2606
4.50M
      if ((base_x + 8) < (min_base_x - 1)) {
2607
2.21M
        base_shift8 = (min_base_x - (base_x + 8) - 1);
2608
2.21M
      }
2609
4.50M
      if (base_shift8 > 7) {
2610
1.88M
        resx[1] = _mm256_setzero_si256();
2611
2.62M
      } else {
2612
2.62M
        a0_1_x128 =
2613
2.62M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 8));
2614
2.62M
        a1_1_x128 =
2615
2.62M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift8 + 9));
2616
2.62M
        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
2617
2.62M
                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
2618
2.62M
        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
2619
2.62M
                                     *(__m128i *)HighbdLoadMaskx[base_shift8]);
2620
2621
2.62M
        a0_1_x = _mm256_cvtepu16_epi32(a0_1_x128);
2622
2.62M
        a1_1_x = _mm256_cvtepu16_epi32(a1_1_x128);
2623
2624
2.62M
        r6 = _mm256_slli_epi32(
2625
2.62M
            _mm256_add_epi32(c0123, _mm256_add_epi32(j256, c8)), 6);
2626
2.62M
        shift = _mm256_srli_epi32(
2627
2.62M
            _mm256_and_si256(_mm256_sub_epi32(r6, ydx), c3f), 1);
2628
2629
2.62M
        diff = _mm256_sub_epi32(a1_1_x, a0_1_x);  // a[x+1] - a[x]
2630
2.62M
        a32 = _mm256_slli_epi32(a0_1_x, 5);       // a[x] * 32
2631
2.62M
        a32 = _mm256_add_epi32(a32, a16);         // a[x] * 32 + 16
2632
2.62M
        b = _mm256_mullo_epi32(diff, shift);
2633
2634
2.62M
        resx[1] = _mm256_add_epi32(a32, b);
2635
2.62M
        resx[1] = _mm256_srli_epi32(resx[1], 5);
2636
2.62M
        resx[1] = _mm256_packus_epi32(
2637
2.62M
            resx[1],
2638
2.62M
            _mm256_castsi128_si256(_mm256_extracti128_si256(resx[1], 1)));
2639
2.62M
      }
2640
4.50M
      resx[0] =
2641
4.50M
          _mm256_inserti128_si256(resx[0], _mm256_castsi256_si128(resx[1]),
2642
4.50M
                                  1);  // 16 16-bit values
2643
2644
      // y calc
2645
4.50M
      resy[0] = _mm256_setzero_si256();
2646
4.50M
      if ((base_x < min_base_x)) {
2647
3.01M
        __m256i c256, y_c256, y_c_1_256, base_y_c256, mask256;
2648
3.01M
        r6 = _mm256_set1_epi32(r << 6);
2649
3.01M
        c256 = _mm256_add_epi32(j256, c1234);
2650
3.01M
        y_c256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2651
3.01M
        base_y_c256 = _mm256_srai_epi32(y_c256, frac_bits_y);
2652
3.01M
        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2653
3.01M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2654
3.01M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2655
3.01M
        c256 = _mm256_add_epi32(c256, c8);
2656
3.01M
        y_c_1_256 = _mm256_sub_epi32(r6, _mm256_mullo_epi32(c256, dy256));
2657
3.01M
        base_y_c256 = _mm256_srai_epi32(y_c_1_256, frac_bits_y);
2658
3.01M
        mask256 = _mm256_cmpgt_epi32(min_base_y256, base_y_c256);
2659
3.01M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2660
3.01M
        _mm256_store_si256((__m256i *)(base_y_c + 8), base_y_c256);
2661
2662
3.01M
        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2663
3.01M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2664
3.01M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2665
3.01M
            left[base_y_c[6]], left[base_y_c[7]]));
2666
3.01M
        a1_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2667
3.01M
            left[base_y_c[0] + 1], left[base_y_c[1] + 1], left[base_y_c[2] + 1],
2668
3.01M
            left[base_y_c[3] + 1], left[base_y_c[4] + 1], left[base_y_c[5] + 1],
2669
3.01M
            left[base_y_c[6] + 1], left[base_y_c[7] + 1]));
2670
2671
3.01M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c256, c3f), 1);
2672
2673
3.01M
        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2674
3.01M
        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2675
3.01M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2676
2677
3.01M
        b = _mm256_mullo_epi32(diff, shift);
2678
3.01M
        res = _mm256_add_epi32(a32, b);
2679
3.01M
        res = _mm256_srli_epi32(res, 5);
2680
2681
3.01M
        resy[0] = _mm256_packus_epi32(
2682
3.01M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2683
2684
3.01M
        a0_y = _mm256_cvtepu16_epi32(_mm_setr_epi16(
2685
3.01M
            left[base_y_c[8]], left[base_y_c[9]], left[base_y_c[10]],
2686
3.01M
            left[base_y_c[11]], left[base_y_c[12]], left[base_y_c[13]],
2687
3.01M
            left[base_y_c[14]], left[base_y_c[15]]));
2688
3.01M
        a1_y = _mm256_cvtepu16_epi32(
2689
3.01M
            _mm_setr_epi16(left[base_y_c[8] + 1], left[base_y_c[9] + 1],
2690
3.01M
                           left[base_y_c[10] + 1], left[base_y_c[11] + 1],
2691
3.01M
                           left[base_y_c[12] + 1], left[base_y_c[13] + 1],
2692
3.01M
                           left[base_y_c[14] + 1], left[base_y_c[15] + 1]));
2693
3.01M
        shift = _mm256_srli_epi32(_mm256_and_si256(y_c_1_256, c3f), 1);
2694
2695
3.01M
        diff = _mm256_sub_epi32(a1_y, a0_y);  // a[x+1] - a[x]
2696
3.01M
        a32 = _mm256_slli_epi32(a0_y, 5);     // a[x] * 32
2697
3.01M
        a32 = _mm256_add_epi32(a32, a16);     // a[x] * 32 + 16
2698
2699
3.01M
        b = _mm256_mullo_epi32(diff, shift);
2700
3.01M
        res = _mm256_add_epi32(a32, b);
2701
3.01M
        res = _mm256_srli_epi32(res, 5);
2702
2703
3.01M
        resy[1] = _mm256_packus_epi32(
2704
3.01M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1)));
2705
2706
3.01M
        resy[0] =
2707
3.01M
            _mm256_inserti128_si256(resy[0], _mm256_castsi256_si128(resy[1]),
2708
3.01M
                                    1);  // 16 16-bit values
2709
3.01M
      }
2710
2711
4.50M
      resxy = _mm256_blendv_epi8(resx[0], resy[0],
2712
4.50M
                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
2713
4.50M
      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
2714
4.50M
    }  // for j
2715
1.92M
    dst += stride;
2716
1.92M
  }
2717
109k
}
2718
2719
static void highbd_dr_prediction_z2_HxW_avx2(
2720
    int H, int W, uint16_t *dst, ptrdiff_t stride, const uint16_t *above,
2721
    const uint16_t *left, int upsample_above, int upsample_left, int dx,
2722
442k
    int dy) {
2723
  // here upsample_above and upsample_left are 0 by design of
2724
  // av1_use_intra_edge_upsample
2725
442k
  const int min_base_x = -1;
2726
442k
  const int min_base_y = -1;
2727
442k
  (void)upsample_above;
2728
442k
  (void)upsample_left;
2729
442k
  const int frac_bits_x = 6;
2730
442k
  const int frac_bits_y = 6;
2731
2732
  // pre-filter above pixels
2733
  // store in temp buffers:
2734
  //   above[x] * 32 + 16
2735
  //   above[x+1] - above[x]
2736
  // final pixels will be calculated as:
2737
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
2738
442k
  __m256i a0_x, a1_x, a32, a16, c3f, c1;
2739
442k
  __m256i diff, min_base_y256, dy256, c1234, c0123;
2740
442k
  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
2741
2742
442k
  a16 = _mm256_set1_epi16(16);
2743
442k
  c1 = _mm256_srli_epi16(a16, 4);
2744
442k
  min_base_y256 = _mm256_set1_epi16(min_base_y);
2745
442k
  c3f = _mm256_set1_epi16(0x3f);
2746
442k
  dy256 = _mm256_set1_epi16(dy);
2747
442k
  c0123 =
2748
442k
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
2749
442k
  c1234 = _mm256_add_epi16(c0123, c1);
2750
2751
8.91M
  for (int r = 0; r < H; r++) {
2752
8.47M
    __m256i b, res, shift;
2753
8.47M
    __m256i resx, resy, ydx;
2754
8.47M
    __m256i resxy, j256, r6;
2755
8.47M
    __m128i a0_x128, a1_x128, a0_1_x128, a1_1_x128;
2756
8.47M
    int y = r + 1;
2757
8.47M
    ydx = _mm256_set1_epi16((short)(y * dx));
2758
2759
23.5M
    for (int j = 0; j < W; j += 16) {
2760
15.0M
      j256 = _mm256_set1_epi16(j);
2761
15.0M
      int base_x = ((j << 6) - y * dx) >> frac_bits_x;
2762
15.0M
      int base_shift = 0;
2763
15.0M
      if ((base_x) < (min_base_x - 1)) {
2764
10.9M
        base_shift = (min_base_x - base_x - 1);
2765
10.9M
      }
2766
15.0M
      int base_min_diff = (min_base_x - base_x);
2767
15.0M
      if (base_min_diff > 16) {
2768
7.83M
        base_min_diff = 16;
2769
7.83M
      } else {
2770
7.25M
        if (base_min_diff < 0) base_min_diff = 0;
2771
7.25M
      }
2772
2773
15.0M
      if (base_shift < 8) {
2774
6.07M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
2775
6.07M
        a1_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1));
2776
6.07M
        a0_x128 =
2777
6.07M
            _mm_shuffle_epi8(a0_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2778
6.07M
        a1_x128 =
2779
6.07M
            _mm_shuffle_epi8(a1_x128, *(__m128i *)HighbdLoadMaskx[base_shift]);
2780
2781
6.07M
        a0_x = _mm256_castsi128_si256(a0_x128);
2782
6.07M
        a1_x = _mm256_castsi128_si256(a1_x128);
2783
9.02M
      } else {
2784
9.02M
        a0_x = _mm256_setzero_si256();
2785
9.02M
        a1_x = _mm256_setzero_si256();
2786
9.02M
      }
2787
2788
15.0M
      int base_shift1 = 0;
2789
15.0M
      if (base_shift > 8) {
2790
8.84M
        base_shift1 = base_shift - 8;
2791
8.84M
      }
2792
15.0M
      if (base_shift1 < 8) {
2793
7.25M
        a0_1_x128 =
2794
7.25M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 8));
2795
7.25M
        a1_1_x128 =
2796
7.25M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift1 + 9));
2797
7.25M
        a0_1_x128 = _mm_shuffle_epi8(a0_1_x128,
2798
7.25M
                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
2799
7.25M
        a1_1_x128 = _mm_shuffle_epi8(a1_1_x128,
2800
7.25M
                                     *(__m128i *)HighbdLoadMaskx[base_shift1]);
2801
2802
7.25M
        a0_x = _mm256_inserti128_si256(a0_x, a0_1_x128, 1);
2803
7.25M
        a1_x = _mm256_inserti128_si256(a1_x, a1_1_x128, 1);
2804
7.25M
      }
2805
15.0M
      r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
2806
15.0M
      shift = _mm256_srli_epi16(
2807
15.0M
          _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
2808
2809
15.0M
      diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
2810
15.0M
      a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
2811
15.0M
      a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2812
2813
15.0M
      b = _mm256_mullo_epi16(diff, shift);
2814
15.0M
      res = _mm256_add_epi16(a32, b);
2815
15.0M
      resx = _mm256_srli_epi16(res, 5);  // 16 16-bit values
2816
2817
      // y calc
2818
15.0M
      resy = _mm256_setzero_si256();
2819
15.0M
      __m256i a0_y, a1_y, shifty;
2820
15.0M
      if ((base_x < min_base_x)) {
2821
11.5M
        __m256i c256, y_c256, base_y_c256, mask256, mul16;
2822
11.5M
        r6 = _mm256_set1_epi16(r << 6);
2823
11.5M
        c256 = _mm256_add_epi16(j256, c1234);
2824
11.5M
        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
2825
11.5M
                                 _mm256_srli_epi16(min_base_y256, 1));
2826
11.5M
        y_c256 = _mm256_sub_epi16(r6, mul16);
2827
11.5M
        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
2828
11.5M
        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
2829
11.5M
        base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
2830
11.5M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2831
2832
11.5M
        a0_y = _mm256_setr_epi16(
2833
11.5M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2834
11.5M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2835
11.5M
            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
2836
11.5M
            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
2837
11.5M
            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
2838
11.5M
            left[base_y_c[15]]);
2839
11.5M
        base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
2840
11.5M
        _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
2841
2842
11.5M
        a1_y = _mm256_setr_epi16(
2843
11.5M
            left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
2844
11.5M
            left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
2845
11.5M
            left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
2846
11.5M
            left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
2847
11.5M
            left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
2848
11.5M
            left[base_y_c[15]]);
2849
2850
11.5M
        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
2851
2852
11.5M
        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
2853
11.5M
        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
2854
11.5M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
2855
2856
11.5M
        b = _mm256_mullo_epi16(diff, shifty);
2857
11.5M
        res = _mm256_add_epi16(a32, b);
2858
11.5M
        resy = _mm256_srli_epi16(res, 5);
2859
11.5M
      }
2860
2861
15.0M
      resxy = _mm256_blendv_epi8(resx, resy,
2862
15.0M
                                 *(__m256i *)HighbdBaseMask[base_min_diff]);
2863
15.0M
      _mm256_storeu_si256((__m256i *)(dst + j), resxy);
2864
15.0M
    }  // for j
2865
8.47M
    dst += stride;
2866
8.47M
  }
2867
442k
}
2868
2869
// Directional prediction, zone 2: 90 < angle < 180
2870
void av1_highbd_dr_prediction_z2_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
2871
                                      int bh, const uint16_t *above,
2872
                                      const uint16_t *left, int upsample_above,
2873
                                      int upsample_left, int dx, int dy,
2874
1.21M
                                      int bd) {
2875
1.21M
  (void)bd;
2876
1.21M
  assert(dx > 0);
2877
1.21M
  assert(dy > 0);
2878
1.21M
  switch (bw) {
2879
291k
    case 4:
2880
291k
      if (bd < 12) {
2881
154k
        highbd_dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left,
2882
154k
                                         upsample_above, upsample_left, dx, dy);
2883
154k
      } else {
2884
137k
        highbd_dr_prediction_32bit_z2_Nx4_avx2(bh, dst, stride, above, left,
2885
137k
                                               upsample_above, upsample_left,
2886
137k
                                               dx, dy);
2887
137k
      }
2888
291k
      break;
2889
370k
    case 8:
2890
370k
      if (bd < 12) {
2891
256k
        highbd_dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left,
2892
256k
                                         upsample_above, upsample_left, dx, dy);
2893
256k
      } else {
2894
113k
        highbd_dr_prediction_32bit_z2_Nx8_avx2(bh, dst, stride, above, left,
2895
113k
                                               upsample_above, upsample_left,
2896
113k
                                               dx, dy);
2897
113k
      }
2898
370k
      break;
2899
551k
    default:
2900
551k
      if (bd < 12) {
2901
442k
        highbd_dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
2902
442k
                                         upsample_above, upsample_left, dx, dy);
2903
442k
      } else {
2904
109k
        highbd_dr_prediction_32bit_z2_HxW_avx2(bh, bw, dst, stride, above, left,
2905
109k
                                               upsample_above, upsample_left,
2906
109k
                                               dx, dy);
2907
109k
      }
2908
551k
      break;
2909
1.21M
  }
2910
1.21M
}
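A note on the bd < 12 split above (an inference from the lane widths, not something the source states): the blended value is v0 * (32 - shift) + v1 * shift + 16 with shift at most 31, so it is bounded by 32 * (2^bd - 1) + 16. That bound still fits a 16-bit lane for 11-bit samples but not for 12-bit ones, which is presumably why bd == 12 routes to the *_32bit_* kernels that widen to 32-bit lanes.

/* Compile-time check (C11) of the bound sketched above. */
_Static_assert(32 * ((1 << 11) - 1) + 16 <= 65535,
               "11-bit samples: result still fits a 16-bit lane");
_Static_assert(32 * ((1 << 12) - 1) + 16 > 65535,
               "12-bit samples: result needs 32-bit lanes");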
2911
2912
//  Directional prediction, zone 3 functions
2913
static void highbd_dr_prediction_z3_4x4_avx2(uint16_t *dst, ptrdiff_t stride,
2914
                                             const uint16_t *left,
2915
                                             int upsample_left, int dy,
2916
203k
                                             int bd) {
2917
203k
  __m128i dstvec[4], d[4];
2918
203k
  if (bd < 12) {
2919
156k
    highbd_dr_prediction_z1_4xN_internal_avx2(4, dstvec, left, upsample_left,
2920
156k
                                              dy);
2921
156k
  } else {
2922
46.4k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(4, dstvec, left,
2923
46.4k
                                                    upsample_left, dy);
2924
46.4k
  }
2925
203k
  highbd_transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2],
2926
203k
                                   &dstvec[3], &d[0], &d[1], &d[2], &d[3]);
2927
203k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
2928
203k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
2929
203k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
2930
203k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
2931
203k
  return;
2932
203k
}
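The zone-3 helpers in this block share one strategy: run the zone-1 interpolation along the left edge into a temporary tile (dstvec or dstT) and transpose it into dst, either with the per-size highbd_transpose* helpers or, for the largest blocks, highbd_transpose on a scratch buffer. A scalar sketch of the equivalence, assuming upsample_left == 0 and using hypothetical names:

#include <stddef.h>
#include <stdint.h>

static void z3_scalar_sketch(uint16_t *dst, ptrdiff_t stride, int bw, int bh,
                             const uint16_t *left, int dy) {
  // left[] is assumed readable up to index bw + bh - 1, as in the callers.
  const int max_base_y = bw + bh - 1;
  for (int c = 0; c < bw; ++c) {
    const int y = (c + 1) * dy;  // position along the left edge, 6 frac bits
    const int base = y >> 6;
    const int shift = (y & 0x3f) >> 1;
    for (int r = 0; r < bh; ++r) {
      const int b = base + r;
      dst[r * stride + c] =
          (b < max_base_y) ? (uint16_t)((left[b] * 32 + 16 +
                                         (left[b + 1] - left[b]) * shift) >> 5)
                           : left[max_base_y];
    }
  }
}

Swapping the roles of r and c turns this into the zone-1 row loop, which is exactly what the predict-then-transpose pattern exploits.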
2933
2934
static void highbd_dr_prediction_z3_8x8_avx2(uint16_t *dst, ptrdiff_t stride,
2935
                                             const uint16_t *left,
2936
                                             int upsample_left, int dy,
2937
146k
                                             int bd) {
2938
146k
  __m128i dstvec[8], d[8];
2939
146k
  if (bd < 12) {
2940
98.6k
    highbd_dr_prediction_z1_8xN_internal_avx2(8, dstvec, left, upsample_left,
2941
98.6k
                                              dy);
2942
98.6k
  } else {
2943
48.2k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(8, dstvec, left,
2944
48.2k
                                                    upsample_left, dy);
2945
48.2k
  }
2946
146k
  highbd_transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2947
146k
                           &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
2948
146k
                           &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
2949
146k
                           &d[7]);
2950
1.32M
  for (int i = 0; i < 8; i++) {
2951
1.17M
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
2952
1.17M
  }
2953
146k
}
2954
2955
static void highbd_dr_prediction_z3_4x8_avx2(uint16_t *dst, ptrdiff_t stride,
2956
                                             const uint16_t *left,
2957
                                             int upsample_left, int dy,
2958
24.8k
                                             int bd) {
2959
24.8k
  __m128i dstvec[4], d[8];
2960
24.8k
  if (bd < 12) {
2961
19.2k
    highbd_dr_prediction_z1_8xN_internal_avx2(4, dstvec, left, upsample_left,
2962
19.2k
                                              dy);
2963
19.2k
  } else {
2964
5.56k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(4, dstvec, left,
2965
5.56k
                                                    upsample_left, dy);
2966
5.56k
  }
2967
2968
24.8k
  highbd_transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2969
24.8k
                               &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6],
2970
24.8k
                               &d[7]);
2971
223k
  for (int i = 0; i < 8; i++) {
2972
198k
    _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
2973
198k
  }
2974
24.8k
}
2975
2976
static void highbd_dr_prediction_z3_8x4_avx2(uint16_t *dst, ptrdiff_t stride,
2977
                                             const uint16_t *left,
2978
                                             int upsample_left, int dy,
2979
51.1k
                                             int bd) {
2980
51.1k
  __m128i dstvec[8], d[4];
2981
51.1k
  if (bd < 12) {
2982
38.1k
    highbd_dr_prediction_z1_4xN_internal_avx2(8, dstvec, left, upsample_left,
2983
38.1k
                                              dy);
2984
38.1k
  } else {
2985
12.9k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(8, dstvec, left,
2986
12.9k
                                                    upsample_left, dy);
2987
12.9k
  }
2988
2989
51.1k
  highbd_transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
2990
51.1k
                               &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7],
2991
51.1k
                               &d[0], &d[1], &d[2], &d[3]);
2992
51.1k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
2993
51.1k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride), d[1]);
2994
51.1k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride), d[2]);
2995
51.1k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride), d[3]);
2996
51.1k
}
2997
2998
static void highbd_dr_prediction_z3_8x16_avx2(uint16_t *dst, ptrdiff_t stride,
2999
                                              const uint16_t *left,
3000
                                              int upsample_left, int dy,
3001
38.4k
                                              int bd) {
3002
38.4k
  __m256i dstvec[8], d[8];
3003
38.4k
  if (bd < 12) {
3004
25.6k
    highbd_dr_prediction_z1_16xN_internal_avx2(8, dstvec, left, upsample_left,
3005
25.6k
                                               dy);
3006
25.6k
  } else {
3007
12.8k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(8, dstvec, left,
3008
12.8k
                                                     upsample_left, dy);
3009
12.8k
  }
3010
38.4k
  highbd_transpose8x16_16x8_avx2(dstvec, d);
3011
346k
  for (int i = 0; i < 8; i++) {
3012
307k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3013
307k
                     _mm256_castsi256_si128(d[i]));
3014
307k
  }
3015
346k
  for (int i = 8; i < 16; i++) {
3016
307k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3017
307k
                     _mm256_extracti128_si256(d[i - 8], 1));
3018
307k
  }
3019
38.4k
}
3020
3021
static void highbd_dr_prediction_z3_16x8_avx2(uint16_t *dst, ptrdiff_t stride,
3022
                                              const uint16_t *left,
3023
                                              int upsample_left, int dy,
3024
64.8k
                                              int bd) {
3025
64.8k
  __m128i dstvec[16], d[16];
3026
64.8k
  if (bd < 12) {
3027
44.4k
    highbd_dr_prediction_z1_8xN_internal_avx2(16, dstvec, left, upsample_left,
3028
44.4k
                                              dy);
3029
44.4k
  } else {
3030
20.4k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(16, dstvec, left,
3031
20.4k
                                                    upsample_left, dy);
3032
20.4k
  }
3033
194k
  for (int i = 0; i < 16; i += 8) {
3034
129k
    highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
3035
129k
                             &dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
3036
129k
                             &dstvec[6 + i], &dstvec[7 + i], &d[0 + i],
3037
129k
                             &d[1 + i], &d[2 + i], &d[3 + i], &d[4 + i],
3038
129k
                             &d[5 + i], &d[6 + i], &d[7 + i]);
3039
129k
  }
3040
584k
  for (int i = 0; i < 8; i++) {
3041
519k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
3042
519k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 8), d[i + 8]);
3043
519k
  }
3044
64.8k
}
3045
3046
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3047
static void highbd_dr_prediction_z3_4x16_avx2(uint16_t *dst, ptrdiff_t stride,
3048
                                              const uint16_t *left,
3049
                                              int upsample_left, int dy,
3050
32.1k
                                              int bd) {
3051
32.1k
  __m256i dstvec[4], d[4], d1;
3052
32.1k
  if (bd < 12) {
3053
18.5k
    highbd_dr_prediction_z1_16xN_internal_avx2(4, dstvec, left, upsample_left,
3054
18.5k
                                               dy);
3055
18.5k
  } else {
3056
13.5k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(4, dstvec, left,
3057
13.5k
                                                     upsample_left, dy);
3058
13.5k
  }
3059
32.1k
  highbd_transpose4x16_avx2(dstvec, d);
3060
160k
  for (int i = 0; i < 4; i++) {
3061
128k
    _mm_storel_epi64((__m128i *)(dst + i * stride),
3062
128k
                     _mm256_castsi256_si128(d[i]));
3063
128k
    d1 = _mm256_bsrli_epi128(d[i], 8);
3064
128k
    _mm_storel_epi64((__m128i *)(dst + (i + 4) * stride),
3065
128k
                     _mm256_castsi256_si128(d1));
3066
128k
    _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
3067
128k
                     _mm256_extracti128_si256(d[i], 1));
3068
128k
    _mm_storel_epi64((__m128i *)(dst + (i + 12) * stride),
3069
128k
                     _mm256_extracti128_si256(d1, 1));
3070
128k
  }
3071
32.1k
}
3072
3073
static void highbd_dr_prediction_z3_16x4_avx2(uint16_t *dst, ptrdiff_t stride,
3074
                                              const uint16_t *left,
3075
                                              int upsample_left, int dy,
3076
64.3k
                                              int bd) {
3077
64.3k
  __m128i dstvec[16], d[8];
3078
64.3k
  if (bd < 12) {
3079
48.1k
    highbd_dr_prediction_z1_4xN_internal_avx2(16, dstvec, left, upsample_left,
3080
48.1k
                                              dy);
3081
48.1k
  } else {
3082
16.1k
    highbd_dr_prediction_32bit_z1_4xN_internal_avx2(16, dstvec, left,
3083
16.1k
                                                    upsample_left, dy);
3084
16.1k
  }
3085
64.3k
  highbd_transpose16x4_8x8_sse2(dstvec, d);
3086
3087
64.3k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride), d[0]);
3088
64.3k
  _mm_storeu_si128((__m128i *)(dst + 0 * stride + 8), d[1]);
3089
64.3k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride), d[2]);
3090
64.3k
  _mm_storeu_si128((__m128i *)(dst + 1 * stride + 8), d[3]);
3091
64.3k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride), d[4]);
3092
64.3k
  _mm_storeu_si128((__m128i *)(dst + 2 * stride + 8), d[5]);
3093
64.3k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride), d[6]);
3094
64.3k
  _mm_storeu_si128((__m128i *)(dst + 3 * stride + 8), d[7]);
3095
64.3k
}
3096
3097
static void highbd_dr_prediction_z3_8x32_avx2(uint16_t *dst, ptrdiff_t stride,
3098
                                              const uint16_t *left,
3099
                                              int upsample_left, int dy,
3100
14.5k
                                              int bd) {
3101
14.5k
  __m256i dstvec[16], d[16];
3102
14.5k
  if (bd < 12) {
3103
11.5k
    highbd_dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left,
3104
11.5k
                                               dy);
3105
11.5k
  } else {
3106
2.96k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(8, dstvec, left,
3107
2.96k
                                                     upsample_left, dy);
3108
2.96k
  }
3109
3110
43.6k
  for (int i = 0; i < 16; i += 8) {
3111
29.0k
    highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
3112
29.0k
  }
3113
3114
130k
  for (int i = 0; i < 8; i++) {
3115
116k
    _mm_storeu_si128((__m128i *)(dst + i * stride),
3116
116k
                     _mm256_castsi256_si128(d[i]));
3117
116k
  }
3118
130k
  for (int i = 0; i < 8; i++) {
3119
116k
    _mm_storeu_si128((__m128i *)(dst + (i + 8) * stride),
3120
116k
                     _mm256_extracti128_si256(d[i], 1));
3121
116k
  }
3122
130k
  for (int i = 8; i < 16; i++) {
3123
116k
    _mm_storeu_si128((__m128i *)(dst + (i + 8) * stride),
3124
116k
                     _mm256_castsi256_si128(d[i]));
3125
116k
  }
3126
130k
  for (int i = 8; i < 16; i++) {
3127
116k
    _mm_storeu_si128((__m128i *)(dst + (i + 16) * stride),
3128
116k
                     _mm256_extracti128_si256(d[i], 1));
3129
116k
  }
3130
14.5k
}
3131
3132
static void highbd_dr_prediction_z3_32x8_avx2(uint16_t *dst, ptrdiff_t stride,
3133
                                              const uint16_t *left,
3134
                                              int upsample_left, int dy,
3135
51.1k
                                              int bd) {
3136
51.1k
  __m128i dstvec[32], d[32];
3137
51.1k
  if (bd < 12) {
3138
45.7k
    highbd_dr_prediction_z1_8xN_internal_avx2(32, dstvec, left, upsample_left,
3139
45.7k
                                              dy);
3140
45.7k
  } else {
3141
5.37k
    highbd_dr_prediction_32bit_z1_8xN_internal_avx2(32, dstvec, left,
3142
5.37k
                                                    upsample_left, dy);
3143
5.37k
  }
3144
3145
255k
  for (int i = 0; i < 32; i += 8) {
3146
204k
    highbd_transpose8x8_sse2(&dstvec[0 + i], &dstvec[1 + i], &dstvec[2 + i],
3147
204k
                             &dstvec[3 + i], &dstvec[4 + i], &dstvec[5 + i],
3148
204k
                             &dstvec[6 + i], &dstvec[7 + i], &d[0 + i],
3149
204k
                             &d[1 + i], &d[2 + i], &d[3 + i], &d[4 + i],
3150
204k
                             &d[5 + i], &d[6 + i], &d[7 + i]);
3151
204k
  }
3152
460k
  for (int i = 0; i < 8; i++) {
3153
409k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
3154
409k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 8), d[i + 8]);
3155
409k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 16]);
3156
409k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 24), d[i + 24]);
3157
409k
  }
3158
51.1k
}
3159
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3160
3161
static void highbd_dr_prediction_z3_16x16_avx2(uint16_t *dst, ptrdiff_t stride,
3162
                                               const uint16_t *left,
3163
                                               int upsample_left, int dy,
3164
96.8k
                                               int bd) {
3165
96.8k
  __m256i dstvec[16], d[16];
3166
96.8k
  if (bd < 12) {
3167
82.6k
    highbd_dr_prediction_z1_16xN_internal_avx2(16, dstvec, left, upsample_left,
3168
82.6k
                                               dy);
3169
82.6k
  } else {
3170
14.2k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(16, dstvec, left,
3171
14.2k
                                                     upsample_left, dy);
3172
14.2k
  }
3173
3174
96.8k
  highbd_transpose16x16_avx2(dstvec, d);
3175
3176
1.64M
  for (int i = 0; i < 16; i++) {
3177
1.54M
    _mm256_storeu_si256((__m256i *)(dst + i * stride), d[i]);
3178
1.54M
  }
3179
96.8k
}
3180
3181
static void highbd_dr_prediction_z3_32x32_avx2(uint16_t *dst, ptrdiff_t stride,
3182
                                               const uint16_t *left,
3183
                                               int upsample_left, int dy,
3184
86.7k
                                               int bd) {
3185
86.7k
  __m256i dstvec[64], d[16];
3186
86.7k
  if (bd < 12) {
3187
81.6k
    highbd_dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left,
3188
81.6k
                                               dy);
3189
81.6k
  } else {
3190
5.13k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(32, dstvec, left,
3191
5.13k
                                                     upsample_left, dy);
3192
5.13k
  }
3193
86.7k
  highbd_transpose16x16_avx2(dstvec, d);
3194
1.47M
  for (int j = 0; j < 16; j++) {
3195
1.38M
    _mm256_storeu_si256((__m256i *)(dst + j * stride), d[j]);
3196
1.38M
  }
3197
86.7k
  highbd_transpose16x16_avx2(dstvec + 16, d);
3198
1.47M
  for (int j = 0; j < 16; j++) {
3199
1.38M
    _mm256_storeu_si256((__m256i *)(dst + j * stride + 16), d[j]);
3200
1.38M
  }
3201
86.7k
  highbd_transpose16x16_avx2(dstvec + 32, d);
3202
1.47M
  for (int j = 0; j < 16; j++) {
3203
1.38M
    _mm256_storeu_si256((__m256i *)(dst + (j + 16) * stride), d[j]);
3204
1.38M
  }
3205
86.7k
  highbd_transpose16x16_avx2(dstvec + 48, d);
3206
1.47M
  for (int j = 0; j < 16; j++) {
3207
1.38M
    _mm256_storeu_si256((__m256i *)(dst + (j + 16) * stride + 16), d[j]);
3208
1.38M
  }
3209
86.7k
}
3210
3211
static void highbd_dr_prediction_z3_64x64_avx2(uint16_t *dst, ptrdiff_t stride,
3212
                                               const uint16_t *left,
3213
                                               int upsample_left, int dy,
3214
22.3k
                                               int bd) {
3215
22.3k
  DECLARE_ALIGNED(16, uint16_t, dstT[64 * 64]);
3216
22.3k
  if (bd < 12) {
3217
17.9k
    highbd_dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
3218
17.9k
  } else {
3219
4.39k
    highbd_dr_prediction_32bit_z1_64xN_avx2(64, dstT, 64, left, upsample_left,
3220
4.39k
                                            dy);
3221
4.39k
  }
3222
22.3k
  highbd_transpose(dstT, 64, dst, stride, 64, 64);
3223
22.3k
}
3224
3225
static void highbd_dr_prediction_z3_16x32_avx2(uint16_t *dst, ptrdiff_t stride,
3226
                                               const uint16_t *left,
3227
                                               int upsample_left, int dy,
3228
23.9k
                                               int bd) {
3229
23.9k
  __m256i dstvec[32], d[32];
3230
23.9k
  if (bd < 12) {
3231
22.1k
    highbd_dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left,
3232
22.1k
                                               dy);
3233
22.1k
  } else {
3234
1.76k
    highbd_dr_prediction_32bit_z1_32xN_internal_avx2(16, dstvec, left,
3235
1.76k
                                                     upsample_left, dy);
3236
1.76k
  }
3237
119k
  for (int i = 0; i < 32; i += 8) {
3238
95.6k
    highbd_transpose8x16_16x8_avx2(dstvec + i, d + i);
3239
95.6k
  }
3240
  // store
3241
71.7k
  for (int j = 0; j < 32; j += 16) {
3242
430k
    for (int i = 0; i < 8; i++) {
3243
382k
      _mm_storeu_si128((__m128i *)(dst + (i + j) * stride),
3244
382k
                       _mm256_castsi256_si128(d[(i + j)]));
3245
382k
    }
3246
430k
    for (int i = 0; i < 8; i++) {
3247
382k
      _mm_storeu_si128((__m128i *)(dst + (i + j) * stride + 8),
3248
382k
                       _mm256_castsi256_si128(d[(i + j) + 8]));
3249
382k
    }
3250
430k
    for (int i = 8; i < 16; i++) {
3251
382k
      _mm256_storeu_si256(
3252
382k
          (__m256i *)(dst + (i + j) * stride),
3253
382k
          _mm256_inserti128_si256(
3254
382k
              d[(i + j)], _mm256_extracti128_si256(d[(i + j) - 8], 1), 0));
3255
382k
    }
3256
47.8k
  }
3257
23.9k
}
3258
3259
static void highbd_dr_prediction_z3_32x16_avx2(uint16_t *dst, ptrdiff_t stride,
3260
                                               const uint16_t *left,
3261
                                               int upsample_left, int dy,
3262
25.2k
                                               int bd) {
3263
25.2k
  __m256i dstvec[32], d[16];
3264
25.2k
  if (bd < 12) {
3265
22.6k
    highbd_dr_prediction_z1_16xN_internal_avx2(32, dstvec, left, upsample_left,
3266
22.6k
                                               dy);
3267
22.6k
  } else {
3268
2.66k
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(32, dstvec, left,
3269
2.66k
                                                     upsample_left, dy);
3270
2.66k
  }
3271
75.8k
  for (int i = 0; i < 32; i += 16) {
3272
50.5k
    highbd_transpose16x16_avx2((dstvec + i), d);
3273
859k
    for (int j = 0; j < 16; j++) {
3274
808k
      _mm256_storeu_si256((__m256i *)(dst + j * stride + i), d[j]);
3275
808k
    }
3276
50.5k
  }
3277
25.2k
}
3278
3279
static void highbd_dr_prediction_z3_32x64_avx2(uint16_t *dst, ptrdiff_t stride,
3280
                                               const uint16_t *left,
3281
                                               int upsample_left, int dy,
3282
2.10k
                                               int bd) {
3283
2.10k
  uint16_t dstT[64 * 32];
3284
2.10k
  if (bd < 12) {
3285
1.74k
    highbd_dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
3286
1.74k
  } else {
3287
359
    highbd_dr_prediction_32bit_z1_64xN_avx2(32, dstT, 64, left, upsample_left,
3288
359
                                            dy);
3289
359
  }
3290
2.10k
  highbd_transpose(dstT, 64, dst, stride, 32, 64);
3291
2.10k
}
3292
3293
static void highbd_dr_prediction_z3_64x32_avx2(uint16_t *dst, ptrdiff_t stride,
3294
                                               const uint16_t *left,
3295
                                               int upsample_left, int dy,
3296
2.94k
                                               int bd) {
3297
2.94k
  DECLARE_ALIGNED(16, uint16_t, dstT[32 * 64]);
3298
2.94k
  highbd_dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy, bd);
3299
2.94k
  highbd_transpose(dstT, 32, dst, stride, 64, 32);
3300
2.94k
  return;
3301
2.94k
}
3302
3303
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3304
static void highbd_dr_prediction_z3_16x64_avx2(uint16_t *dst, ptrdiff_t stride,
3305
                                               const uint16_t *left,
3306
                                               int upsample_left, int dy,
3307
4.88k
                                               int bd) {
3308
4.88k
  DECLARE_ALIGNED(16, uint16_t, dstT[64 * 16]);
3309
4.88k
  if (bd < 12) {
3310
4.18k
    highbd_dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
3311
4.18k
  } else {
3312
692
    highbd_dr_prediction_32bit_z1_64xN_avx2(16, dstT, 64, left, upsample_left,
3313
692
                                            dy);
3314
692
  }
3315
4.88k
  highbd_transpose(dstT, 64, dst, stride, 16, 64);
3316
4.88k
}
3317
3318
static void highbd_dr_prediction_z3_64x16_avx2(uint16_t *dst, ptrdiff_t stride,
3319
                                               const uint16_t *left,
3320
                                               int upsample_left, int dy,
3321
16.1k
                                               int bd) {
3322
16.1k
  __m256i dstvec[64], d[16];
3323
16.1k
  if (bd < 12) {
3324
15.5k
    highbd_dr_prediction_z1_16xN_internal_avx2(64, dstvec, left, upsample_left,
3325
15.5k
                                               dy);
3326
15.5k
  } else {
3327
595
    highbd_dr_prediction_32bit_z1_16xN_internal_avx2(64, dstvec, left,
3328
595
                                                     upsample_left, dy);
3329
595
  }
3330
80.9k
  for (int i = 0; i < 64; i += 16) {
3331
64.7k
    highbd_transpose16x16_avx2((dstvec + i), d);
3332
1.10M
    for (int j = 0; j < 16; j++) {
3333
1.03M
      _mm256_storeu_si256((__m256i *)(dst + j * stride + i), d[j]);
3334
1.03M
    }
3335
64.7k
  }
3336
16.1k
}
3337
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3338
3339
void av1_highbd_dr_prediction_z3_avx2(uint16_t *dst, ptrdiff_t stride, int bw,
3340
                                      int bh, const uint16_t *above,
3341
                                      const uint16_t *left, int upsample_left,
3342
972k
                                      int dx, int dy, int bd) {
3343
972k
  (void)above;
3344
972k
  (void)dx;
3345
3346
972k
  assert(dx == 1);
3347
972k
  assert(dy > 0);
3348
972k
  if (bw == bh) {
3349
556k
    switch (bw) {
3350
203k
      case 4:
3351
203k
        highbd_dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy,
3352
203k
                                         bd);
3353
203k
        break;
3354
146k
      case 8:
3355
146k
        highbd_dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy,
3356
146k
                                         bd);
3357
146k
        break;
3358
96.8k
      case 16:
3359
96.8k
        highbd_dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy,
3360
96.8k
                                           bd);
3361
96.8k
        break;
3362
86.7k
      case 32:
3363
86.7k
        highbd_dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy,
3364
86.7k
                                           bd);
3365
86.7k
        break;
3366
22.3k
      case 64:
3367
22.3k
        highbd_dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy,
3368
22.3k
                                           bd);
3369
22.3k
        break;
3370
556k
    }
3371
556k
  } else {
3372
416k
    if (bw < bh) {
3373
140k
      if (bw + bw == bh) {
3374
89.3k
        switch (bw) {
3375
24.8k
          case 4:
3376
24.8k
            highbd_dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left,
3377
24.8k
                                             dy, bd);
3378
24.8k
            break;
3379
38.4k
          case 8:
3380
38.4k
            highbd_dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left,
3381
38.4k
                                              dy, bd);
3382
38.4k
            break;
3383
23.9k
          case 16:
3384
23.9k
            highbd_dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left,
3385
23.9k
                                               dy, bd);
3386
23.9k
            break;
3387
2.10k
          case 32:
3388
2.10k
            highbd_dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left,
3389
2.10k
                                               dy, bd);
3390
2.10k
            break;
3391
89.3k
        }
3392
89.3k
      } else {
3393
51.5k
        switch (bw) {
3394
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3395
32.1k
          case 4:
3396
32.1k
            highbd_dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left,
3397
32.1k
                                              dy, bd);
3398
32.1k
            break;
3399
14.5k
          case 8:
3400
14.5k
            highbd_dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left,
3401
14.5k
                                              dy, bd);
3402
14.5k
            break;
3403
4.88k
          case 16:
3404
4.88k
            highbd_dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left,
3405
4.88k
                                               dy, bd);
3406
4.88k
            break;
3407
51.5k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3408
51.5k
        }
3409
51.5k
      }
3410
275k
    } else {
3411
275k
      if (bh + bh == bw) {
3412
144k
        switch (bh) {
3413
51.1k
          case 4:
3414
51.1k
            highbd_dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left,
3415
51.1k
                                             dy, bd);
3416
51.1k
            break;
3417
64.8k
          case 8:
3418
64.8k
            highbd_dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left,
3419
64.8k
                                              dy, bd);
3420
64.8k
            break;
3421
25.2k
          case 16:
3422
25.2k
            highbd_dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left,
3423
25.2k
                                               dy, bd);
3424
25.2k
            break;
3425
2.94k
          case 32:
3426
2.94k
            highbd_dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left,
3427
2.94k
                                               dy, bd);
3428
2.94k
            break;
3429
144k
        }
3430
144k
      } else {
3431
131k
        switch (bh) {
3432
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3433
64.3k
          case 4:
3434
64.3k
            highbd_dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left,
3435
64.3k
                                              dy, bd);
3436
64.3k
            break;
3437
51.1k
          case 8:
3438
51.1k
            highbd_dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left,
3439
51.1k
                                              dy, bd);
3440
51.1k
            break;
3441
16.1k
          case 16:
3442
16.1k
            highbd_dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left,
3443
16.1k
                                               dy, bd);
3444
16.1k
            break;
3445
131k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
3446
131k
        }
3447
131k
      }
3448
275k
    }
3449
416k
  }
3450
972k
  return;
3451
972k
}
3452
#endif  // CONFIG_AV1_HIGHBITDEPTH
3453
3454
// Low bit depth functions
3455
static DECLARE_ALIGNED(32, uint8_t, BaseMask[33][32]) = {
3456
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3457
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3458
  { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3459
    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3460
  { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3461
    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3462
  { 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3463
    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3464
  { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3465
    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3466
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3467
    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3468
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3469
    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
3470
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3471
    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0, 0 },
3472
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0,
3473
    0,    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0, 0 },
3474
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0,
3475
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0, 0, 0, 0, 0, 0, 0 },
3476
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
3477
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3478
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3479
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3480
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3481
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3482
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3483
    0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0,    0,
3484
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3485
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3486
    0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0,
3487
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3488
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3489
    0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0,
3490
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3491
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3492
    0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,
3493
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3494
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3495
    0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,
3496
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3497
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3498
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,
3499
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3500
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3501
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,
3502
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3503
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3504
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,
3505
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3506
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3507
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,
3508
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3509
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3510
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
3511
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3512
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3513
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3514
    0,    0,    0,    0,    0,    0,    0,    0,    0,    0 },
3515
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3516
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3517
    0xff, 0,    0,    0,    0,    0,    0,    0,    0,    0 },
3518
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3519
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3520
    0xff, 0xff, 0,    0,    0,    0,    0,    0,    0,    0 },
3521
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3522
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3523
    0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0,    0 },
3524
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3525
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3526
    0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0,    0 },
3527
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3528
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3529
    0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0,    0 },
3530
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3531
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3532
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0,    0 },
3533
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3534
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3535
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0,    0 },
3536
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3537
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3538
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,    0 },
3539
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3540
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3541
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0 },
3542
  { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3543
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
3544
    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
3545
};
3546
3547
/* clang-format on */
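The BaseMask table above is indexed by the number of still-valid reference samples: BaseMask[k] has 0xff in its first k bytes and zero elsewhere. A small sketch of the intended blend follows (the same pattern appears in dr_prediction_z1_HxW_internal_avx2 just below, with k = base_max_diff); the helper name is hypothetical and it relies on the BaseMask table and the immintrin.h include already in this file.

// Illustrative only: keep the first k computed pixels of res and fill the
// remaining lanes with the replicated edge sample above[max_base_x].
static inline __m128i keep_first_k_pixels(__m128i res, __m128i mbase_x, int k) {
  return _mm_blendv_epi8(mbase_x, res, *(const __m128i *)BaseMask[k]);
}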
3548
static AOM_FORCE_INLINE void dr_prediction_z1_HxW_internal_avx2(
3549
    int H, int W, __m128i *dst, const uint8_t *above, int upsample_above,
3550
908k
    int dx) {
3551
908k
  const int frac_bits = 6 - upsample_above;
3552
908k
  const int max_base_x = ((W + H) - 1) << upsample_above;
3553
3554
908k
  assert(dx > 0);
3555
  // pre-filter above pixels
3556
  // store in temp buffers:
3557
  //   above[x] * 32 + 16
3558
  //   above[x+1] - above[x]
3559
  // final pixels will be calculated as:
3560
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3561
908k
  __m256i a0, a1, a32, a16;
3562
908k
  __m256i diff, c3f;
3563
908k
  __m128i a_mbase_x;
3564
3565
908k
  a16 = _mm256_set1_epi16(16);
3566
908k
  a_mbase_x = _mm_set1_epi8((int8_t)above[max_base_x]);
3567
908k
  c3f = _mm256_set1_epi16(0x3f);
3568
3569
908k
  int x = dx;
3570
12.4M
  for (int r = 0; r < W; r++) {
3571
11.5M
    __m256i b, res, shift;
3572
11.5M
    __m128i res1, a0_128, a1_128;
3573
3574
11.5M
    int base = x >> frac_bits;
3575
11.5M
    int base_max_diff = (max_base_x - base) >> upsample_above;
3576
11.5M
    if (base_max_diff <= 0) {
3577
14.6k
      for (int i = r; i < W; ++i) {
3578
10.2k
        dst[i] = a_mbase_x;  // save 4 values
3579
10.2k
      }
3580
4.36k
      return;
3581
4.36k
    }
3582
11.5M
    if (base_max_diff > H) base_max_diff = H;
3583
11.5M
    a0_128 = _mm_loadu_si128((__m128i *)(above + base));
3584
11.5M
    a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1));
3585
3586
11.5M
    if (upsample_above) {
3587
1.76M
      a0_128 = _mm_shuffle_epi8(a0_128, *(__m128i *)EvenOddMaskx[0]);
3588
1.76M
      a1_128 = _mm_srli_si128(a0_128, 8);
3589
3590
1.76M
      shift = _mm256_srli_epi16(
3591
1.76M
          _mm256_and_si256(
3592
1.76M
              _mm256_slli_epi16(_mm256_set1_epi16(x), upsample_above), c3f),
3593
1.76M
          1);
3594
9.78M
    } else {
3595
9.78M
      shift = _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3596
9.78M
    }
3597
11.5M
    a0 = _mm256_cvtepu8_epi16(a0_128);
3598
11.5M
    a1 = _mm256_cvtepu8_epi16(a1_128);
3599
3600
11.5M
    diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3601
11.5M
    a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3602
11.5M
    a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3603
3604
11.5M
    b = _mm256_mullo_epi16(diff, shift);
3605
11.5M
    res = _mm256_add_epi16(a32, b);
3606
11.5M
    res = _mm256_srli_epi16(res, 5);
3607
3608
11.5M
    res = _mm256_packus_epi16(
3609
11.5M
        res, _mm256_castsi128_si256(
3610
11.5M
                 _mm256_extracti128_si256(res, 1)));  // convert to 8 bit
3611
11.5M
    res1 = _mm256_castsi256_si128(res);               // 16 8bit values
3612
3613
11.5M
    dst[r] =
3614
11.5M
        _mm_blendv_epi8(a_mbase_x, res1, *(__m128i *)BaseMask[base_max_diff]);
3615
11.5M
    x += dx;
3616
11.5M
  }
3617
908k
}
3618
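The comment block at the top of dr_prediction_z1_HxW_internal_avx2 gives the per-pixel formula the vector loop evaluates sixteen lanes at a time. A minimal scalar sketch of that formula, in plain C rather than the instrumented source, assuming no edge upsampling and ignoring the max_base_x clamp that BaseMask handles:

#include <stdint.h>

// Positions along the above edge are kept in Q6 (64 units per pixel): row r of
// a zone-1 block starts at x = (r + 1) * dx, and column c adds a further c << 6.
static uint8_t z1_pixel(const uint8_t *above, int x_q6) {
  const int base = x_q6 >> 6;            // integer sample index
  const int shift = (x_q6 & 0x3f) >> 1;  // fractional part scaled to 0..31
  const int val =
      above[base] * 32 + 16 + (above[base + 1] - above[base]) * shift;
  return (uint8_t)(val >> 5);            // same rounding as the vector code
}

Because the fractional part depends only on the row, the kernel computes shift once per iteration of r and reuses it for every lane.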
3619
static void dr_prediction_z1_4xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3620
                                      const uint8_t *above, int upsample_above,
3621
105k
                                      int dx) {
3622
105k
  __m128i dstvec[16];
3623
3624
105k
  dr_prediction_z1_HxW_internal_avx2(4, N, dstvec, above, upsample_above, dx);
3625
759k
  for (int i = 0; i < N; i++) {
3626
654k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(dstvec[i]);
3627
654k
  }
3628
105k
}
3629
3630
static void dr_prediction_z1_8xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3631
                                      const uint8_t *above, int upsample_above,
3632
120k
                                      int dx) {
3633
120k
  __m128i dstvec[32];
3634
3635
120k
  dr_prediction_z1_HxW_internal_avx2(8, N, dstvec, above, upsample_above, dx);
3636
1.27M
  for (int i = 0; i < N; i++) {
3637
1.15M
    _mm_storel_epi64((__m128i *)(dst + stride * i), dstvec[i]);
3638
1.15M
  }
3639
120k
}
3640
3641
static void dr_prediction_z1_16xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3642
                                       const uint8_t *above, int upsample_above,
3643
117k
                                       int dx) {
3644
117k
  __m128i dstvec[64];
3645
3646
117k
  dr_prediction_z1_HxW_internal_avx2(16, N, dstvec, above, upsample_above, dx);
3647
1.87M
  for (int i = 0; i < N; i++) {
3648
1.75M
    _mm_storeu_si128((__m128i *)(dst + stride * i), dstvec[i]);
3649
1.75M
  }
3650
117k
}
3651
3652
static AOM_FORCE_INLINE void dr_prediction_z1_32xN_internal_avx2(
3653
192k
    int N, __m256i *dstvec, const uint8_t *above, int upsample_above, int dx) {
3654
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
3655
192k
  (void)upsample_above;
3656
192k
  const int frac_bits = 6;
3657
192k
  const int max_base_x = ((32 + N) - 1);
3658
3659
  // pre-filter above pixels
3660
  // store in temp buffers:
3661
  //   above[x] * 32 + 16
3662
  //   above[x+1] - above[x]
3663
  // final pixels will be calculated as:
3664
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3665
192k
  __m256i a0, a1, a32, a16;
3666
192k
  __m256i a_mbase_x, diff, c3f;
3667
3668
192k
  a16 = _mm256_set1_epi16(16);
3669
192k
  a_mbase_x = _mm256_set1_epi8((int8_t)above[max_base_x]);
3670
192k
  c3f = _mm256_set1_epi16(0x3f);
3671
3672
192k
  int x = dx;
3673
5.38M
  for (int r = 0; r < N; r++) {
3674
5.19M
    __m256i b, res, res16[2];
3675
5.19M
    __m128i a0_128, a1_128;
3676
3677
5.19M
    int base = x >> frac_bits;
3678
5.19M
    int base_max_diff = (max_base_x - base);
3679
5.19M
    if (base_max_diff <= 0) {
3680
0
      for (int i = r; i < N; ++i) {
3681
0
        dstvec[i] = a_mbase_x;  // save 32 values
3682
0
      }
3683
0
      return;
3684
0
    }
3685
5.19M
    if (base_max_diff > 32) base_max_diff = 32;
3686
5.19M
    __m256i shift =
3687
5.19M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3688
3689
15.5M
    for (int j = 0, jj = 0; j < 32; j += 16, jj++) {
3690
10.3M
      int mdiff = base_max_diff - j;
3691
10.3M
      if (mdiff <= 0) {
3692
587
        res16[jj] = a_mbase_x;
3693
10.3M
      } else {
3694
10.3M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
3695
10.3M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + j + 1));
3696
10.3M
        a0 = _mm256_cvtepu8_epi16(a0_128);
3697
10.3M
        a1 = _mm256_cvtepu8_epi16(a1_128);
3698
3699
10.3M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3700
10.3M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3701
10.3M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3702
10.3M
        b = _mm256_mullo_epi16(diff, shift);
3703
3704
10.3M
        res = _mm256_add_epi16(a32, b);
3705
10.3M
        res = _mm256_srli_epi16(res, 5);
3706
10.3M
        res16[jj] = _mm256_packus_epi16(
3707
10.3M
            res, _mm256_castsi128_si256(
3708
10.3M
                     _mm256_extracti128_si256(res, 1)));  // 16 8bit values
3709
10.3M
      }
3710
10.3M
    }
3711
5.19M
    res16[1] =
3712
5.19M
        _mm256_inserti128_si256(res16[0], _mm256_castsi256_si128(res16[1]),
3713
5.19M
                                1);  // 32 8bit values
3714
3715
5.19M
    dstvec[r] = _mm256_blendv_epi8(
3716
5.19M
        a_mbase_x, res16[1],
3717
5.19M
        *(__m256i *)BaseMask[base_max_diff]);  // 32 8bit values
3718
5.19M
    x += dx;
3719
5.19M
  }
3720
192k
}
3721
3722
static void dr_prediction_z1_32xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3723
                                       const uint8_t *above, int upsample_above,
3724
76.3k
                                       int dx) {
3725
76.3k
  __m256i dstvec[64];
3726
76.3k
  dr_prediction_z1_32xN_internal_avx2(N, dstvec, above, upsample_above, dx);
3727
2.15M
  for (int i = 0; i < N; i++) {
3728
2.08M
    _mm256_storeu_si256((__m256i *)(dst + stride * i), dstvec[i]);
3729
2.08M
  }
3730
76.3k
}
3731
3732
static void dr_prediction_z1_64xN_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3733
                                       const uint8_t *above, int upsample_above,
3734
43.1k
                                       int dx) {
3735
  // here upsample_above is 0 by design of av1_use_intra_edge_upsample
3736
43.1k
  (void)upsample_above;
3737
43.1k
  const int frac_bits = 6;
3738
43.1k
  const int max_base_x = ((64 + N) - 1);
3739
3740
  // pre-filter above pixels
3741
  // store in temp buffers:
3742
  //   above[x] * 32 + 16
3743
  //   above[x+1] - above[x]
3744
  // final pixels will be calculated as:
3745
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3746
43.1k
  __m256i a0, a1, a32, a16;
3747
43.1k
  __m256i a_mbase_x, diff, c3f;
3748
43.1k
  __m128i max_base_x128, base_inc128, mask128;
3749
3750
43.1k
  a16 = _mm256_set1_epi16(16);
3751
43.1k
  a_mbase_x = _mm256_set1_epi8((int8_t)above[max_base_x]);
3752
43.1k
  max_base_x128 = _mm_set1_epi8(max_base_x);
3753
43.1k
  c3f = _mm256_set1_epi16(0x3f);
3754
3755
43.1k
  int x = dx;
3756
2.30M
  for (int r = 0; r < N; r++, dst += stride) {
3757
2.26M
    __m256i b, res;
3758
2.26M
    int base = x >> frac_bits;
3759
2.26M
    if (base >= max_base_x) {
3760
0
      for (int i = r; i < N; ++i) {
3761
0
        _mm256_storeu_si256((__m256i *)dst, a_mbase_x);  // save 32 values
3762
0
        _mm256_storeu_si256((__m256i *)(dst + 32), a_mbase_x);
3763
0
        dst += stride;
3764
0
      }
3765
0
      return;
3766
0
    }
3767
3768
2.26M
    __m256i shift =
3769
2.26M
        _mm256_srli_epi16(_mm256_and_si256(_mm256_set1_epi16(x), c3f), 1);
3770
3771
2.26M
    __m128i a0_128, a1_128, res128;
3772
11.3M
    for (int j = 0; j < 64; j += 16) {
3773
9.05M
      int mdif = max_base_x - (base + j);
3774
9.05M
      if (mdif <= 0) {
3775
2.78k
        _mm_storeu_si128((__m128i *)(dst + j),
3776
2.78k
                         _mm256_castsi256_si128(a_mbase_x));
3777
9.05M
      } else {
3778
9.05M
        a0_128 = _mm_loadu_si128((__m128i *)(above + base + j));
3779
9.05M
        a1_128 = _mm_loadu_si128((__m128i *)(above + base + 1 + j));
3780
9.05M
        a0 = _mm256_cvtepu8_epi16(a0_128);
3781
9.05M
        a1 = _mm256_cvtepu8_epi16(a1_128);
3782
3783
9.05M
        diff = _mm256_sub_epi16(a1, a0);   // a[x+1] - a[x]
3784
9.05M
        a32 = _mm256_slli_epi16(a0, 5);    // a[x] * 32
3785
9.05M
        a32 = _mm256_add_epi16(a32, a16);  // a[x] * 32 + 16
3786
9.05M
        b = _mm256_mullo_epi16(diff, shift);
3787
3788
9.05M
        res = _mm256_add_epi16(a32, b);
3789
9.05M
        res = _mm256_srli_epi16(res, 5);
3790
9.05M
        res = _mm256_packus_epi16(
3791
9.05M
            res, _mm256_castsi128_si256(
3792
9.05M
                     _mm256_extracti128_si256(res, 1)));  // 16 8bit values
3793
3794
9.05M
        base_inc128 =
3795
9.05M
            _mm_setr_epi8((int8_t)(base + j), (int8_t)(base + j + 1),
3796
9.05M
                          (int8_t)(base + j + 2), (int8_t)(base + j + 3),
3797
9.05M
                          (int8_t)(base + j + 4), (int8_t)(base + j + 5),
3798
9.05M
                          (int8_t)(base + j + 6), (int8_t)(base + j + 7),
3799
9.05M
                          (int8_t)(base + j + 8), (int8_t)(base + j + 9),
3800
9.05M
                          (int8_t)(base + j + 10), (int8_t)(base + j + 11),
3801
9.05M
                          (int8_t)(base + j + 12), (int8_t)(base + j + 13),
3802
9.05M
                          (int8_t)(base + j + 14), (int8_t)(base + j + 15));
3803
3804
9.05M
        mask128 = _mm_cmpgt_epi8(_mm_subs_epu8(max_base_x128, base_inc128),
3805
9.05M
                                 _mm_setzero_si128());
3806
9.05M
        res128 = _mm_blendv_epi8(_mm256_castsi256_si128(a_mbase_x),
3807
9.05M
                                 _mm256_castsi256_si128(res), mask128);
3808
9.05M
        _mm_storeu_si128((__m128i *)(dst + j), res128);
3809
9.05M
      }
3810
9.05M
    }
3811
2.26M
    x += dx;
3812
2.26M
  }
3813
43.1k
}
3814
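Unlike the narrower kernels, which blend against a BaseMask table entry, the 64-wide path builds its clamp mask on the fly: base_inc128 carries the sample indices of the current 16-lane chunk, and every lane whose index has reached max_base_x is replaced by the last edge sample. A scalar sketch of that clamp, in plain C and illustrative only:

#include <stdint.h>

static void z1_row_with_clamp(uint8_t *out, int width, const uint8_t *above,
                              int base, int shift, int max_base_x) {
  for (int c = 0; c < width; ++c) {
    if (base + c < max_base_x) {
      const int a = above[base + c], b = above[base + c + 1];
      out[c] = (uint8_t)((a * 32 + 16 + (b - a) * shift) >> 5);
    } else {
      out[c] = above[max_base_x];  // past the edge: repeat the final sample
    }
  }
}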
3815
// Directional prediction, zone 1: 0 < angle < 90
3816
void av1_dr_prediction_z1_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
3817
                               const uint8_t *above, const uint8_t *left,
3818
431k
                               int upsample_above, int dx, int dy) {
3819
431k
  (void)left;
3820
431k
  (void)dy;
3821
431k
  switch (bw) {
3822
105k
    case 4:
3823
105k
      dr_prediction_z1_4xN_avx2(bh, dst, stride, above, upsample_above, dx);
3824
105k
      break;
3825
120k
    case 8:
3826
120k
      dr_prediction_z1_8xN_avx2(bh, dst, stride, above, upsample_above, dx);
3827
120k
      break;
3828
117k
    case 16:
3829
117k
      dr_prediction_z1_16xN_avx2(bh, dst, stride, above, upsample_above, dx);
3830
117k
      break;
3831
73.6k
    case 32:
3832
73.6k
      dr_prediction_z1_32xN_avx2(bh, dst, stride, above, upsample_above, dx);
3833
73.6k
      break;
3834
14.9k
    case 64:
3835
14.9k
      dr_prediction_z1_64xN_avx2(bh, dst, stride, above, upsample_above, dx);
3836
14.9k
      break;
3837
0
    default: break;
3838
431k
  }
3839
431k
  return;
3840
431k
}
3841
3842
static void dr_prediction_z2_Nx4_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3843
                                      const uint8_t *above, const uint8_t *left,
3844
                                      int upsample_above, int upsample_left,
3845
316k
                                      int dx, int dy) {
3846
316k
  const int min_base_x = -(1 << upsample_above);
3847
316k
  const int min_base_y = -(1 << upsample_left);
3848
316k
  const int frac_bits_x = 6 - upsample_above;
3849
316k
  const int frac_bits_y = 6 - upsample_left;
3850
3851
316k
  assert(dx > 0);
3852
  // pre-filter above pixels
3853
  // store in temp buffers:
3854
  //   above[x] * 32 + 16
3855
  //   above[x+1] - above[x]
3856
  // final pixels will be calculated as:
3857
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3858
316k
  __m128i a0_x, a1_x, a32, a16, diff;
3859
316k
  __m128i c3f, min_base_y128, c1234, dy128;
3860
3861
316k
  a16 = _mm_set1_epi16(16);
3862
316k
  c3f = _mm_set1_epi16(0x3f);
3863
316k
  min_base_y128 = _mm_set1_epi16(min_base_y);
3864
316k
  c1234 = _mm_setr_epi16(0, 1, 2, 3, 4, 0, 0, 0);
3865
316k
  dy128 = _mm_set1_epi16(dy);
3866
3867
2.10M
  for (int r = 0; r < N; r++) {
3868
1.78M
    __m128i b, res, shift, r6, ydx;
3869
1.78M
    __m128i resx, resy, resxy;
3870
1.78M
    __m128i a0_x128, a1_x128;
3871
1.78M
    int y = r + 1;
3872
1.78M
    int base_x = (-y * dx) >> frac_bits_x;
3873
1.78M
    int base_shift = 0;
3874
1.78M
    if (base_x < (min_base_x - 1)) {
3875
1.33M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
3876
1.33M
    }
3877
1.78M
    int base_min_diff =
3878
1.78M
        (min_base_x - base_x + upsample_above) >> upsample_above;
3879
1.78M
    if (base_min_diff > 4) {
3880
927k
      base_min_diff = 4;
3881
927k
    } else {
3882
861k
      if (base_min_diff < 0) base_min_diff = 0;
3883
861k
    }
3884
3885
1.78M
    if (base_shift > 3) {
3886
927k
      a0_x = _mm_setzero_si128();
3887
927k
      a1_x = _mm_setzero_si128();
3888
927k
      shift = _mm_setzero_si128();
3889
927k
    } else {
3890
861k
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
3891
861k
      ydx = _mm_set1_epi16(y * dx);
3892
861k
      r6 = _mm_slli_epi16(c1234, 6);
3893
3894
861k
      if (upsample_above) {
3895
341k
        a0_x128 =
3896
341k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
3897
341k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
3898
3899
341k
        shift = _mm_srli_epi16(
3900
341k
            _mm_and_si128(
3901
341k
                _mm_slli_epi16(_mm_sub_epi16(r6, ydx), upsample_above), c3f),
3902
341k
            1);
3903
520k
      } else {
3904
520k
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
3905
520k
        a1_x128 = _mm_srli_si128(a0_x128, 1);
3906
3907
520k
        shift = _mm_srli_epi16(_mm_and_si128(_mm_sub_epi16(r6, ydx), c3f), 1);
3908
520k
      }
3909
861k
      a0_x = _mm_cvtepu8_epi16(a0_x128);
3910
861k
      a1_x = _mm_cvtepu8_epi16(a1_x128);
3911
861k
    }
3912
    // y calc
3913
1.78M
    __m128i a0_y, a1_y, shifty;
3914
1.78M
    if (base_x < min_base_x) {
3915
1.50M
      DECLARE_ALIGNED(32, int16_t, base_y_c[8]);
3916
1.50M
      __m128i y_c128, base_y_c128, mask128, c1234_;
3917
1.50M
      c1234_ = _mm_srli_si128(c1234, 2);
3918
1.50M
      r6 = _mm_set1_epi16(r << 6);
3919
1.50M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234_, dy128));
3920
1.50M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
3921
1.50M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
3922
1.50M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
3923
1.50M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
3924
3925
1.50M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
3926
1.50M
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
3927
1.50M
      base_y_c128 = _mm_add_epi16(base_y_c128, _mm_srli_epi16(a16, 4));
3928
1.50M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
3929
1.50M
      a1_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
3930
1.50M
                            left[base_y_c[2]], left[base_y_c[3]], 0, 0, 0, 0);
3931
3932
1.50M
      if (upsample_left) {
3933
807k
        shifty = _mm_srli_epi16(
3934
807k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
3935
807k
      } else {
3936
702k
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
3937
702k
      }
3938
1.50M
      a0_x = _mm_unpacklo_epi64(a0_x, a0_y);
3939
1.50M
      a1_x = _mm_unpacklo_epi64(a1_x, a1_y);
3940
1.50M
      shift = _mm_unpacklo_epi64(shift, shifty);
3941
1.50M
    }
3942
3943
1.78M
    diff = _mm_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
3944
1.78M
    a32 = _mm_slli_epi16(a0_x, 5);     // a[x] * 32
3945
1.78M
    a32 = _mm_add_epi16(a32, a16);     // a[x] * 32 + 16
3946
3947
1.78M
    b = _mm_mullo_epi16(diff, shift);
3948
1.78M
    res = _mm_add_epi16(a32, b);
3949
1.78M
    res = _mm_srli_epi16(res, 5);
3950
3951
1.78M
    resx = _mm_packus_epi16(res, res);
3952
1.78M
    resy = _mm_srli_si128(resx, 4);
3953
3954
1.78M
    resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
3955
1.78M
    *(int *)(dst) = _mm_cvtsi128_si32(resxy);
3956
1.78M
    dst += stride;
3957
1.78M
  }
3958
316k
}
3959
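dr_prediction_z2_Nx4_avx2 evaluates both candidates for every pixel: an above-edge sample at Q6 position (c << 6) - (r + 1) * dx and a left-edge sample at (r << 6) - (c + 1) * dy, then merges the two packed results through BaseMask[base_min_diff]. The per-pixel decision it vectorizes looks like this in plain C (sketch only, upsampling disabled; index -1 is the top-left reference sample in the AV1 edge layout):

#include <stdint.h>

static uint8_t z2_pixel(const uint8_t *above, const uint8_t *left,
                        int r, int c, int dx, int dy) {
  const int x = (c << 6) - (r + 1) * dx;  // position along the above edge
  if ((x >> 6) >= -1) {                   // still reachable from the above edge
    const int base = x >> 6, shift = (x & 0x3f) >> 1;
    return (uint8_t)((above[base] * 32 + 16 +
                      (above[base + 1] - above[base]) * shift) >> 5);
  }
  const int y = (r << 6) - (c + 1) * dy;  // otherwise project onto the left edge
  const int base = y >> 6, shift = (y & 0x3f) >> 1;
  return (uint8_t)((left[base] * 32 + 16 +
                    (left[base + 1] - left[base]) * shift) >> 5);
}

Columns whose above-edge position has fallen below min_base_x are exactly the first base_min_diff lanes, which is why a single blendv against BaseMask[base_min_diff] selects between the two packed results.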
3960
static void dr_prediction_z2_Nx8_avx2(int N, uint8_t *dst, ptrdiff_t stride,
3961
                                      const uint8_t *above, const uint8_t *left,
3962
                                      int upsample_above, int upsample_left,
3963
267k
                                      int dx, int dy) {
3964
267k
  const int min_base_x = -(1 << upsample_above);
3965
267k
  const int min_base_y = -(1 << upsample_left);
3966
267k
  const int frac_bits_x = 6 - upsample_above;
3967
267k
  const int frac_bits_y = 6 - upsample_left;
3968
3969
  // pre-filter above pixels
3970
  // store in temp buffers:
3971
  //   above[x] * 32 + 16
3972
  //   above[x+1] - above[x]
3973
  // final pixels will be calculated as:
3974
  //   (above[x] * 32 + 16 + (above[x+1] - above[x]) * shift) >> 5
3975
267k
  __m256i diff, a32, a16;
3976
267k
  __m256i a0_x, a1_x;
3977
267k
  __m128i a0_x128, a1_x128, min_base_y128, c3f;
3978
267k
  __m128i c1234, dy128;
3979
3980
267k
  a16 = _mm256_set1_epi16(16);
3981
267k
  c3f = _mm_set1_epi16(0x3f);
3982
267k
  min_base_y128 = _mm_set1_epi16(min_base_y);
3983
267k
  dy128 = _mm_set1_epi16(dy);
3984
267k
  c1234 = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
3985
3986
2.74M
  for (int r = 0; r < N; r++) {
3987
2.48M
    __m256i b, res, shift;
3988
2.48M
    __m128i resx, resy, resxy, r6, ydx;
3989
3990
2.48M
    int y = r + 1;
3991
2.48M
    int base_x = (-y * dx) >> frac_bits_x;
3992
2.48M
    int base_shift = 0;
3993
2.48M
    if (base_x < (min_base_x - 1)) {
3994
1.85M
      base_shift = (min_base_x - base_x - 1) >> upsample_above;
3995
1.85M
    }
3996
2.48M
    int base_min_diff =
3997
2.48M
        (min_base_x - base_x + upsample_above) >> upsample_above;
3998
2.48M
    if (base_min_diff > 8) {
3999
1.07M
      base_min_diff = 8;
4000
1.41M
    } else {
4001
1.41M
      if (base_min_diff < 0) base_min_diff = 0;
4002
1.41M
    }
4003
4004
2.48M
    if (base_shift > 7) {
4005
1.07M
      a0_x = _mm256_setzero_si256();
4006
1.07M
      a1_x = _mm256_setzero_si256();
4007
1.07M
      shift = _mm256_setzero_si256();
4008
1.41M
    } else {
4009
1.41M
      a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift));
4010
1.41M
      ydx = _mm_set1_epi16(y * dx);
4011
1.41M
      r6 = _mm_slli_epi16(_mm_srli_si128(c1234, 2), 6);
4012
1.41M
      if (upsample_above) {
4013
511k
        a0_x128 =
4014
511k
            _mm_shuffle_epi8(a0_x128, *(__m128i *)EvenOddMaskx[base_shift]);
4015
511k
        a1_x128 = _mm_srli_si128(a0_x128, 8);
4016
4017
511k
        shift = _mm256_castsi128_si256(_mm_srli_epi16(
4018
511k
            _mm_and_si128(
4019
511k
                _mm_slli_epi16(_mm_sub_epi16(r6, ydx), upsample_above), c3f),
4020
511k
            1));
4021
898k
      } else {
4022
898k
        a1_x128 = _mm_srli_si128(a0_x128, 1);
4023
898k
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
4024
898k
        a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
4025
4026
898k
        shift = _mm256_castsi128_si256(
4027
898k
            _mm_srli_epi16(_mm_and_si128(_mm_sub_epi16(r6, ydx), c3f), 1));
4028
898k
      }
4029
1.41M
      a0_x = _mm256_castsi128_si256(_mm_cvtepu8_epi16(a0_x128));
4030
1.41M
      a1_x = _mm256_castsi128_si256(_mm_cvtepu8_epi16(a1_x128));
4031
1.41M
    }
4032
4033
    // y calc
4034
2.48M
    __m128i a0_y, a1_y, shifty;
4035
2.48M
    if (base_x < min_base_x) {
4036
2.07M
      DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
4037
2.07M
      __m128i y_c128, base_y_c128, mask128;
4038
2.07M
      r6 = _mm_set1_epi16(r << 6);
4039
2.07M
      y_c128 = _mm_sub_epi16(r6, _mm_mullo_epi16(c1234, dy128));
4040
2.07M
      base_y_c128 = _mm_srai_epi16(y_c128, frac_bits_y);
4041
2.07M
      mask128 = _mm_cmpgt_epi16(min_base_y128, base_y_c128);
4042
2.07M
      base_y_c128 = _mm_andnot_si128(mask128, base_y_c128);
4043
2.07M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
4044
4045
2.07M
      a0_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
4046
2.07M
                            left[base_y_c[2]], left[base_y_c[3]],
4047
2.07M
                            left[base_y_c[4]], left[base_y_c[5]],
4048
2.07M
                            left[base_y_c[6]], left[base_y_c[7]]);
4049
2.07M
      base_y_c128 = _mm_add_epi16(
4050
2.07M
          base_y_c128, _mm_srli_epi16(_mm256_castsi256_si128(a16), 4));
4051
2.07M
      _mm_store_si128((__m128i *)base_y_c, base_y_c128);
4052
4053
2.07M
      a1_y = _mm_setr_epi16(left[base_y_c[0]], left[base_y_c[1]],
4054
2.07M
                            left[base_y_c[2]], left[base_y_c[3]],
4055
2.07M
                            left[base_y_c[4]], left[base_y_c[5]],
4056
2.07M
                            left[base_y_c[6]], left[base_y_c[7]]);
4057
4058
2.07M
      if (upsample_left) {
4059
555k
        shifty = _mm_srli_epi16(
4060
555k
            _mm_and_si128(_mm_slli_epi16(y_c128, upsample_left), c3f), 1);
4061
1.52M
      } else {
4062
1.52M
        shifty = _mm_srli_epi16(_mm_and_si128(y_c128, c3f), 1);
4063
1.52M
      }
4064
4065
2.07M
      a0_x = _mm256_inserti128_si256(a0_x, a0_y, 1);
4066
2.07M
      a1_x = _mm256_inserti128_si256(a1_x, a1_y, 1);
4067
2.07M
      shift = _mm256_inserti128_si256(shift, shifty, 1);
4068
2.07M
    }
4069
4070
2.48M
    diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
4071
2.48M
    a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
4072
2.48M
    a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4073
4074
2.48M
    b = _mm256_mullo_epi16(diff, shift);
4075
2.48M
    res = _mm256_add_epi16(a32, b);
4076
2.48M
    res = _mm256_srli_epi16(res, 5);
4077
4078
2.48M
    resx = _mm_packus_epi16(_mm256_castsi256_si128(res),
4079
2.48M
                            _mm256_castsi256_si128(res));
4080
2.48M
    resy = _mm256_extracti128_si256(res, 1);
4081
2.48M
    resy = _mm_packus_epi16(resy, resy);
4082
4083
2.48M
    resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
4084
2.48M
    _mm_storel_epi64((__m128i *)(dst), resxy);
4085
2.48M
    dst += stride;
4086
2.48M
  }
4087
267k
}
4088
4089
static void dr_prediction_z2_HxW_avx2(int H, int W, uint8_t *dst,
4090
                                      ptrdiff_t stride, const uint8_t *above,
4091
                                      const uint8_t *left, int upsample_above,
4092
457k
                                      int upsample_left, int dx, int dy) {
4093
  // here upsample_above and upsample_left are 0 by design of
4094
  // av1_use_intra_edge_upsample
4095
457k
  const int min_base_x = -1;
4096
457k
  const int min_base_y = -1;
4097
457k
  (void)upsample_above;
4098
457k
  (void)upsample_left;
4099
457k
  const int frac_bits_x = 6;
4100
457k
  const int frac_bits_y = 6;
4101
4102
457k
  __m256i a0_x, a1_x, a0_y, a1_y, a32, a16, c1234, c0123;
4103
457k
  __m256i diff, min_base_y256, c3f, shifty, dy256, c1;
4104
457k
  __m128i a0_x128, a1_x128;
4105
4106
457k
  DECLARE_ALIGNED(32, int16_t, base_y_c[16]);
4107
457k
  a16 = _mm256_set1_epi16(16);
4108
457k
  c1 = _mm256_srli_epi16(a16, 4);
4109
457k
  min_base_y256 = _mm256_set1_epi16(min_base_y);
4110
457k
  c3f = _mm256_set1_epi16(0x3f);
4111
457k
  dy256 = _mm256_set1_epi16(dy);
4112
457k
  c0123 =
4113
457k
      _mm256_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4114
457k
  c1234 = _mm256_add_epi16(c0123, c1);
4115
4116
9.22M
  for (int r = 0; r < H; r++) {
4117
8.76M
    __m256i b, res, shift, j256, r6, ydx;
4118
8.76M
    __m128i resx, resy;
4119
8.76M
    __m128i resxy;
4120
8.76M
    int y = r + 1;
4121
8.76M
    ydx = _mm256_set1_epi16((int16_t)(y * dx));
4122
4123
8.76M
    int base_x = (-y * dx) >> frac_bits_x;
4124
25.2M
    for (int j = 0; j < W; j += 16) {
4125
16.4M
      j256 = _mm256_set1_epi16(j);
4126
16.4M
      int base_shift = 0;
4127
16.4M
      if ((base_x + j) < (min_base_x - 1)) {
4128
12.2M
        base_shift = (min_base_x - (base_x + j) - 1);
4129
12.2M
      }
4130
16.4M
      int base_min_diff = (min_base_x - base_x - j);
4131
16.4M
      if (base_min_diff > 16) {
4132
8.95M
        base_min_diff = 16;
4133
8.95M
      } else {
4134
7.48M
        if (base_min_diff < 0) base_min_diff = 0;
4135
7.48M
      }
4136
4137
16.4M
      if (base_shift < 16) {
4138
7.49M
        a0_x128 = _mm_loadu_si128((__m128i *)(above + base_x + base_shift + j));
4139
7.49M
        a1_x128 =
4140
7.49M
            _mm_loadu_si128((__m128i *)(above + base_x + base_shift + 1 + j));
4141
7.49M
        a0_x128 = _mm_shuffle_epi8(a0_x128, *(__m128i *)LoadMaskx[base_shift]);
4142
7.49M
        a1_x128 = _mm_shuffle_epi8(a1_x128, *(__m128i *)LoadMaskx[base_shift]);
4143
4144
7.49M
        a0_x = _mm256_cvtepu8_epi16(a0_x128);
4145
7.49M
        a1_x = _mm256_cvtepu8_epi16(a1_x128);
4146
4147
7.49M
        r6 = _mm256_slli_epi16(_mm256_add_epi16(c0123, j256), 6);
4148
7.49M
        shift = _mm256_srli_epi16(
4149
7.49M
            _mm256_and_si256(_mm256_sub_epi16(r6, ydx), c3f), 1);
4150
4151
7.49M
        diff = _mm256_sub_epi16(a1_x, a0_x);  // a[x+1] - a[x]
4152
7.49M
        a32 = _mm256_slli_epi16(a0_x, 5);     // a[x] * 32
4153
7.49M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4154
4155
7.49M
        b = _mm256_mullo_epi16(diff, shift);
4156
7.49M
        res = _mm256_add_epi16(a32, b);
4157
7.49M
        res = _mm256_srli_epi16(res, 5);  // 16 16-bit values
4158
7.49M
        resx = _mm256_castsi256_si128(_mm256_packus_epi16(
4159
7.49M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
4160
8.95M
      } else {
4161
8.95M
        resx = _mm_setzero_si128();
4162
8.95M
      }
4163
4164
      // y calc
4165
16.4M
      if (base_x < min_base_x) {
4166
15.3M
        __m256i c256, y_c256, base_y_c256, mask256, mul16;
4167
15.3M
        r6 = _mm256_set1_epi16(r << 6);
4168
15.3M
        c256 = _mm256_add_epi16(j256, c1234);
4169
15.3M
        mul16 = _mm256_min_epu16(_mm256_mullo_epi16(c256, dy256),
4170
15.3M
                                 _mm256_srli_epi16(min_base_y256, 1));
4171
15.3M
        y_c256 = _mm256_sub_epi16(r6, mul16);
4172
4173
15.3M
        base_y_c256 = _mm256_srai_epi16(y_c256, frac_bits_y);
4174
15.3M
        mask256 = _mm256_cmpgt_epi16(min_base_y256, base_y_c256);
4175
4176
15.3M
        base_y_c256 = _mm256_blendv_epi8(base_y_c256, min_base_y256, mask256);
4177
15.3M
        int16_t min_y = (int16_t)_mm_extract_epi16(
4178
15.3M
            _mm256_extracti128_si256(base_y_c256, 1), 7);
4179
15.3M
        int16_t max_y =
4180
15.3M
            (int16_t)_mm_extract_epi16(_mm256_castsi256_si128(base_y_c256), 0);
4181
15.3M
        int16_t offset_diff = max_y - min_y;
4182
4183
15.3M
        if (offset_diff < 16) {
4184
14.4M
          __m256i min_y256 = _mm256_set1_epi16(min_y);
4185
4186
14.4M
          __m256i base_y_offset = _mm256_sub_epi16(base_y_c256, min_y256);
4187
14.4M
          __m128i base_y_offset128 =
4188
14.4M
              _mm_packs_epi16(_mm256_extracti128_si256(base_y_offset, 0),
4189
14.4M
                              _mm256_extracti128_si256(base_y_offset, 1));
4190
4191
14.4M
          __m128i a0_y128 = _mm_maskload_epi32(
4192
14.4M
              (int *)(left + min_y), *(__m128i *)LoadMaskz2[offset_diff / 4]);
4193
14.4M
          __m128i a1_y128 =
4194
14.4M
              _mm_maskload_epi32((int *)(left + min_y + 1),
4195
14.4M
                                 *(__m128i *)LoadMaskz2[offset_diff / 4]);
4196
14.4M
          a0_y128 = _mm_shuffle_epi8(a0_y128, base_y_offset128);
4197
14.4M
          a1_y128 = _mm_shuffle_epi8(a1_y128, base_y_offset128);
4198
14.4M
          a0_y = _mm256_cvtepu8_epi16(a0_y128);
4199
14.4M
          a1_y = _mm256_cvtepu8_epi16(a1_y128);
4200
14.4M
        } else {
4201
860k
          base_y_c256 = _mm256_andnot_si256(mask256, base_y_c256);
4202
860k
          _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
4203
4204
860k
          a0_y = _mm256_setr_epi16(
4205
860k
              left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
4206
860k
              left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
4207
860k
              left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
4208
860k
              left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
4209
860k
              left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
4210
860k
              left[base_y_c[15]]);
4211
860k
          base_y_c256 = _mm256_add_epi16(base_y_c256, c1);
4212
860k
          _mm256_store_si256((__m256i *)base_y_c, base_y_c256);
4213
4214
860k
          a1_y = _mm256_setr_epi16(
4215
860k
              left[base_y_c[0]], left[base_y_c[1]], left[base_y_c[2]],
4216
860k
              left[base_y_c[3]], left[base_y_c[4]], left[base_y_c[5]],
4217
860k
              left[base_y_c[6]], left[base_y_c[7]], left[base_y_c[8]],
4218
860k
              left[base_y_c[9]], left[base_y_c[10]], left[base_y_c[11]],
4219
860k
              left[base_y_c[12]], left[base_y_c[13]], left[base_y_c[14]],
4220
860k
              left[base_y_c[15]]);
4221
860k
        }
4222
15.3M
        shifty = _mm256_srli_epi16(_mm256_and_si256(y_c256, c3f), 1);
4223
4224
15.3M
        diff = _mm256_sub_epi16(a1_y, a0_y);  // a[x+1] - a[x]
4225
15.3M
        a32 = _mm256_slli_epi16(a0_y, 5);     // a[x] * 32
4226
15.3M
        a32 = _mm256_add_epi16(a32, a16);     // a[x] * 32 + 16
4227
4228
15.3M
        b = _mm256_mullo_epi16(diff, shifty);
4229
15.3M
        res = _mm256_add_epi16(a32, b);
4230
15.3M
        res = _mm256_srli_epi16(res, 5);  // 16 16-bit values
4231
15.3M
        resy = _mm256_castsi256_si128(_mm256_packus_epi16(
4232
15.3M
            res, _mm256_castsi128_si256(_mm256_extracti128_si256(res, 1))));
4233
15.3M
      } else {
4234
1.13M
        resy = _mm_setzero_si128();
4235
1.13M
      }
4236
16.4M
      resxy = _mm_blendv_epi8(resx, resy, *(__m128i *)BaseMask[base_min_diff]);
4237
16.4M
      _mm_storeu_si128((__m128i *)(dst + j), resxy);
4238
16.4M
    }  // for j
4239
8.76M
    dst += stride;
4240
8.76M
  }
4241
457k
}
4242
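The left-edge path of dr_prediction_z2_HxW_avx2 avoids sixteen scalar lookups whenever it can: the sixteen base_y indices of a row are non-increasing, so it compares the first and last of them (max_y and min_y) and, when they span fewer than sixteen entries, loads from left[min_y] onward once and permutes the bytes with _mm_shuffle_epi8; only wider spans fall back to the element-by-element _mm256_setr_epi16 gather. A plain-C sketch of the narrow-span case (illustrative only):

#include <stdint.h>
#include <string.h>

static void gather_left_narrow(const uint8_t *left, const int16_t base_y[16],
                               uint8_t out[16]) {
  const int16_t min_y = base_y[15];  // indices fall from base_y[0] to base_y[15]
  uint8_t window[16];
  memcpy(window, left + min_y, sizeof(window));  // one contiguous 16-byte load
  for (int c = 0; c < 16; ++c)
    out[c] = window[base_y[c] - min_y];          // shuffle by offset
}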
4243
// Directional prediction, zone 2: 90 < angle < 180
4244
void av1_dr_prediction_z2_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
4245
                               const uint8_t *above, const uint8_t *left,
4246
                               int upsample_above, int upsample_left, int dx,
4247
1.04M
                               int dy) {
4248
1.04M
  assert(dx > 0);
4249
1.04M
  assert(dy > 0);
4250
1.04M
  switch (bw) {
4251
316k
    case 4:
4252
316k
      dr_prediction_z2_Nx4_avx2(bh, dst, stride, above, left, upsample_above,
4253
316k
                                upsample_left, dx, dy);
4254
316k
      break;
4255
267k
    case 8:
4256
267k
      dr_prediction_z2_Nx8_avx2(bh, dst, stride, above, left, upsample_above,
4257
267k
                                upsample_left, dx, dy);
4258
267k
      break;
4259
457k
    default:
4260
457k
      dr_prediction_z2_HxW_avx2(bh, bw, dst, stride, above, left,
4261
457k
                                upsample_above, upsample_left, dx, dy);
4262
457k
      break;
4263
1.04M
  }
4264
1.04M
  return;
4265
1.04M
}
4266
4267
// z3 functions
4268
199k
static inline void transpose16x32_avx2(__m256i *x, __m256i *d) {
4269
199k
  __m256i w0, w1, w2, w3, w4, w5, w6, w7, w8, w9;
4270
199k
  __m256i w10, w11, w12, w13, w14, w15;
4271
4272
199k
  w0 = _mm256_unpacklo_epi8(x[0], x[1]);
4273
199k
  w1 = _mm256_unpacklo_epi8(x[2], x[3]);
4274
199k
  w2 = _mm256_unpacklo_epi8(x[4], x[5]);
4275
199k
  w3 = _mm256_unpacklo_epi8(x[6], x[7]);
4276
4277
199k
  w8 = _mm256_unpacklo_epi8(x[8], x[9]);
4278
199k
  w9 = _mm256_unpacklo_epi8(x[10], x[11]);
4279
199k
  w10 = _mm256_unpacklo_epi8(x[12], x[13]);
4280
199k
  w11 = _mm256_unpacklo_epi8(x[14], x[15]);
4281
4282
199k
  w4 = _mm256_unpacklo_epi16(w0, w1);
4283
199k
  w5 = _mm256_unpacklo_epi16(w2, w3);
4284
199k
  w12 = _mm256_unpacklo_epi16(w8, w9);
4285
199k
  w13 = _mm256_unpacklo_epi16(w10, w11);
4286
4287
199k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4288
199k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4289
199k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4290
199k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4291
4292
  // Store first 4-line result
4293
199k
  d[0] = _mm256_unpacklo_epi64(w6, w14);
4294
199k
  d[1] = _mm256_unpackhi_epi64(w6, w14);
4295
199k
  d[2] = _mm256_unpacklo_epi64(w7, w15);
4296
199k
  d[3] = _mm256_unpackhi_epi64(w7, w15);
4297
4298
199k
  w4 = _mm256_unpackhi_epi16(w0, w1);
4299
199k
  w5 = _mm256_unpackhi_epi16(w2, w3);
4300
199k
  w12 = _mm256_unpackhi_epi16(w8, w9);
4301
199k
  w13 = _mm256_unpackhi_epi16(w10, w11);
4302
4303
199k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4304
199k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4305
199k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4306
199k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4307
4308
  // Store second 4-line result
4309
199k
  d[4] = _mm256_unpacklo_epi64(w6, w14);
4310
199k
  d[5] = _mm256_unpackhi_epi64(w6, w14);
4311
199k
  d[6] = _mm256_unpacklo_epi64(w7, w15);
4312
199k
  d[7] = _mm256_unpackhi_epi64(w7, w15);
4313
4314
  // upper half
4315
199k
  w0 = _mm256_unpackhi_epi8(x[0], x[1]);
4316
199k
  w1 = _mm256_unpackhi_epi8(x[2], x[3]);
4317
199k
  w2 = _mm256_unpackhi_epi8(x[4], x[5]);
4318
199k
  w3 = _mm256_unpackhi_epi8(x[6], x[7]);
4319
4320
199k
  w8 = _mm256_unpackhi_epi8(x[8], x[9]);
4321
199k
  w9 = _mm256_unpackhi_epi8(x[10], x[11]);
4322
199k
  w10 = _mm256_unpackhi_epi8(x[12], x[13]);
4323
199k
  w11 = _mm256_unpackhi_epi8(x[14], x[15]);
4324
4325
199k
  w4 = _mm256_unpacklo_epi16(w0, w1);
4326
199k
  w5 = _mm256_unpacklo_epi16(w2, w3);
4327
199k
  w12 = _mm256_unpacklo_epi16(w8, w9);
4328
199k
  w13 = _mm256_unpacklo_epi16(w10, w11);
4329
4330
199k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4331
199k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4332
199k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4333
199k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4334
4335
  // Store first 4-line result
4336
199k
  d[8] = _mm256_unpacklo_epi64(w6, w14);
4337
199k
  d[9] = _mm256_unpackhi_epi64(w6, w14);
4338
199k
  d[10] = _mm256_unpacklo_epi64(w7, w15);
4339
199k
  d[11] = _mm256_unpackhi_epi64(w7, w15);
4340
4341
199k
  w4 = _mm256_unpackhi_epi16(w0, w1);
4342
199k
  w5 = _mm256_unpackhi_epi16(w2, w3);
4343
199k
  w12 = _mm256_unpackhi_epi16(w8, w9);
4344
199k
  w13 = _mm256_unpackhi_epi16(w10, w11);
4345
4346
199k
  w6 = _mm256_unpacklo_epi32(w4, w5);
4347
199k
  w7 = _mm256_unpackhi_epi32(w4, w5);
4348
199k
  w14 = _mm256_unpacklo_epi32(w12, w13);
4349
199k
  w15 = _mm256_unpackhi_epi32(w12, w13);
4350
4351
  // Store second 4-line result
4352
199k
  d[12] = _mm256_unpacklo_epi64(w6, w14);
4353
199k
  d[13] = _mm256_unpackhi_epi64(w6, w14);
4354
199k
  d[14] = _mm256_unpacklo_epi64(w7, w15);
4355
199k
  d[15] = _mm256_unpackhi_epi64(w7, w15);
4356
199k
}
4357
4358
static void dr_prediction_z3_4x4_avx2(uint8_t *dst, ptrdiff_t stride,
4359
                                      const uint8_t *left, int upsample_left,
4360
84.5k
                                      int dy) {
4361
84.5k
  __m128i dstvec[4], d[4];
4362
4363
84.5k
  dr_prediction_z1_HxW_internal_avx2(4, 4, dstvec, left, upsample_left, dy);
4364
84.5k
  transpose4x8_8x4_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
4365
84.5k
                            &d[0], &d[1], &d[2], &d[3]);
4366
4367
84.5k
  *(int *)(dst + stride * 0) = _mm_cvtsi128_si32(d[0]);
4368
84.5k
  *(int *)(dst + stride * 1) = _mm_cvtsi128_si32(d[1]);
4369
84.5k
  *(int *)(dst + stride * 2) = _mm_cvtsi128_si32(d[2]);
4370
84.5k
  *(int *)(dst + stride * 3) = _mm_cvtsi128_si32(d[3]);
4371
84.5k
  return;
4372
84.5k
}
4373
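dr_prediction_z3_4x4_avx2 shows the pattern used throughout zone 3: the zone-1 kernel is run along the left edge with dy as the step, and the result is written out transposed. The equivalent computation in plain C, with upsampling disabled and the max_base clamp omitted (sketch only):

#include <stddef.h>
#include <stdint.h>

static void z3_sketch(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
                      int bw, int bh, int dy) {
  for (int c = 0; c < bw; ++c) {
    const int y = (c + 1) * dy;          // Q6 position along the left edge
    const int base = y >> 6;
    const int shift = (y & 0x3f) >> 1;
    for (int r = 0; r < bh; ++r) {
      const int a = left[base + r], b = left[base + r + 1];
      dst[r * stride + c] = (uint8_t)((a * 32 + 16 + (b - a) * shift) >> 5);
    }
  }
}

The larger rectangular sizes do the same thing through a scratch buffer: dr_prediction_z3_64x64_avx2, for example, runs the 64-wide zone-1 kernel into dstT and then calls transpose().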
4374
static void dr_prediction_z3_8x8_avx2(uint8_t *dst, ptrdiff_t stride,
4375
                                      const uint8_t *left, int upsample_left,
4376
95.9k
                                      int dy) {
4377
95.9k
  __m128i dstvec[8], d[8];
4378
4379
95.9k
  dr_prediction_z1_HxW_internal_avx2(8, 8, dstvec, left, upsample_left, dy);
4380
95.9k
  transpose8x8_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4],
4381
95.9k
                    &dstvec[5], &dstvec[6], &dstvec[7], &d[0], &d[1], &d[2],
4382
95.9k
                    &d[3]);
4383
4384
95.9k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
4385
95.9k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), _mm_srli_si128(d[0], 8));
4386
95.9k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[1]);
4387
95.9k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), _mm_srli_si128(d[1], 8));
4388
95.9k
  _mm_storel_epi64((__m128i *)(dst + 4 * stride), d[2]);
4389
95.9k
  _mm_storel_epi64((__m128i *)(dst + 5 * stride), _mm_srli_si128(d[2], 8));
4390
95.9k
  _mm_storel_epi64((__m128i *)(dst + 6 * stride), d[3]);
4391
95.9k
  _mm_storel_epi64((__m128i *)(dst + 7 * stride), _mm_srli_si128(d[3], 8));
4392
95.9k
}
4393
4394
static void dr_prediction_z3_4x8_avx2(uint8_t *dst, ptrdiff_t stride,
4395
                                      const uint8_t *left, int upsample_left,
4396
23.6k
                                      int dy) {
4397
23.6k
  __m128i dstvec[4], d[8];
4398
4399
23.6k
  dr_prediction_z1_HxW_internal_avx2(8, 4, dstvec, left, upsample_left, dy);
4400
23.6k
  transpose4x8_8x4_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &d[0],
4401
23.6k
                        &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7]);
4402
212k
  for (int i = 0; i < 8; i++) {
4403
189k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
4404
189k
  }
4405
23.6k
}
4406
4407
static void dr_prediction_z3_8x4_avx2(uint8_t *dst, ptrdiff_t stride,
4408
                                      const uint8_t *left, int upsample_left,
4409
40.0k
                                      int dy) {
4410
40.0k
  __m128i dstvec[8], d[4];
4411
4412
40.0k
  dr_prediction_z1_HxW_internal_avx2(4, 8, dstvec, left, upsample_left, dy);
4413
40.0k
  transpose8x8_low_sse2(&dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3],
4414
40.0k
                        &dstvec[4], &dstvec[5], &dstvec[6], &dstvec[7], &d[0],
4415
40.0k
                        &d[1], &d[2], &d[3]);
4416
40.0k
  _mm_storel_epi64((__m128i *)(dst + 0 * stride), d[0]);
4417
40.0k
  _mm_storel_epi64((__m128i *)(dst + 1 * stride), d[1]);
4418
40.0k
  _mm_storel_epi64((__m128i *)(dst + 2 * stride), d[2]);
4419
40.0k
  _mm_storel_epi64((__m128i *)(dst + 3 * stride), d[3]);
4420
40.0k
}
4421
4422
static void dr_prediction_z3_8x16_avx2(uint8_t *dst, ptrdiff_t stride,
4423
                                       const uint8_t *left, int upsample_left,
4424
25.7k
                                       int dy) {
4425
25.7k
  __m128i dstvec[8], d[8];
4426
4427
25.7k
  dr_prediction_z1_HxW_internal_avx2(16, 8, dstvec, left, upsample_left, dy);
4428
25.7k
  transpose8x16_16x8_sse2(dstvec, dstvec + 1, dstvec + 2, dstvec + 3,
4429
25.7k
                          dstvec + 4, dstvec + 5, dstvec + 6, dstvec + 7, d,
4430
25.7k
                          d + 1, d + 2, d + 3, d + 4, d + 5, d + 6, d + 7);
4431
231k
  for (int i = 0; i < 8; i++) {
4432
206k
    _mm_storel_epi64((__m128i *)(dst + i * stride), d[i]);
4433
206k
    _mm_storel_epi64((__m128i *)(dst + (i + 8) * stride),
4434
206k
                     _mm_srli_si128(d[i], 8));
4435
206k
  }
4436
25.7k
}
4437
4438
static void dr_prediction_z3_16x8_avx2(uint8_t *dst, ptrdiff_t stride,
4439
                                       const uint8_t *left, int upsample_left,
4440
47.6k
                                       int dy) {
4441
47.6k
  __m128i dstvec[16], d[16];
4442
4443
47.6k
  dr_prediction_z1_HxW_internal_avx2(8, 16, dstvec, left, upsample_left, dy);
4444
47.6k
  transpose16x8_8x16_sse2(
4445
47.6k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4446
47.6k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4447
47.6k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4448
47.6k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4449
4450
428k
  for (int i = 0; i < 8; i++) {
4451
380k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4452
380k
  }
4453
47.6k
}
4454
4455
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4456
static void dr_prediction_z3_4x16_avx2(uint8_t *dst, ptrdiff_t stride,
4457
                                       const uint8_t *left, int upsample_left,
4458
20.2k
                                       int dy) {
4459
20.2k
  __m128i dstvec[4], d[16];
4460
4461
20.2k
  dr_prediction_z1_HxW_internal_avx2(16, 4, dstvec, left, upsample_left, dy);
4462
20.2k
  transpose4x16_sse2(dstvec, d);
4463
343k
  for (int i = 0; i < 16; i++) {
4464
323k
    *(int *)(dst + stride * i) = _mm_cvtsi128_si32(d[i]);
4465
323k
  }
4466
20.2k
}
4467
4468
static void dr_prediction_z3_16x4_avx2(uint8_t *dst, ptrdiff_t stride,
4469
                                       const uint8_t *left, int upsample_left,
4470
60.7k
                                       int dy) {
4471
60.7k
  __m128i dstvec[16], d[8];
4472
4473
60.7k
  dr_prediction_z1_HxW_internal_avx2(4, 16, dstvec, left, upsample_left, dy);
4474
303k
  for (int i = 4; i < 8; i++) {
4475
243k
    d[i] = _mm_setzero_si128();
4476
243k
  }
4477
60.7k
  transpose16x8_8x16_sse2(
4478
60.7k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4479
60.7k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4480
60.7k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4481
60.7k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4482
4483
303k
  for (int i = 0; i < 4; i++) {
4484
243k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4485
243k
  }
4486
60.7k
}
4487
4488
static void dr_prediction_z3_8x32_avx2(uint8_t *dst, ptrdiff_t stride,
4489
                                       const uint8_t *left, int upsample_left,
4490
10.0k
                                       int dy) {
4491
10.0k
  __m256i dstvec[16], d[16];
4492
4493
10.0k
  dr_prediction_z1_32xN_internal_avx2(8, dstvec, left, upsample_left, dy);
4494
90.7k
  for (int i = 8; i < 16; i++) {
4495
80.6k
    dstvec[i] = _mm256_setzero_si256();
4496
80.6k
  }
4497
10.0k
  transpose16x32_avx2(dstvec, d);
4498
4499
171k
  for (int i = 0; i < 16; i++) {
4500
161k
    _mm_storel_epi64((__m128i *)(dst + i * stride),
4501
161k
                     _mm256_castsi256_si128(d[i]));
4502
161k
  }
4503
171k
  for (int i = 0; i < 16; i++) {
4504
161k
    _mm_storel_epi64((__m128i *)(dst + (i + 16) * stride),
4505
161k
                     _mm256_extracti128_si256(d[i], 1));
4506
161k
  }
4507
10.0k
}
4508
4509
static void dr_prediction_z3_32x8_avx2(uint8_t *dst, ptrdiff_t stride,
4510
                                       const uint8_t *left, int upsample_left,
4511
45.2k
                                       int dy) {
4512
45.2k
  __m128i dstvec[32], d[16];
4513
4514
45.2k
  dr_prediction_z1_HxW_internal_avx2(8, 32, dstvec, left, upsample_left, dy);
4515
4516
45.2k
  transpose16x8_8x16_sse2(
4517
45.2k
      &dstvec[0], &dstvec[1], &dstvec[2], &dstvec[3], &dstvec[4], &dstvec[5],
4518
45.2k
      &dstvec[6], &dstvec[7], &dstvec[8], &dstvec[9], &dstvec[10], &dstvec[11],
4519
45.2k
      &dstvec[12], &dstvec[13], &dstvec[14], &dstvec[15], &d[0], &d[1], &d[2],
4520
45.2k
      &d[3], &d[4], &d[5], &d[6], &d[7]);
4521
45.2k
  transpose16x8_8x16_sse2(
4522
45.2k
      &dstvec[0 + 16], &dstvec[1 + 16], &dstvec[2 + 16], &dstvec[3 + 16],
4523
45.2k
      &dstvec[4 + 16], &dstvec[5 + 16], &dstvec[6 + 16], &dstvec[7 + 16],
4524
45.2k
      &dstvec[8 + 16], &dstvec[9 + 16], &dstvec[10 + 16], &dstvec[11 + 16],
4525
45.2k
      &dstvec[12 + 16], &dstvec[13 + 16], &dstvec[14 + 16], &dstvec[15 + 16],
4526
45.2k
      &d[0 + 8], &d[1 + 8], &d[2 + 8], &d[3 + 8], &d[4 + 8], &d[5 + 8],
4527
45.2k
      &d[6 + 8], &d[7 + 8]);
4528
4529
406k
  for (int i = 0; i < 8; i++) {
4530
361k
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4531
361k
    _mm_storeu_si128((__m128i *)(dst + i * stride + 16), d[i + 8]);
4532
361k
  }
4533
45.2k
}
4534
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4535
4536
static void dr_prediction_z3_16x16_avx2(uint8_t *dst, ptrdiff_t stride,
4537
                                        const uint8_t *left, int upsample_left,
4538
85.2k
                                        int dy) {
4539
85.2k
  __m128i dstvec[16], d[16];
4540
4541
85.2k
  dr_prediction_z1_HxW_internal_avx2(16, 16, dstvec, left, upsample_left, dy);
4542
85.2k
  transpose16x16_sse2(dstvec, d);
4543
4544
1.44M
  for (int i = 0; i < 16; i++) {
4545
1.36M
    _mm_storeu_si128((__m128i *)(dst + i * stride), d[i]);
4546
1.36M
  }
4547
85.2k
}
4548
4549
static void dr_prediction_z3_32x32_avx2(uint8_t *dst, ptrdiff_t stride,
4550
                                        const uint8_t *left, int upsample_left,
4551
83.3k
                                        int dy) {
4552
83.3k
  __m256i dstvec[32], d[32];
4553
4554
83.3k
  dr_prediction_z1_32xN_internal_avx2(32, dstvec, left, upsample_left, dy);
4555
83.3k
  transpose16x32_avx2(dstvec, d);
4556
83.3k
  transpose16x32_avx2(dstvec + 16, d + 16);
4557
1.41M
  for (int j = 0; j < 16; j++) {
4558
1.33M
    _mm_storeu_si128((__m128i *)(dst + j * stride),
4559
1.33M
                     _mm256_castsi256_si128(d[j]));
4560
1.33M
    _mm_storeu_si128((__m128i *)(dst + j * stride + 16),
4561
1.33M
                     _mm256_castsi256_si128(d[j + 16]));
4562
1.33M
  }
4563
1.41M
  for (int j = 0; j < 16; j++) {
4564
1.33M
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
4565
1.33M
                     _mm256_extracti128_si256(d[j], 1));
4566
1.33M
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride + 16),
4567
1.33M
                     _mm256_extracti128_si256(d[j + 16], 1));
4568
1.33M
  }
4569
83.3k
}
4570
4571
static void dr_prediction_z3_64x64_avx2(uint8_t *dst, ptrdiff_t stride,
4572
                                        const uint8_t *left, int upsample_left,
4573
21.6k
                                        int dy) {
4574
21.6k
  DECLARE_ALIGNED(16, uint8_t, dstT[64 * 64]);
4575
21.6k
  dr_prediction_z1_64xN_avx2(64, dstT, 64, left, upsample_left, dy);
4576
21.6k
  transpose(dstT, 64, dst, stride, 64, 64);
4577
21.6k
}
4578
4579
static void dr_prediction_z3_16x32_avx2(uint8_t *dst, ptrdiff_t stride,
4580
                                        const uint8_t *left, int upsample_left,
4581
22.7k
                                        int dy) {
4582
22.7k
  __m256i dstvec[16], d[16];
4583
4584
22.7k
  dr_prediction_z1_32xN_internal_avx2(16, dstvec, left, upsample_left, dy);
4585
22.7k
  transpose16x32_avx2(dstvec, d);
4586
  // store
4587
386k
  for (int j = 0; j < 16; j++) {
4588
363k
    _mm_storeu_si128((__m128i *)(dst + j * stride),
4589
363k
                     _mm256_castsi256_si128(d[j]));
4590
363k
    _mm_storeu_si128((__m128i *)(dst + (j + 16) * stride),
4591
363k
                     _mm256_extracti128_si256(d[j], 1));
4592
363k
  }
4593
22.7k
}
4594
4595
static void dr_prediction_z3_32x16_avx2(uint8_t *dst, ptrdiff_t stride,
4596
                                        const uint8_t *left, int upsample_left,
4597
20.5k
                                        int dy) {
4598
20.5k
  __m128i dstvec[32], d[16];
4599
4600
20.5k
  dr_prediction_z1_HxW_internal_avx2(16, 32, dstvec, left, upsample_left, dy);
4601
61.7k
  for (int i = 0; i < 32; i += 16) {
4602
41.1k
    transpose16x16_sse2((dstvec + i), d);
4603
700k
    for (int j = 0; j < 16; j++) {
4604
659k
      _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
4605
659k
    }
4606
41.1k
  }
4607
20.5k
}
4608
4609
static void dr_prediction_z3_32x64_avx2(uint8_t *dst, ptrdiff_t stride,
4610
                                        const uint8_t *left, int upsample_left,
4611
1.75k
                                        int dy) {
4612
1.75k
  uint8_t dstT[64 * 32];
4613
1.75k
  dr_prediction_z1_64xN_avx2(32, dstT, 64, left, upsample_left, dy);
4614
1.75k
  transpose(dstT, 64, dst, stride, 32, 64);
4615
1.75k
}
4616
4617
static void dr_prediction_z3_64x32_avx2(uint8_t *dst, ptrdiff_t stride,
4618
                                        const uint8_t *left, int upsample_left,
4619
2.68k
                                        int dy) {
4620
2.68k
  uint8_t dstT[32 * 64];
4621
2.68k
  dr_prediction_z1_32xN_avx2(64, dstT, 32, left, upsample_left, dy);
4622
2.68k
  transpose(dstT, 32, dst, stride, 64, 32);
4623
2.68k
  return;
4624
2.68k
}
4625
4626
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4627
static void dr_prediction_z3_16x64_avx2(uint8_t *dst, ptrdiff_t stride,
4628
                                        const uint8_t *left, int upsample_left,
4629
4.80k
                                        int dy) {
4630
4.80k
  uint8_t dstT[64 * 16];
4631
4.80k
  dr_prediction_z1_64xN_avx2(16, dstT, 64, left, upsample_left, dy);
4632
4.80k
  transpose(dstT, 64, dst, stride, 16, 64);
4633
4.80k
}
4634
4635
static void dr_prediction_z3_64x16_avx2(uint8_t *dst, ptrdiff_t stride,
4636
                                        const uint8_t *left, int upsample_left,
4637
15.3k
                                        int dy) {
4638
15.3k
  __m128i dstvec[64], d[16];
4639
4640
15.3k
  dr_prediction_z1_HxW_internal_avx2(16, 64, dstvec, left, upsample_left, dy);
4641
76.8k
  for (int i = 0; i < 64; i += 16) {
4642
61.5k
    transpose16x16_sse2((dstvec + i), d);
4643
1.04M
    for (int j = 0; j < 16; j++) {
4644
984k
      _mm_storeu_si128((__m128i *)(dst + j * stride + i), d[j]);
4645
984k
    }
4646
61.5k
  }
4647
15.3k
}
4648
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4649
4650
void av1_dr_prediction_z3_avx2(uint8_t *dst, ptrdiff_t stride, int bw, int bh,
4651
                               const uint8_t *above, const uint8_t *left,
4652
711k
                               int upsample_left, int dx, int dy) {
4653
711k
  (void)above;
4654
711k
  (void)dx;
4655
711k
  assert(dx == 1);
4656
711k
  assert(dy > 0);
4657
4658
711k
  if (bw == bh) {
4659
370k
    switch (bw) {
4660
84.5k
      case 4:
4661
84.5k
        dr_prediction_z3_4x4_avx2(dst, stride, left, upsample_left, dy);
4662
84.5k
        break;
4663
95.9k
      case 8:
4664
95.9k
        dr_prediction_z3_8x8_avx2(dst, stride, left, upsample_left, dy);
4665
95.9k
        break;
4666
85.2k
      case 16:
4667
85.2k
        dr_prediction_z3_16x16_avx2(dst, stride, left, upsample_left, dy);
4668
85.2k
        break;
4669
83.3k
      case 32:
4670
83.3k
        dr_prediction_z3_32x32_avx2(dst, stride, left, upsample_left, dy);
4671
83.3k
        break;
4672
21.6k
      case 64:
4673
21.6k
        dr_prediction_z3_64x64_avx2(dst, stride, left, upsample_left, dy);
4674
21.6k
        break;
4675
370k
    }
4676
370k
  } else {
4677
341k
    if (bw < bh) {
4678
108k
      if (bw + bw == bh) {
4679
73.8k
        switch (bw) {
4680
23.6k
          case 4:
4681
23.6k
            dr_prediction_z3_4x8_avx2(dst, stride, left, upsample_left, dy);
4682
23.6k
            break;
4683
25.7k
          case 8:
4684
25.7k
            dr_prediction_z3_8x16_avx2(dst, stride, left, upsample_left, dy);
4685
25.7k
            break;
4686
22.7k
          case 16:
4687
22.7k
            dr_prediction_z3_16x32_avx2(dst, stride, left, upsample_left, dy);
4688
22.7k
            break;
4689
1.75k
          case 32:
4690
1.75k
            dr_prediction_z3_32x64_avx2(dst, stride, left, upsample_left, dy);
4691
1.75k
            break;
4692
73.8k
        }
4693
73.8k
      } else {
4694
35.0k
        switch (bw) {
4695
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4696
20.2k
          case 4:
4697
20.2k
            dr_prediction_z3_4x16_avx2(dst, stride, left, upsample_left, dy);
4698
20.2k
            break;
4699
10.0k
          case 8:
4700
10.0k
            dr_prediction_z3_8x32_avx2(dst, stride, left, upsample_left, dy);
4701
10.0k
            break;
4702
4.80k
          case 16:
4703
4.80k
            dr_prediction_z3_16x64_avx2(dst, stride, left, upsample_left, dy);
4704
4.80k
            break;
4705
35.0k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4706
35.0k
        }
4707
35.0k
      }
4708
232k
    } else {
4709
232k
      if (bh + bh == bw) {
4710
110k
        switch (bh) {
4711
40.0k
          case 4:
4712
40.0k
            dr_prediction_z3_8x4_avx2(dst, stride, left, upsample_left, dy);
4713
40.0k
            break;
4714
47.6k
          case 8:
4715
47.6k
            dr_prediction_z3_16x8_avx2(dst, stride, left, upsample_left, dy);
4716
47.6k
            break;
4717
20.5k
          case 16:
4718
20.5k
            dr_prediction_z3_32x16_avx2(dst, stride, left, upsample_left, dy);
4719
20.5k
            break;
4720
2.68k
          case 32:
4721
2.68k
            dr_prediction_z3_64x32_avx2(dst, stride, left, upsample_left, dy);
4722
2.68k
            break;
4723
110k
        }
4724
121k
      } else {
4725
121k
        switch (bh) {
4726
0
#if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4727
60.7k
          case 4:
4728
60.7k
            dr_prediction_z3_16x4_avx2(dst, stride, left, upsample_left, dy);
4729
60.7k
            break;
4730
45.2k
          case 8:
4731
45.2k
            dr_prediction_z3_32x8_avx2(dst, stride, left, upsample_left, dy);
4732
45.2k
            break;
4733
15.3k
          case 16:
4734
15.3k
            dr_prediction_z3_64x16_avx2(dst, stride, left, upsample_left, dy);
4735
15.3k
            break;
4736
121k
#endif  // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER
4737
121k
        }
4738
121k
      }
4739
232k
    }
4740
341k
  }
4741
711k
}