/src/aom/av1/common/x86/selfguided_avx2.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2018, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <immintrin.h> |
13 | | |
14 | | #include "config/aom_config.h" |
15 | | #include "config/av1_rtcd.h" |
16 | | |
17 | | #include "av1/common/restoration.h" |
18 | | #include "aom_dsp/x86/synonyms.h" |
19 | | #include "aom_dsp/x86/synonyms_avx2.h" |
20 | | |
21 | | // Load 8 bytes from the possibly-misaligned pointer p, extend each byte to |
22 | | // 32-bit precision and return them in an AVX2 register. |
23 | 77.6M | static __m256i yy256_load_extend_8_32(const void *p) { |
24 | 77.6M | return _mm256_cvtepu8_epi32(xx_loadl_64(p)); |
25 | 77.6M | } |
26 | | |
27 | | // Load 8 halfwords from the possibly-misaligned pointer p, extend each |
28 | | // halfword to 32-bit precision and return them in an AVX2 register. |
29 | 129M | static __m256i yy256_load_extend_16_32(const void *p) { |
30 | 129M | return _mm256_cvtepu16_epi32(xx_loadu_128(p)); |
31 | 129M | } |
32 | | |
33 | | // Compute the scan of an AVX2 register holding 8 32-bit integers. If the |
34 | | // register holds x0..x7 then the scan will hold x0, x0+x1, x0+x1+x2, ..., |
35 | | // x0+x1+...+x7 |
36 | | // |
37 | | // Let [...] represent a 128-bit block, and let a, ..., h be 32-bit integers |
38 | | // (assumed small enough to be able to add them without overflow). |
39 | | // |
40 | | // Use -> as shorthand for summing, i.e. h->a = h + g + f + e + d + c + b + a. |
41 | | // |
42 | | // x = [h g f e][d c b a] |
43 | | // x01 = [g f e 0][c b a 0] |
44 | | // x02 = [g+h f+g e+f e][c+d b+c a+b a] |
45 | | // x03 = [e+f e 0 0][a+b a 0 0] |
46 | | // x04 = [e->h e->g e->f e][a->d a->c a->b a] |
47 | | // s = a->d |
48 | | // s01 = [a->d a->d a->d a->d] |
49 | | // s02 = [a->d a->d a->d a->d][0 0 0 0] |
50 | | // ret = [a->h a->g a->f a->e][a->d a->c a->b a] |
51 | 405M | static __m256i scan_32(__m256i x) { |
52 | 405M | const __m256i x01 = _mm256_slli_si256(x, 4); |
53 | 405M | const __m256i x02 = _mm256_add_epi32(x, x01); |
54 | 405M | const __m256i x03 = _mm256_slli_si256(x02, 8); |
55 | 405M | const __m256i x04 = _mm256_add_epi32(x02, x03); |
56 | 405M | const int32_t s = _mm256_extract_epi32(x04, 3); |
57 | 405M | const __m128i s01 = _mm_set1_epi32(s); |
58 | 405M | const __m256i s02 = _mm256_insertf128_si256(_mm256_setzero_si256(), s01, 1); |
59 | 405M | return _mm256_add_epi32(x04, s02); |
60 | 405M | } |
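For reference, the log-step shifts and adds above compute an ordinary inclusive prefix sum. A minimal scalar sketch of the same result (assuming <stdint.h>; the name scan_32_scalar is illustrative only, not an aom function):

static void scan_32_scalar(const int32_t x[8], int32_t out[8]) {
  // out[i] = x[0] + x[1] + ... + x[i]
  int32_t acc = 0;
  for (int i = 0; i < 8; ++i) {
    acc += x[i];
    out[i] = acc;
  }
}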
61 | | |
62 | | // Compute two integral images from src. B sums elements; A sums their |
63 | | // squares. The images are offset by one pixel, so their dimensions are |
64 | | // (width + 1) x (height + 1), and the first row and column will be zero. |
65 | | // |
66 | | // A+1 and B+1 should be aligned to 32 bytes. buf_stride should be a multiple |
67 | | // of 8. |
68 | | |
69 | 1.10M | static void *memset_zero_avx(int32_t *dest, const __m256i *zero, size_t count) { |
70 | 1.10M | unsigned int i = 0; |
71 | 2.94M | for (i = 0; i < (count & 0xffffffe0); i += 32) { |
72 | 1.84M | _mm256_storeu_si256((__m256i *)(dest + i), *zero); |
73 | 1.84M | _mm256_storeu_si256((__m256i *)(dest + i + 8), *zero); |
74 | 1.84M | _mm256_storeu_si256((__m256i *)(dest + i + 16), *zero); |
75 | 1.84M | _mm256_storeu_si256((__m256i *)(dest + i + 24), *zero); |
76 | 1.84M | } |
77 | 2.21M | for (; i < (count & 0xfffffff8); i += 8) { |
78 | 1.10M | _mm256_storeu_si256((__m256i *)(dest + i), *zero); |
79 | 1.10M | } |
80 | 7.22M | for (; i < count; i++) { |
81 | 6.12M | dest[i] = 0; |
82 | 6.12M | } |
83 | 1.10M | return dest; |
84 | 1.10M | } |
85 | | |
86 | | static void integral_images(const uint8_t *src, int src_stride, int width, |
87 | | int height, int32_t *A, int32_t *B, |
88 | 264k | int buf_stride) { |
89 | 264k | const __m256i zero = _mm256_setzero_si256(); |
90 | | // Write out the zero top row |
91 | 264k | memset_zero_avx(A, &zero, (width + 8)); |
92 | 264k | memset_zero_avx(B, &zero, (width + 8)); |
93 | 11.5M | for (int i = 0; i < height; ++i) { |
94 | | // Zero the left column. |
95 | 11.2M | A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0; |
96 | | |
97 | | // ldiff is the difference H - D where H is the output sample immediately |
98 | | // to the left and D is the output sample above it. These are scalars, |
99 | | // replicated across the eight lanes. |
100 | 11.2M | __m256i ldiff1 = zero, ldiff2 = zero; |
101 | 89.7M | for (int j = 0; j < width; j += 8) { |
102 | 78.4M | const int ABj = 1 + j; |
103 | | |
104 | 78.4M | const __m256i above1 = yy_load_256(B + ABj + i * buf_stride); |
105 | 78.4M | const __m256i above2 = yy_load_256(A + ABj + i * buf_stride); |
106 | | |
107 | 78.4M | const __m256i x1 = yy256_load_extend_8_32(src + j + i * src_stride); |
108 | 78.4M | const __m256i x2 = _mm256_madd_epi16(x1, x1); |
109 | | |
110 | 78.4M | const __m256i sc1 = scan_32(x1); |
111 | 78.4M | const __m256i sc2 = scan_32(x2); |
112 | | |
113 | 78.4M | const __m256i row1 = |
114 | 78.4M | _mm256_add_epi32(_mm256_add_epi32(sc1, above1), ldiff1); |
115 | 78.4M | const __m256i row2 = |
116 | 78.4M | _mm256_add_epi32(_mm256_add_epi32(sc2, above2), ldiff2); |
117 | | |
118 | 78.4M | yy_store_256(B + ABj + (i + 1) * buf_stride, row1); |
119 | 78.4M | yy_store_256(A + ABj + (i + 1) * buf_stride, row2); |
120 | | |
121 | | // Calculate the new H - D. |
122 | 78.4M | ldiff1 = _mm256_set1_epi32( |
123 | 78.4M | _mm256_extract_epi32(_mm256_sub_epi32(row1, above1), 7)); |
124 | 78.4M | ldiff2 = _mm256_set1_epi32( |
125 | 78.4M | _mm256_extract_epi32(_mm256_sub_epi32(row2, above2), 7)); |
126 | 78.4M | } |
127 | 11.2M | } |
128 | 264k | } |
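The loop above (and its high-bitdepth twin below) evaluates the standard integral-image recurrence eight columns at a time: the in-register scan supplies the horizontal prefix, above1/above2 supply the row above, and the replicated ldiff carries the running total across 8-wide blocks. A minimal scalar sketch of the same recurrence (assuming <stdint.h>; integral_images_scalar is an illustrative name, not an aom function):

static void integral_images_scalar(const uint8_t *src, int src_stride,
                                   int width, int height, int32_t *A,
                                   int32_t *B, int buf_stride) {
  // Row 0 and column 0 of the outputs are zero; entry (i + 1, j + 1) covers
  // src rows 0..i and columns 0..j. B sums pixels, A sums their squares.
  for (int j = 0; j <= width; ++j) A[j] = B[j] = 0;
  for (int i = 0; i < height; ++i) {
    A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0;
    for (int j = 0; j < width; ++j) {
      const int32_t p = src[i * src_stride + j];
      const int here = (i + 1) * buf_stride + (j + 1);
      const int above = here - buf_stride;
      const int left = here - 1;
      B[here] = p + B[above] + B[left] - B[above - 1];
      A[here] = p * p + A[above] + A[left] - A[above - 1];
    }
  }
}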
129 | | |
130 | | // Compute two integral images from src. B sums elements; A sums their squares |
131 | | // |
132 | | // A and B should be aligned to 32 bytes. buf_stride should be a multiple of 8. |
133 | | static void integral_images_highbd(const uint16_t *src, int src_stride, |
134 | | int width, int height, int32_t *A, |
135 | 286k | int32_t *B, int buf_stride) { |
136 | 286k | const __m256i zero = _mm256_setzero_si256(); |
137 | | // Write out the zero top row |
138 | 286k | memset_zero_avx(A, &zero, (width + 8)); |
139 | 286k | memset_zero_avx(B, &zero, (width + 8)); |
140 | | |
141 | 15.5M | for (int i = 0; i < height; ++i) { |
142 | | // Zero the left column. |
143 | 15.2M | A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0; |
144 | | |
145 | | // ldiff is the difference H - D where H is the output sample immediately |
146 | | // to the left and D is the output sample above it. These are scalars, |
147 | | // replicated across the eight lanes. |
148 | 15.2M | __m256i ldiff1 = zero, ldiff2 = zero; |
149 | 143M | for (int j = 0; j < width; j += 8) { |
150 | 128M | const int ABj = 1 + j; |
151 | | |
152 | 128M | const __m256i above1 = yy_load_256(B + ABj + i * buf_stride); |
153 | 128M | const __m256i above2 = yy_load_256(A + ABj + i * buf_stride); |
154 | | |
155 | 128M | const __m256i x1 = yy256_load_extend_16_32(src + j + i * src_stride); |
156 | 128M | const __m256i x2 = _mm256_madd_epi16(x1, x1); |
157 | | |
158 | 128M | const __m256i sc1 = scan_32(x1); |
159 | 128M | const __m256i sc2 = scan_32(x2); |
160 | | |
161 | 128M | const __m256i row1 = |
162 | 128M | _mm256_add_epi32(_mm256_add_epi32(sc1, above1), ldiff1); |
163 | 128M | const __m256i row2 = |
164 | 128M | _mm256_add_epi32(_mm256_add_epi32(sc2, above2), ldiff2); |
165 | | |
166 | 128M | yy_store_256(B + ABj + (i + 1) * buf_stride, row1); |
167 | 128M | yy_store_256(A + ABj + (i + 1) * buf_stride, row2); |
168 | | |
169 | | // Calculate the new H - D. |
170 | 128M | ldiff1 = _mm256_set1_epi32( |
171 | 128M | _mm256_extract_epi32(_mm256_sub_epi32(row1, above1), 7)); |
172 | 128M | ldiff2 = _mm256_set1_epi32( |
173 | 128M | _mm256_extract_epi32(_mm256_sub_epi32(row2, above2), 7)); |
174 | 128M | } |
175 | 15.2M | } |
176 | 286k | } |
177 | | |
178 | | // Compute 8 values of boxsum from the given integral image. ii should point |
179 | | // at the middle of the box (for the first value). r is the box radius. |
180 | 285M | static inline __m256i boxsum_from_ii(const int32_t *ii, int stride, int r) { |
181 | 285M | const __m256i tl = yy_loadu_256(ii - (r + 1) - (r + 1) * stride); |
182 | 285M | const __m256i tr = yy_loadu_256(ii + (r + 0) - (r + 1) * stride); |
183 | 285M | const __m256i bl = yy_loadu_256(ii - (r + 1) + r * stride); |
184 | 285M | const __m256i br = yy_loadu_256(ii + (r + 0) + r * stride); |
185 | 285M | const __m256i u = _mm256_sub_epi32(tr, tl); |
186 | 285M | const __m256i v = _mm256_sub_epi32(br, bl); |
187 | 285M | return _mm256_sub_epi32(v, u); |
188 | 285M | } |
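The asymmetric offsets are the usual inclusive prefix-sum query bounds: a box spanning [-r, r] needs the integral-image samples at +r and -(r + 1), and the box sum is br - bl - tr + tl. The same query written for a single sample (a sketch; boxsum_from_ii_scalar is an illustrative name):

static int32_t boxsum_from_ii_scalar(const int32_t *ii, int stride, int r) {
  const int32_t tl = ii[-(r + 1) - (r + 1) * stride];
  const int32_t tr = ii[(r + 0) - (r + 1) * stride];
  const int32_t bl = ii[-(r + 1) + r * stride];
  const int32_t br = ii[(r + 0) + r * stride];
  // Right-column difference minus left-column difference.
  return (br - bl) - (tr - tl);
}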
189 | | |
190 | 179M | static __m256i round_for_shift(unsigned shift) { |
191 | 179M | return _mm256_set1_epi32((1 << shift) >> 1); |
192 | 179M | } |
193 | | |
194 | 142M | static __m256i compute_p(__m256i sum1, __m256i sum2, int bit_depth, int n) { |
195 | 142M | __m256i an, bb; |
196 | 142M | if (bit_depth > 8) { |
197 | 73.8M | const __m256i rounding_a = round_for_shift(2 * (bit_depth - 8)); |
198 | 73.8M | const __m256i rounding_b = round_for_shift(bit_depth - 8); |
199 | 73.8M | const __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8)); |
200 | 73.8M | const __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8); |
201 | 73.8M | const __m256i a = |
202 | 73.8M | _mm256_srl_epi32(_mm256_add_epi32(sum2, rounding_a), shift_a); |
203 | 73.8M | const __m256i b = |
204 | 73.8M | _mm256_srl_epi32(_mm256_add_epi32(sum1, rounding_b), shift_b); |
205 | | // b < 2^14, so we can use a 16-bit madd rather than a 32-bit |
206 | | // mullo to square it |
207 | 73.8M | bb = _mm256_madd_epi16(b, b); |
208 | 73.8M | an = _mm256_max_epi32(_mm256_mullo_epi32(a, _mm256_set1_epi32(n)), bb); |
209 | 73.8M | } else { |
210 | 69.1M | bb = _mm256_madd_epi16(sum1, sum1); |
211 | 69.1M | an = _mm256_mullo_epi32(sum2, _mm256_set1_epi32(n)); |
212 | 69.1M | } |
213 | 142M | return _mm256_sub_epi32(an, bb); |
214 | 142M | } |
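compute_p evaluates p = n * sum(x^2) - (sum(x))^2, which is n^2 times the variance of the pixels in the box. For bit_depth > 8 both box sums are first rounded towards 8-bit precision so the products fit in 32 bits, and the max() guards against that rounding driving the difference negative. A scalar sketch of the same arithmetic (compute_p_scalar is an illustrative name; ROUND_POWER_OF_TWO and AOMMAX are libaom's macros from aom_dsp_common.h):

static int32_t compute_p_scalar(int32_t sum1, int32_t sum2, int bit_depth,
                                int n) {
  if (bit_depth > 8) {
    const int32_t a = ROUND_POWER_OF_TWO(sum2, 2 * (bit_depth - 8));
    const int32_t b = ROUND_POWER_OF_TWO(sum1, bit_depth - 8);
    const int32_t bb = b * b;
    return AOMMAX(a * n, bb) - bb;  // never negative
  }
  // Exact sums: n * sum(x^2) >= (sum(x))^2 by Cauchy-Schwarz.
  return n * sum2 - sum1 * sum1;
}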
215 | | |
216 | | // Assumes that C, D are integral images for the original buffer which has been |
217 | | // extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels |
218 | | // on the sides. A, B, C, D point at logical position (0, 0). |
219 | | static void calc_ab(int32_t *A, int32_t *B, const int32_t *C, const int32_t *D, |
220 | | int width, int height, int buf_stride, int bit_depth, |
221 | 520k | int sgr_params_idx, int radius_idx) { |
222 | 520k | const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; |
223 | 520k | const int r = params->r[radius_idx]; |
224 | 520k | const int n = (2 * r + 1) * (2 * r + 1); |
225 | 520k | const __m256i s = _mm256_set1_epi32(params->s[radius_idx]); |
226 | | // one_over_n[n-1] is 2^12/n, so easily fits in an int16 |
227 | 520k | const __m256i one_over_n = _mm256_set1_epi32(av1_one_by_x[n - 1]); |
228 | | |
229 | 520k | const __m256i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS); |
230 | 520k | const __m256i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS); |
231 | | |
232 | | // Set up masks |
233 | 520k | const __m128i ones32 = _mm_set_epi32(0, 0, ~0, ~0); |
234 | 520k | __m256i mask[8]; |
235 | 4.68M | for (int idx = 0; idx < 8; idx++) { |
236 | 4.16M | const __m128i shift = _mm_cvtsi32_si128(8 * (8 - idx)); |
237 | 4.16M | mask[idx] = _mm256_cvtepi8_epi32(_mm_srl_epi64(ones32, shift)); |
238 | 4.16M | } |
239 | | |
240 | 21.9M | for (int i = -1; i < height + 1; ++i) { |
241 | 139M | for (int j = -1; j < width + 1; j += 8) { |
242 | 118M | const int32_t *Cij = C + i * buf_stride + j; |
243 | 118M | const int32_t *Dij = D + i * buf_stride + j; |
244 | | |
245 | 118M | __m256i sum1 = boxsum_from_ii(Dij, buf_stride, r); |
246 | 118M | __m256i sum2 = boxsum_from_ii(Cij, buf_stride, r); |
247 | | |
248 | | // When width + 2 isn't a multiple of 8, sum1 and sum2 will contain |
249 | | // some uninitialised data in their upper words. We use a mask to |
250 | | // ensure that these bits are set to 0. |
251 | 118M | int idx = AOMMIN(8, width + 1 - j); |
252 | 118M | assert(idx >= 1); |
253 | | |
254 | 118M | if (idx < 8) { |
255 | 22.2M | sum1 = _mm256_and_si256(mask[idx], sum1); |
256 | 22.2M | sum2 = _mm256_and_si256(mask[idx], sum2); |
257 | 22.2M | } |
258 | | |
259 | 118M | const __m256i p = compute_p(sum1, sum2, bit_depth, n); |
260 | | |
261 | 118M | const __m256i z = _mm256_min_epi32( |
262 | 118M | _mm256_srli_epi32(_mm256_add_epi32(_mm256_mullo_epi32(p, s), rnd_z), |
263 | 118M | SGRPROJ_MTABLE_BITS), |
264 | 118M | _mm256_set1_epi32(255)); |
265 | | |
266 | 118M | const __m256i a_res = _mm256_i32gather_epi32(av1_x_by_xplus1, z, 4); |
267 | | |
268 | 118M | yy_storeu_256(A + i * buf_stride + j, a_res); |
269 | | |
270 | 118M | const __m256i a_complement = |
271 | 118M | _mm256_sub_epi32(_mm256_set1_epi32(SGRPROJ_SGR), a_res); |
272 | | |
273 | | // sum1 might have lanes greater than 2^15, so we can't use madd to do |
274 | | // multiplication involving sum1. However, a_complement and one_over_n |
275 | | // are both small enough for a 16-bit madd, so we can multiply them first. |
276 | 118M | const __m256i a_comp_over_n = _mm256_madd_epi16(a_complement, one_over_n); |
277 | 118M | const __m256i b_int = _mm256_mullo_epi32(a_comp_over_n, sum1); |
278 | 118M | const __m256i b_res = _mm256_srli_epi32(_mm256_add_epi32(b_int, rnd_res), |
279 | 118M | SGRPROJ_RECIP_BITS); |
280 | | |
281 | 118M | yy_storeu_256(B + i * buf_stride + j, b_res); |
282 | 118M | } |
283 | 21.5M | } |
284 | 520k | } |
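Per output sample, the loop above scales p by the strength s, clamps the result so it can index the 256-entry av1_x_by_xplus1 table, and then derives B from (SGRPROJ_SGR - A), the 1/n table and the box sum. A one-sample sketch building on compute_p_scalar above (calc_ab_one is an illustrative name; the tables and SGRPROJ_* constants come from av1/common/restoration.h, AOMMIN and ROUND_POWER_OF_TWO from aom_dsp_common.h):

static void calc_ab_one(int32_t sum1, int32_t sum2, int bit_depth, int n,
                        uint32_t s, int32_t *a_out, int32_t *b_out) {
  const uint32_t p = (uint32_t)compute_p_scalar(sum1, sum2, bit_depth, n);
  const uint32_t z =
      AOMMIN(ROUND_POWER_OF_TWO(p * s, SGRPROJ_MTABLE_BITS), 255);
  const int32_t a = av1_x_by_xplus1[z];
  // SGRPROJ_SGR - a and av1_one_by_x[n - 1] are both small, so multiply them
  // first and bring in the (possibly large) box sum last, as the vector code
  // does with a madd followed by a mullo.
  const uint32_t b_int = (uint32_t)(SGRPROJ_SGR - a) *
                         (uint32_t)av1_one_by_x[n - 1] * (uint32_t)sum1;
  *a_out = a;
  *b_out = (int32_t)ROUND_POWER_OF_TWO(b_int, SGRPROJ_RECIP_BITS);
}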
285 | | |
286 | | // Calculate 8 values of the "cross sum" starting at buf. This is a 3x3 filter |
287 | | // where the outer four corners have weight 3 and all other pixels have weight |
288 | | // 4. |
289 | | // |
290 | | // Pixels are indexed as follows: |
291 | | // xtl xt xtr |
292 | | // xl x xr |
293 | | // xbl xb xbr |
294 | | // |
295 | | // buf points to x |
296 | | // |
297 | | // fours = xl + xt + xr + xb + x |
298 | | // threes = xtl + xtr + xbr + xbl |
299 | | // cross_sum = 4 * fours + 3 * threes |
300 | | // = 4 * (fours + threes) - threes |
301 | | // = (fours + threes) << 2 - threes |
302 | 375M | static inline __m256i cross_sum(const int32_t *buf, int stride) { |
303 | 375M | const __m256i xtl = yy_loadu_256(buf - 1 - stride); |
304 | 375M | const __m256i xt = yy_loadu_256(buf - stride); |
305 | 375M | const __m256i xtr = yy_loadu_256(buf + 1 - stride); |
306 | 375M | const __m256i xl = yy_loadu_256(buf - 1); |
307 | 375M | const __m256i x = yy_loadu_256(buf); |
308 | 375M | const __m256i xr = yy_loadu_256(buf + 1); |
309 | 375M | const __m256i xbl = yy_loadu_256(buf - 1 + stride); |
310 | 375M | const __m256i xb = yy_loadu_256(buf + stride); |
311 | 375M | const __m256i xbr = yy_loadu_256(buf + 1 + stride); |
312 | | |
313 | 375M | const __m256i fours = _mm256_add_epi32( |
314 | 375M | xl, _mm256_add_epi32(xt, _mm256_add_epi32(xr, _mm256_add_epi32(xb, x)))); |
315 | 375M | const __m256i threes = |
316 | 375M | _mm256_add_epi32(xtl, _mm256_add_epi32(xtr, _mm256_add_epi32(xbr, xbl))); |
317 | | |
318 | 375M | return _mm256_sub_epi32(_mm256_slli_epi32(_mm256_add_epi32(fours, threes), 2), |
319 | 375M | threes); |
320 | 375M | } |
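Written out per pixel, this is a 3x3 weighted sum whose weights total 5 * 4 + 4 * 3 = 32 = 2^5 (five pixels of weight 4, four corners of weight 3), which is why final_filter below normalises with nb = 5. A scalar equivalent (cross_sum_scalar is an illustrative name):

static int32_t cross_sum_scalar(const int32_t *buf, int stride) {
  const int32_t fours =
      buf[-1] + buf[-stride] + buf[1] + buf[stride] + buf[0];
  const int32_t threes = buf[-1 - stride] + buf[1 - stride] +
                         buf[-1 + stride] + buf[1 + stride];
  return 4 * fours + 3 * threes;
}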
321 | | |
322 | | // The final filter for self-guided restoration. Computes a weighted average |
323 | | // across A, B with "cross sums" (see cross_sum implementation above). |
324 | | static void final_filter(int32_t *dst, int dst_stride, const int32_t *A, |
325 | | const int32_t *B, int buf_stride, const void *dgd8, |
326 | 518k | int dgd_stride, int width, int height, int highbd) { |
327 | 518k | const int nb = 5; |
328 | 518k | const __m256i rounding = |
329 | 518k | round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); |
330 | 518k | const uint8_t *dgd_real = |
331 | 518k | highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8; |
332 | | |
333 | 26.6M | for (int i = 0; i < height; ++i) { |
334 | 217M | for (int j = 0; j < width; j += 8) { |
335 | 190M | const __m256i a = cross_sum(A + i * buf_stride + j, buf_stride); |
336 | 190M | const __m256i b = cross_sum(B + i * buf_stride + j, buf_stride); |
337 | | |
338 | 190M | const __m128i raw = |
339 | 190M | xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd)); |
340 | 190M | const __m256i src = |
341 | 190M | highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw); |
342 | | |
343 | 190M | __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b); |
344 | 190M | __m256i w = _mm256_srai_epi32(_mm256_add_epi32(v, rounding), |
345 | 190M | SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); |
346 | | |
347 | 190M | yy_storeu_256(dst + i * dst_stride + j, w); |
348 | 190M | } |
349 | 26.1M | } |
350 | 518k | } |
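Per pixel, the filter computes (cross_sum(A) * src + cross_sum(B) + rounding) >> (SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS), where 2^nb is the total cross-sum weight. A one-sample sketch (final_filter_one is an illustrative name; constants from restoration.h):

static int32_t final_filter_one(int32_t a_cross, int32_t b_cross, int32_t src,
                                int nb) {
  // nb = 5 here; the subsampled filter further below uses 5 on even rows and
  // 4 on odd rows, matching its smaller cross-sum weights.
  const int shift = SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS;
  const int64_t v = (int64_t)a_cross * src + b_cross;
  return (int32_t)((v + ((1 << shift) >> 1)) >> shift);
}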
351 | | |
352 | | // Assumes that C, D are integral images for the original buffer which has been |
353 | | // extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels |
354 | | // on the sides. A, B, C, D point at logical position (0, 0). |
355 | | static void calc_ab_fast(int32_t *A, int32_t *B, const int32_t *C, |
356 | | const int32_t *D, int width, int height, |
357 | | int buf_stride, int bit_depth, int sgr_params_idx, |
358 | 471k | int radius_idx) { |
359 | 471k | const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; |
360 | 471k | const int r = params->r[radius_idx]; |
361 | 471k | const int n = (2 * r + 1) * (2 * r + 1); |
362 | 471k | const __m256i s = _mm256_set1_epi32(params->s[radius_idx]); |
363 | | // one_over_n[n-1] is 2^12/n, so easily fits in an int16 |
364 | 471k | const __m256i one_over_n = _mm256_set1_epi32(av1_one_by_x[n - 1]); |
365 | | |
366 | 471k | const __m256i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS); |
367 | 471k | const __m256i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS); |
368 | | |
369 | | // Set up masks |
370 | 471k | const __m128i ones32 = _mm_set_epi32(0, 0, ~0, ~0); |
371 | 471k | __m256i mask[8]; |
372 | 4.23M | for (int idx = 0; idx < 8; idx++) { |
373 | 3.76M | const __m128i shift = _mm_cvtsi32_si128(8 * (8 - idx)); |
374 | 3.76M | mask[idx] = _mm256_cvtepi8_epi32(_mm_srl_epi64(ones32, shift)); |
375 | 3.76M | } |
376 | | |
377 | 12.1M | for (int i = -1; i < height + 1; i += 2) { |
378 | 87.3M | for (int j = -1; j < width + 1; j += 8) { |
379 | 75.6M | const int32_t *Cij = C + i * buf_stride + j; |
380 | 75.6M | const int32_t *Dij = D + i * buf_stride + j; |
381 | | |
382 | 75.6M | __m256i sum1 = boxsum_from_ii(Dij, buf_stride, r); |
383 | 75.6M | __m256i sum2 = boxsum_from_ii(Cij, buf_stride, r); |
384 | | |
385 | | // When width + 2 isn't a multiple of 8, sum1 and sum2 will contain |
386 | | // some uninitialised data in their upper words. We use a mask to |
387 | | // ensure that these bits are set to 0. |
388 | 75.6M | int idx = AOMMIN(8, width + 1 - j); |
389 | 75.6M | assert(idx >= 1); |
390 | | |
391 | 75.7M | if (idx < 8) { |
392 | 11.8M | sum1 = _mm256_and_si256(mask[idx], sum1); |
393 | 11.8M | sum2 = _mm256_and_si256(mask[idx], sum2); |
394 | 11.8M | } |
395 | | |
396 | 75.7M | const __m256i p = compute_p(sum1, sum2, bit_depth, n); |
397 | | |
398 | 75.7M | const __m256i z = _mm256_min_epi32( |
399 | 75.7M | _mm256_srli_epi32(_mm256_add_epi32(_mm256_mullo_epi32(p, s), rnd_z), |
400 | 75.7M | SGRPROJ_MTABLE_BITS), |
401 | 75.7M | _mm256_set1_epi32(255)); |
402 | | |
403 | 75.7M | const __m256i a_res = _mm256_i32gather_epi32(av1_x_by_xplus1, z, 4); |
404 | | |
405 | 75.7M | yy_storeu_256(A + i * buf_stride + j, a_res); |
406 | | |
407 | 75.7M | const __m256i a_complement = |
408 | 75.7M | _mm256_sub_epi32(_mm256_set1_epi32(SGRPROJ_SGR), a_res); |
409 | | |
410 | | // sum1 might have lanes greater than 2^15, so we can't use madd to do |
411 | | // multiplication involving sum1. However, a_complement and one_over_n |
412 | | // are both small enough for a 16-bit madd, so we can multiply them first. |
413 | 75.7M | const __m256i a_comp_over_n = _mm256_madd_epi16(a_complement, one_over_n); |
414 | 75.7M | const __m256i b_int = _mm256_mullo_epi32(a_comp_over_n, sum1); |
415 | 75.7M | const __m256i b_res = _mm256_srli_epi32(_mm256_add_epi32(b_int, rnd_res), |
416 | 75.7M | SGRPROJ_RECIP_BITS); |
417 | | |
418 | 75.7M | yy_storeu_256(B + i * buf_stride + j, b_res); |
419 | 75.7M | } |
420 | 11.6M | } |
421 | 471k | } |
422 | | |
423 | | // Calculate 8 values of the "cross sum" starting at buf. |
424 | | // |
425 | | // Pixels are indexed like this: |
426 | | // xtl xt xtr |
427 | | // - buf - |
428 | | // xbl xb xbr |
429 | | // |
430 | | // Pixels are weighted like this: |
431 | | // 5 6 5 |
432 | | // 0 0 0 |
433 | | // 5 6 5 |
434 | | // |
435 | | // fives = xtl + xtr + xbl + xbr |
436 | | // sixes = xt + xb |
437 | | // cross_sum = 6 * sixes + 5 * fives |
438 | | // = 5 * (fives + sixes) + sixes |
439 | | // = (fives + sixes) << 2 + (fives + sixes) + sixes |
440 | 181M | static inline __m256i cross_sum_fast_even_row(const int32_t *buf, int stride) { |
441 | 181M | const __m256i xtl = yy_loadu_256(buf - 1 - stride); |
442 | 181M | const __m256i xt = yy_loadu_256(buf - stride); |
443 | 181M | const __m256i xtr = yy_loadu_256(buf + 1 - stride); |
444 | 181M | const __m256i xbl = yy_loadu_256(buf - 1 + stride); |
445 | 181M | const __m256i xb = yy_loadu_256(buf + stride); |
446 | 181M | const __m256i xbr = yy_loadu_256(buf + 1 + stride); |
447 | | |
448 | 181M | const __m256i fives = |
449 | 181M | _mm256_add_epi32(xtl, _mm256_add_epi32(xtr, _mm256_add_epi32(xbr, xbl))); |
450 | 181M | const __m256i sixes = _mm256_add_epi32(xt, xb); |
451 | 181M | const __m256i fives_plus_sixes = _mm256_add_epi32(fives, sixes); |
452 | | |
453 | 181M | return _mm256_add_epi32( |
454 | 181M | _mm256_add_epi32(_mm256_slli_epi32(fives_plus_sixes, 2), |
455 | 181M | fives_plus_sixes), |
456 | 181M | sixes); |
457 | 181M | } |
458 | | |
459 | | // Calculate 8 values of the "cross sum" starting at buf. |
460 | | // |
461 | | // Pixels are indexed like this: |
462 | | // xl x xr |
463 | | // |
464 | | // Pixels are weighted like this: |
465 | | // 5 6 5 |
466 | | // |
467 | | // buf points to x |
468 | | // |
469 | | // fives = xl + xr |
470 | | // sixes = x |
471 | | // cross_sum = 5 * fives + 6 * sixes |
472 | | // = 4 * (fives + sixes) + (fives + sixes) + sixes |
473 | | // = (fives + sixes) << 2 + (fives + sixes) + sixes |
474 | 179M | static inline __m256i cross_sum_fast_odd_row(const int32_t *buf) { |
475 | 179M | const __m256i xl = yy_loadu_256(buf - 1); |
476 | 179M | const __m256i x = yy_loadu_256(buf); |
477 | 179M | const __m256i xr = yy_loadu_256(buf + 1); |
478 | | |
479 | 179M | const __m256i fives = _mm256_add_epi32(xl, xr); |
480 | 179M | const __m256i sixes = x; |
481 | | |
482 | 179M | const __m256i fives_plus_sixes = _mm256_add_epi32(fives, sixes); |
483 | | |
484 | 179M | return _mm256_add_epi32( |
485 | 179M | _mm256_add_epi32(_mm256_slli_epi32(fives_plus_sixes, 2), |
486 | 179M | fives_plus_sixes), |
487 | 179M | sixes); |
488 | 179M | } |
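These two subsampled cross sums exist because calc_ab_fast only fills every other row of A and B: even output rows read the computed rows just above and below (weights 5 * 4 + 6 * 2 = 32 = 2^5, hence nb0 = 5 in final_filter_fast), while odd output rows read only their own row (weights 5 * 2 + 6 = 16 = 2^4, hence nb1 = 4). Scalar equivalents (illustrative names):

static int32_t cross_sum_fast_even_scalar(const int32_t *buf, int stride) {
  const int32_t fives = buf[-1 - stride] + buf[1 - stride] +
                        buf[-1 + stride] + buf[1 + stride];
  const int32_t sixes = buf[-stride] + buf[stride];
  return 5 * fives + 6 * sixes;
}

static int32_t cross_sum_fast_odd_scalar(const int32_t *buf) {
  return 5 * (buf[-1] + buf[1]) + 6 * buf[0];
}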
489 | | |
490 | | // The final filter for the self-guided restoration. Computes a |
491 | | // weighted average across A, B with "cross sums" (see cross_sum_... |
492 | | // implementations above). |
493 | | static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A, |
494 | | const int32_t *B, int buf_stride, |
495 | | const void *dgd8, int dgd_stride, int width, |
496 | 469k | int height, int highbd) { |
497 | 469k | const int nb0 = 5; |
498 | 469k | const int nb1 = 4; |
499 | | |
500 | 469k | const __m256i rounding0 = |
501 | 469k | round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS); |
502 | 469k | const __m256i rounding1 = |
503 | 469k | round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS); |
504 | | |
505 | 469k | const uint8_t *dgd_real = |
506 | 469k | highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8; |
507 | | |
508 | 24.7M | for (int i = 0; i < height; ++i) { |
509 | 24.2M | if (!(i & 1)) { // even row |
510 | 103M | for (int j = 0; j < width; j += 8) { |
511 | 91.3M | const __m256i a = |
512 | 91.3M | cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride); |
513 | 91.3M | const __m256i b = |
514 | 91.3M | cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride); |
515 | | |
516 | 91.3M | const __m128i raw = |
517 | 91.3M | xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd)); |
518 | 91.3M | const __m256i src = |
519 | 91.3M | highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw); |
520 | | |
521 | 91.3M | __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b); |
522 | 91.3M | __m256i w = |
523 | 91.3M | _mm256_srai_epi32(_mm256_add_epi32(v, rounding0), |
524 | 91.3M | SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS); |
525 | | |
526 | 91.3M | yy_storeu_256(dst + i * dst_stride + j, w); |
527 | 91.3M | } |
528 | 12.3M | } else { // odd row |
529 | 103M | for (int j = 0; j < width; j += 8) { |
530 | 91.1M | const __m256i a = cross_sum_fast_odd_row(A + i * buf_stride + j); |
531 | 91.1M | const __m256i b = cross_sum_fast_odd_row(B + i * buf_stride + j); |
532 | | |
533 | 91.1M | const __m128i raw = |
534 | 91.1M | xx_loadu_128(dgd_real + ((i * dgd_stride + j) << highbd)); |
535 | 91.1M | const __m256i src = |
536 | 91.1M | highbd ? _mm256_cvtepu16_epi32(raw) : _mm256_cvtepu8_epi32(raw); |
537 | | |
538 | 91.1M | __m256i v = _mm256_add_epi32(_mm256_madd_epi16(a, src), b); |
539 | 91.1M | __m256i w = |
540 | 91.1M | _mm256_srai_epi32(_mm256_add_epi32(v, rounding1), |
541 | 91.1M | SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS); |
542 | | |
543 | 91.1M | yy_storeu_256(dst + i * dst_stride + j, w); |
544 | 91.1M | } |
545 | 11.9M | } |
546 | 24.2M | } |
547 | 469k | } |
548 | | |
549 | | int av1_selfguided_restoration_avx2(const uint8_t *dgd8, int width, int height, |
550 | | int dgd_stride, int32_t *flt0, |
551 | | int32_t *flt1, int flt_stride, |
552 | | int sgr_params_idx, int bit_depth, |
553 | 550k | int highbd) { |
554 | | // The ALIGN_POWER_OF_TWO macro here ensures that column 1 of Atl, Btl, |
555 | | // Ctl and Dtl is 32-byte aligned. |
556 | 550k | const int buf_elts = ALIGN_POWER_OF_TWO(RESTORATION_PROC_UNIT_PELS, 3); |
557 | | |
558 | 550k | int32_t *buf = aom_memalign( |
559 | 550k | 32, 4 * sizeof(*buf) * ALIGN_POWER_OF_TWO(RESTORATION_PROC_UNIT_PELS, 3)); |
560 | 550k | if (!buf) return -1; |
561 | | |
562 | 550k | const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; |
563 | 550k | const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; |
564 | | |
565 | | // Adjusting the stride of A and B here appears to avoid bad cache effects, |
566 | | // leading to a significant speed improvement. |
567 | | // We also align the stride to a multiple of 32 bytes for efficiency. |
568 | 550k | int buf_stride = ALIGN_POWER_OF_TWO(width_ext + 16, 3); |
569 | | |
570 | | // The "tl" pointers point at the top-left of the initialised data for the |
571 | | // array. |
572 | 550k | int32_t *Atl = buf + 0 * buf_elts + 7; |
573 | 550k | int32_t *Btl = buf + 1 * buf_elts + 7; |
574 | 550k | int32_t *Ctl = buf + 2 * buf_elts + 7; |
575 | 550k | int32_t *Dtl = buf + 3 * buf_elts + 7; |
576 | | |
577 | | // The "0" pointers are (- SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). Note |
578 | | // there's a zero row and column in A, B (integral images), so we move down |
579 | | // and right one for them. |
580 | 550k | const int buf_diag_border = |
581 | 550k | SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT; |
582 | | |
583 | 550k | int32_t *A0 = Atl + 1 + buf_stride; |
584 | 550k | int32_t *B0 = Btl + 1 + buf_stride; |
585 | 550k | int32_t *C0 = Ctl + 1 + buf_stride; |
586 | 550k | int32_t *D0 = Dtl + 1 + buf_stride; |
587 | | |
588 | | // Finally, A, B, C, D point at position (0, 0). |
589 | 550k | int32_t *A = A0 + buf_diag_border; |
590 | 550k | int32_t *B = B0 + buf_diag_border; |
591 | 550k | int32_t *C = C0 + buf_diag_border; |
592 | 550k | int32_t *D = D0 + buf_diag_border; |
593 | | |
594 | 550k | const int dgd_diag_border = |
595 | 550k | SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT; |
596 | 550k | const uint8_t *dgd0 = dgd8 - dgd_diag_border; |
597 | | |
598 | | // Generate integral images from the input. C will contain sums of squares; D |
599 | | // will contain just sums |
600 | 550k | if (highbd) |
601 | 286k | integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext, |
602 | 286k | height_ext, Ctl, Dtl, buf_stride); |
603 | 264k | else |
604 | 264k | integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl, |
605 | 264k | buf_stride); |
606 | | |
607 | 550k | const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; |
608 | | // Write to flt0 and flt1 |
609 | | // If params->r == 0 we skip the corresponding filter. We only allow one of |
610 | | // the radii to be 0, as having both equal to 0 would be equivalent to |
611 | | // skipping SGR entirely. |
612 | 550k | assert(!(params->r[0] == 0 && params->r[1] == 0)); |
613 | 550k | assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ)); |
614 | 550k | assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ)); |
615 | | |
616 | 550k | if (params->r[0] > 0) { |
617 | 470k | calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth, |
618 | 470k | sgr_params_idx, 0); |
619 | 470k | final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride, |
620 | 470k | width, height, highbd); |
621 | 470k | } |
622 | | |
623 | 550k | if (params->r[1] > 0) { |
624 | 519k | calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx, |
625 | 519k | 1); |
626 | 519k | final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width, |
627 | 519k | height, highbd); |
628 | 519k | } |
629 | 550k | aom_free(buf); |
630 | 550k | return 0; |
631 | 550k | } |
632 | | |
633 | | int av1_apply_selfguided_restoration_avx2(const uint8_t *dat8, int width, |
634 | | int height, int stride, int eps, |
635 | | const int *xqd, uint8_t *dst8, |
636 | | int dst_stride, int32_t *tmpbuf, |
637 | 550k | int bit_depth, int highbd) { |
638 | 550k | int32_t *flt0 = tmpbuf; |
639 | 550k | int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX; |
640 | 550k | assert(width * height <= RESTORATION_UNITPELS_MAX); |
641 | 550k | const int ret = av1_selfguided_restoration_avx2( |
642 | 550k | dat8, width, height, stride, flt0, flt1, width, eps, bit_depth, highbd); |
643 | 550k | if (ret != 0) return ret; |
644 | 550k | const sgr_params_type *const params = &av1_sgr_params[eps]; |
645 | 550k | int xq[2]; |
646 | 550k | av1_decode_xq(xqd, xq, params); |
647 | | |
648 | 550k | __m256i xq0 = _mm256_set1_epi32(xq[0]); |
649 | 550k | __m256i xq1 = _mm256_set1_epi32(xq[1]); |
650 | | |
651 | 24.1M | for (int i = 0; i < height; ++i) { |
652 | | // Calculate output in batches of 16 pixels |
653 | 106M | for (int j = 0; j < width; j += 16) { |
654 | 82.7M | const int k = i * width + j; |
655 | 82.7M | const int m = i * dst_stride + j; |
656 | | |
657 | 82.7M | const uint8_t *dat8ij = dat8 + i * stride + j; |
658 | 82.7M | __m256i ep_0, ep_1; |
659 | 82.7M | __m128i src_0, src_1; |
660 | 82.7M | if (highbd) { |
661 | 42.9M | src_0 = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij)); |
662 | 42.9M | src_1 = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij + 8)); |
663 | 42.9M | ep_0 = _mm256_cvtepu16_epi32(src_0); |
664 | 42.9M | ep_1 = _mm256_cvtepu16_epi32(src_1); |
665 | 42.9M | } else { |
666 | 39.8M | src_0 = xx_loadu_128(dat8ij); |
667 | 39.8M | ep_0 = _mm256_cvtepu8_epi32(src_0); |
668 | 39.8M | ep_1 = _mm256_cvtepu8_epi32(_mm_srli_si128(src_0, 8)); |
669 | 39.8M | } |
670 | | |
671 | 82.7M | const __m256i u_0 = _mm256_slli_epi32(ep_0, SGRPROJ_RST_BITS); |
672 | 82.7M | const __m256i u_1 = _mm256_slli_epi32(ep_1, SGRPROJ_RST_BITS); |
673 | | |
674 | 82.7M | __m256i v_0 = _mm256_slli_epi32(u_0, SGRPROJ_PRJ_BITS); |
675 | 82.7M | __m256i v_1 = _mm256_slli_epi32(u_1, SGRPROJ_PRJ_BITS); |
676 | | |
677 | 82.7M | if (params->r[0] > 0) { |
678 | 76.9M | const __m256i f1_0 = _mm256_sub_epi32(yy_loadu_256(&flt0[k]), u_0); |
679 | 76.9M | v_0 = _mm256_add_epi32(v_0, _mm256_mullo_epi32(xq0, f1_0)); |
680 | | |
681 | 76.9M | const __m256i f1_1 = _mm256_sub_epi32(yy_loadu_256(&flt0[k + 8]), u_1); |
682 | 76.9M | v_1 = _mm256_add_epi32(v_1, _mm256_mullo_epi32(xq0, f1_1)); |
683 | 76.9M | } |
684 | | |
685 | 82.7M | if (params->r[1] > 0) { |
686 | 76.7M | const __m256i f2_0 = _mm256_sub_epi32(yy_loadu_256(&flt1[k]), u_0); |
687 | 76.7M | v_0 = _mm256_add_epi32(v_0, _mm256_mullo_epi32(xq1, f2_0)); |
688 | | |
689 | 76.7M | const __m256i f2_1 = _mm256_sub_epi32(yy_loadu_256(&flt1[k + 8]), u_1); |
690 | 76.7M | v_1 = _mm256_add_epi32(v_1, _mm256_mullo_epi32(xq1, f2_1)); |
691 | 76.7M | } |
692 | | |
693 | 82.7M | const __m256i rounding = |
694 | 82.7M | round_for_shift(SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); |
695 | 82.7M | const __m256i w_0 = _mm256_srai_epi32( |
696 | 82.7M | _mm256_add_epi32(v_0, rounding), SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); |
697 | 82.7M | const __m256i w_1 = _mm256_srai_epi32( |
698 | 82.7M | _mm256_add_epi32(v_1, rounding), SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); |
699 | | |
700 | 82.7M | if (highbd) { |
701 | | // Pack into 16 bits and clamp to [0, 2^bit_depth) |
702 | | // Note that packing into 16 bits messes up the order of the bits, |
703 | | // so we use a permute function to correct this |
704 | 42.8M | const __m256i tmp = _mm256_packus_epi32(w_0, w_1); |
705 | 42.8M | const __m256i tmp2 = _mm256_permute4x64_epi64(tmp, 0xd8); |
706 | 42.8M | const __m256i max = _mm256_set1_epi16((1 << bit_depth) - 1); |
707 | 42.8M | const __m256i res = _mm256_min_epi16(tmp2, max); |
708 | 42.8M | yy_storeu_256(CONVERT_TO_SHORTPTR(dst8 + m), res); |
709 | 42.8M | } else { |
710 | | // Pack into 8 bits and clamp to [0, 256) |
711 | | // Note that each pack messes up the order of the bits, |
712 | | // so we use a permute function to correct this |
713 | 39.8M | const __m256i tmp = _mm256_packs_epi32(w_0, w_1); |
714 | 39.8M | const __m256i tmp2 = _mm256_permute4x64_epi64(tmp, 0xd8); |
715 | 39.8M | const __m256i res = |
716 | 39.8M | _mm256_packus_epi16(tmp2, tmp2 /* "don't care" value */); |
717 | 39.8M | const __m128i res2 = |
718 | 39.8M | _mm256_castsi256_si128(_mm256_permute4x64_epi64(res, 0xd8)); |
719 | 39.8M | xx_storeu_128(dst8 + m, res2); |
720 | 39.8M | } |
721 | 82.7M | } |
722 | 23.6M | } |
723 | 550k | return 0; |
724 | 550k | } |
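Per pixel, the projection above computes, with u = src << SGRPROJ_RST_BITS, the value v = (u << SGRPROJ_PRJ_BITS) + xq[0] * (flt0 - u) + xq[1] * (flt1 - u) (dropping a term when the corresponding radius is zero), then rounds by SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS; the saturating packs clamp the result to the valid pixel range. A one-sample sketch (apply_sgr_one is an illustrative name; constants from restoration.h):

static int32_t apply_sgr_one(int32_t src, int32_t f0, int32_t f1,
                             const int xq[2], int use_r0, int use_r1) {
  const int shift = SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS;
  const int32_t u = src << SGRPROJ_RST_BITS;
  int64_t v = (int64_t)u << SGRPROJ_PRJ_BITS;
  if (use_r0) v += (int64_t)xq[0] * (f0 - u);
  if (use_r1) v += (int64_t)xq[1] * (f1 - u);
  // The caller still clamps to [0, 2^bit_depth), as the packs do above.
  return (int32_t)((v + ((1 << shift) >> 1)) >> shift);
}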