/src/aom/av1/common/x86/selfguided_sse4.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2018, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <smmintrin.h> |
13 | | |
14 | | #include "config/aom_config.h" |
15 | | #include "config/av1_rtcd.h" |
16 | | |
17 | | #include "av1/common/restoration.h" |
18 | | #include "aom_dsp/x86/synonyms.h" |
19 | | |
20 | | // Load 4 bytes from the possibly-misaligned pointer p, extend each byte to |
21 | | // 32-bit precision and return them in an SSE register. |
22 | 0 | static __m128i xx_load_extend_8_32(const void *p) { |
23 | 0 | return _mm_cvtepu8_epi32(xx_loadl_32(p)); |
24 | 0 | } |
25 | | |
26 | | // Load 4 halfwords from the possibly-misaligned pointer p, extend each |
27 | | // halfword to 32-bit precision and return them in an SSE register. |
28 | 0 | static __m128i xx_load_extend_16_32(const void *p) { |
29 | 0 | return _mm_cvtepu16_epi32(xx_loadl_64(p)); |
30 | 0 | } |
31 | | |
32 | | // Compute the scan of an SSE register holding 4 32-bit integers. If the |
33 | | // register holds x0..x3 then the scan will hold x0, x0+x1, x0+x1+x2, |
34 | | // x0+x1+x2+x3 |
35 | 0 | static __m128i scan_32(__m128i x) { |
36 | 0 | const __m128i x01 = _mm_add_epi32(x, _mm_slli_si128(x, 4)); |
37 | 0 | return _mm_add_epi32(x01, _mm_slli_si128(x01, 8)); |
38 | 0 | } |
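// Editorial sketch (not part of the original file): the shift-and-add in
// scan_32 is a 4-lane inclusive prefix sum. A minimal scalar equivalent,
// assuming a plain int32_t[4] stands in for the SSE register (hypothetical
// helper, not built into the library):
static void scan_32_scalar_sketch(const int32_t x[4], int32_t out[4]) {
  int32_t acc = 0;
  for (int k = 0; k < 4; ++k) {
    acc += x[k];  // out[k] = x[0] + x[1] + ... + x[k]
    out[k] = acc;
  }
}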
39 | | |
40 | | // Compute two integral images from src. B sums elements; A sums their |
41 | | // squares. The images are offset by one pixel, so they have dimensions |
42 | | // (width + 1) x (height + 1), and their first row and column are zero. |
43 | | // |
44 | | // A+1 and B+1 should be aligned to 16 bytes. buf_stride should be a multiple |
45 | | // of 4. |
46 | | static void integral_images(const uint8_t *src, int src_stride, int width, |
47 | | int height, int32_t *A, int32_t *B, |
48 | 0 | int buf_stride) { |
49 | | // Write out the zero top row |
50 | 0 | memset(A, 0, sizeof(*A) * (width + 1)); |
51 | 0 | memset(B, 0, sizeof(*B) * (width + 1)); |
52 | |
|
53 | 0 | const __m128i zero = _mm_setzero_si128(); |
54 | 0 | for (int i = 0; i < height; ++i) { |
55 | | // Zero the left column. |
56 | 0 | A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0; |
57 | | |
58 | | // ldiff is the difference H - D where H is the output sample immediately |
59 | | // to the left and D is the output sample above it. These are scalars, |
60 | | // replicated across the four lanes. |
61 | 0 | __m128i ldiff1 = zero, ldiff2 = zero; |
62 | 0 | for (int j = 0; j < width; j += 4) { |
63 | 0 | const int ABj = 1 + j; |
64 | |
|
65 | 0 | const __m128i above1 = xx_load_128(B + ABj + i * buf_stride); |
66 | 0 | const __m128i above2 = xx_load_128(A + ABj + i * buf_stride); |
67 | |
|
68 | 0 | const __m128i x1 = xx_load_extend_8_32(src + j + i * src_stride); |
69 | 0 | const __m128i x2 = _mm_madd_epi16(x1, x1); |
70 | |
|
71 | 0 | const __m128i sc1 = scan_32(x1); |
72 | 0 | const __m128i sc2 = scan_32(x2); |
73 | |
|
74 | 0 | const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1); |
75 | 0 | const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2); |
76 | |
|
77 | 0 | xx_store_128(B + ABj + (i + 1) * buf_stride, row1); |
78 | 0 | xx_store_128(A + ABj + (i + 1) * buf_stride, row2); |
79 | | |
80 | | // Calculate the new H - D. |
81 | 0 | ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff); |
82 | 0 | ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff); |
83 | 0 | } |
84 | 0 | } |
85 | 0 | } |
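// Editorial sketch (not part of the original file): a scalar reference for the
// same pair of integral images, using the usual recurrence
//   II[i+1][j+1] = II[i][j+1] + II[i+1][j] - II[i][j] + f(src[i][j])
// with f(x) = x for B and f(x) = x * x for A. The SIMD loop above reaches the
// same result with a 4-lane prefix sum plus the replicated H - D ("ldiff")
// term. Hypothetical helper, not built into the library:
static void integral_images_scalar_sketch(const uint8_t *src, int src_stride,
                                          int width, int height, int32_t *A,
                                          int32_t *B, int buf_stride) {
  for (int j = 0; j <= width; ++j) A[j] = B[j] = 0;
  for (int i = 0; i <= height; ++i) A[i * buf_stride] = B[i * buf_stride] = 0;
  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; ++j) {
      const int32_t x = src[i * src_stride + j];
      const int ij = (i + 1) * buf_stride + (j + 1);
      B[ij] = B[ij - 1] + B[ij - buf_stride] - B[ij - buf_stride - 1] + x;
      A[ij] = A[ij - 1] + A[ij - buf_stride] - A[ij - buf_stride - 1] + x * x;
    }
  }
}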
86 | | |
87 | | // Compute two integral images from src. B sums elements; A sums their squares |
88 | | // |
89 | | // A and B should be aligned to 16 bytes. buf_stride should be a multiple of 4. |
90 | | static void integral_images_highbd(const uint16_t *src, int src_stride, |
91 | | int width, int height, int32_t *A, |
92 | 0 | int32_t *B, int buf_stride) { |
93 | | // Write out the zero top row |
94 | 0 | memset(A, 0, sizeof(*A) * (width + 1)); |
95 | 0 | memset(B, 0, sizeof(*B) * (width + 1)); |
96 | |
|
97 | 0 | const __m128i zero = _mm_setzero_si128(); |
98 | 0 | for (int i = 0; i < height; ++i) { |
99 | | // Zero the left column. |
100 | 0 | A[(i + 1) * buf_stride] = B[(i + 1) * buf_stride] = 0; |
101 | | |
102 | | // ldiff is the difference H - D where H is the output sample immediately |
103 | | // to the left and D is the output sample above it. These are scalars, |
104 | | // replicated across the four lanes. |
105 | 0 | __m128i ldiff1 = zero, ldiff2 = zero; |
106 | 0 | for (int j = 0; j < width; j += 4) { |
107 | 0 | const int ABj = 1 + j; |
108 | |
|
109 | 0 | const __m128i above1 = xx_load_128(B + ABj + i * buf_stride); |
110 | 0 | const __m128i above2 = xx_load_128(A + ABj + i * buf_stride); |
111 | |
|
112 | 0 | const __m128i x1 = xx_load_extend_16_32(src + j + i * src_stride); |
113 | 0 | const __m128i x2 = _mm_madd_epi16(x1, x1); |
114 | |
|
115 | 0 | const __m128i sc1 = scan_32(x1); |
116 | 0 | const __m128i sc2 = scan_32(x2); |
117 | |
|
118 | 0 | const __m128i row1 = _mm_add_epi32(_mm_add_epi32(sc1, above1), ldiff1); |
119 | 0 | const __m128i row2 = _mm_add_epi32(_mm_add_epi32(sc2, above2), ldiff2); |
120 | |
|
121 | 0 | xx_store_128(B + ABj + (i + 1) * buf_stride, row1); |
122 | 0 | xx_store_128(A + ABj + (i + 1) * buf_stride, row2); |
123 | | |
124 | | // Calculate the new H - D. |
125 | 0 | ldiff1 = _mm_shuffle_epi32(_mm_sub_epi32(row1, above1), 0xff); |
126 | 0 | ldiff2 = _mm_shuffle_epi32(_mm_sub_epi32(row2, above2), 0xff); |
127 | 0 | } |
128 | 0 | } |
129 | 0 | } |
130 | | |
131 | | // Compute 4 values of boxsum from the given integral image. ii should point |
132 | | // at the middle of the box (for the first value). r is the box radius. |
133 | 0 | static inline __m128i boxsum_from_ii(const int32_t *ii, int stride, int r) { |
134 | 0 | const __m128i tl = xx_loadu_128(ii - (r + 1) - (r + 1) * stride); |
135 | 0 | const __m128i tr = xx_loadu_128(ii + (r + 0) - (r + 1) * stride); |
136 | 0 | const __m128i bl = xx_loadu_128(ii - (r + 1) + r * stride); |
137 | 0 | const __m128i br = xx_loadu_128(ii + (r + 0) + r * stride); |
138 | 0 | const __m128i u = _mm_sub_epi32(tr, tl); |
139 | 0 | const __m128i v = _mm_sub_epi32(br, bl); |
140 | 0 | return _mm_sub_epi32(v, u); |
141 | 0 | } |
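// Editorial sketch (not part of the original file): boxsum_from_ii is the
// standard inclusion-exclusion identity on an integral image. For a single
// output value, with ii pointing at the centre of the (2r+1) x (2r+1) box,
// the scalar equivalent would be (hypothetical helper):
static int32_t boxsum_from_ii_scalar_sketch(const int32_t *ii, int stride,
                                            int r) {
  const int32_t tl = ii[-(r + 1) - (r + 1) * stride];
  const int32_t tr = ii[(r + 0) - (r + 1) * stride];
  const int32_t bl = ii[-(r + 1) + r * stride];
  const int32_t br = ii[(r + 0) + r * stride];
  return (br - bl) - (tr - tl);  // = br - bl - tr + tl
}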
142 | | |
143 | 0 | static __m128i round_for_shift(unsigned shift) { |
144 | 0 | return _mm_set1_epi32((1 << shift) >> 1); |
145 | 0 | } |
146 | | |
147 | 0 | static __m128i compute_p(__m128i sum1, __m128i sum2, int bit_depth, int n) { |
148 | 0 | __m128i an, bb; |
149 | 0 | if (bit_depth > 8) { |
150 | 0 | const __m128i rounding_a = round_for_shift(2 * (bit_depth - 8)); |
151 | 0 | const __m128i rounding_b = round_for_shift(bit_depth - 8); |
152 | 0 | const __m128i shift_a = _mm_cvtsi32_si128(2 * (bit_depth - 8)); |
153 | 0 | const __m128i shift_b = _mm_cvtsi32_si128(bit_depth - 8); |
154 | 0 | const __m128i a = _mm_srl_epi32(_mm_add_epi32(sum2, rounding_a), shift_a); |
155 | 0 | const __m128i b = _mm_srl_epi32(_mm_add_epi32(sum1, rounding_b), shift_b); |
156 | | // b < 2^14, so we can use a 16-bit madd rather than a 32-bit |
157 | | // mullo to square it |
158 | 0 | bb = _mm_madd_epi16(b, b); |
159 | 0 | an = _mm_max_epi32(_mm_mullo_epi32(a, _mm_set1_epi32(n)), bb); |
160 | 0 | } else { |
161 | 0 | bb = _mm_madd_epi16(sum1, sum1); |
162 | 0 | an = _mm_mullo_epi32(sum2, _mm_set1_epi32(n)); |
163 | 0 | } |
164 | 0 | return _mm_sub_epi32(an, bb); |
165 | 0 | } |
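// Editorial sketch (not part of the original file): per lane, compute_p
// evaluates p = n * sum(x^2) - (sum x)^2, i.e. n^2 times the box variance,
// with the sums first rounded down towards an 8-bit range when bit_depth > 8
// so that p stays within 32 bits. A scalar reading of the code above
// (hypothetical helper, not built into the library):
static int32_t compute_p_scalar_sketch(int32_t sum1, int32_t sum2,
                                       int bit_depth, int n) {
  if (bit_depth > 8) {
    const int shift_b = bit_depth - 8;
    const int shift_a = 2 * (bit_depth - 8);
    const int32_t b = (sum1 + ((1 << shift_b) >> 1)) >> shift_b;
    const int32_t a = (sum2 + ((1 << shift_a) >> 1)) >> shift_a;
    const int32_t bb = b * b;
    const int32_t an = AOMMAX(a * n, bb);  // keep p non-negative
    return an - bb;
  }
  return sum2 * n - sum1 * sum1;
}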
166 | | |
167 | | // Assumes that C, D are integral images for the original buffer which has been |
168 | | // extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels |
169 | | // on the sides. A, B, C, D point at logical position (0, 0). |
170 | | static void calc_ab(int32_t *A, int32_t *B, const int32_t *C, const int32_t *D, |
171 | | int width, int height, int buf_stride, int bit_depth, |
172 | 0 | int sgr_params_idx, int radius_idx) { |
173 | 0 | const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; |
174 | 0 | const int r = params->r[radius_idx]; |
175 | 0 | const int n = (2 * r + 1) * (2 * r + 1); |
176 | 0 | const __m128i s = _mm_set1_epi32(params->s[radius_idx]); |
177 | | // one_over_n[n-1] is 2^12/n, so easily fits in an int16 |
178 | 0 | const __m128i one_over_n = _mm_set1_epi32(av1_one_by_x[n - 1]); |
179 | |
|
180 | 0 | const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS); |
181 | 0 | const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS); |
182 | | |
183 | | // Set up masks |
184 | 0 | const __m128i ones32 = _mm_set_epi32(0, 0, ~0, ~0); |
185 | 0 | __m128i mask[4]; |
186 | 0 | for (int idx = 0; idx < 4; idx++) { |
187 | 0 | const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx)); |
188 | 0 | mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift)); |
189 | 0 | } |
190 | |
|
191 | 0 | for (int i = -1; i < height + 1; ++i) { |
192 | 0 | for (int j = -1; j < width + 1; j += 4) { |
193 | 0 | const int32_t *Cij = C + i * buf_stride + j; |
194 | 0 | const int32_t *Dij = D + i * buf_stride + j; |
195 | |
|
196 | 0 | __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r); |
197 | 0 | __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r); |
198 | | |
199 | | // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain |
200 | | // some uninitialised data in their upper words. We use a mask to |
201 | | // ensure that these bits are set to 0. |
202 | 0 | int idx = AOMMIN(4, width + 1 - j); |
203 | 0 | assert(idx >= 1); |
204 | | |
205 | 0 | if (idx < 4) { |
206 | 0 | sum1 = _mm_and_si128(mask[idx], sum1); |
207 | 0 | sum2 = _mm_and_si128(mask[idx], sum2); |
208 | 0 | } |
209 | |
|
210 | 0 | const __m128i p = compute_p(sum1, sum2, bit_depth, n); |
211 | |
|
212 | 0 | const __m128i z = _mm_min_epi32( |
213 | 0 | _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z), |
214 | 0 | SGRPROJ_MTABLE_BITS), |
215 | 0 | _mm_set1_epi32(255)); |
216 | | |
217 | | // 'Gather' type instructions are not available pre-AVX2, so synthesize a |
218 | | // gather using scalar loads. |
219 | 0 | const __m128i a_res = |
220 | 0 | _mm_set_epi32(av1_x_by_xplus1[_mm_extract_epi32(z, 3)], |
221 | 0 | av1_x_by_xplus1[_mm_extract_epi32(z, 2)], |
222 | 0 | av1_x_by_xplus1[_mm_extract_epi32(z, 1)], |
223 | 0 | av1_x_by_xplus1[_mm_extract_epi32(z, 0)]); |
224 | |
|
225 | 0 | xx_storeu_128(A + i * buf_stride + j, a_res); |
226 | |
|
227 | 0 | const __m128i a_complement = |
228 | 0 | _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res); |
229 | | |
230 | | // sum1 might have lanes greater than 2^15, so we can't use madd to do |
231 | | // multiplication involving sum1. However, a_complement and one_over_n |
232 | | // are both well below 2^15, so we can multiply them together first. |
233 | 0 | const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n); |
234 | 0 | const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1); |
235 | 0 | const __m128i b_res = |
236 | 0 | _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS); |
237 | |
|
238 | 0 | xx_storeu_128(B + i * buf_stride + j, b_res); |
239 | 0 | } |
240 | 0 | } |
241 | 0 | } |
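// Editorial sketch (not part of the original file): a scalar reading of the
// per-pixel work in the loop above, producing one value of A and B from the
// pixel box sum sum1 and the value p returned by compute_p. Hypothetical
// helper, not built into the library:
static void calc_ab_one_scalar_sketch(int32_t *A, int32_t *B, int32_t sum1,
                                      int32_t p, uint32_t s, int n) {
  const uint32_t z =
      AOMMIN(255u, ((uint32_t)p * s + (1u << (SGRPROJ_MTABLE_BITS - 1))) >>
                       SGRPROJ_MTABLE_BITS);
  const int32_t a = av1_x_by_xplus1[z];  // roughly SGRPROJ_SGR * z / (z + 1)
  const uint32_t b_int = (uint32_t)(SGRPROJ_SGR - a) *
                         (uint32_t)av1_one_by_x[n - 1] * (uint32_t)sum1;
  *A = a;
  *B = (int32_t)((b_int + (1u << (SGRPROJ_RECIP_BITS - 1))) >>
                 SGRPROJ_RECIP_BITS);
}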
242 | | |
243 | | // Calculate 4 values of the "cross sum" starting at buf. This is a 3x3 filter |
244 | | // where the outer four corners have weight 3 and all other pixels have weight |
245 | | // 4. |
246 | | // |
247 | | // Pixels are indexed like this: |
248 | | // xtl xt xtr |
249 | | // xl x xr |
250 | | // xbl xb xbr |
251 | | // |
252 | | // buf points to x |
253 | | // |
254 | | // fours = xl + xt + xr + xb + x |
255 | | // threes = xtl + xtr + xbr + xbl |
256 | | // cross_sum = 4 * fours + 3 * threes |
257 | | // = 4 * (fours + threes) - threes |
258 | | // = (fours + threes) << 2 - threes |
259 | 0 | static inline __m128i cross_sum(const int32_t *buf, int stride) { |
260 | 0 | const __m128i xtl = xx_loadu_128(buf - 1 - stride); |
261 | 0 | const __m128i xt = xx_loadu_128(buf - stride); |
262 | 0 | const __m128i xtr = xx_loadu_128(buf + 1 - stride); |
263 | 0 | const __m128i xl = xx_loadu_128(buf - 1); |
264 | 0 | const __m128i x = xx_loadu_128(buf); |
265 | 0 | const __m128i xr = xx_loadu_128(buf + 1); |
266 | 0 | const __m128i xbl = xx_loadu_128(buf - 1 + stride); |
267 | 0 | const __m128i xb = xx_loadu_128(buf + stride); |
268 | 0 | const __m128i xbr = xx_loadu_128(buf + 1 + stride); |
269 | |
|
270 | 0 | const __m128i fours = _mm_add_epi32( |
271 | 0 | xl, _mm_add_epi32(xt, _mm_add_epi32(xr, _mm_add_epi32(xb, x)))); |
272 | 0 | const __m128i threes = |
273 | 0 | _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl))); |
274 | |
|
275 | 0 | return _mm_sub_epi32(_mm_slli_epi32(_mm_add_epi32(fours, threes), 2), threes); |
276 | 0 | } |
277 | | |
278 | | // The final filter for self-guided restoration. Computes a weighted average |
279 | | // across A, B with "cross sums" (see cross_sum implementation above). |
280 | | static void final_filter(int32_t *dst, int dst_stride, const int32_t *A, |
281 | | const int32_t *B, int buf_stride, const void *dgd8, |
282 | 0 | int dgd_stride, int width, int height, int highbd) { |
283 | 0 | const int nb = 5; |
284 | 0 | const __m128i rounding = |
285 | 0 | round_for_shift(SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); |
286 | 0 | const uint8_t *dgd_real = |
287 | 0 | highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8; |
288 | |
|
289 | 0 | for (int i = 0; i < height; ++i) { |
290 | 0 | for (int j = 0; j < width; j += 4) { |
291 | 0 | const __m128i a = cross_sum(A + i * buf_stride + j, buf_stride); |
292 | 0 | const __m128i b = cross_sum(B + i * buf_stride + j, buf_stride); |
293 | 0 | const __m128i raw = |
294 | 0 | xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd)); |
295 | 0 | const __m128i src = |
296 | 0 | highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw); |
297 | |
|
298 | 0 | __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b); |
299 | 0 | __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding), |
300 | 0 | SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS); |
301 | |
|
302 | 0 | xx_storeu_128(dst + i * dst_stride + j, w); |
303 | 0 | } |
304 | 0 | } |
305 | 0 | } |
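// Editorial note (not part of the original file): per pixel, the loop above
// computes, in scalar terms,
//   dst = (cross_sum(A) * src + cross_sum(B) + (1 << (shift - 1))) >> shift
// with shift = SGRPROJ_SGR_BITS + nb - SGRPROJ_RST_BITS. nb is 5 because the
// 3x3 cross-sum weights (four 3s and five 4s) add up to 32 = 1 << 5.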
306 | | |
307 | | // Assumes that C, D are integral images for the original buffer which has been |
308 | | // extended to have a padding of SGRPROJ_BORDER_VERT/SGRPROJ_BORDER_HORZ pixels |
309 | | // on the sides. A, B, C, D point at logical position (0, 0). |
310 | | static void calc_ab_fast(int32_t *A, int32_t *B, const int32_t *C, |
311 | | const int32_t *D, int width, int height, |
312 | | int buf_stride, int bit_depth, int sgr_params_idx, |
313 | 0 | int radius_idx) { |
314 | 0 | const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; |
315 | 0 | const int r = params->r[radius_idx]; |
316 | 0 | const int n = (2 * r + 1) * (2 * r + 1); |
317 | 0 | const __m128i s = _mm_set1_epi32(params->s[radius_idx]); |
318 | | // one_over_n[n-1] is 2^12/n, so easily fits in an int16 |
319 | 0 | const __m128i one_over_n = _mm_set1_epi32(av1_one_by_x[n - 1]); |
320 | |
|
321 | 0 | const __m128i rnd_z = round_for_shift(SGRPROJ_MTABLE_BITS); |
322 | 0 | const __m128i rnd_res = round_for_shift(SGRPROJ_RECIP_BITS); |
323 | | |
324 | | // Set up masks |
325 | 0 | const __m128i ones32 = _mm_set_epi32(0, 0, ~0, ~0); |
326 | 0 | __m128i mask[4]; |
327 | 0 | for (int idx = 0; idx < 4; idx++) { |
328 | 0 | const __m128i shift = _mm_cvtsi32_si128(8 * (4 - idx)); |
329 | 0 | mask[idx] = _mm_cvtepi8_epi32(_mm_srl_epi64(ones32, shift)); |
330 | 0 | } |
331 | |
|
332 | 0 | for (int i = -1; i < height + 1; i += 2) { |
333 | 0 | for (int j = -1; j < width + 1; j += 4) { |
334 | 0 | const int32_t *Cij = C + i * buf_stride + j; |
335 | 0 | const int32_t *Dij = D + i * buf_stride + j; |
336 | |
|
337 | 0 | __m128i sum1 = boxsum_from_ii(Dij, buf_stride, r); |
338 | 0 | __m128i sum2 = boxsum_from_ii(Cij, buf_stride, r); |
339 | | |
340 | | // When width + 2 isn't a multiple of 4, sum1 and sum2 will contain |
341 | | // some uninitialised data in their upper words. We use a mask to |
342 | | // ensure that these bits are set to 0. |
343 | 0 | int idx = AOMMIN(4, width + 1 - j); |
344 | 0 | assert(idx >= 1); |
345 | | |
346 | 0 | if (idx < 4) { |
347 | 0 | sum1 = _mm_and_si128(mask[idx], sum1); |
348 | 0 | sum2 = _mm_and_si128(mask[idx], sum2); |
349 | 0 | } |
350 | |
|
351 | 0 | const __m128i p = compute_p(sum1, sum2, bit_depth, n); |
352 | |
|
353 | 0 | const __m128i z = _mm_min_epi32( |
354 | 0 | _mm_srli_epi32(_mm_add_epi32(_mm_mullo_epi32(p, s), rnd_z), |
355 | 0 | SGRPROJ_MTABLE_BITS), |
356 | 0 | _mm_set1_epi32(255)); |
357 | | |
358 | | // 'Gather' type instructions are not available pre-AVX2, so synthesize a |
359 | | // gather using scalar loads. |
360 | 0 | const __m128i a_res = |
361 | 0 | _mm_set_epi32(av1_x_by_xplus1[_mm_extract_epi32(z, 3)], |
362 | 0 | av1_x_by_xplus1[_mm_extract_epi32(z, 2)], |
363 | 0 | av1_x_by_xplus1[_mm_extract_epi32(z, 1)], |
364 | 0 | av1_x_by_xplus1[_mm_extract_epi32(z, 0)]); |
365 | |
|
366 | 0 | xx_storeu_128(A + i * buf_stride + j, a_res); |
367 | |
|
368 | 0 | const __m128i a_complement = |
369 | 0 | _mm_sub_epi32(_mm_set1_epi32(SGRPROJ_SGR), a_res); |
370 | | |
371 | | // sum1 might have lanes greater than 2^15, so we can't use madd to do |
372 | | // multiplication involving sum1. However, a_complement and one_over_n |
373 | | // are both less than 256, so we can multiply them first. |
374 | 0 | const __m128i a_comp_over_n = _mm_madd_epi16(a_complement, one_over_n); |
375 | 0 | const __m128i b_int = _mm_mullo_epi32(a_comp_over_n, sum1); |
376 | 0 | const __m128i b_res = |
377 | 0 | _mm_srli_epi32(_mm_add_epi32(b_int, rnd_res), SGRPROJ_RECIP_BITS); |
378 | |
|
379 | 0 | xx_storeu_128(B + i * buf_stride + j, b_res); |
380 | 0 | } |
381 | 0 | } |
382 | 0 | } |
383 | | |
384 | | // Calculate 4 values of the "cross sum" starting at buf. |
385 | | // |
386 | | // Pixels are indexed like this: |
387 | | // xtl xt xtr |
388 | | // - buf - |
389 | | // xbl xb xbr |
390 | | // |
391 | | // Pixels are weighted like this: |
392 | | // 5 6 5 |
393 | | // 0 0 0 |
394 | | // 5 6 5 |
395 | | // |
396 | | // fives = xtl + xtr + xbl + xbr |
397 | | // sixes = xt + xb |
398 | | // cross_sum = 6 * sixes + 5 * fives |
399 | | // = 5 * (fives + sixes) + sixes |
400 | | // = (fives + sixes) << 2 + (fives + sixes) + sixes |
401 | 0 | static inline __m128i cross_sum_fast_even_row(const int32_t *buf, int stride) { |
402 | 0 | const __m128i xtl = xx_loadu_128(buf - 1 - stride); |
403 | 0 | const __m128i xt = xx_loadu_128(buf - stride); |
404 | 0 | const __m128i xtr = xx_loadu_128(buf + 1 - stride); |
405 | 0 | const __m128i xbl = xx_loadu_128(buf - 1 + stride); |
406 | 0 | const __m128i xb = xx_loadu_128(buf + stride); |
407 | 0 | const __m128i xbr = xx_loadu_128(buf + 1 + stride); |
408 | |
|
409 | 0 | const __m128i fives = |
410 | 0 | _mm_add_epi32(xtl, _mm_add_epi32(xtr, _mm_add_epi32(xbr, xbl))); |
411 | 0 | const __m128i sixes = _mm_add_epi32(xt, xb); |
412 | 0 | const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes); |
413 | |
|
414 | 0 | return _mm_add_epi32( |
415 | 0 | _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes), |
416 | 0 | sixes); |
417 | 0 | } |
418 | | |
419 | | // Calculate 4 values of the "cross sum" starting at buf. |
420 | | // |
421 | | // Pixels are indexed like this: |
422 | | // xl x xr |
423 | | // |
424 | | // Pixels are weighted like this: |
425 | | // 5 6 5 |
426 | | // |
427 | | // buf points to x |
428 | | // |
429 | | // fives = xl + xr |
430 | | // sixes = x |
431 | | // cross_sum = 5 * fives + 6 * sixes |
432 | | // = 4 * (fives + sixes) + (fives + sixes) + sixes |
433 | | // = (fives + sixes) << 2 + (fives + sixes) + sixes |
434 | 0 | static inline __m128i cross_sum_fast_odd_row(const int32_t *buf) { |
435 | 0 | const __m128i xl = xx_loadu_128(buf - 1); |
436 | 0 | const __m128i x = xx_loadu_128(buf); |
437 | 0 | const __m128i xr = xx_loadu_128(buf + 1); |
438 | |
|
439 | 0 | const __m128i fives = _mm_add_epi32(xl, xr); |
440 | 0 | const __m128i sixes = x; |
441 | |
|
442 | 0 | const __m128i fives_plus_sixes = _mm_add_epi32(fives, sixes); |
443 | |
|
444 | 0 | return _mm_add_epi32( |
445 | 0 | _mm_add_epi32(_mm_slli_epi32(fives_plus_sixes, 2), fives_plus_sixes), |
446 | 0 | sixes); |
447 | 0 | } |
448 | | |
449 | | // The final filter for the self-guided restoration. Computes a |
450 | | // weighted average across A, B with "cross sums" (see cross_sum_... |
451 | | // implementations above). |
452 | | static void final_filter_fast(int32_t *dst, int dst_stride, const int32_t *A, |
453 | | const int32_t *B, int buf_stride, |
454 | | const void *dgd8, int dgd_stride, int width, |
455 | 0 | int height, int highbd) { |
456 | 0 | const int nb0 = 5; |
457 | 0 | const int nb1 = 4; |
458 | |
|
459 | 0 | const __m128i rounding0 = |
460 | 0 | round_for_shift(SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS); |
461 | 0 | const __m128i rounding1 = |
462 | 0 | round_for_shift(SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS); |
463 | |
|
464 | 0 | const uint8_t *dgd_real = |
465 | 0 | highbd ? (const uint8_t *)CONVERT_TO_SHORTPTR(dgd8) : dgd8; |
466 | |
|
467 | 0 | for (int i = 0; i < height; ++i) { |
468 | 0 | if (!(i & 1)) { // even row |
469 | 0 | for (int j = 0; j < width; j += 4) { |
470 | 0 | const __m128i a = |
471 | 0 | cross_sum_fast_even_row(A + i * buf_stride + j, buf_stride); |
472 | 0 | const __m128i b = |
473 | 0 | cross_sum_fast_even_row(B + i * buf_stride + j, buf_stride); |
474 | 0 | const __m128i raw = |
475 | 0 | xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd)); |
476 | 0 | const __m128i src = |
477 | 0 | highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw); |
478 | |
|
479 | 0 | __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b); |
480 | 0 | __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding0), |
481 | 0 | SGRPROJ_SGR_BITS + nb0 - SGRPROJ_RST_BITS); |
482 | |
|
483 | 0 | xx_storeu_128(dst + i * dst_stride + j, w); |
484 | 0 | } |
485 | 0 | } else { // odd row |
486 | 0 | for (int j = 0; j < width; j += 4) { |
487 | 0 | const __m128i a = cross_sum_fast_odd_row(A + i * buf_stride + j); |
488 | 0 | const __m128i b = cross_sum_fast_odd_row(B + i * buf_stride + j); |
489 | 0 | const __m128i raw = |
490 | 0 | xx_loadl_64(dgd_real + ((i * dgd_stride + j) << highbd)); |
491 | 0 | const __m128i src = |
492 | 0 | highbd ? _mm_cvtepu16_epi32(raw) : _mm_cvtepu8_epi32(raw); |
493 | |
|
494 | 0 | __m128i v = _mm_add_epi32(_mm_madd_epi16(a, src), b); |
495 | 0 | __m128i w = _mm_srai_epi32(_mm_add_epi32(v, rounding1), |
496 | 0 | SGRPROJ_SGR_BITS + nb1 - SGRPROJ_RST_BITS); |
497 | |
|
498 | 0 | xx_storeu_128(dst + i * dst_stride + j, w); |
499 | 0 | } |
500 | 0 | } |
501 | 0 | } |
502 | 0 | } |
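// Editorial note (not part of the original file): the two shifts above differ
// because the cross sums normalise differently. On even rows the 5/6/5
// weights cover six pixels and sum to 4 * 5 + 2 * 6 = 32 = 1 << nb0; on odd
// rows they cover three pixels and sum to 2 * 5 + 6 = 16 = 1 << nb1.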
503 | | |
504 | | int av1_selfguided_restoration_sse4_1(const uint8_t *dgd8, int width, |
505 | | int height, int dgd_stride, int32_t *flt0, |
506 | | int32_t *flt1, int flt_stride, |
507 | | int sgr_params_idx, int bit_depth, |
508 | 0 | int highbd) { |
509 | 0 | int32_t *buf = (int32_t *)aom_memalign( |
510 | 0 | 16, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS); |
511 | 0 | if (!buf) return -1; |
512 | 0 | memset(buf, 0, 4 * sizeof(*buf) * RESTORATION_PROC_UNIT_PELS); |
513 | |
|
514 | 0 | const int width_ext = width + 2 * SGRPROJ_BORDER_HORZ; |
515 | 0 | const int height_ext = height + 2 * SGRPROJ_BORDER_VERT; |
516 | | |
517 | | // Adjusting the stride of A and B here appears to avoid bad cache effects, |
518 | | // leading to a significant speed improvement. |
519 | | // We also align the stride to a multiple of 16 bytes for efficiency. |
520 | 0 | int buf_stride = ((width_ext + 3) & ~3) + 16; |
521 | | |
522 | | // The "tl" pointers point at the top-left of the initialised data for the |
523 | | // array. Adding 3 here ensures that column 1 is 16-byte aligned. |
524 | 0 | int32_t *Atl = buf + 0 * RESTORATION_PROC_UNIT_PELS + 3; |
525 | 0 | int32_t *Btl = buf + 1 * RESTORATION_PROC_UNIT_PELS + 3; |
526 | 0 | int32_t *Ctl = buf + 2 * RESTORATION_PROC_UNIT_PELS + 3; |
527 | 0 | int32_t *Dtl = buf + 3 * RESTORATION_PROC_UNIT_PELS + 3; |
528 | | |
529 | | // The "0" pointers are (- SGRPROJ_BORDER_VERT, -SGRPROJ_BORDER_HORZ). Note |
530 | | // there's a zero row and column in the integral images C and D, so we move |
531 | | // down and right one for them (A and B use the same offsets). |
532 | 0 | const int buf_diag_border = |
533 | 0 | SGRPROJ_BORDER_HORZ + buf_stride * SGRPROJ_BORDER_VERT; |
534 | |
|
535 | 0 | int32_t *A0 = Atl + 1 + buf_stride; |
536 | 0 | int32_t *B0 = Btl + 1 + buf_stride; |
537 | 0 | int32_t *C0 = Ctl + 1 + buf_stride; |
538 | 0 | int32_t *D0 = Dtl + 1 + buf_stride; |
539 | | |
540 | | // Finally, A, B, C, D point at position (0, 0). |
541 | 0 | int32_t *A = A0 + buf_diag_border; |
542 | 0 | int32_t *B = B0 + buf_diag_border; |
543 | 0 | int32_t *C = C0 + buf_diag_border; |
544 | 0 | int32_t *D = D0 + buf_diag_border; |
545 | |
|
546 | 0 | const int dgd_diag_border = |
547 | 0 | SGRPROJ_BORDER_HORZ + dgd_stride * SGRPROJ_BORDER_VERT; |
548 | 0 | const uint8_t *dgd0 = dgd8 - dgd_diag_border; |
549 | | |
550 | | // Generate integral images from the input. C will contain sums of squares; D |
551 | | // will contain just sums |
552 | 0 | if (highbd) |
553 | 0 | integral_images_highbd(CONVERT_TO_SHORTPTR(dgd0), dgd_stride, width_ext, |
554 | 0 | height_ext, Ctl, Dtl, buf_stride); |
555 | 0 | else |
556 | 0 | integral_images(dgd0, dgd_stride, width_ext, height_ext, Ctl, Dtl, |
557 | 0 | buf_stride); |
558 | |
|
559 | 0 | const sgr_params_type *const params = &av1_sgr_params[sgr_params_idx]; |
560 | | // Write to flt0 and flt1 |
561 | | // If params->r == 0 we skip the corresponding filter. We only allow one of |
562 | | // the radii to be 0, as having both equal to 0 would be equivalent to |
563 | | // skipping SGR entirely. |
564 | 0 | assert(!(params->r[0] == 0 && params->r[1] == 0)); |
565 | 0 | assert(params->r[0] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ)); |
566 | 0 | assert(params->r[1] < AOMMIN(SGRPROJ_BORDER_VERT, SGRPROJ_BORDER_HORZ)); |
567 | | |
568 | 0 | if (params->r[0] > 0) { |
569 | 0 | calc_ab_fast(A, B, C, D, width, height, buf_stride, bit_depth, |
570 | 0 | sgr_params_idx, 0); |
571 | 0 | final_filter_fast(flt0, flt_stride, A, B, buf_stride, dgd8, dgd_stride, |
572 | 0 | width, height, highbd); |
573 | 0 | } |
574 | |
|
575 | 0 | if (params->r[1] > 0) { |
576 | 0 | calc_ab(A, B, C, D, width, height, buf_stride, bit_depth, sgr_params_idx, |
577 | 0 | 1); |
578 | 0 | final_filter(flt1, flt_stride, A, B, buf_stride, dgd8, dgd_stride, width, |
579 | 0 | height, highbd); |
580 | 0 | } |
581 | 0 | aom_free(buf); |
582 | 0 | return 0; |
583 | 0 | } |
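// Editorial note (not part of the original file): the "+ 3" offsets and the
// stride rounding above combine to meet integral_images()'s alignment
// contract. buf comes from aom_memalign(16, ...), so Atl + 1 = buf + 4
// int32_t values lands back on a 16-byte boundary, and because buf_stride is
// a multiple of 4 int32_t values, column 1 of every row stays 16-byte aligned
// for the xx_store_128 calls.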
584 | | |
585 | | int av1_apply_selfguided_restoration_sse4_1(const uint8_t *dat8, int width, |
586 | | int height, int stride, int eps, |
587 | | const int *xqd, uint8_t *dst8, |
588 | | int dst_stride, int32_t *tmpbuf, |
589 | 0 | int bit_depth, int highbd) { |
590 | 0 | int32_t *flt0 = tmpbuf; |
591 | 0 | int32_t *flt1 = flt0 + RESTORATION_UNITPELS_MAX; |
592 | 0 | assert(width * height <= RESTORATION_UNITPELS_MAX); |
593 | 0 | const int ret = av1_selfguided_restoration_sse4_1( |
594 | 0 | dat8, width, height, stride, flt0, flt1, width, eps, bit_depth, highbd); |
595 | 0 | if (ret != 0) return ret; |
596 | 0 | const sgr_params_type *const params = &av1_sgr_params[eps]; |
597 | 0 | int xq[2]; |
598 | 0 | av1_decode_xq(xqd, xq, params); |
599 | |
|
600 | 0 | __m128i xq0 = _mm_set1_epi32(xq[0]); |
601 | 0 | __m128i xq1 = _mm_set1_epi32(xq[1]); |
602 | |
|
603 | 0 | for (int i = 0; i < height; ++i) { |
604 | | // Calculate output in batches of 8 pixels |
605 | 0 | for (int j = 0; j < width; j += 8) { |
606 | 0 | const int k = i * width + j; |
607 | 0 | const int m = i * dst_stride + j; |
608 | |
|
609 | 0 | const uint8_t *dat8ij = dat8 + i * stride + j; |
610 | 0 | __m128i src; |
611 | 0 | if (highbd) { |
612 | 0 | src = xx_loadu_128(CONVERT_TO_SHORTPTR(dat8ij)); |
613 | 0 | } else { |
614 | 0 | src = _mm_cvtepu8_epi16(xx_loadl_64(dat8ij)); |
615 | 0 | } |
616 | |
|
617 | 0 | const __m128i u = _mm_slli_epi16(src, SGRPROJ_RST_BITS); |
618 | 0 | const __m128i u_0 = _mm_cvtepu16_epi32(u); |
619 | 0 | const __m128i u_1 = _mm_cvtepu16_epi32(_mm_srli_si128(u, 8)); |
620 | |
|
621 | 0 | __m128i v_0 = _mm_slli_epi32(u_0, SGRPROJ_PRJ_BITS); |
622 | 0 | __m128i v_1 = _mm_slli_epi32(u_1, SGRPROJ_PRJ_BITS); |
623 | |
|
624 | 0 | if (params->r[0] > 0) { |
625 | 0 | const __m128i f1_0 = _mm_sub_epi32(xx_loadu_128(&flt0[k]), u_0); |
626 | 0 | v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq0, f1_0)); |
627 | |
|
628 | 0 | const __m128i f1_1 = _mm_sub_epi32(xx_loadu_128(&flt0[k + 4]), u_1); |
629 | 0 | v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq0, f1_1)); |
630 | 0 | } |
631 | |
|
632 | 0 | if (params->r[1] > 0) { |
633 | 0 | const __m128i f2_0 = _mm_sub_epi32(xx_loadu_128(&flt1[k]), u_0); |
634 | 0 | v_0 = _mm_add_epi32(v_0, _mm_mullo_epi32(xq1, f2_0)); |
635 | |
|
636 | 0 | const __m128i f2_1 = _mm_sub_epi32(xx_loadu_128(&flt1[k + 4]), u_1); |
637 | 0 | v_1 = _mm_add_epi32(v_1, _mm_mullo_epi32(xq1, f2_1)); |
638 | 0 | } |
639 | |
|
640 | 0 | const __m128i rounding = |
641 | 0 | round_for_shift(SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); |
642 | 0 | const __m128i w_0 = _mm_srai_epi32(_mm_add_epi32(v_0, rounding), |
643 | 0 | SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); |
644 | 0 | const __m128i w_1 = _mm_srai_epi32(_mm_add_epi32(v_1, rounding), |
645 | 0 | SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS); |
646 | |
|
647 | 0 | if (highbd) { |
648 | | // Pack into 16 bits and clamp to [0, 2^bit_depth) |
649 | 0 | const __m128i tmp = _mm_packus_epi32(w_0, w_1); |
650 | 0 | const __m128i max = _mm_set1_epi16((1 << bit_depth) - 1); |
651 | 0 | const __m128i res = _mm_min_epi16(tmp, max); |
652 | 0 | xx_storeu_128(CONVERT_TO_SHORTPTR(dst8 + m), res); |
653 | 0 | } else { |
654 | | // Pack into 8 bits and clamp to [0, 256) |
655 | 0 | const __m128i tmp = _mm_packs_epi32(w_0, w_1); |
656 | 0 | const __m128i res = _mm_packus_epi16(tmp, tmp /* "don't care" value */); |
657 | 0 | xx_storel_64(dst8 + m, res); |
658 | 0 | } |
659 | 0 | } |
660 | 0 | } |
661 | 0 | return 0; |
662 | 0 | } |
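// Editorial note (not part of the original file): per pixel, the SIMD loop
// above evaluates the self-guided projection in scalar terms as
//   u = src << SGRPROJ_RST_BITS
//   v = (u << SGRPROJ_PRJ_BITS) + xq[0] * (flt0 - u) + xq[1] * (flt1 - u)
//   dst = clamp((v + (1 << (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS - 1))) >>
//                   (SGRPROJ_PRJ_BITS + SGRPROJ_RST_BITS),
//               0, (1 << bit_depth) - 1)
// with each xq[i] * (flt - u) term skipped when the corresponding radius is
// 0, and the 8-bit path clamping to [0, 255].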