/src/aom/av1/common/convolve.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2016, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <assert.h> |
13 | | #include <string.h> |
14 | | |
15 | | #include "config/aom_dsp_rtcd.h" |
16 | | #include "config/av1_rtcd.h" |
17 | | |
18 | | #include "av1/common/av1_common_int.h" |
19 | | #include "av1/common/blockd.h" |
20 | | #include "av1/common/convolve.h" |
21 | | #include "av1/common/filter.h" |
22 | | #include "av1/common/resize.h" |
23 | | #include "aom_dsp/aom_dsp_common.h" |
24 | | #include "aom_ports/mem.h" |
25 | | |
26 | | void av1_convolve_horiz_rs_c(const uint8_t *src, int src_stride, uint8_t *dst, |
27 | | int dst_stride, int w, int h, |
28 | | const int16_t *x_filters, int x0_qn, |
29 | 0 | int x_step_qn) { |
30 | 0 | src -= UPSCALE_NORMATIVE_TAPS / 2 - 1; |
31 | 0 | for (int y = 0; y < h; ++y) { |
32 | 0 | int x_qn = x0_qn; |
33 | 0 | for (int x = 0; x < w; ++x) { |
34 | 0 | const uint8_t *const src_x = &src[x_qn >> RS_SCALE_SUBPEL_BITS]; |
35 | 0 | const int x_filter_idx = |
36 | 0 | (x_qn & RS_SCALE_SUBPEL_MASK) >> RS_SCALE_EXTRA_BITS; |
37 | 0 | assert(x_filter_idx <= RS_SUBPEL_MASK); |
38 | 0 | const int16_t *const x_filter = |
39 | 0 | &x_filters[x_filter_idx * UPSCALE_NORMATIVE_TAPS]; |
40 | 0 | int sum = 0; |
41 | 0 | for (int k = 0; k < UPSCALE_NORMATIVE_TAPS; ++k) |
42 | 0 | sum += src_x[k] * x_filter[k]; |
43 | 0 | dst[x] = clip_pixel(ROUND_POWER_OF_TWO(sum, FILTER_BITS)); |
44 | 0 | x_qn += x_step_qn; |
45 | 0 | } |
46 | 0 | src += src_stride; |
47 | 0 | dst += dst_stride; |
48 | 0 | } |
49 | 0 | } |
50 | | |
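
The loop above walks the output row in fixed point: the high bits of x_qn select the source pixel and the low bits select one of the upscale filter phases. Below is a minimal standalone sketch of that decomposition; the constant values (14 fractional bits, 8 extra bits, so 64 phases) and the 3:4 step are illustrative assumptions, not values read from this file.

#include <stdio.h>

int main(void) {
  // Assumed fixed-point layout of the normative upscaler (hypothetical
  // stand-ins for the av1/common/resize.h constants): 14 fractional bits,
  // of which the top 6 index one of 64 filter phases.
  const int rs_scale_subpel_bits = 14, rs_scale_extra_bits = 8;
  const int rs_scale_subpel_mask = (1 << rs_scale_subpel_bits) - 1;

  // Hypothetical 3:4 upscale: each output sample advances 3/4 of a source pixel.
  const int x_step_qn = (3 << rs_scale_subpel_bits) / 4;
  int x_qn = 0;  // x0_qn

  for (int x = 0; x < 4; ++x, x_qn += x_step_qn) {
    const int src_pixel = x_qn >> rs_scale_subpel_bits;  // integer source position
    const int filter_idx =
        (x_qn & rs_scale_subpel_mask) >> rs_scale_extra_bits;  // phase, 0..63
    printf("output %d -> source pixel %d, filter phase %d\n", x, src_pixel,
           filter_idx);
  }
  return 0;
}
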
51 | | #if CONFIG_AV1_HIGHBITDEPTH |
52 | | void av1_highbd_convolve_horiz_rs_c(const uint16_t *src, int src_stride, |
53 | | uint16_t *dst, int dst_stride, int w, int h, |
54 | | const int16_t *x_filters, int x0_qn, |
55 | 0 | int x_step_qn, int bd) { |
56 | 0 | src -= UPSCALE_NORMATIVE_TAPS / 2 - 1; |
57 | 0 | for (int y = 0; y < h; ++y) { |
58 | 0 | int x_qn = x0_qn; |
59 | 0 | for (int x = 0; x < w; ++x) { |
60 | 0 | const uint16_t *const src_x = &src[x_qn >> RS_SCALE_SUBPEL_BITS]; |
61 | 0 | const int x_filter_idx = |
62 | 0 | (x_qn & RS_SCALE_SUBPEL_MASK) >> RS_SCALE_EXTRA_BITS; |
63 | 0 | assert(x_filter_idx <= RS_SUBPEL_MASK); |
64 | 0 | const int16_t *const x_filter = |
65 | 0 | &x_filters[x_filter_idx * UPSCALE_NORMATIVE_TAPS]; |
66 | 0 | int sum = 0; |
67 | 0 | for (int k = 0; k < UPSCALE_NORMATIVE_TAPS; ++k) |
68 | 0 | sum += src_x[k] * x_filter[k]; |
69 | 0 | dst[x] = clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd); |
70 | 0 | x_qn += x_step_qn; |
71 | 0 | } |
72 | 0 | src += src_stride; |
73 | 0 | dst += dst_stride; |
74 | 0 | } |
75 | 0 | } |
76 | | #endif // CONFIG_AV1_HIGHBITDEPTH |
77 | | |
78 | | void av1_convolve_2d_sr_c(const uint8_t *src, int src_stride, uint8_t *dst, |
79 | | int dst_stride, int w, int h, |
80 | | const InterpFilterParams *filter_params_x, |
81 | | const InterpFilterParams *filter_params_y, |
82 | | const int subpel_x_qn, const int subpel_y_qn, |
83 | 0 | ConvolveParams *conv_params) { |
84 | 0 | int16_t im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]; |
85 | 0 | int im_h = h + filter_params_y->taps - 1; |
86 | 0 | int im_stride = w; |
87 | 0 | assert(w <= MAX_SB_SIZE && h <= MAX_SB_SIZE); |
88 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
89 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
90 | 0 | const int bd = 8; |
91 | 0 | const int bits = |
92 | 0 | FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1; |
93 | | |
94 | | // horizontal filter |
95 | 0 | const uint8_t *src_horiz = src - fo_vert * src_stride; |
96 | 0 | const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( |
97 | 0 | filter_params_x, subpel_x_qn & SUBPEL_MASK); |
98 | 0 | for (int y = 0; y < im_h; ++y) { |
99 | 0 | for (int x = 0; x < w; ++x) { |
100 | 0 | int32_t sum = (1 << (bd + FILTER_BITS - 1)); |
101 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
102 | 0 | sum += x_filter[k] * src_horiz[y * src_stride + x - fo_horiz + k]; |
103 | 0 | } |
104 | | |
105 | | // TODO(aomedia:3393): for the 12-tap filter, in extreme cases, the result |
106 | | // can exceed the following range. For better prediction, a clamp can be |
107 | | // added for the 12-tap filter to ensure the horizontal filtering result |
108 | | // stays within 16 bits. The same applies to the vertical filtering. |
109 | 0 | assert(filter_params_x->taps > 8 || |
110 | 0 | (0 <= sum && sum < (1 << (bd + FILTER_BITS + 1)))); |
111 | 0 | im_block[y * im_stride + x] = |
112 | 0 | (int16_t)ROUND_POWER_OF_TWO(sum, conv_params->round_0); |
113 | 0 | } |
114 | 0 | } |
115 | | |
116 | | // vertical filter |
117 | 0 | int16_t *src_vert = im_block + fo_vert * im_stride; |
118 | 0 | const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( |
119 | 0 | filter_params_y, subpel_y_qn & SUBPEL_MASK); |
120 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
121 | 0 | for (int y = 0; y < h; ++y) { |
122 | 0 | for (int x = 0; x < w; ++x) { |
123 | 0 | int32_t sum = 1 << offset_bits; |
124 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
125 | 0 | sum += y_filter[k] * src_vert[(y - fo_vert + k) * im_stride + x]; |
126 | 0 | } |
127 | 0 | assert(filter_params_y->taps > 8 || |
128 | 0 | (0 <= sum && sum < (1 << (offset_bits + 2)))); |
129 | 0 | int16_t res = ROUND_POWER_OF_TWO(sum, conv_params->round_1) - |
130 | 0 | ((1 << (offset_bits - conv_params->round_1)) + |
131 | 0 | (1 << (offset_bits - conv_params->round_1 - 1))); |
132 | 0 | dst[y * dst_stride + x] = clip_pixel(ROUND_POWER_OF_TWO(res, bits)); |
133 | 0 | } |
134 | 0 | } |
135 | 0 | } |
136 | | |
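
The rounding offsets added in av1_convolve_2d_sr_c ((1 << (bd + FILTER_BITS - 1)) horizontally, (1 << offset_bits) vertically) keep the intermediate values non-negative, and the subtraction before the final shift removes them exactly. The standalone check below walks a constant pixel value through the same arithmetic and asserts it comes back unchanged. FILTER_BITS = 7 matches AV1; round_0 = 3 and round_1 = 11 are assumed typical 8-bit single-prediction rounding values, and the fact that every normative interpolation filter sums to 1 << FILTER_BITS is what makes the constant-input case exact.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Same rounding rule as aom_dsp_common.h, re-declared so the sketch is
// self-contained.
#define ROUND_POWER_OF_TWO(value, n) (((value) + ((1 << (n)) >> 1)) >> (n))

int main(void) {
  const int bd = 8, filter_bits = 7;    // FILTER_BITS in AV1
  const int round_0 = 3, round_1 = 11;  // assumed single-prediction 8-bit rounds
  const int bits = 2 * filter_bits - round_0 - round_1;    // final shift (0 here)
  const int offset_bits = bd + 2 * filter_bits - round_0;  // 19

  for (int p = 0; p < (1 << bd); ++p) {
    // Horizontal pass on a constant block: the filter taps sum to
    // 1 << FILTER_BITS, so the tap sum is simply p << filter_bits.
    const int32_t hsum = (1 << (bd + filter_bits - 1)) + (p << filter_bits);
    const int32_t im = ROUND_POWER_OF_TWO(hsum, round_0);
    // Vertical pass on the constant intermediate block.
    const int32_t vsum = (1 << offset_bits) + (im << filter_bits);
    const int32_t res = ROUND_POWER_OF_TWO(vsum, round_1) -
                        ((1 << (offset_bits - round_1)) +
                         (1 << (offset_bits - round_1 - 1)));
    assert(ROUND_POWER_OF_TWO(res, bits) == p);  // the offsets cancel exactly
  }
  printf("constant inputs pass through the 2D pipeline unchanged\n");
  return 0;
}
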
137 | | void av1_convolve_y_sr_c(const uint8_t *src, int src_stride, uint8_t *dst, |
138 | | int dst_stride, int w, int h, |
139 | | const InterpFilterParams *filter_params_y, |
140 | 0 | const int subpel_y_qn) { |
141 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
142 | | |
143 | | // vertical filter |
144 | 0 | const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( |
145 | 0 | filter_params_y, subpel_y_qn & SUBPEL_MASK); |
146 | 0 | for (int y = 0; y < h; ++y) { |
147 | 0 | for (int x = 0; x < w; ++x) { |
148 | 0 | int32_t res = 0; |
149 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
150 | 0 | res += y_filter[k] * src[(y - fo_vert + k) * src_stride + x]; |
151 | 0 | } |
152 | 0 | dst[y * dst_stride + x] = |
153 | 0 | clip_pixel(ROUND_POWER_OF_TWO(res, FILTER_BITS)); |
154 | 0 | } |
155 | 0 | } |
156 | 0 | } |
157 | | |
158 | | void av1_convolve_x_sr_c(const uint8_t *src, int src_stride, uint8_t *dst, |
159 | | int dst_stride, int w, int h, |
160 | | const InterpFilterParams *filter_params_x, |
161 | 0 | const int subpel_x_qn, ConvolveParams *conv_params) { |
162 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
163 | 0 | const int bits = FILTER_BITS - conv_params->round_0; |
164 | |
165 | 0 | assert(bits >= 0); |
166 | 0 | assert((FILTER_BITS - conv_params->round_1) >= 0 || |
167 | 0 | ((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS)); |
168 | | |
169 | | // horizontal filter |
170 | 0 | const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( |
171 | 0 | filter_params_x, subpel_x_qn & SUBPEL_MASK); |
172 | |
173 | 0 | for (int y = 0; y < h; ++y) { |
174 | 0 | for (int x = 0; x < w; ++x) { |
175 | 0 | int32_t res = 0; |
176 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
177 | 0 | res += x_filter[k] * src[y * src_stride + x - fo_horiz + k]; |
178 | 0 | } |
179 | 0 | res = ROUND_POWER_OF_TWO(res, conv_params->round_0); |
180 | 0 | dst[y * dst_stride + x] = clip_pixel(ROUND_POWER_OF_TWO(res, bits)); |
181 | 0 | } |
182 | 0 | } |
183 | 0 | } |
184 | | |
185 | | // This function produces exactly the same output as av1_convolve_2d_sr_c, |
186 | | // but is an optimized version for intrabc. Use the following 2-tap filter: |
187 | | // DECLARE_ALIGNED(256, static const int16_t, |
188 | | // av1_intrabc_bilinear_filter[2 * SUBPEL_SHIFTS]) = { |
189 | | // 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
190 | | // 64, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
191 | | // }; |
192 | | void av1_convolve_2d_sr_intrabc_c(const uint8_t *src, int src_stride, |
193 | | uint8_t *dst, int dst_stride, int w, int h, |
194 | | const InterpFilterParams *filter_params_x, |
195 | | const InterpFilterParams *filter_params_y, |
196 | | const int subpel_x_qn, const int subpel_y_qn, |
197 | 2.74k | ConvolveParams *conv_params) { |
198 | 2.74k | assert(subpel_x_qn == 8); |
199 | 2.74k | assert(subpel_y_qn == 8); |
200 | 2.74k | assert(filter_params_x->taps == 2 && filter_params_y->taps == 2); |
201 | 2.74k | assert((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS); |
202 | 2.74k | (void)filter_params_x; |
203 | 2.74k | (void)subpel_x_qn; |
204 | 2.74k | (void)filter_params_y; |
205 | 2.74k | (void)subpel_y_qn; |
206 | 2.74k | (void)conv_params; |
207 | | |
208 | 2.74k | int16_t im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]; |
209 | 2.74k | int im_h = h + 1; |
210 | 2.74k | int im_stride = w; |
211 | 2.74k | assert(w <= MAX_SB_SIZE && h <= MAX_SB_SIZE); |
212 | 2.74k | const int bd = 8; |
213 | | |
214 | | // horizontal filter |
215 | | // explicitly operate for subpel_x_qn = 8. |
216 | 2.74k | int16_t *im = im_block; |
217 | 26.8k | for (int y = 0; y < im_h; ++y) { |
218 | 331k | for (int x = 0; x < w; ++x) { |
219 | 307k | const int32_t sum = (1 << bd) + src[x] + src[x + 1]; |
220 | 307k | assert(0 <= sum && sum < (1 << (bd + 2))); |
221 | 307k | im[x] = sum; |
222 | 307k | } |
223 | 24.1k | src += src_stride; |
224 | 24.1k | im += im_stride; |
225 | 24.1k | } |
226 | | |
227 | | // vertical filter |
228 | | // explicitly operate for subpel_y_qn = 8. |
229 | 2.74k | int16_t *src_vert = im_block; |
230 | 24.1k | for (int y = 0; y < h; ++y) { |
231 | 307k | for (int x = 0; x < w; ++x) { |
232 | 285k | const int32_t sum = |
233 | 285k | (1 << (bd + 2)) + src_vert[x] + src_vert[im_stride + x]; |
234 | 285k | assert(0 <= sum && sum < (1 << (bd + 4))); |
235 | 285k | const int16_t res = |
236 | 285k | ROUND_POWER_OF_TWO(sum, 2) - ((1 << bd) + (1 << (bd - 1))); |
237 | 285k | dst[x] = clip_pixel(res); |
238 | 285k | } |
239 | 21.3k | src_vert += im_stride; |
240 | 21.3k | dst += dst_stride; |
241 | 21.3k | } |
242 | 2.74k | } |
243 | | |
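
Because the intrabc path hard-codes the half-pel [64, 64] kernel, the offset/round pipeline above collapses to a plain rounded average of the four neighbouring pixels, which is why clip_pixel() never actually clips here. The sketch below re-derives the per-pixel arithmetic from this function and asserts that equivalence; it is an illustration of the math, not library API.

#include <assert.h>
#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + ((1 << (n)) >> 1)) >> (n))

// Mirrors the per-pixel arithmetic of av1_convolve_2d_sr_intrabc_c for the
// 2x2 neighbourhood a (top-left), b (top-right), c (bottom-left),
// d (bottom-right).
static int intrabc_2d_pixel(int a, int b, int c, int d) {
  const int bd = 8;
  const int32_t im0 = (1 << bd) + a + b;            // horizontal pass, row y
  const int32_t im1 = (1 << bd) + c + d;            // horizontal pass, row y + 1
  const int32_t sum = (1 << (bd + 2)) + im0 + im1;  // vertical pass
  return ROUND_POWER_OF_TWO(sum, 2) - ((1 << bd) + (1 << (bd - 1)));
}

int main(void) {
  for (int a = 0; a < 256; a += 5)
    for (int b = 0; b < 256; b += 5)
      for (int c = 0; c < 256; c += 5)
        for (int d = 0; d < 256; d += 5)
          // The whole pipeline is just a rounded average of the four pixels.
          assert(intrabc_2d_pixel(a, b, c, d) == ((a + b + c + d + 2) >> 2));
  return 0;
}
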
244 | | // This function produces exactly the same output as av1_convolve_y_sr_c, |
245 | | // but is an optimized version for intrabc. |
246 | | void av1_convolve_y_sr_intrabc_c(const uint8_t *src, int src_stride, |
247 | | uint8_t *dst, int dst_stride, int w, int h, |
248 | | const InterpFilterParams *filter_params_y, |
249 | 2.98k | const int subpel_y_qn) { |
250 | 2.98k | assert(subpel_y_qn == 8); |
251 | 2.98k | assert(filter_params_y->taps == 2); |
252 | 2.98k | (void)filter_params_y; |
253 | 2.98k | (void)subpel_y_qn; |
254 | | |
255 | | // vertical filter |
256 | | // explicitly operate for subpel_y_qn = 8. |
257 | 26.3k | for (int y = 0; y < h; ++y) { |
258 | 321k | for (int x = 0; x < w; ++x) { |
259 | 297k | const int32_t res = src[x] + src[src_stride + x]; |
260 | 297k | dst[x] = clip_pixel(ROUND_POWER_OF_TWO(res, 1)); |
261 | 297k | } |
262 | 23.4k | src += src_stride; |
263 | 23.4k | dst += dst_stride; |
264 | 23.4k | } |
265 | 2.98k | } |
266 | | |
267 | | // This function produces exactly the same output as av1_convolve_x_sr_c, |
268 | | // but is an optimized version for intrabc. |
269 | | void av1_convolve_x_sr_intrabc_c(const uint8_t *src, int src_stride, |
270 | | uint8_t *dst, int dst_stride, int w, int h, |
271 | | const InterpFilterParams *filter_params_x, |
272 | | const int subpel_x_qn, |
273 | 2.93k | ConvolveParams *conv_params) { |
274 | 2.93k | assert(subpel_x_qn == 8); |
275 | 2.93k | assert(filter_params_x->taps == 2); |
276 | 2.93k | assert((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS); |
277 | 2.93k | (void)filter_params_x; |
278 | 2.93k | (void)subpel_x_qn; |
279 | 2.93k | (void)conv_params; |
280 | | |
281 | | // horizontal filter |
282 | | // explicitly operate for subpel_x_qn = 8. |
283 | 25.9k | for (int y = 0; y < h; ++y) { |
284 | 329k | for (int x = 0; x < w; ++x) { |
285 | 306k | const int32_t res = src[x] + src[x + 1]; |
286 | 306k | dst[x] = clip_pixel(ROUND_POWER_OF_TWO(res, 1)); |
287 | 306k | } |
288 | 23.0k | src += src_stride; |
289 | 23.0k | dst += dst_stride; |
290 | 23.0k | } |
291 | 2.93k | } |
292 | | |
293 | | void av1_dist_wtd_convolve_2d_c(const uint8_t *src, int src_stride, |
294 | | uint8_t *dst, int dst_stride, int w, int h, |
295 | | const InterpFilterParams *filter_params_x, |
296 | | const InterpFilterParams *filter_params_y, |
297 | | const int subpel_x_qn, const int subpel_y_qn, |
298 | 0 | ConvolveParams *conv_params) { |
299 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
300 | 0 | int dst16_stride = conv_params->dst_stride; |
301 | 0 | int16_t im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]; |
302 | 0 | int im_h = h + filter_params_y->taps - 1; |
303 | 0 | int im_stride = w; |
304 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
305 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
306 | 0 | const int bd = 8; |
307 | 0 | const int round_bits = |
308 | 0 | 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; |
309 | | |
310 | | // horizontal filter |
311 | 0 | const uint8_t *src_horiz = src - fo_vert * src_stride; |
312 | 0 | const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( |
313 | 0 | filter_params_x, subpel_x_qn & SUBPEL_MASK); |
314 | 0 | for (int y = 0; y < im_h; ++y) { |
315 | 0 | for (int x = 0; x < w; ++x) { |
316 | 0 | int32_t sum = (1 << (bd + FILTER_BITS - 1)); |
317 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
318 | 0 | sum += x_filter[k] * src_horiz[y * src_stride + x - fo_horiz + k]; |
319 | 0 | } |
320 | 0 | assert(filter_params_x->taps > 8 || |
321 | 0 | (0 <= sum && sum < (1 << (bd + FILTER_BITS + 1)))); |
322 | 0 | im_block[y * im_stride + x] = |
323 | 0 | (int16_t)ROUND_POWER_OF_TWO(sum, conv_params->round_0); |
324 | 0 | } |
325 | 0 | } |
326 | | |
327 | | // vertical filter |
328 | 0 | int16_t *src_vert = im_block + fo_vert * im_stride; |
329 | 0 | const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( |
330 | 0 | filter_params_y, subpel_y_qn & SUBPEL_MASK); |
331 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
332 | 0 | for (int y = 0; y < h; ++y) { |
333 | 0 | for (int x = 0; x < w; ++x) { |
334 | 0 | int32_t sum = 1 << offset_bits; |
335 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
336 | 0 | sum += y_filter[k] * src_vert[(y - fo_vert + k) * im_stride + x]; |
337 | 0 | } |
338 | 0 | assert(filter_params_y->taps > 8 || |
339 | 0 | (0 <= sum && sum < (1 << (offset_bits + 2)))); |
340 | 0 | CONV_BUF_TYPE res = ROUND_POWER_OF_TWO(sum, conv_params->round_1); |
341 | 0 | if (conv_params->do_average) { |
342 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
343 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
344 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
345 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
346 | 0 | } else { |
347 | 0 | tmp += res; |
348 | 0 | tmp = tmp >> 1; |
349 | 0 | } |
350 | 0 | tmp -= (1 << (offset_bits - conv_params->round_1)) + |
351 | 0 | (1 << (offset_bits - conv_params->round_1 - 1)); |
352 | 0 | dst[y * dst_stride + x] = |
353 | 0 | clip_pixel(ROUND_POWER_OF_TWO(tmp, round_bits)); |
354 | 0 | } else { |
355 | 0 | dst16[y * dst16_stride + x] = res; |
356 | 0 | } |
357 | 0 | } |
358 | 0 | } |
359 | 0 | } |
360 | | |
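
In the do_average branch above, the second prediction is blended with the one already in the CONV_BUF either by a plain average or by a distance-weighted average whose weights sum to 1 << DIST_PRECISION_BITS. The sketch below shows both blends on made-up CONV_BUF values; DIST_PRECISION_BITS = 4 and the (9, 7) weight pair are assumptions used purely for illustration.

#include <assert.h>
#include <stdint.h>

int main(void) {
  // Assumed compound-average layout: weights sum to 1 << dist_precision_bits,
  // and (9, 7) stands in for one of the unequal pairs distance weighting picks.
  const int dist_precision_bits = 4;
  const int fwd_offset = 9, bck_offset = 7;

  const int32_t first = 5000;   // earlier prediction already in the CONV_BUF
  const int32_t second = 5200;  // res produced by this convolve call

  // use_dist_wtd_comp_avg == 1: distance-weighted blend.
  const int32_t wtd =
      (first * fwd_offset + second * bck_offset) >> dist_precision_bits;
  // use_dist_wtd_comp_avg == 0: plain average.
  const int32_t avg = (first + second) >> 1;

  // Equal weights (8, 8) make the weighted path identical to the plain average.
  assert(((first * 8 + second * 8) >> dist_precision_bits) == avg);
  // An unequal pair pulls the blend toward the more heavily weighted prediction.
  assert(wtd < avg);
  return 0;
}
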
361 | | void av1_dist_wtd_convolve_y_c(const uint8_t *src, int src_stride, uint8_t *dst, |
362 | | int dst_stride, int w, int h, |
363 | | const InterpFilterParams *filter_params_y, |
364 | | const int subpel_y_qn, |
365 | 0 | ConvolveParams *conv_params) { |
366 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
367 | 0 | int dst16_stride = conv_params->dst_stride; |
368 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
369 | 0 | const int bits = FILTER_BITS - conv_params->round_0; |
370 | 0 | const int bd = 8; |
371 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
372 | 0 | const int round_offset = (1 << (offset_bits - conv_params->round_1)) + |
373 | 0 | (1 << (offset_bits - conv_params->round_1 - 1)); |
374 | 0 | const int round_bits = |
375 | 0 | 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; |
376 | | |
377 | | // vertical filter |
378 | 0 | const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( |
379 | 0 | filter_params_y, subpel_y_qn & SUBPEL_MASK); |
380 | 0 | for (int y = 0; y < h; ++y) { |
381 | 0 | for (int x = 0; x < w; ++x) { |
382 | 0 | int32_t res = 0; |
383 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
384 | 0 | res += y_filter[k] * src[(y - fo_vert + k) * src_stride + x]; |
385 | 0 | } |
386 | 0 | res *= (1 << bits); |
387 | 0 | res = ROUND_POWER_OF_TWO(res, conv_params->round_1) + round_offset; |
388 | |
389 | 0 | if (conv_params->do_average) { |
390 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
391 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
392 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
393 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
394 | 0 | } else { |
395 | 0 | tmp += res; |
396 | 0 | tmp = tmp >> 1; |
397 | 0 | } |
398 | 0 | tmp -= round_offset; |
399 | 0 | dst[y * dst_stride + x] = |
400 | 0 | clip_pixel(ROUND_POWER_OF_TWO(tmp, round_bits)); |
401 | 0 | } else { |
402 | 0 | dst16[y * dst16_stride + x] = res; |
403 | 0 | } |
404 | 0 | } |
405 | 0 | } |
406 | 0 | } |
407 | | |
408 | | void av1_dist_wtd_convolve_x_c(const uint8_t *src, int src_stride, uint8_t *dst, |
409 | | int dst_stride, int w, int h, |
410 | | const InterpFilterParams *filter_params_x, |
411 | | const int subpel_x_qn, |
412 | 0 | ConvolveParams *conv_params) { |
413 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
414 | 0 | int dst16_stride = conv_params->dst_stride; |
415 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
416 | 0 | const int bits = FILTER_BITS - conv_params->round_1; |
417 | 0 | const int bd = 8; |
418 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
419 | 0 | const int round_offset = (1 << (offset_bits - conv_params->round_1)) + |
420 | 0 | (1 << (offset_bits - conv_params->round_1 - 1)); |
421 | 0 | const int round_bits = |
422 | 0 | 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; |
423 | | |
424 | | // horizontal filter |
425 | 0 | const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( |
426 | 0 | filter_params_x, subpel_x_qn & SUBPEL_MASK); |
427 | 0 | for (int y = 0; y < h; ++y) { |
428 | 0 | for (int x = 0; x < w; ++x) { |
429 | 0 | int32_t res = 0; |
430 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
431 | 0 | res += x_filter[k] * src[y * src_stride + x - fo_horiz + k]; |
432 | 0 | } |
433 | 0 | res = (1 << bits) * ROUND_POWER_OF_TWO(res, conv_params->round_0); |
434 | 0 | res += round_offset; |
435 | |
436 | 0 | if (conv_params->do_average) { |
437 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
438 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
439 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
440 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
441 | 0 | } else { |
442 | 0 | tmp += res; |
443 | 0 | tmp = tmp >> 1; |
444 | 0 | } |
445 | 0 | tmp -= round_offset; |
446 | 0 | dst[y * dst_stride + x] = |
447 | 0 | clip_pixel(ROUND_POWER_OF_TWO(tmp, round_bits)); |
448 | 0 | } else { |
449 | 0 | dst16[y * dst16_stride + x] = res; |
450 | 0 | } |
451 | 0 | } |
452 | 0 | } |
453 | 0 | } |
454 | | |
455 | | void av1_dist_wtd_convolve_2d_copy_c(const uint8_t *src, int src_stride, |
456 | | uint8_t *dst, int dst_stride, int w, int h, |
457 | 0 | ConvolveParams *conv_params) { |
458 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
459 | 0 | int dst16_stride = conv_params->dst_stride; |
460 | 0 | const int bits = |
461 | 0 | FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0; |
462 | 0 | const int bd = 8; |
463 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
464 | 0 | const int round_offset = (1 << (offset_bits - conv_params->round_1)) + |
465 | 0 | (1 << (offset_bits - conv_params->round_1 - 1)); |
466 | |
467 | 0 | for (int y = 0; y < h; ++y) { |
468 | 0 | for (int x = 0; x < w; ++x) { |
469 | 0 | CONV_BUF_TYPE res = src[y * src_stride + x] << bits; |
470 | 0 | res += round_offset; |
471 | |
472 | 0 | if (conv_params->do_average) { |
473 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
474 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
475 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
476 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
477 | 0 | } else { |
478 | 0 | tmp += res; |
479 | 0 | tmp = tmp >> 1; |
480 | 0 | } |
481 | 0 | tmp -= round_offset; |
482 | 0 | dst[y * dst_stride + x] = clip_pixel(ROUND_POWER_OF_TWO(tmp, bits)); |
483 | 0 | } else { |
484 | 0 | dst16[y * dst16_stride + x] = res; |
485 | 0 | } |
486 | 0 | } |
487 | 0 | } |
488 | 0 | } |
489 | | |
490 | | void av1_convolve_2d_scale_c(const uint8_t *src, int src_stride, uint8_t *dst, |
491 | | int dst_stride, int w, int h, |
492 | | const InterpFilterParams *filter_params_x, |
493 | | const InterpFilterParams *filter_params_y, |
494 | | const int subpel_x_qn, const int x_step_qn, |
495 | | const int subpel_y_qn, const int y_step_qn, |
496 | 0 | ConvolveParams *conv_params) { |
497 | 0 | int16_t im_block[(2 * MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]; |
498 | 0 | int im_h = (((h - 1) * y_step_qn + subpel_y_qn) >> SCALE_SUBPEL_BITS) + |
499 | 0 | filter_params_y->taps; |
500 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
501 | 0 | const int dst16_stride = conv_params->dst_stride; |
502 | 0 | const int bits = |
503 | 0 | FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1; |
504 | 0 | assert(bits >= 0); |
505 | 0 | int im_stride = w; |
506 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
507 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
508 | 0 | const int bd = 8; |
509 | | |
510 | | // horizontal filter |
511 | 0 | const uint8_t *src_horiz = src - fo_vert * src_stride; |
512 | 0 | for (int y = 0; y < im_h; ++y) { |
513 | 0 | int x_qn = subpel_x_qn; |
514 | 0 | for (int x = 0; x < w; ++x, x_qn += x_step_qn) { |
515 | 0 | const uint8_t *const src_x = &src_horiz[(x_qn >> SCALE_SUBPEL_BITS)]; |
516 | 0 | const int x_filter_idx = (x_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS; |
517 | 0 | assert(x_filter_idx < SUBPEL_SHIFTS); |
518 | 0 | const int16_t *x_filter = |
519 | 0 | av1_get_interp_filter_subpel_kernel(filter_params_x, x_filter_idx); |
520 | 0 | int32_t sum = (1 << (bd + FILTER_BITS - 1)); |
521 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
522 | 0 | sum += x_filter[k] * src_x[k - fo_horiz]; |
523 | 0 | } |
524 | 0 | assert(filter_params_x->taps > 8 || |
525 | 0 | (0 <= sum && sum < (1 << (bd + FILTER_BITS + 1)))); |
526 | 0 | im_block[y * im_stride + x] = |
527 | 0 | (int16_t)ROUND_POWER_OF_TWO(sum, conv_params->round_0); |
528 | 0 | } |
529 | 0 | src_horiz += src_stride; |
530 | 0 | } |
531 | | |
532 | | // vertical filter |
533 | 0 | int16_t *src_vert = im_block + fo_vert * im_stride; |
534 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
535 | 0 | for (int x = 0; x < w; ++x) { |
536 | 0 | int y_qn = subpel_y_qn; |
537 | 0 | for (int y = 0; y < h; ++y, y_qn += y_step_qn) { |
538 | 0 | const int16_t *src_y = &src_vert[(y_qn >> SCALE_SUBPEL_BITS) * im_stride]; |
539 | 0 | const int y_filter_idx = (y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS; |
540 | 0 | assert(y_filter_idx < SUBPEL_SHIFTS); |
541 | 0 | const int16_t *y_filter = |
542 | 0 | av1_get_interp_filter_subpel_kernel(filter_params_y, y_filter_idx); |
543 | 0 | int32_t sum = 1 << offset_bits; |
544 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
545 | 0 | sum += y_filter[k] * src_y[(k - fo_vert) * im_stride]; |
546 | 0 | } |
547 | 0 | assert(filter_params_y->taps > 8 || |
548 | 0 | (0 <= sum && sum < (1 << (offset_bits + 2)))); |
549 | 0 | CONV_BUF_TYPE res = ROUND_POWER_OF_TWO(sum, conv_params->round_1); |
550 | 0 | if (conv_params->is_compound) { |
551 | 0 | if (conv_params->do_average) { |
552 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
553 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
554 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
555 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
556 | 0 | } else { |
557 | 0 | tmp += res; |
558 | 0 | tmp = tmp >> 1; |
559 | 0 | } |
560 | | /* Subtract round offset and convolve round */ |
561 | 0 | tmp = tmp - ((1 << (offset_bits - conv_params->round_1)) + |
562 | 0 | (1 << (offset_bits - conv_params->round_1 - 1))); |
563 | 0 | dst[y * dst_stride + x] = clip_pixel(ROUND_POWER_OF_TWO(tmp, bits)); |
564 | 0 | } else { |
565 | 0 | dst16[y * dst16_stride + x] = res; |
566 | 0 | } |
567 | 0 | } else { |
568 | | /* Subtract round offset and convolve round */ |
569 | 0 | int32_t tmp = res - ((1 << (offset_bits - conv_params->round_1)) + |
570 | 0 | (1 << (offset_bits - conv_params->round_1 - 1))); |
571 | 0 | dst[y * dst_stride + x] = clip_pixel(ROUND_POWER_OF_TWO(tmp, bits)); |
572 | 0 | } |
573 | 0 | } |
574 | 0 | src_vert++; |
575 | 0 | } |
576 | 0 | } |
577 | | |
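
av1_convolve_2d_scale_c steps through the source in fixed point much like the upscaler: the high bits of x_qn/y_qn give the integer tap position and the remaining high fraction bits pick one of the 16 subpel filter phases. A small sketch of that decomposition follows; the 10-bit position fraction, the 6 extra bits, and the 2:1 step are illustrative assumptions, not values read from this file.

#include <stdio.h>

int main(void) {
  // Assumed fixed-point layout (hypothetical stand-ins for the real macros):
  // 10 fractional bits per position, of which the top 4 select one of the
  // 16 subpel filter phases.
  const int scale_subpel_bits = 10, scale_extra_bits = 6;
  const int scale_subpel_mask = (1 << scale_subpel_bits) - 1;

  // Hypothetical 2:1 downscale starting half a source pixel in.
  const int x_step_qn = 2 << scale_subpel_bits;
  int x_qn = 1 << (scale_subpel_bits - 1);  // subpel_x_qn

  for (int x = 0; x < 4; ++x, x_qn += x_step_qn) {
    const int src_pixel = x_qn >> scale_subpel_bits;                   // tap centre
    const int phase = (x_qn & scale_subpel_mask) >> scale_extra_bits;  // 0..15
    printf("output %d -> source pixel %d, subpel phase %d\n", x, src_pixel,
           phase);
  }
  return 0;
}
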
578 | | static void convolve_2d_scale_wrapper( |
579 | | const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, |
580 | | int h, const InterpFilterParams *filter_params_x, |
581 | | const InterpFilterParams *filter_params_y, const int subpel_x_qn, |
582 | | const int x_step_qn, const int subpel_y_qn, const int y_step_qn, |
583 | 1.59M | ConvolveParams *conv_params) { |
584 | 1.59M | if (conv_params->is_compound) { |
585 | 261k | assert(conv_params->dst != NULL); |
586 | 261k | } |
587 | 1.59M | av1_convolve_2d_scale(src, src_stride, dst, dst_stride, w, h, filter_params_x, |
588 | 1.59M | filter_params_y, subpel_x_qn, x_step_qn, subpel_y_qn, |
589 | 1.59M | y_step_qn, conv_params); |
590 | 1.59M | } |
591 | | |
592 | | static void convolve_2d_facade_compound( |
593 | | const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, |
594 | | int h, const InterpFilterParams *filter_params_x, |
595 | | const InterpFilterParams *filter_params_y, const int subpel_x_qn, |
596 | 1.44M | const int subpel_y_qn, ConvolveParams *conv_params) { |
597 | 1.44M | const bool need_x = subpel_x_qn != 0; |
598 | 1.44M | const bool need_y = subpel_y_qn != 0; |
599 | 1.44M | if (!need_x && !need_y) { |
600 | 975k | av1_dist_wtd_convolve_2d_copy(src, src_stride, dst, dst_stride, w, h, |
601 | 975k | conv_params); |
602 | 975k | } else if (need_x && !need_y) { |
603 | 153k | av1_dist_wtd_convolve_x(src, src_stride, dst, dst_stride, w, h, |
604 | 153k | filter_params_x, subpel_x_qn, conv_params); |
605 | 317k | } else if (!need_x && need_y) { |
606 | 79.3k | av1_dist_wtd_convolve_y(src, src_stride, dst, dst_stride, w, h, |
607 | 79.3k | filter_params_y, subpel_y_qn, conv_params); |
608 | 237k | } else { |
609 | 237k | assert(need_y && need_x); |
610 | 238k | av1_dist_wtd_convolve_2d(src, src_stride, dst, dst_stride, w, h, |
611 | 238k | filter_params_x, filter_params_y, subpel_x_qn, |
612 | 238k | subpel_y_qn, conv_params); |
613 | 238k | } |
614 | 1.44M | } |
615 | | |
616 | | static void convolve_2d_facade_single( |
617 | | const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, int w, |
618 | | int h, const InterpFilterParams *filter_params_x, |
619 | | const InterpFilterParams *filter_params_y, const int subpel_x_qn, |
620 | 5.10M | const int subpel_y_qn, ConvolveParams *conv_params) { |
621 | 5.10M | const bool need_x = subpel_x_qn != 0; |
622 | 5.10M | const bool need_y = subpel_y_qn != 0; |
623 | 5.10M | if (!need_x && !need_y) { |
624 | 1.70M | aom_convolve_copy(src, src_stride, dst, dst_stride, w, h); |
625 | 3.39M | } else if (need_x && !need_y) { |
626 | 828k | av1_convolve_x_sr(src, src_stride, dst, dst_stride, w, h, filter_params_x, |
627 | 828k | subpel_x_qn, conv_params); |
628 | 2.56M | } else if (!need_x && need_y) { |
629 | 747k | av1_convolve_y_sr(src, src_stride, dst, dst_stride, w, h, filter_params_y, |
630 | 747k | subpel_y_qn); |
631 | 1.82M | } else { |
632 | 1.82M | assert(need_x && need_y); |
633 | 1.82M | av1_convolve_2d_sr(src, src_stride, dst, dst_stride, w, h, filter_params_x, |
634 | 1.82M | filter_params_y, subpel_x_qn, subpel_y_qn, conv_params); |
635 | 1.82M | } |
636 | 5.10M | } |
637 | | |
638 | | void av1_convolve_2d_facade(const uint8_t *src, int src_stride, uint8_t *dst, |
639 | | int dst_stride, int w, int h, |
640 | | const InterpFilterParams *interp_filters[2], |
641 | | const int subpel_x_qn, int x_step_q4, |
642 | | const int subpel_y_qn, int y_step_q4, int scaled, |
643 | 8.14M | ConvolveParams *conv_params) { |
644 | 8.14M | (void)x_step_q4; |
645 | 8.14M | (void)y_step_q4; |
646 | 8.14M | (void)dst; |
647 | 8.14M | (void)dst_stride; |
648 | | |
649 | 8.14M | const InterpFilterParams *filter_params_x = interp_filters[0]; |
650 | 8.14M | const InterpFilterParams *filter_params_y = interp_filters[1]; |
651 | | |
652 | | // TODO(jingning, yunqing): Add SIMD support to 2-tap filter case. |
653 | | // 2-tap filter indicates that it is for IntraBC. |
654 | 8.14M | if (filter_params_x->taps == 2 || filter_params_y->taps == 2) { |
655 | 56.5k | assert(filter_params_x->taps == 2 && filter_params_y->taps == 2); |
656 | 56.5k | assert(!scaled); |
657 | 56.5k | if (subpel_x_qn && subpel_y_qn) { |
658 | 2.74k | av1_convolve_2d_sr_intrabc(src, src_stride, dst, dst_stride, w, h, |
659 | 2.74k | filter_params_x, filter_params_y, subpel_x_qn, |
660 | 2.74k | subpel_y_qn, conv_params); |
661 | 2.74k | return; |
662 | 53.7k | } else if (subpel_x_qn) { |
663 | 2.93k | av1_convolve_x_sr_intrabc(src, src_stride, dst, dst_stride, w, h, |
664 | 2.93k | filter_params_x, subpel_x_qn, conv_params); |
665 | 2.93k | return; |
666 | 50.8k | } else if (subpel_y_qn) { |
667 | 2.98k | av1_convolve_y_sr_intrabc(src, src_stride, dst, dst_stride, w, h, |
668 | 2.98k | filter_params_y, subpel_y_qn); |
669 | 2.98k | return; |
670 | 2.98k | } |
671 | 56.5k | } |
672 | | |
673 | 8.14M | if (scaled) { |
674 | 1.59M | convolve_2d_scale_wrapper(src, src_stride, dst, dst_stride, w, h, |
675 | 1.59M | filter_params_x, filter_params_y, subpel_x_qn, |
676 | 1.59M | x_step_q4, subpel_y_qn, y_step_q4, conv_params); |
677 | 6.54M | } else if (conv_params->is_compound) { |
678 | 1.44M | convolve_2d_facade_compound(src, src_stride, dst, dst_stride, w, h, |
679 | 1.44M | filter_params_x, filter_params_y, subpel_x_qn, |
680 | 1.44M | subpel_y_qn, conv_params); |
681 | 5.09M | } else { |
682 | 5.09M | convolve_2d_facade_single(src, src_stride, dst, dst_stride, w, h, |
683 | 5.09M | filter_params_x, filter_params_y, subpel_x_qn, |
684 | 5.09M | subpel_y_qn, conv_params); |
685 | 5.09M | } |
686 | 8.14M | } |
687 | | |
688 | | #if CONFIG_AV1_HIGHBITDEPTH |
689 | | void av1_highbd_convolve_x_sr_c(const uint16_t *src, int src_stride, |
690 | | uint16_t *dst, int dst_stride, int w, int h, |
691 | | const InterpFilterParams *filter_params_x, |
692 | | const int subpel_x_qn, |
693 | 0 | ConvolveParams *conv_params, int bd) { |
694 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
695 | 0 | const int bits = FILTER_BITS - conv_params->round_0; |
696 | |
697 | 0 | assert(bits >= 0); |
698 | 0 | assert((FILTER_BITS - conv_params->round_1) >= 0 || |
699 | 0 | ((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS)); |
700 | | |
701 | | // horizontal filter |
702 | 0 | const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( |
703 | 0 | filter_params_x, subpel_x_qn & SUBPEL_MASK); |
704 | 0 | for (int y = 0; y < h; ++y) { |
705 | 0 | for (int x = 0; x < w; ++x) { |
706 | 0 | int32_t res = 0; |
707 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
708 | 0 | res += x_filter[k] * src[y * src_stride + x - fo_horiz + k]; |
709 | 0 | } |
710 | 0 | res = ROUND_POWER_OF_TWO(res, conv_params->round_0); |
711 | 0 | dst[y * dst_stride + x] = |
712 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(res, bits), bd); |
713 | 0 | } |
714 | 0 | } |
715 | 0 | } |
716 | | |
717 | | void av1_highbd_convolve_y_sr_c(const uint16_t *src, int src_stride, |
718 | | uint16_t *dst, int dst_stride, int w, int h, |
719 | | const InterpFilterParams *filter_params_y, |
720 | 0 | const int subpel_y_qn, int bd) { |
721 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
722 | | // vertical filter |
723 | 0 | const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( |
724 | 0 | filter_params_y, subpel_y_qn & SUBPEL_MASK); |
725 | 0 | for (int y = 0; y < h; ++y) { |
726 | 0 | for (int x = 0; x < w; ++x) { |
727 | 0 | int32_t res = 0; |
728 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
729 | 0 | res += y_filter[k] * src[(y - fo_vert + k) * src_stride + x]; |
730 | 0 | } |
731 | 0 | dst[y * dst_stride + x] = |
732 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(res, FILTER_BITS), bd); |
733 | 0 | } |
734 | 0 | } |
735 | 0 | } |
736 | | |
737 | | void av1_highbd_convolve_2d_sr_c(const uint16_t *src, int src_stride, |
738 | | uint16_t *dst, int dst_stride, int w, int h, |
739 | | const InterpFilterParams *filter_params_x, |
740 | | const InterpFilterParams *filter_params_y, |
741 | | const int subpel_x_qn, const int subpel_y_qn, |
742 | 0 | ConvolveParams *conv_params, int bd) { |
743 | 0 | int16_t im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]; |
744 | 0 | int im_h = h + filter_params_y->taps - 1; |
745 | 0 | int im_stride = w; |
746 | 0 | assert(w <= MAX_SB_SIZE && h <= MAX_SB_SIZE); |
747 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
748 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
749 | 0 | const int bits = |
750 | 0 | FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1; |
751 | 0 | assert(bits >= 0); |
752 | | |
753 | | // horizontal filter |
754 | 0 | const uint16_t *src_horiz = src - fo_vert * src_stride; |
755 | 0 | const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( |
756 | 0 | filter_params_x, subpel_x_qn & SUBPEL_MASK); |
757 | 0 | for (int y = 0; y < im_h; ++y) { |
758 | 0 | for (int x = 0; x < w; ++x) { |
759 | 0 | int32_t sum = (1 << (bd + FILTER_BITS - 1)); |
760 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
761 | 0 | sum += x_filter[k] * src_horiz[y * src_stride + x - fo_horiz + k]; |
762 | 0 | } |
763 | 0 | assert(filter_params_x->taps > 8 || |
764 | 0 | (0 <= sum && sum < (1 << (bd + FILTER_BITS + 1)))); |
765 | 0 | im_block[y * im_stride + x] = |
766 | 0 | ROUND_POWER_OF_TWO(sum, conv_params->round_0); |
767 | 0 | } |
768 | 0 | } |
769 | | |
770 | | // vertical filter |
771 | 0 | int16_t *src_vert = im_block + fo_vert * im_stride; |
772 | 0 | const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( |
773 | 0 | filter_params_y, subpel_y_qn & SUBPEL_MASK); |
774 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
775 | 0 | for (int y = 0; y < h; ++y) { |
776 | 0 | for (int x = 0; x < w; ++x) { |
777 | 0 | int32_t sum = 1 << offset_bits; |
778 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
779 | 0 | sum += y_filter[k] * src_vert[(y - fo_vert + k) * im_stride + x]; |
780 | 0 | } |
781 | 0 | assert(filter_params_y->taps > 8 || |
782 | 0 | (0 <= sum && sum < (1 << (offset_bits + 2)))); |
783 | 0 | int32_t res = ROUND_POWER_OF_TWO(sum, conv_params->round_1) - |
784 | 0 | ((1 << (offset_bits - conv_params->round_1)) + |
785 | 0 | (1 << (offset_bits - conv_params->round_1 - 1))); |
786 | 0 | dst[y * dst_stride + x] = |
787 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(res, bits), bd); |
788 | 0 | } |
789 | 0 | } |
790 | 0 | } |
791 | | |
792 | | // This function produces exactly the same output as av1_highbd_convolve_2d_sr_c, |
793 | | // but is an optimized version for intrabc. Use the following 2-tap filter: |
794 | | // DECLARE_ALIGNED(256, static const int16_t, |
795 | | // av1_intrabc_bilinear_filter[2 * SUBPEL_SHIFTS]) = { |
796 | | // 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
797 | | // 64, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
798 | | // }; |
799 | | void av1_highbd_convolve_2d_sr_intrabc_c( |
800 | | const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, |
801 | | int h, const InterpFilterParams *filter_params_x, |
802 | | const InterpFilterParams *filter_params_y, const int subpel_x_qn, |
803 | 1.45k | const int subpel_y_qn, ConvolveParams *conv_params, int bd) { |
804 | 1.45k | const int bits = |
805 | 1.45k | FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1; |
806 | 1.45k | assert(bits >= 0); |
807 | 1.45k | assert(subpel_x_qn == 8); |
808 | 1.45k | assert(subpel_y_qn == 8); |
809 | 1.45k | assert(filter_params_x->taps == 2 && filter_params_y->taps == 2); |
810 | 1.45k | assert((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS); |
811 | 1.45k | (void)filter_params_x; |
812 | 1.45k | (void)subpel_x_qn; |
813 | 1.45k | (void)filter_params_y; |
814 | 1.45k | (void)subpel_y_qn; |
815 | 1.45k | (void)conv_params; |
816 | | |
817 | 1.45k | int16_t im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]; |
818 | 1.45k | int im_h = h + 1; |
819 | 1.45k | int im_stride = w; |
820 | 1.45k | assert(w <= MAX_SB_SIZE && h <= MAX_SB_SIZE); |
821 | | |
822 | | // horizontal filter |
823 | | // explicitly operate for subpel_x_qn = 8. |
824 | 1.45k | int16_t *im = im_block; |
825 | 15.1k | for (int y = 0; y < im_h; ++y) { |
826 | 147k | for (int x = 0; x < w; ++x) { |
827 | 133k | int32_t sum = (1 << (bd + FILTER_BITS - 1)) + 64 * (src[x] + src[x + 1]); |
828 | 133k | assert(0 <= sum && sum < (1 << (bd + FILTER_BITS + 1))); |
829 | 133k | sum = ROUND_POWER_OF_TWO(sum, conv_params->round_0); |
830 | 133k | im[x] = sum; |
831 | 133k | } |
832 | 13.7k | src += src_stride; |
833 | 13.7k | im += im_stride; |
834 | 13.7k | } |
835 | | |
836 | | // vertical filter |
837 | | // explicitly operate for subpel_y_qn = 8. |
838 | 1.45k | int16_t *src_vert = im_block; |
839 | 1.45k | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
840 | 13.7k | for (int y = 0; y < h; ++y) { |
841 | 134k | for (int x = 0; x < w; ++x) { |
842 | 122k | const int32_t sum = |
843 | 122k | (1 << offset_bits) + 64 * (src_vert[x] + src_vert[im_stride + x]); |
844 | 122k | assert(0 <= sum && sum < (1 << (offset_bits + 2))); |
845 | 122k | const int32_t res = ROUND_POWER_OF_TWO(sum, conv_params->round_1) - |
846 | 122k | ((1 << (offset_bits - conv_params->round_1)) + |
847 | 122k | (1 << (offset_bits - conv_params->round_1 - 1))); |
848 | | |
849 | 122k | dst[x] = clip_pixel_highbd(ROUND_POWER_OF_TWO(res, bits), bd); |
850 | 122k | } |
851 | 12.2k | src_vert += im_stride; |
852 | 12.2k | dst += dst_stride; |
853 | 12.2k | } |
854 | 1.45k | } |
855 | | |
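
Unlike the 8-bit intrabc path, the high-bitdepth version above keeps the generic round_0/round_1 offsets, yet with the usual single-prediction rounding values it still reduces to the same rounded 4-tap average. The sketch below mirrors the per-pixel arithmetic and asserts that; round_0 = 3 and round_1 = 11 are assumed rather than read from this file, and bd = 10 is just a sample bit depth.

#include <assert.h>
#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + ((1 << (n)) >> 1)) >> (n))

// Mirrors the per-pixel arithmetic of av1_highbd_convolve_2d_sr_intrabc_c
// under the assumed rounding values round_0 = 3, round_1 = 11.
static int highbd_intrabc_2d_pixel(int a, int b, int c, int d, int bd) {
  const int filter_bits = 7, round_0 = 3, round_1 = 11;
  const int bits = 2 * filter_bits - round_0 - round_1;  // 0
  const int offset_bits = bd + 2 * filter_bits - round_0;

  const int32_t im0 =
      ROUND_POWER_OF_TWO((1 << (bd + filter_bits - 1)) + 64 * (a + b), round_0);
  const int32_t im1 =
      ROUND_POWER_OF_TWO((1 << (bd + filter_bits - 1)) + 64 * (c + d), round_0);
  const int32_t res =
      ROUND_POWER_OF_TWO((1 << offset_bits) + 64 * (im0 + im1), round_1) -
      ((1 << (offset_bits - round_1)) + (1 << (offset_bits - round_1 - 1)));
  return ROUND_POWER_OF_TWO(res, bits);
}

int main(void) {
  const int bd = 10;  // sample bit depth
  for (int a = 0; a < (1 << bd); a += 37)
    for (int b = 0; b < (1 << bd); b += 41)
      for (int c = 0; c < (1 << bd); c += 43)
        for (int d = 0; d < (1 << bd); d += 47)
          // Same rounded 4-tap average as the 8-bit intrabc path.
          assert(highbd_intrabc_2d_pixel(a, b, c, d, bd) ==
                 ((a + b + c + d + 2) >> 2));
  return 0;
}
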
856 | | // This function produces exactly the same output as av1_highbd_convolve_y_sr_c, |
857 | | // but is an optimized version for intrabc. |
858 | | void av1_highbd_convolve_y_sr_intrabc_c( |
859 | | const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, |
860 | | int h, const InterpFilterParams *filter_params_y, const int subpel_y_qn, |
861 | 1.45k | int bd) { |
862 | 1.45k | assert(subpel_y_qn == 8); |
863 | 1.45k | assert(filter_params_y->taps == 2); |
864 | 1.45k | (void)filter_params_y; |
865 | 1.45k | (void)subpel_y_qn; |
866 | | |
867 | | // vertical filter |
868 | | // explicitly operate for subpel_y_qn = 8. |
869 | 14.2k | for (int y = 0; y < h; ++y) { |
870 | 206k | for (int x = 0; x < w; ++x) { |
871 | 193k | const int32_t res = src[x] + src[src_stride + x]; |
872 | 193k | dst[x] = clip_pixel_highbd(ROUND_POWER_OF_TWO(res, 1), bd); |
873 | 193k | } |
874 | 12.8k | src += src_stride; |
875 | 12.8k | dst += dst_stride; |
876 | 12.8k | } |
877 | 1.45k | } |
878 | | |
879 | | // This function produces exactly the same output as av1_highbd_convolve_x_sr_c, |
880 | | // but is an optimized version for intrabc. |
881 | | void av1_highbd_convolve_x_sr_intrabc_c( |
882 | | const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, |
883 | | int h, const InterpFilterParams *filter_params_x, const int subpel_x_qn, |
884 | 1.41k | ConvolveParams *conv_params, int bd) { |
885 | 1.41k | const int bits = FILTER_BITS - conv_params->round_0; |
886 | 1.41k | assert(bits >= 0); |
887 | 1.41k | assert(subpel_x_qn == 8); |
888 | 1.41k | assert(filter_params_x->taps == 2); |
889 | 1.41k | assert((conv_params->round_0 + conv_params->round_1) == 2 * FILTER_BITS); |
890 | 1.41k | (void)filter_params_x; |
891 | 1.41k | (void)subpel_x_qn; |
892 | | |
893 | | // horizontal filter |
894 | | // explicitly operate for subpel_x_qn = 8. |
895 | 15.5k | for (int y = 0; y < h; ++y) { |
896 | 377k | for (int x = 0; x < w; ++x) { |
897 | 363k | int32_t res = 64 * (src[x] + src[x + 1]); |
898 | 363k | res = ROUND_POWER_OF_TWO(res, conv_params->round_0); |
899 | 363k | dst[x] = clip_pixel_highbd(ROUND_POWER_OF_TWO(res, bits), bd); |
900 | 363k | } |
901 | 14.1k | src += src_stride; |
902 | 14.1k | dst += dst_stride; |
903 | 14.1k | } |
904 | 1.41k | } |
905 | | |
906 | | void av1_highbd_dist_wtd_convolve_2d_c( |
907 | | const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, int w, |
908 | | int h, const InterpFilterParams *filter_params_x, |
909 | | const InterpFilterParams *filter_params_y, const int subpel_x_qn, |
910 | 0 | const int subpel_y_qn, ConvolveParams *conv_params, int bd) { |
911 | 0 | int x, y, k; |
912 | 0 | int16_t im_block[(MAX_SB_SIZE + MAX_FILTER_TAP - 1) * MAX_SB_SIZE]; |
913 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
914 | 0 | int dst16_stride = conv_params->dst_stride; |
915 | 0 | int im_h = h + filter_params_y->taps - 1; |
916 | 0 | int im_stride = w; |
917 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
918 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
919 | 0 | const int round_bits = |
920 | 0 | 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; |
921 | 0 | assert(round_bits >= 0); |
922 | | |
923 | | // horizontal filter |
924 | 0 | const uint16_t *src_horiz = src - fo_vert * src_stride; |
925 | 0 | const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( |
926 | 0 | filter_params_x, subpel_x_qn & SUBPEL_MASK); |
927 | 0 | for (y = 0; y < im_h; ++y) { |
928 | 0 | for (x = 0; x < w; ++x) { |
929 | 0 | int32_t sum = (1 << (bd + FILTER_BITS - 1)); |
930 | 0 | for (k = 0; k < filter_params_x->taps; ++k) { |
931 | 0 | sum += x_filter[k] * src_horiz[y * src_stride + x - fo_horiz + k]; |
932 | 0 | } |
933 | 0 | assert(filter_params_x->taps > 8 || |
934 | 0 | (0 <= sum && sum < (1 << (bd + FILTER_BITS + 1)))); |
935 | 0 | (void)bd; |
936 | 0 | im_block[y * im_stride + x] = |
937 | 0 | (int16_t)ROUND_POWER_OF_TWO(sum, conv_params->round_0); |
938 | 0 | } |
939 | 0 | } |
940 | | |
941 | | // vertical filter |
942 | 0 | int16_t *src_vert = im_block + fo_vert * im_stride; |
943 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
944 | 0 | const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( |
945 | 0 | filter_params_y, subpel_y_qn & SUBPEL_MASK); |
946 | 0 | for (y = 0; y < h; ++y) { |
947 | 0 | for (x = 0; x < w; ++x) { |
948 | 0 | int32_t sum = 1 << offset_bits; |
949 | 0 | for (k = 0; k < filter_params_y->taps; ++k) { |
950 | 0 | sum += y_filter[k] * src_vert[(y - fo_vert + k) * im_stride + x]; |
951 | 0 | } |
952 | 0 | assert(filter_params_y->taps > 8 || |
953 | 0 | (0 <= sum && sum < (1 << (offset_bits + 2)))); |
954 | 0 | CONV_BUF_TYPE res = ROUND_POWER_OF_TWO(sum, conv_params->round_1); |
955 | 0 | if (conv_params->do_average) { |
956 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
957 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
958 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
959 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
960 | 0 | } else { |
961 | 0 | tmp += res; |
962 | 0 | tmp = tmp >> 1; |
963 | 0 | } |
964 | 0 | tmp -= (1 << (offset_bits - conv_params->round_1)) + |
965 | 0 | (1 << (offset_bits - conv_params->round_1 - 1)); |
966 | 0 | dst[y * dst_stride + x] = |
967 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(tmp, round_bits), bd); |
968 | 0 | } else { |
969 | 0 | dst16[y * dst16_stride + x] = res; |
970 | 0 | } |
971 | 0 | } |
972 | 0 | } |
973 | 0 | } |
974 | | |
975 | | void av1_highbd_dist_wtd_convolve_x_c(const uint16_t *src, int src_stride, |
976 | | uint16_t *dst, int dst_stride, int w, |
977 | | int h, |
978 | | const InterpFilterParams *filter_params_x, |
979 | | const int subpel_x_qn, |
980 | 0 | ConvolveParams *conv_params, int bd) { |
981 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
982 | 0 | int dst16_stride = conv_params->dst_stride; |
983 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
984 | 0 | const int bits = FILTER_BITS - conv_params->round_1; |
985 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
986 | 0 | const int round_offset = (1 << (offset_bits - conv_params->round_1)) + |
987 | 0 | (1 << (offset_bits - conv_params->round_1 - 1)); |
988 | 0 | const int round_bits = |
989 | 0 | 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; |
990 | 0 | assert(round_bits >= 0); |
991 | 0 | assert(bits >= 0); |
992 | | // horizontal filter |
993 | 0 | const int16_t *x_filter = av1_get_interp_filter_subpel_kernel( |
994 | 0 | filter_params_x, subpel_x_qn & SUBPEL_MASK); |
995 | 0 | for (int y = 0; y < h; ++y) { |
996 | 0 | for (int x = 0; x < w; ++x) { |
997 | 0 | int32_t res = 0; |
998 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
999 | 0 | res += x_filter[k] * src[y * src_stride + x - fo_horiz + k]; |
1000 | 0 | } |
1001 | 0 | res = (1 << bits) * ROUND_POWER_OF_TWO(res, conv_params->round_0); |
1002 | 0 | res += round_offset; |
1003 | |
1004 | 0 | if (conv_params->do_average) { |
1005 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
1006 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
1007 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
1008 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
1009 | 0 | } else { |
1010 | 0 | tmp += res; |
1011 | 0 | tmp = tmp >> 1; |
1012 | 0 | } |
1013 | 0 | tmp -= round_offset; |
1014 | 0 | dst[y * dst_stride + x] = |
1015 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(tmp, round_bits), bd); |
1016 | 0 | } else { |
1017 | 0 | dst16[y * dst16_stride + x] = res; |
1018 | 0 | } |
1019 | 0 | } |
1020 | 0 | } |
1021 | 0 | } |
1022 | | |
1023 | | void av1_highbd_dist_wtd_convolve_y_c(const uint16_t *src, int src_stride, |
1024 | | uint16_t *dst, int dst_stride, int w, |
1025 | | int h, |
1026 | | const InterpFilterParams *filter_params_y, |
1027 | | const int subpel_y_qn, |
1028 | 0 | ConvolveParams *conv_params, int bd) { |
1029 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
1030 | 0 | int dst16_stride = conv_params->dst_stride; |
1031 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
1032 | 0 | const int bits = FILTER_BITS - conv_params->round_0; |
1033 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
1034 | 0 | const int round_offset = (1 << (offset_bits - conv_params->round_1)) + |
1035 | 0 | (1 << (offset_bits - conv_params->round_1 - 1)); |
1036 | 0 | const int round_bits = |
1037 | 0 | 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1; |
1038 | 0 | assert(round_bits >= 0); |
1039 | 0 | assert(bits >= 0); |
1040 | | // vertical filter |
1041 | 0 | const int16_t *y_filter = av1_get_interp_filter_subpel_kernel( |
1042 | 0 | filter_params_y, subpel_y_qn & SUBPEL_MASK); |
1043 | 0 | for (int y = 0; y < h; ++y) { |
1044 | 0 | for (int x = 0; x < w; ++x) { |
1045 | 0 | int32_t res = 0; |
1046 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
1047 | 0 | res += y_filter[k] * src[(y - fo_vert + k) * src_stride + x]; |
1048 | 0 | } |
1049 | 0 | res *= (1 << bits); |
1050 | 0 | res = ROUND_POWER_OF_TWO(res, conv_params->round_1) + round_offset; |
1051 | |
1052 | 0 | if (conv_params->do_average) { |
1053 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
1054 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
1055 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
1056 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
1057 | 0 | } else { |
1058 | 0 | tmp += res; |
1059 | 0 | tmp = tmp >> 1; |
1060 | 0 | } |
1061 | 0 | tmp -= round_offset; |
1062 | 0 | dst[y * dst_stride + x] = |
1063 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(tmp, round_bits), bd); |
1064 | 0 | } else { |
1065 | 0 | dst16[y * dst16_stride + x] = res; |
1066 | 0 | } |
1067 | 0 | } |
1068 | 0 | } |
1069 | 0 | } |
1070 | | |
1071 | | void av1_highbd_dist_wtd_convolve_2d_copy_c(const uint16_t *src, int src_stride, |
1072 | | uint16_t *dst, int dst_stride, |
1073 | | int w, int h, |
1074 | | ConvolveParams *conv_params, |
1075 | 0 | int bd) { |
1076 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
1077 | 0 | int dst16_stride = conv_params->dst_stride; |
1078 | 0 | const int bits = |
1079 | 0 | FILTER_BITS * 2 - conv_params->round_1 - conv_params->round_0; |
1080 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
1081 | 0 | const int round_offset = (1 << (offset_bits - conv_params->round_1)) + |
1082 | 0 | (1 << (offset_bits - conv_params->round_1 - 1)); |
1083 | 0 | assert(bits >= 0); |
1084 | | |
1085 | 0 | for (int y = 0; y < h; ++y) { |
1086 | 0 | for (int x = 0; x < w; ++x) { |
1087 | 0 | CONV_BUF_TYPE res = src[y * src_stride + x] << bits; |
1088 | 0 | res += round_offset; |
1089 | 0 | if (conv_params->do_average) { |
1090 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
1091 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
1092 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
1093 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
1094 | 0 | } else { |
1095 | 0 | tmp += res; |
1096 | 0 | tmp = tmp >> 1; |
1097 | 0 | } |
1098 | 0 | tmp -= round_offset; |
1099 | 0 | dst[y * dst_stride + x] = |
1100 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(tmp, bits), bd); |
1101 | 0 | } else { |
1102 | 0 | dst16[y * dst16_stride + x] = res; |
1103 | 0 | } |
1104 | 0 | } |
1105 | 0 | } |
1106 | 0 | } |
1107 | | |
1108 | | void av1_highbd_convolve_2d_scale_c(const uint16_t *src, int src_stride, |
1109 | | uint16_t *dst, int dst_stride, int w, int h, |
1110 | | const InterpFilterParams *filter_params_x, |
1111 | | const InterpFilterParams *filter_params_y, |
1112 | | const int subpel_x_qn, const int x_step_qn, |
1113 | | const int subpel_y_qn, const int y_step_qn, |
1114 | 0 | ConvolveParams *conv_params, int bd) { |
1115 | 0 | int16_t im_block[(2 * MAX_SB_SIZE + MAX_FILTER_TAP) * MAX_SB_SIZE]; |
1116 | 0 | int im_h = (((h - 1) * y_step_qn + subpel_y_qn) >> SCALE_SUBPEL_BITS) + |
1117 | 0 | filter_params_y->taps; |
1118 | 0 | int im_stride = w; |
1119 | 0 | const int fo_vert = filter_params_y->taps / 2 - 1; |
1120 | 0 | const int fo_horiz = filter_params_x->taps / 2 - 1; |
1121 | 0 | CONV_BUF_TYPE *dst16 = conv_params->dst; |
1122 | 0 | const int dst16_stride = conv_params->dst_stride; |
1123 | 0 | const int bits = |
1124 | 0 | FILTER_BITS * 2 - conv_params->round_0 - conv_params->round_1; |
1125 | 0 | assert(bits >= 0); |
1126 | | // horizontal filter |
1127 | 0 | const uint16_t *src_horiz = src - fo_vert * src_stride; |
1128 | 0 | for (int y = 0; y < im_h; ++y) { |
1129 | 0 | int x_qn = subpel_x_qn; |
1130 | 0 | for (int x = 0; x < w; ++x, x_qn += x_step_qn) { |
1131 | 0 | const uint16_t *const src_x = &src_horiz[(x_qn >> SCALE_SUBPEL_BITS)]; |
1132 | 0 | const int x_filter_idx = (x_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS; |
1133 | 0 | assert(x_filter_idx < SUBPEL_SHIFTS); |
1134 | 0 | const int16_t *x_filter = |
1135 | 0 | av1_get_interp_filter_subpel_kernel(filter_params_x, x_filter_idx); |
1136 | 0 | int32_t sum = (1 << (bd + FILTER_BITS - 1)); |
1137 | 0 | for (int k = 0; k < filter_params_x->taps; ++k) { |
1138 | 0 | sum += x_filter[k] * src_x[k - fo_horiz]; |
1139 | 0 | } |
1140 | 0 | assert(filter_params_x->taps > 8 || |
1141 | 0 | (0 <= sum && sum < (1 << (bd + FILTER_BITS + 1)))); |
1142 | 0 | im_block[y * im_stride + x] = |
1143 | 0 | (int16_t)ROUND_POWER_OF_TWO(sum, conv_params->round_0); |
1144 | 0 | } |
1145 | 0 | src_horiz += src_stride; |
1146 | 0 | } |
1147 | | |
1148 | | // vertical filter |
1149 | 0 | int16_t *src_vert = im_block + fo_vert * im_stride; |
1150 | 0 | const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0; |
1151 | 0 | for (int x = 0; x < w; ++x) { |
1152 | 0 | int y_qn = subpel_y_qn; |
1153 | 0 | for (int y = 0; y < h; ++y, y_qn += y_step_qn) { |
1154 | 0 | const int16_t *src_y = &src_vert[(y_qn >> SCALE_SUBPEL_BITS) * im_stride]; |
1155 | 0 | const int y_filter_idx = (y_qn & SCALE_SUBPEL_MASK) >> SCALE_EXTRA_BITS; |
1156 | 0 | assert(y_filter_idx < SUBPEL_SHIFTS); |
1157 | 0 | const int16_t *y_filter = |
1158 | 0 | av1_get_interp_filter_subpel_kernel(filter_params_y, y_filter_idx); |
1159 | 0 | int32_t sum = 1 << offset_bits; |
1160 | 0 | for (int k = 0; k < filter_params_y->taps; ++k) { |
1161 | 0 | sum += y_filter[k] * src_y[(k - fo_vert) * im_stride]; |
1162 | 0 | } |
1163 | 0 | assert(filter_params_y->taps > 8 || |
1164 | 0 | (0 <= sum && sum < (1 << (offset_bits + 2)))); |
1165 | 0 | CONV_BUF_TYPE res = ROUND_POWER_OF_TWO(sum, conv_params->round_1); |
1166 | 0 | if (conv_params->is_compound) { |
1167 | 0 | if (conv_params->do_average) { |
1168 | 0 | int32_t tmp = dst16[y * dst16_stride + x]; |
1169 | 0 | if (conv_params->use_dist_wtd_comp_avg) { |
1170 | 0 | tmp = tmp * conv_params->fwd_offset + res * conv_params->bck_offset; |
1171 | 0 | tmp = tmp >> DIST_PRECISION_BITS; |
1172 | 0 | } else { |
1173 | 0 | tmp += res; |
1174 | 0 | tmp = tmp >> 1; |
1175 | 0 | } |
1176 | | /* Subtract round offset and convolve round */ |
1177 | 0 | tmp = tmp - ((1 << (offset_bits - conv_params->round_1)) + |
1178 | 0 | (1 << (offset_bits - conv_params->round_1 - 1))); |
1179 | 0 | dst[y * dst_stride + x] = |
1180 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(tmp, bits), bd); |
1181 | 0 | } else { |
1182 | 0 | dst16[y * dst16_stride + x] = res; |
1183 | 0 | } |
1184 | 0 | } else { |
1185 | | /* Subtract round offset and convolve round */ |
1186 | 0 | int32_t tmp = res - ((1 << (offset_bits - conv_params->round_1)) + |
1187 | 0 | (1 << (offset_bits - conv_params->round_1 - 1))); |
1188 | 0 | dst[y * dst_stride + x] = |
1189 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(tmp, bits), bd); |
1190 | 0 | } |
1191 | 0 | } |
1192 | 0 | src_vert++; |
1193 | 0 | } |
1194 | 0 | } |
1195 | | |
1196 | | static void highbd_convolve_2d_facade_compound( |
1197 | | const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, |
1198 | | const int w, const int h, const InterpFilterParams *filter_params_x, |
1199 | | const InterpFilterParams *filter_params_y, const int subpel_x_qn, |
1200 | 1.51M | const int subpel_y_qn, ConvolveParams *conv_params, int bd) { |
1201 | 1.51M | const bool need_x = subpel_x_qn != 0; |
1202 | 1.51M | const bool need_y = subpel_y_qn != 0; |
1203 | 1.51M | if (!need_x && !need_y) { |
1204 | 312k | av1_highbd_dist_wtd_convolve_2d_copy(src, src_stride, dst, dst_stride, w, h, |
1205 | 312k | conv_params, bd); |
1206 | 1.20M | } else if (need_x && !need_y) { |
1207 | 282k | av1_highbd_dist_wtd_convolve_x(src, src_stride, dst, dst_stride, w, h, |
1208 | 282k | filter_params_x, subpel_x_qn, conv_params, |
1209 | 282k | bd); |
1210 | 918k | } else if (!need_x && need_y) { |
1211 | 154k | av1_highbd_dist_wtd_convolve_y(src, src_stride, dst, dst_stride, w, h, |
1212 | 154k | filter_params_y, subpel_y_qn, conv_params, |
1213 | 154k | bd); |
1214 | 764k | } else { |
1215 | 764k | assert(need_x && need_y); |
1216 | 764k | av1_highbd_dist_wtd_convolve_2d(src, src_stride, dst, dst_stride, w, h, |
1217 | 764k | filter_params_x, filter_params_y, |
1218 | 764k | subpel_x_qn, subpel_y_qn, conv_params, bd); |
1219 | 764k | } |
1220 | 1.51M | } |
1221 | | |
1222 | | static void highbd_convolve_2d_facade_single( |
1223 | | const uint16_t *src, int src_stride, uint16_t *dst, int dst_stride, |
1224 | | const int w, const int h, const InterpFilterParams *filter_params_x, |
1225 | | const InterpFilterParams *filter_params_y, const int subpel_x_qn, |
1226 | 6.13M | const int subpel_y_qn, ConvolveParams *conv_params, int bd) { |
1227 | 6.13M | const bool need_x = subpel_x_qn != 0; |
1228 | 6.13M | const bool need_y = subpel_y_qn != 0; |
1229 | | |
1230 | 6.13M | if (!need_x && !need_y) { |
1231 | 1.23M | aom_highbd_convolve_copy(src, src_stride, dst, dst_stride, w, h); |
1232 | 4.90M | } else if (need_x && !need_y) { |
1233 | 890k | av1_highbd_convolve_x_sr(src, src_stride, dst, dst_stride, w, h, |
1234 | 890k | filter_params_x, subpel_x_qn, conv_params, bd); |
1235 | 4.01M | } else if (!need_x && need_y) { |
1236 | 1.02M | av1_highbd_convolve_y_sr(src, src_stride, dst, dst_stride, w, h, |
1237 | 1.02M | filter_params_y, subpel_y_qn, bd); |
1238 | 2.99M | } else { |
1239 | 2.99M | assert(need_x && need_y); |
1240 | 2.99M | av1_highbd_convolve_2d_sr(src, src_stride, dst, dst_stride, w, h, |
1241 | 2.99M | filter_params_x, filter_params_y, subpel_x_qn, |
1242 | 2.99M | subpel_y_qn, conv_params, bd); |
1243 | 2.99M | } |
1244 | 6.13M | } |
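Both facade helpers above reduce to the same four-way selection: zero sub-pixel offsets degenerate to a copy, a single non-zero offset to a one-dimensional filter, and two non-zero offsets to the full separable 2-D path. A standalone sketch of that selection follows; the enum and function names are illustrative, not libaom identifiers.

```c
#include <stdio.h>

/* Sketch of the facade dispatch: the kernel is chosen purely from whether
 * the horizontal and/or vertical sub-pixel offsets are non-zero. */
typedef enum { PATH_COPY, PATH_X_ONLY, PATH_Y_ONLY, PATH_2D } ConvolvePath;

static ConvolvePath select_path(int subpel_x_qn, int subpel_y_qn) {
  const int need_x = subpel_x_qn != 0;
  const int need_y = subpel_y_qn != 0;
  if (!need_x && !need_y) return PATH_COPY;  /* integer-pel: plain copy */
  if (need_x && !need_y) return PATH_X_ONLY; /* horizontal-only filter */
  if (!need_x && need_y) return PATH_Y_ONLY; /* vertical-only filter */
  return PATH_2D;                            /* full separable 2-D filter */
}

int main(void) {
  printf("%d %d %d %d\n", select_path(0, 0), select_path(8, 0),
         select_path(0, 8), select_path(8, 8)); /* prints: 0 1 2 3 */
  return 0;
}
```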
1245 | | |
1246 | | void av1_highbd_convolve_2d_facade(const uint8_t *src8, int src_stride, |
1247 | | uint8_t *dst8, int dst_stride, int w, int h, |
1248 | | const InterpFilterParams *interp_filters[2], |
1249 | | const int subpel_x_qn, int x_step_q4, |
1250 | | const int subpel_y_qn, int y_step_q4, |
1251 | | int scaled, ConvolveParams *conv_params, |
1252 | 8.38M | int bd) { |
1253 | 8.38M | (void)x_step_q4; |
1254 | 8.38M | (void)y_step_q4; |
1255 | 8.38M | (void)dst_stride; |
1256 | 8.38M | const uint16_t *src = CONVERT_TO_SHORTPTR(src8); |
1257 | | |
1258 | 8.38M | const InterpFilterParams *filter_params_x = interp_filters[0]; |
1259 | 8.38M | const InterpFilterParams *filter_params_y = interp_filters[1]; |
1260 | | |
1261 | 8.38M | uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); |
1262 | | // 2-tap filter indicates that it is for IntraBC. |
1263 | 8.38M | if (filter_params_x->taps == 2 || filter_params_y->taps == 2) { |
1264 | 91.2k | assert(filter_params_x->taps == 2 && filter_params_y->taps == 2); |
1265 | 91.2k | assert(!scaled); |
1266 | 91.2k | if (subpel_x_qn && subpel_y_qn) { |
1267 | 1.45k | av1_highbd_convolve_2d_sr_intrabc_c( |
1268 | 1.45k | src, src_stride, dst, dst_stride, w, h, filter_params_x, |
1269 | 1.45k | filter_params_y, subpel_x_qn, subpel_y_qn, conv_params, bd); |
1270 | 1.45k | return; |
1271 | 89.8k | } else if (subpel_x_qn) { |
1272 | 1.41k | av1_highbd_convolve_x_sr_intrabc_c(src, src_stride, dst, dst_stride, w, h, |
1273 | 1.41k | filter_params_x, subpel_x_qn, |
1274 | 1.41k | conv_params, bd); |
1275 | 1.41k | return; |
1276 | 88.4k | } else if (subpel_y_qn) { |
1277 | 1.45k | av1_highbd_convolve_y_sr_intrabc_c(src, src_stride, dst, dst_stride, w, h, |
1278 | 1.45k | filter_params_y, subpel_y_qn, bd); |
1279 | 1.45k | return; |
1280 | 1.45k | } |
1281 | 91.2k | } |
1282 | | |
1283 | 8.37M | if (scaled) { |
1284 | 724k | if (conv_params->is_compound) { |
1285 | 117k | assert(conv_params->dst != NULL); |
1286 | 117k | } |
1287 | 724k | av1_highbd_convolve_2d_scale(src, src_stride, dst, dst_stride, w, h, |
1288 | 724k | filter_params_x, filter_params_y, subpel_x_qn, |
1289 | 724k | x_step_q4, subpel_y_qn, y_step_q4, conv_params, |
1290 | 724k | bd); |
1291 | 7.65M | } else if (conv_params->is_compound) { |
1292 | 1.51M | highbd_convolve_2d_facade_compound( |
1293 | 1.51M | src, src_stride, dst, dst_stride, w, h, filter_params_x, |
1294 | 1.51M | filter_params_y, subpel_x_qn, subpel_y_qn, conv_params, bd); |
1295 | 6.13M | } else { |
1296 | 6.13M | highbd_convolve_2d_facade_single(src, src_stride, dst, dst_stride, w, h, |
1297 | 6.13M | filter_params_x, filter_params_y, |
1298 | 6.13M | subpel_x_qn, subpel_y_qn, conv_params, bd); |
1299 | 6.13M | } |
1300 | 8.37M | } |
1301 | | #endif // CONFIG_AV1_HIGHBITDEPTH |
1302 | | |
1303 | | // Note: Fixed-size intermediate buffers place limits on the parameters |
1304 | | // of some functions. 2d filtering proceeds in 2 steps: |
1305 | | // (1) Interpolate horizontally into an intermediate buffer, temp. |
1306 | | // (2) Interpolate temp vertically to derive the sub-pixel result. |
1307 | | // Deriving the maximum number of rows in the temp buffer (263): |
1308 | | // --Smallest scaling factor is x1/2 ==> y_step_q4 = 32 (Normative). |
1309 | | // --Largest block size is 128x128 pixels. |
1310 | | // --128 rows in the downscaled frame span a distance of (128 - 1) * 32 in the |
1311 | | // original frame (in 1/16th pixel units). |
1312 | | // --Must round up because the block may be located at a sub-pixel position. |
1313 | | // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails. |
1314 | | // --ceil(((128 - 1) * 32 + 15) / 16) + 8 = 255 + 8 = 263. |
1315 | | #define WIENER_MAX_EXT_SIZE 263 |
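The bound can be sanity-checked against the worst case of the intermediate-height formula used by the Wiener paths further down (the `+ SUBPEL_TAPS` variant in `av1_highbd_wiener_convolve_add_src_c`). The constants in the sketch below simply restate the derivation in the comment and are not pulled from a libaom header.

```c
#include <assert.h>
#include <stdio.h>

/* Worst case of the intermediate-height formula, restated with the values
 * from the derivation above: 128-row block, y_step_q4 = 32, y0_q4 up to 15,
 * 8 taps, 4 sub-pel bits (the >> 4). */
enum { MAX_BLOCK_ROWS = 128, MAX_Y_STEP_Q4 = 32, MAX_Y0_Q4 = 15, TAPS = 8 };

int main(void) {
  const int worst =
      (((MAX_BLOCK_ROWS - 1) * MAX_Y_STEP_Q4 + MAX_Y0_Q4) >> 4) + TAPS;
  assert(worst <= 263); /* 254 + 8 = 262 <= WIENER_MAX_EXT_SIZE */
  printf("worst-case intermediate height: %d\n", worst);
  return 0;
}
```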
1316 | | |
1317 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1318 | 0 | static inline int horz_scalar_product(const uint8_t *a, const int16_t *b) { |
1319 | 0 | int sum = 0; |
1320 | 0 | for (int k = 0; k < SUBPEL_TAPS; ++k) sum += a[k] * b[k]; |
1321 | 0 | return sum; |
1322 | 0 | } |
1323 | | |
1324 | | #if CONFIG_AV1_HIGHBITDEPTH |
1325 | | static inline int highbd_horz_scalar_product(const uint16_t *a, |
1326 | 0 | const int16_t *b) { |
1327 | 0 | int sum = 0; |
1328 | 0 | for (int k = 0; k < SUBPEL_TAPS; ++k) sum += a[k] * b[k]; |
1329 | 0 | return sum; |
1330 | 0 | } |
1331 | | #endif |
1332 | | |
1333 | | static inline int highbd_vert_scalar_product(const uint16_t *a, |
1334 | | ptrdiff_t a_stride, |
1335 | 0 | const int16_t *b) { |
1336 | 0 | int sum = 0; |
1337 | 0 | for (int k = 0; k < SUBPEL_TAPS; ++k) sum += a[k * a_stride] * b[k]; |
1338 | 0 | return sum; |
1339 | 0 | } |
1340 | | |
1341 | 0 | static const InterpKernel *get_filter_base(const int16_t *filter) { |
1342 | | // NOTE: This assumes that the filter table is 256-byte aligned. |
1343 | | // TODO(agrange) Modify to make independent of table alignment. |
1344 | 0 | return (const InterpKernel *)(((intptr_t)filter) & ~((intptr_t)0xFF)); |
1345 | 0 | } |
1346 | | |
1347 | 0 | static int get_filter_offset(const int16_t *f, const InterpKernel *base) { |
1348 | 0 | return (int)((const InterpKernel *)(intptr_t)f - base); |
1349 | 0 | } |
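`get_filter_base()` and `get_filter_offset()` rely on the interpolation-kernel table being exactly 256 bytes (16 phases of 8 `int16_t` taps) and 256-byte aligned, so masking off the low eight address bits of any pointer into the table recovers its start. Below is a hedged, self-contained sketch of the same trick using a stand-in kernel type; over-alignment via `alignas(256)` is assumed to be supported by the compiler, and none of the names are libaom identifiers.

```c
#include <assert.h>
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for InterpKernel: 16 phases x 8 taps of int16_t is exactly 256
 * bytes, so a pointer anywhere inside a 256-byte-aligned table maps back to
 * the table start by clearing the low eight address bits. */
typedef int16_t Kernel[8];

static const Kernel *kernel_base(const int16_t *f) {
  return (const Kernel *)(((intptr_t)f) & ~(intptr_t)0xFF);
}

static int kernel_offset(const int16_t *f, const Kernel *base) {
  return (int)((const Kernel *)(intptr_t)f - base);
}

int main(void) {
  alignas(256) static const Kernel table[16] = { { 0, 0, 0, 128, 0, 0, 0, 0 } };
  const int16_t *f = table[5];         /* pointer to phase 5 of the table */
  const Kernel *base = kernel_base(f); /* recovers &table[0] */
  assert(base == &table[0]);
  assert(kernel_offset(f, base) == 5);
  printf("offset = %d\n", kernel_offset(f, base));
  return 0;
}
```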
1350 | | |
1351 | | static void convolve_add_src_horiz_hip(const uint8_t *src, ptrdiff_t src_stride, |
1352 | | uint16_t *dst, ptrdiff_t dst_stride, |
1353 | | const InterpKernel *x_filters, int x0_q4, |
1354 | | int x_step_q4, int w, int h, |
1355 | 0 | int round0_bits) { |
1356 | 0 | const int bd = 8; |
1357 | 0 | src -= SUBPEL_TAPS / 2 - 1; |
1358 | 0 | for (int y = 0; y < h; ++y) { |
1359 | 0 | int x_q4 = x0_q4; |
1360 | 0 | for (int x = 0; x < w; ++x) { |
1361 | 0 | const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; |
1362 | 0 | const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK]; |
1363 | 0 | const int rounding = ((int)src_x[SUBPEL_TAPS / 2 - 1] << FILTER_BITS) + |
1364 | 0 | (1 << (bd + FILTER_BITS - 1)); |
1365 | 0 | const int sum = horz_scalar_product(src_x, x_filter) + rounding; |
1366 | 0 | dst[x] = (uint16_t)clamp(ROUND_POWER_OF_TWO(sum, round0_bits), 0, |
1367 | 0 | WIENER_CLAMP_LIMIT(round0_bits, bd) - 1); |
1368 | 0 | x_q4 += x_step_q4; |
1369 | 0 | } |
1370 | 0 | src += src_stride; |
1371 | 0 | dst += dst_stride; |
1372 | 0 | } |
1373 | 0 | } |
1374 | | |
1375 | | static void convolve_add_src_vert_hip(const uint16_t *src, ptrdiff_t src_stride, |
1376 | | uint8_t *dst, ptrdiff_t dst_stride, |
1377 | | const InterpKernel *y_filters, int y0_q4, |
1378 | | int y_step_q4, int w, int h, |
1379 | 0 | int round1_bits) { |
1380 | 0 | const int bd = 8; |
1381 | 0 | src -= src_stride * (SUBPEL_TAPS / 2 - 1); |
1382 | |
1383 | 0 | for (int x = 0; x < w; ++x) { |
1384 | 0 | int y_q4 = y0_q4; |
1385 | 0 | for (int y = 0; y < h; ++y) { |
1386 | 0 | const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; |
1387 | 0 | const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK]; |
1388 | 0 | const int rounding = |
1389 | 0 | ((int)src_y[(SUBPEL_TAPS / 2 - 1) * src_stride] << FILTER_BITS) - |
1390 | 0 | (1 << (bd + round1_bits - 1)); |
1391 | 0 | const int sum = |
1392 | 0 | highbd_vert_scalar_product(src_y, src_stride, y_filter) + rounding; |
1393 | 0 | dst[y * dst_stride] = clip_pixel(ROUND_POWER_OF_TWO(sum, round1_bits)); |
1394 | 0 | y_q4 += y_step_q4; |
1395 | 0 | } |
1396 | 0 | ++src; |
1397 | 0 | ++dst; |
1398 | 0 | } |
1399 | 0 | } |
1400 | | |
1401 | | void av1_wiener_convolve_add_src_c(const uint8_t *src, ptrdiff_t src_stride, |
1402 | | uint8_t *dst, ptrdiff_t dst_stride, |
1403 | | const int16_t *filter_x, int x_step_q4, |
1404 | | const int16_t *filter_y, int y_step_q4, |
1405 | | int w, int h, |
1406 | 0 | const WienerConvolveParams *conv_params) { |
1407 | 0 | const InterpKernel *const filters_x = get_filter_base(filter_x); |
1408 | 0 | const int x0_q4 = get_filter_offset(filter_x, filters_x); |
1409 | |
1410 | 0 | const InterpKernel *const filters_y = get_filter_base(filter_y); |
1411 | 0 | const int y0_q4 = get_filter_offset(filter_y, filters_y); |
1412 | |
1413 | 0 | uint16_t temp[WIENER_MAX_EXT_SIZE * MAX_SB_SIZE]; |
1414 | 0 | const int intermediate_height = |
1415 | 0 | (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS - 1; |
1416 | 0 | memset(temp + (intermediate_height * MAX_SB_SIZE), 0, MAX_SB_SIZE); |
1417 | |
1418 | 0 | assert(w <= MAX_SB_SIZE); |
1419 | 0 | assert(h <= MAX_SB_SIZE); |
1420 | 0 | assert(y_step_q4 <= 32); |
1421 | 0 | assert(x_step_q4 <= 32); |
1422 | | |
1423 | 0 | convolve_add_src_horiz_hip(src - src_stride * (SUBPEL_TAPS / 2 - 1), |
1424 | 0 | src_stride, temp, MAX_SB_SIZE, filters_x, x0_q4, |
1425 | 0 | x_step_q4, w, intermediate_height, |
1426 | 0 | conv_params->round_0); |
1427 | 0 | convolve_add_src_vert_hip(temp + MAX_SB_SIZE * (SUBPEL_TAPS / 2 - 1), |
1428 | 0 | MAX_SB_SIZE, dst, dst_stride, filters_y, y0_q4, |
1429 | 0 | y_step_q4, w, h, conv_params->round_1); |
1430 | 0 | } |
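`av1_wiener_convolve_add_src_c()` is the usual separable two-pass arrangement: the horizontal pass fills a fixed-size `uint16_t` scratch buffer that is tall enough to cover the vertical filter footprint, and the vertical pass then consumes that buffer column by column. The sketch below shows the same structure with a toy 3-tap box filter standing in for the Wiener kernels; sizes and names are illustrative only and are not libaom identifiers.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two-pass separable filtering sketch: filter rows into an intermediate
 * buffer that is (TAPS - 1) rows taller than the output, then filter that
 * buffer down the columns. */
enum { W = 4, H = 4, TAPS = 3, IM_H = H + TAPS - 1, STRIDE = 8 };

static void horiz_pass(const uint8_t *src, int src_stride, uint16_t *tmp) {
  for (int y = 0; y < IM_H; ++y) /* extra rows feed the vertical taps */
    for (int x = 0; x < W; ++x) {
      int sum = 0;
      for (int k = 0; k < TAPS; ++k) sum += src[y * src_stride + x + k];
      tmp[y * STRIDE + x] = (uint16_t)sum; /* unnormalized row sum */
    }
}

static void vert_pass(const uint16_t *tmp, uint8_t *dst, int dst_stride) {
  for (int y = 0; y < H; ++y)
    for (int x = 0; x < W; ++x) {
      int sum = 0;
      for (int k = 0; k < TAPS; ++k) sum += tmp[(y + k) * STRIDE + x];
      /* Normalize both box passes at once. */
      dst[y * dst_stride + x] = (uint8_t)(sum / (TAPS * TAPS));
    }
}

int main(void) {
  uint8_t src[IM_H * 16], dst[H * W];
  uint16_t tmp[IM_H * STRIDE];
  memset(src, 100, sizeof(src)); /* flat 100-valued patch */
  horiz_pass(src, 16, tmp);
  vert_pass(tmp, dst, W);
  printf("dst[0] = %d\n", dst[0]); /* 100: box filter of a flat patch */
  return 0;
}
```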
1431 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1432 | | |
1433 | | #if CONFIG_AV1_HIGHBITDEPTH |
1434 | | #if !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1435 | | static void highbd_convolve_add_src_horiz_hip( |
1436 | | const uint8_t *src8, ptrdiff_t src_stride, uint16_t *dst, |
1437 | | ptrdiff_t dst_stride, const InterpKernel *x_filters, int x0_q4, |
1438 | 0 | int x_step_q4, int w, int h, int round0_bits, int bd) { |
1439 | 0 | const int extraprec_clamp_limit = WIENER_CLAMP_LIMIT(round0_bits, bd); |
1440 | 0 | uint16_t *src = CONVERT_TO_SHORTPTR(src8); |
1441 | 0 | src -= SUBPEL_TAPS / 2 - 1; |
1442 | 0 | for (int y = 0; y < h; ++y) { |
1443 | 0 | int x_q4 = x0_q4; |
1444 | 0 | for (int x = 0; x < w; ++x) { |
1445 | 0 | const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS]; |
1446 | 0 | const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK]; |
1447 | 0 | const int rounding = ((int)src_x[SUBPEL_TAPS / 2 - 1] << FILTER_BITS) + |
1448 | 0 | (1 << (bd + FILTER_BITS - 1)); |
1449 | 0 | const int sum = highbd_horz_scalar_product(src_x, x_filter) + rounding; |
1450 | 0 | dst[x] = (uint16_t)clamp(ROUND_POWER_OF_TWO(sum, round0_bits), 0, |
1451 | 0 | extraprec_clamp_limit - 1); |
1452 | 0 | x_q4 += x_step_q4; |
1453 | 0 | } |
1454 | 0 | src += src_stride; |
1455 | 0 | dst += dst_stride; |
1456 | 0 | } |
1457 | 0 | } |
1458 | | |
1459 | | static void highbd_convolve_add_src_vert_hip( |
1460 | | const uint16_t *src, ptrdiff_t src_stride, uint8_t *dst8, |
1461 | | ptrdiff_t dst_stride, const InterpKernel *y_filters, int y0_q4, |
1462 | 0 | int y_step_q4, int w, int h, int round1_bits, int bd) { |
1463 | 0 | uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); |
1464 | 0 | src -= src_stride * (SUBPEL_TAPS / 2 - 1); |
1465 | 0 | for (int x = 0; x < w; ++x) { |
1466 | 0 | int y_q4 = y0_q4; |
1467 | 0 | for (int y = 0; y < h; ++y) { |
1468 | 0 | const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride]; |
1469 | 0 | const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK]; |
1470 | 0 | const int rounding = |
1471 | 0 | ((int)src_y[(SUBPEL_TAPS / 2 - 1) * src_stride] << FILTER_BITS) - |
1472 | 0 | (1 << (bd + round1_bits - 1)); |
1473 | 0 | const int sum = |
1474 | 0 | highbd_vert_scalar_product(src_y, src_stride, y_filter) + rounding; |
1475 | 0 | dst[y * dst_stride] = |
1476 | 0 | clip_pixel_highbd(ROUND_POWER_OF_TWO(sum, round1_bits), bd); |
1477 | 0 | y_q4 += y_step_q4; |
1478 | 0 | } |
1479 | 0 | ++src; |
1480 | 0 | ++dst; |
1481 | 0 | } |
1482 | 0 | } |
1483 | | |
1484 | | void av1_highbd_wiener_convolve_add_src_c( |
1485 | | const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, |
1486 | | ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, |
1487 | | const int16_t *filter_y, int y_step_q4, int w, int h, |
1488 | 0 | const WienerConvolveParams *conv_params, int bd) { |
1489 | 0 | const InterpKernel *const filters_x = get_filter_base(filter_x); |
1490 | 0 | const int x0_q4 = get_filter_offset(filter_x, filters_x); |
1491 | |
1492 | 0 | const InterpKernel *const filters_y = get_filter_base(filter_y); |
1493 | 0 | const int y0_q4 = get_filter_offset(filter_y, filters_y); |
1494 | |
1495 | 0 | uint16_t temp[WIENER_MAX_EXT_SIZE * MAX_SB_SIZE]; |
1496 | 0 | const int intermediate_height = |
1497 | 0 | (((h - 1) * y_step_q4 + y0_q4) >> SUBPEL_BITS) + SUBPEL_TAPS; |
1498 | |
1499 | 0 | assert(w <= MAX_SB_SIZE); |
1500 | 0 | assert(h <= MAX_SB_SIZE); |
1501 | 0 | assert(y_step_q4 <= 32); |
1502 | 0 | assert(x_step_q4 <= 32); |
1503 | 0 | assert(bd + FILTER_BITS - conv_params->round_0 + 2 <= 16); |
1504 | | |
1505 | 0 | highbd_convolve_add_src_horiz_hip(src - src_stride * (SUBPEL_TAPS / 2 - 1), |
1506 | 0 | src_stride, temp, MAX_SB_SIZE, filters_x, |
1507 | 0 | x0_q4, x_step_q4, w, intermediate_height, |
1508 | 0 | conv_params->round_0, bd); |
1509 | 0 | highbd_convolve_add_src_vert_hip( |
1510 | 0 | temp + MAX_SB_SIZE * (SUBPEL_TAPS / 2 - 1), MAX_SB_SIZE, dst, dst_stride, |
1511 | 0 | filters_y, y0_q4, y_step_q4, w, h, conv_params->round_1, bd); |
1512 | 0 | } |
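The extra assert in the high-bit-depth variant, `bd + FILTER_BITS - conv_params->round_0 + 2 <= 16`, keeps the clamped output of the horizontal pass within the `uint16_t` scratch buffer. A worked instance with assumed worst-case values (12-bit input, `FILTER_BITS` of 7), restated here only for illustration:

```c
#include <assert.h>

int main(void) {
  /* Assumed worst case: 12-bit source, 7-bit filter precision.
   * The inequality then forces round_0 to be at least 5. */
  const int bd = 12, filter_bits = 7, round_0 = 5;
  assert(bd + filter_bits - round_0 + 2 <= 16); /* 12 + 7 - 5 + 2 == 16 */
  return 0;
}
```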
1513 | | #endif // !CONFIG_REALTIME_ONLY || CONFIG_AV1_DECODER |
1514 | | #endif // CONFIG_AV1_HIGHBITDEPTH |