/src/aom/av1/common/reconinter.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2016, Alliance for Open Media. All rights reserved |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <assert.h> |
13 | | #include <stdio.h> |
14 | | #include <limits.h> |
15 | | |
16 | | #include "config/aom_config.h" |
17 | | #include "config/aom_dsp_rtcd.h" |
18 | | #include "config/aom_scale_rtcd.h" |
19 | | |
20 | | #include "aom/aom_integer.h" |
21 | | #include "aom_dsp/blend.h" |
22 | | |
23 | | #include "av1/common/av1_common_int.h" |
24 | | #include "av1/common/blockd.h" |
25 | | #include "av1/common/mvref_common.h" |
26 | | #include "av1/common/obmc.h" |
27 | | #include "av1/common/reconinter.h" |
28 | | #include "av1/common/reconintra.h" |
29 | | |
30 | | // This function will determine whether or not to create a warped |
31 | | // prediction. |
32 | | int av1_allow_warp(const MB_MODE_INFO *const mbmi, |
33 | | const WarpTypesAllowed *const warp_types, |
34 | | const WarpedMotionParams *const gm_params, |
35 | | int build_for_obmc, const struct scale_factors *const sf, |
36 | 0 | WarpedMotionParams *final_warp_params) { |
37 | | // Note: As per the spec, we must test the fixed point scales here, which are |
38 | | // at a higher precision (1 << 14) than the xs and ys in subpel_params (that |
39 | | // have 1 << 10 precision). |
40 | 0 | if (av1_is_scaled(sf)) return 0; |
41 | | |
42 | 0 | if (final_warp_params != NULL) *final_warp_params = default_warp_params; |
43 | |
|
44 | 0 | if (build_for_obmc) return 0; |
45 | | |
46 | 0 | if (warp_types->local_warp_allowed && !mbmi->wm_params.invalid) { |
47 | 0 | if (final_warp_params != NULL) |
48 | 0 | memcpy(final_warp_params, &mbmi->wm_params, sizeof(*final_warp_params)); |
49 | 0 | return 1; |
50 | 0 | } else if (warp_types->global_warp_allowed && !gm_params->invalid) { |
51 | 0 | if (final_warp_params != NULL) |
52 | 0 | memcpy(final_warp_params, gm_params, sizeof(*final_warp_params)); |
53 | 0 | return 1; |
54 | 0 | } |
55 | | |
56 | 0 | return 0; |
57 | 0 | } |
58 | | |
59 | | void av1_init_inter_params(InterPredParams *inter_pred_params, int block_width, |
60 | | int block_height, int pix_row, int pix_col, |
61 | | int subsampling_x, int subsampling_y, int bit_depth, |
62 | | int use_hbd_buf, int is_intrabc, |
63 | | const struct scale_factors *sf, |
64 | | const struct buf_2d *ref_buf, |
65 | 24.6k | int_interpfilters interp_filters) { |
66 | 24.6k | inter_pred_params->block_width = block_width; |
67 | 24.6k | inter_pred_params->block_height = block_height; |
68 | 24.6k | inter_pred_params->pix_row = pix_row; |
69 | 24.6k | inter_pred_params->pix_col = pix_col; |
70 | 24.6k | inter_pred_params->subsampling_x = subsampling_x; |
71 | 24.6k | inter_pred_params->subsampling_y = subsampling_y; |
72 | 24.6k | inter_pred_params->bit_depth = bit_depth; |
73 | 24.6k | inter_pred_params->use_hbd_buf = use_hbd_buf; |
74 | 24.6k | inter_pred_params->is_intrabc = is_intrabc; |
75 | 24.6k | inter_pred_params->scale_factors = sf; |
76 | 24.6k | inter_pred_params->ref_frame_buf = *ref_buf; |
77 | 24.6k | inter_pred_params->mode = TRANSLATION_PRED; |
78 | 24.6k | inter_pred_params->comp_mode = UNIFORM_SINGLE; |
79 | | |
80 | 24.6k | if (is_intrabc) { |
81 | 24.6k | inter_pred_params->interp_filter_params[0] = &av1_intrabc_filter_params; |
82 | 24.6k | inter_pred_params->interp_filter_params[1] = &av1_intrabc_filter_params; |
83 | 24.6k | } else { |
84 | 0 | inter_pred_params->interp_filter_params[0] = |
85 | 0 | av1_get_interp_filter_params_with_block_size( |
86 | 0 | interp_filters.as_filters.x_filter, block_width); |
87 | 0 | inter_pred_params->interp_filter_params[1] = |
88 | 0 | av1_get_interp_filter_params_with_block_size( |
89 | 0 | interp_filters.as_filters.y_filter, block_height); |
90 | 0 | } |
91 | 24.6k | } |
92 | | |
93 | 0 | void av1_init_comp_mode(InterPredParams *inter_pred_params) { |
94 | 0 | inter_pred_params->comp_mode = UNIFORM_COMP; |
95 | 0 | } |
96 | | |
97 | | void av1_init_warp_params(InterPredParams *inter_pred_params, |
98 | | const WarpTypesAllowed *warp_types, int ref, |
99 | 24.6k | const MACROBLOCKD *xd, const MB_MODE_INFO *mi) { |
100 | 24.6k | if (inter_pred_params->block_height < 8 || inter_pred_params->block_width < 8) |
101 | 12.8k | return; |
102 | | |
103 | 11.8k | if (xd->cur_frame_force_integer_mv) return; |
104 | | |
105 | 0 | if (av1_allow_warp(mi, warp_types, &xd->global_motion[mi->ref_frame[ref]], 0, |
106 | 0 | inter_pred_params->scale_factors, |
107 | 0 | &inter_pred_params->warp_params)) { |
108 | | #if CONFIG_REALTIME_ONLY |
109 | | aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_FEATURE, |
110 | | "Warped motion is disabled in realtime only build."); |
111 | | #endif |
112 | 0 | inter_pred_params->mode = WARP_PRED; |
113 | 0 | } |
114 | 0 | } |
115 | | |
// Generate one inter prediction for a single plane, dispatching on the
// prediction mode (plain translation vs. warped motion) and, for
// translation, on whether the buffers are high bit depth.
void av1_make_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst,
                              int dst_stride,
                              InterPredParams *inter_pred_params,
                              const SubpelParams *subpel_params) {
  // A compound prediction must have an intermediate (d16) destination set up.
  assert(IMPLIES(inter_pred_params->conv_params.is_compound,
                 inter_pred_params->conv_params.dst != NULL));

  if (inter_pred_params->mode == TRANSLATION_PRED) {
#if CONFIG_AV1_HIGHBITDEPTH
    if (inter_pred_params->use_hbd_buf) {
      highbd_inter_predictor(src, src_stride, dst, dst_stride, subpel_params,
                             inter_pred_params->block_width,
                             inter_pred_params->block_height,
                             &inter_pred_params->conv_params,
                             inter_pred_params->interp_filter_params,
                             inter_pred_params->bit_depth);
    } else {
      inter_predictor(src, src_stride, dst, dst_stride, subpel_params,
                      inter_pred_params->block_width,
                      inter_pred_params->block_height,
                      &inter_pred_params->conv_params,
                      inter_pred_params->interp_filter_params);
    }
#else
    // Low-bit-depth-only build: always use the 8-bit path.
    inter_predictor(src, src_stride, dst, dst_stride, subpel_params,
                    inter_pred_params->block_width,
                    inter_pred_params->block_height,
                    &inter_pred_params->conv_params,
                    inter_pred_params->interp_filter_params);
#endif
  }
#if !CONFIG_REALTIME_ONLY
  // TODO(jingning): av1_warp_plane() can be further cleaned up.
  else if (inter_pred_params->mode == WARP_PRED) {
    av1_warp_plane(
        &inter_pred_params->warp_params, inter_pred_params->use_hbd_buf,
        inter_pred_params->bit_depth, inter_pred_params->ref_frame_buf.buf0,
        inter_pred_params->ref_frame_buf.width,
        inter_pred_params->ref_frame_buf.height,
        inter_pred_params->ref_frame_buf.stride, dst,
        inter_pred_params->pix_col, inter_pred_params->pix_row,
        inter_pred_params->block_width, inter_pred_params->block_height,
        dst_stride, inter_pred_params->subsampling_x,
        inter_pred_params->subsampling_y, &inter_pred_params->conv_params);
  }
#endif
  else {
    // Realtime-only builds reach here for WARP_PRED as well.
    assert(0 && "Unsupported inter_pred_params->mode");
  }
}
166 | | |
// 1-D prototype rows used by init_wedge_master_masks() to build the master
// wedge masks.  Values are blending weights out of 64.  The even/odd pair
// of oblique profiles is alternated row by row (with the shift decremented
// every two rows) to form the oblique edge.
static const uint8_t wedge_master_oblique_odd[MASK_MASTER_SIZE] = {
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  1,  2,  6,  18,
  37, 53, 60, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
  64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
};
static const uint8_t wedge_master_oblique_even[MASK_MASTER_SIZE] = {
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  1,  4,  11, 27,
  46, 58, 62, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
  64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
};
// Profile used unchanged on every row of the vertical master mask.
static const uint8_t wedge_master_vertical[MASK_MASTER_SIZE] = {
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  2,  7,  21,
  43, 57, 62, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
  64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
};
185 | | |
186 | | static AOM_INLINE void shift_copy(const uint8_t *src, uint8_t *dst, int shift, |
187 | 128 | int width) { |
188 | 128 | if (shift >= 0) { |
189 | 66 | memcpy(dst + shift, src, width - shift); |
190 | 66 | memset(dst, src[0], shift); |
191 | 66 | } else { |
192 | 62 | shift = -shift; |
193 | 62 | memcpy(dst, src + shift, width - shift); |
194 | 62 | memset(dst + width - shift, src[width - 1], shift); |
195 | 62 | } |
196 | 128 | } |
197 | | |
/* clang-format off */
// Per-[block size][wedge type] sign flips.  get_wedge_mask_inplace() XORs
// the requested sign with this flag to select between the master mask and
// its complement.  Rows for block sizes without wedge support are zero
// placeholders.
DECLARE_ALIGNED(16, static uint8_t,
                wedge_signflip_lookup[BLOCK_SIZES_ALL][MAX_WEDGE_TYPES]) = {
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, },
  { 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, },
  { 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, },
  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, },
  { 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, },
  { 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, },
  { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, },
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, },
  { 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, },
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },  // not used
};
/* clang-format on */
225 | | |
// [negative][direction]
// Master wedge masks, one 64x64 mask per direction; index [1] holds the
// complement of index [0].  Filled in by init_wedge_master_masks().
DECLARE_ALIGNED(
    16, static uint8_t,
    wedge_mask_obl[2][WEDGE_DIRECTIONS][MASK_MASTER_SIZE * MASK_MASTER_SIZE]);

// 4 * MAX_WEDGE_SQUARE is an easy to compute and fairly tight upper bound
// on the sum of all mask sizes up to and including MAX_WEDGE_SQUARE.
// Backing store for the contiguous per-block-size masks built by
// init_wedge_masks().
DECLARE_ALIGNED(16, static uint8_t,
                wedge_mask_buf[2 * MAX_WEDGE_TYPES * 4 * MAX_WEDGE_SQUARE]);

// Smooth interintra blending masks, one per [mode][block size], built by
// init_smooth_interintra_masks().
DECLARE_ALIGNED(16, static uint8_t,
                smooth_interintra_mask_buf[INTERINTRA_MODES][BLOCK_SIZES_ALL]
                                          [MAX_WEDGE_SQUARE]);

// Per-block-size pointers into wedge_mask_buf, indexed [sign][wedge type].
static wedge_masks_type wedge_masks[BLOCK_SIZES_ALL][2];
241 | | |
// Wedge codebooks: 16 entries of { direction, x_offset, y_offset } with the
// offsets in units of 1/8 of the block dimension, giving the point the
// wedge boundary passes through.  Variant for blocks taller than wide.
static const wedge_code_type wedge_codebook_16_hgtw[16] = {
  { WEDGE_OBLIQUE27, 4, 4 },  { WEDGE_OBLIQUE63, 4, 4 },
  { WEDGE_OBLIQUE117, 4, 4 }, { WEDGE_OBLIQUE153, 4, 4 },
  { WEDGE_HORIZONTAL, 4, 2 }, { WEDGE_HORIZONTAL, 4, 4 },
  { WEDGE_HORIZONTAL, 4, 6 }, { WEDGE_VERTICAL, 4, 4 },
  { WEDGE_OBLIQUE27, 4, 2 },  { WEDGE_OBLIQUE27, 4, 6 },
  { WEDGE_OBLIQUE153, 4, 2 }, { WEDGE_OBLIQUE153, 4, 6 },
  { WEDGE_OBLIQUE63, 2, 4 },  { WEDGE_OBLIQUE63, 6, 4 },
  { WEDGE_OBLIQUE117, 2, 4 }, { WEDGE_OBLIQUE117, 6, 4 },
};

// Codebook variant for blocks wider than tall.
static const wedge_code_type wedge_codebook_16_hltw[16] = {
  { WEDGE_OBLIQUE27, 4, 4 },  { WEDGE_OBLIQUE63, 4, 4 },
  { WEDGE_OBLIQUE117, 4, 4 }, { WEDGE_OBLIQUE153, 4, 4 },
  { WEDGE_VERTICAL, 2, 4 },   { WEDGE_VERTICAL, 4, 4 },
  { WEDGE_VERTICAL, 6, 4 },   { WEDGE_HORIZONTAL, 4, 4 },
  { WEDGE_OBLIQUE27, 4, 2 },  { WEDGE_OBLIQUE27, 4, 6 },
  { WEDGE_OBLIQUE153, 4, 2 }, { WEDGE_OBLIQUE153, 4, 6 },
  { WEDGE_OBLIQUE63, 2, 4 },  { WEDGE_OBLIQUE63, 6, 4 },
  { WEDGE_OBLIQUE117, 2, 4 }, { WEDGE_OBLIQUE117, 6, 4 },
};

// Codebook variant for square blocks.
static const wedge_code_type wedge_codebook_16_heqw[16] = {
  { WEDGE_OBLIQUE27, 4, 4 },  { WEDGE_OBLIQUE63, 4, 4 },
  { WEDGE_OBLIQUE117, 4, 4 }, { WEDGE_OBLIQUE153, 4, 4 },
  { WEDGE_HORIZONTAL, 4, 2 }, { WEDGE_HORIZONTAL, 4, 6 },
  { WEDGE_VERTICAL, 2, 4 },   { WEDGE_VERTICAL, 6, 4 },
  { WEDGE_OBLIQUE27, 4, 2 },  { WEDGE_OBLIQUE27, 4, 6 },
  { WEDGE_OBLIQUE153, 4, 2 }, { WEDGE_OBLIQUE153, 4, 6 },
  { WEDGE_OBLIQUE63, 2, 4 },  { WEDGE_OBLIQUE63, 6, 4 },
  { WEDGE_OBLIQUE117, 2, 4 }, { WEDGE_OBLIQUE117, 6, 4 },
};
274 | | |
// Per-block-size wedge parameters: wedge-type count, codebook, sign-flip
// table, and the per-size mask pointers.  Only the square and 1:2 / 2:1
// sizes from 8x8 through 32x32 support wedge prediction; the other entries
// are { 0, NULL, NULL, NULL }.
const wedge_params_type av1_wedge_params_lookup[BLOCK_SIZES_ALL] = {
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { MAX_WEDGE_TYPES, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_8X8],
    wedge_masks[BLOCK_8X8] },
  { MAX_WEDGE_TYPES, wedge_codebook_16_hgtw, wedge_signflip_lookup[BLOCK_8X16],
    wedge_masks[BLOCK_8X16] },
  { MAX_WEDGE_TYPES, wedge_codebook_16_hltw, wedge_signflip_lookup[BLOCK_16X8],
    wedge_masks[BLOCK_16X8] },
  { MAX_WEDGE_TYPES, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_16X16],
    wedge_masks[BLOCK_16X16] },
  { MAX_WEDGE_TYPES, wedge_codebook_16_hgtw, wedge_signflip_lookup[BLOCK_16X32],
    wedge_masks[BLOCK_16X32] },
  { MAX_WEDGE_TYPES, wedge_codebook_16_hltw, wedge_signflip_lookup[BLOCK_32X16],
    wedge_masks[BLOCK_32X16] },
  { MAX_WEDGE_TYPES, wedge_codebook_16_heqw, wedge_signflip_lookup[BLOCK_32X32],
    wedge_masks[BLOCK_32X32] },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
  { MAX_WEDGE_TYPES, wedge_codebook_16_hgtw, wedge_signflip_lookup[BLOCK_8X32],
    wedge_masks[BLOCK_8X32] },
  { MAX_WEDGE_TYPES, wedge_codebook_16_hltw, wedge_signflip_lookup[BLOCK_32X8],
    wedge_masks[BLOCK_32X8] },
  { 0, NULL, NULL, NULL },
  { 0, NULL, NULL, NULL },
};
308 | | |
309 | | static const uint8_t *get_wedge_mask_inplace(int wedge_index, int neg, |
310 | 576 | BLOCK_SIZE sb_type) { |
311 | 576 | const uint8_t *master; |
312 | 576 | const int bh = block_size_high[sb_type]; |
313 | 576 | const int bw = block_size_wide[sb_type]; |
314 | 576 | const wedge_code_type *a = |
315 | 576 | av1_wedge_params_lookup[sb_type].codebook + wedge_index; |
316 | 576 | int woff, hoff; |
317 | 576 | const uint8_t wsignflip = |
318 | 576 | av1_wedge_params_lookup[sb_type].signflip[wedge_index]; |
319 | | |
320 | 576 | assert(wedge_index >= 0 && wedge_index < get_wedge_types_lookup(sb_type)); |
321 | 576 | woff = (a->x_offset * bw) >> 3; |
322 | 576 | hoff = (a->y_offset * bh) >> 3; |
323 | 576 | master = wedge_mask_obl[neg ^ wsignflip][a->direction] + |
324 | 576 | MASK_MASTER_STRIDE * (MASK_MASTER_SIZE / 2 - hoff) + |
325 | 576 | MASK_MASTER_SIZE / 2 - woff; |
326 | 576 | return master; |
327 | 576 | } |
328 | | |
329 | | const uint8_t *av1_get_compound_type_mask( |
330 | 0 | const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type) { |
331 | 0 | (void)sb_type; |
332 | 0 | switch (comp_data->type) { |
333 | 0 | case COMPOUND_WEDGE: |
334 | 0 | return av1_get_contiguous_soft_mask(comp_data->wedge_index, |
335 | 0 | comp_data->wedge_sign, sb_type); |
336 | 0 | default: return comp_data->seg_mask; |
337 | 0 | } |
338 | 0 | } |
339 | | |
340 | | static AOM_INLINE void diffwtd_mask_d16( |
341 | | uint8_t *mask, int which_inverse, int mask_base, const CONV_BUF_TYPE *src0, |
342 | | int src0_stride, const CONV_BUF_TYPE *src1, int src1_stride, int h, int w, |
343 | 0 | ConvolveParams *conv_params, int bd) { |
344 | 0 | int round = |
345 | 0 | 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1 + (bd - 8); |
346 | 0 | int i, j, m, diff; |
347 | 0 | for (i = 0; i < h; ++i) { |
348 | 0 | for (j = 0; j < w; ++j) { |
349 | 0 | diff = abs(src0[i * src0_stride + j] - src1[i * src1_stride + j]); |
350 | 0 | diff = ROUND_POWER_OF_TWO(diff, round); |
351 | 0 | m = clamp(mask_base + (diff / DIFF_FACTOR), 0, AOM_BLEND_A64_MAX_ALPHA); |
352 | 0 | mask[i * w + j] = which_inverse ? AOM_BLEND_A64_MAX_ALPHA - m : m; |
353 | 0 | } |
354 | 0 | } |
355 | 0 | } |
356 | | |
357 | | void av1_build_compound_diffwtd_mask_d16_c( |
358 | | uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const CONV_BUF_TYPE *src0, |
359 | | int src0_stride, const CONV_BUF_TYPE *src1, int src1_stride, int h, int w, |
360 | 0 | ConvolveParams *conv_params, int bd) { |
361 | 0 | switch (mask_type) { |
362 | 0 | case DIFFWTD_38: |
363 | 0 | diffwtd_mask_d16(mask, 0, 38, src0, src0_stride, src1, src1_stride, h, w, |
364 | 0 | conv_params, bd); |
365 | 0 | break; |
366 | 0 | case DIFFWTD_38_INV: |
367 | 0 | diffwtd_mask_d16(mask, 1, 38, src0, src0_stride, src1, src1_stride, h, w, |
368 | 0 | conv_params, bd); |
369 | 0 | break; |
370 | 0 | default: assert(0); |
371 | 0 | } |
372 | 0 | } |
373 | | |
374 | | static AOM_INLINE void diffwtd_mask(uint8_t *mask, int which_inverse, |
375 | | int mask_base, const uint8_t *src0, |
376 | | int src0_stride, const uint8_t *src1, |
377 | 0 | int src1_stride, int h, int w) { |
378 | 0 | int i, j, m, diff; |
379 | 0 | for (i = 0; i < h; ++i) { |
380 | 0 | for (j = 0; j < w; ++j) { |
381 | 0 | diff = |
382 | 0 | abs((int)src0[i * src0_stride + j] - (int)src1[i * src1_stride + j]); |
383 | 0 | m = clamp(mask_base + (diff / DIFF_FACTOR), 0, AOM_BLEND_A64_MAX_ALPHA); |
384 | 0 | mask[i * w + j] = which_inverse ? AOM_BLEND_A64_MAX_ALPHA - m : m; |
385 | 0 | } |
386 | 0 | } |
387 | 0 | } |
388 | | |
389 | | void av1_build_compound_diffwtd_mask_c(uint8_t *mask, |
390 | | DIFFWTD_MASK_TYPE mask_type, |
391 | | const uint8_t *src0, int src0_stride, |
392 | | const uint8_t *src1, int src1_stride, |
393 | 0 | int h, int w) { |
394 | 0 | switch (mask_type) { |
395 | 0 | case DIFFWTD_38: |
396 | 0 | diffwtd_mask(mask, 0, 38, src0, src0_stride, src1, src1_stride, h, w); |
397 | 0 | break; |
398 | 0 | case DIFFWTD_38_INV: |
399 | 0 | diffwtd_mask(mask, 1, 38, src0, src0_stride, src1, src1_stride, h, w); |
400 | 0 | break; |
401 | 0 | default: assert(0); |
402 | 0 | } |
403 | 0 | } |
404 | | |
// High-bit-depth diffwtd mask builder.  The four near-identical loops are
// deliberately specialized on (bd == 8, which_inverse) so each can be
// compiled without per-pixel branches or shifts -- do not merge them.
static AOM_FORCE_INLINE void diffwtd_mask_highbd(
    uint8_t *mask, int which_inverse, int mask_base, const uint16_t *src0,
    int src0_stride, const uint16_t *src1, int src1_stride, int h, int w,
    const unsigned int bd) {
  assert(bd >= 8);
  if (bd == 8) {
    // 8-bit content in 16-bit buffers: no normalizing shift needed.
    if (which_inverse) {
      for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
          int diff = abs((int)src0[j] - (int)src1[j]) / DIFF_FACTOR;
          unsigned int m = negative_to_zero(mask_base + diff);
          m = AOMMIN(m, AOM_BLEND_A64_MAX_ALPHA);
          mask[j] = AOM_BLEND_A64_MAX_ALPHA - m;
        }
        src0 += src0_stride;
        src1 += src1_stride;
        mask += w;
      }
    } else {
      for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
          int diff = abs((int)src0[j] - (int)src1[j]) / DIFF_FACTOR;
          unsigned int m = negative_to_zero(mask_base + diff);
          m = AOMMIN(m, AOM_BLEND_A64_MAX_ALPHA);
          mask[j] = m;
        }
        src0 += src0_stride;
        src1 += src1_stride;
        mask += w;
      }
    }
  } else {
    // Higher bit depths: scale the difference back to 8-bit range first.
    const unsigned int bd_shift = bd - 8;
    if (which_inverse) {
      for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
          int diff =
              (abs((int)src0[j] - (int)src1[j]) >> bd_shift) / DIFF_FACTOR;
          unsigned int m = negative_to_zero(mask_base + diff);
          m = AOMMIN(m, AOM_BLEND_A64_MAX_ALPHA);
          mask[j] = AOM_BLEND_A64_MAX_ALPHA - m;
        }
        src0 += src0_stride;
        src1 += src1_stride;
        mask += w;
      }
    } else {
      for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
          int diff =
              (abs((int)src0[j] - (int)src1[j]) >> bd_shift) / DIFF_FACTOR;
          unsigned int m = negative_to_zero(mask_base + diff);
          m = AOMMIN(m, AOM_BLEND_A64_MAX_ALPHA);
          mask[j] = m;
        }
        src0 += src0_stride;
        src1 += src1_stride;
        mask += w;
      }
    }
  }
}
467 | | |
468 | | void av1_build_compound_diffwtd_mask_highbd_c( |
469 | | uint8_t *mask, DIFFWTD_MASK_TYPE mask_type, const uint8_t *src0, |
470 | | int src0_stride, const uint8_t *src1, int src1_stride, int h, int w, |
471 | 0 | int bd) { |
472 | 0 | switch (mask_type) { |
473 | 0 | case DIFFWTD_38: |
474 | 0 | diffwtd_mask_highbd(mask, 0, 38, CONVERT_TO_SHORTPTR(src0), src0_stride, |
475 | 0 | CONVERT_TO_SHORTPTR(src1), src1_stride, h, w, bd); |
476 | 0 | break; |
477 | 0 | case DIFFWTD_38_INV: |
478 | 0 | diffwtd_mask_highbd(mask, 1, 38, CONVERT_TO_SHORTPTR(src0), src0_stride, |
479 | 0 | CONVERT_TO_SHORTPTR(src1), src1_stride, h, w, bd); |
480 | 0 | break; |
481 | 0 | default: assert(0); |
482 | 0 | } |
483 | 0 | } |
484 | | |
485 | 2 | static AOM_INLINE void init_wedge_master_masks() { |
486 | 2 | int i, j; |
487 | 2 | const int w = MASK_MASTER_SIZE; |
488 | 2 | const int h = MASK_MASTER_SIZE; |
489 | 2 | const int stride = MASK_MASTER_STRIDE; |
490 | | // Note: index [0] stores the masters, and [1] its complement. |
491 | | // Generate prototype by shifting the masters |
492 | 2 | int shift = h / 4; |
493 | 66 | for (i = 0; i < h; i += 2) { |
494 | 64 | shift_copy(wedge_master_oblique_even, |
495 | 64 | &wedge_mask_obl[0][WEDGE_OBLIQUE63][i * stride], shift, |
496 | 64 | MASK_MASTER_SIZE); |
497 | 64 | shift--; |
498 | 64 | shift_copy(wedge_master_oblique_odd, |
499 | 64 | &wedge_mask_obl[0][WEDGE_OBLIQUE63][(i + 1) * stride], shift, |
500 | 64 | MASK_MASTER_SIZE); |
501 | 64 | memcpy(&wedge_mask_obl[0][WEDGE_VERTICAL][i * stride], |
502 | 64 | wedge_master_vertical, |
503 | 64 | MASK_MASTER_SIZE * sizeof(wedge_master_vertical[0])); |
504 | 64 | memcpy(&wedge_mask_obl[0][WEDGE_VERTICAL][(i + 1) * stride], |
505 | 64 | wedge_master_vertical, |
506 | 64 | MASK_MASTER_SIZE * sizeof(wedge_master_vertical[0])); |
507 | 64 | } |
508 | | |
509 | 130 | for (i = 0; i < h; ++i) { |
510 | 8.32k | for (j = 0; j < w; ++j) { |
511 | 8.19k | const int msk = wedge_mask_obl[0][WEDGE_OBLIQUE63][i * stride + j]; |
512 | 8.19k | wedge_mask_obl[0][WEDGE_OBLIQUE27][j * stride + i] = msk; |
513 | 8.19k | wedge_mask_obl[0][WEDGE_OBLIQUE117][i * stride + w - 1 - j] = |
514 | 8.19k | wedge_mask_obl[0][WEDGE_OBLIQUE153][(w - 1 - j) * stride + i] = |
515 | 8.19k | (1 << WEDGE_WEIGHT_BITS) - msk; |
516 | 8.19k | wedge_mask_obl[1][WEDGE_OBLIQUE63][i * stride + j] = |
517 | 8.19k | wedge_mask_obl[1][WEDGE_OBLIQUE27][j * stride + i] = |
518 | 8.19k | (1 << WEDGE_WEIGHT_BITS) - msk; |
519 | 8.19k | wedge_mask_obl[1][WEDGE_OBLIQUE117][i * stride + w - 1 - j] = |
520 | 8.19k | wedge_mask_obl[1][WEDGE_OBLIQUE153][(w - 1 - j) * stride + i] = msk; |
521 | 8.19k | const int mskx = wedge_mask_obl[0][WEDGE_VERTICAL][i * stride + j]; |
522 | 8.19k | wedge_mask_obl[0][WEDGE_HORIZONTAL][j * stride + i] = mskx; |
523 | 8.19k | wedge_mask_obl[1][WEDGE_VERTICAL][i * stride + j] = |
524 | 8.19k | wedge_mask_obl[1][WEDGE_HORIZONTAL][j * stride + i] = |
525 | 8.19k | (1 << WEDGE_WEIGHT_BITS) - mskx; |
526 | 8.19k | } |
527 | 128 | } |
528 | 2 | } |
529 | | |
530 | 2 | static AOM_INLINE void init_wedge_masks() { |
531 | 2 | uint8_t *dst = wedge_mask_buf; |
532 | 2 | BLOCK_SIZE bsize; |
533 | 2 | memset(wedge_masks, 0, sizeof(wedge_masks)); |
534 | 46 | for (bsize = BLOCK_4X4; bsize < BLOCK_SIZES_ALL; ++bsize) { |
535 | 44 | const wedge_params_type *wedge_params = &av1_wedge_params_lookup[bsize]; |
536 | 44 | const int wtypes = wedge_params->wedge_types; |
537 | 44 | if (wtypes == 0) continue; |
538 | 18 | const uint8_t *mask; |
539 | 18 | const int bw = block_size_wide[bsize]; |
540 | 18 | const int bh = block_size_high[bsize]; |
541 | 18 | int w; |
542 | 306 | for (w = 0; w < wtypes; ++w) { |
543 | 288 | mask = get_wedge_mask_inplace(w, 0, bsize); |
544 | 288 | aom_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw /* dst_stride */, bw, |
545 | 288 | bh); |
546 | 288 | wedge_params->masks[0][w] = dst; |
547 | 288 | dst += bw * bh; |
548 | | |
549 | 288 | mask = get_wedge_mask_inplace(w, 1, bsize); |
550 | 288 | aom_convolve_copy(mask, MASK_MASTER_STRIDE, dst, bw /* dst_stride */, bw, |
551 | 288 | bh); |
552 | 288 | wedge_params->masks[1][w] = dst; |
553 | 288 | dst += bw * bh; |
554 | 288 | } |
555 | 18 | assert(sizeof(wedge_mask_buf) >= (size_t)(dst - wedge_mask_buf)); |
556 | 18 | } |
557 | 2 | } |
558 | | |
/* clang-format off */
// 1-D interintra blending weights (out of 64), decaying with distance from
// the intra prediction edge.
static const uint8_t ii_weights1d[MAX_SB_SIZE] = {
  60, 58, 56, 54, 52, 50, 48, 47, 45, 44, 42, 41, 39, 38, 37, 35, 34, 33, 32,
  31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 22, 21, 20, 19, 19, 18, 18, 17, 16,
  16, 15, 15, 14, 14, 13, 13, 12, 12, 12, 11, 11, 10, 10, 10,  9,  9,  9,  8,
  8,  8,  8,  7,  7,  7,  7,  6,  6,  6,  6,  6,  5,  5,  5,  5,  5,  4,  4,
  4,  4,  4,  4,  4,  4,  3,  3,  3,  3,  3,  3,  3,  3,  3,  2,  2,  2,  2,
  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  1,  1,  1,  1,  1,  1,  1,  1,
  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1
};
// Index step into ii_weights1d for each block size so the weight curve
// spans the block.  NOTE(review): this table is only read in this file --
// it could likely be declared const; confirm no external writer exists.
static uint8_t ii_size_scales[BLOCK_SIZES_ALL] = {
    32, 16, 16, 16, 8, 8, 8, 4,
    4,  4,  2,  2,  2, 1, 1, 1,
    8,  8,  4,  4,  2, 2
};
/* clang-format on */
575 | | |
576 | | static AOM_INLINE void build_smooth_interintra_mask(uint8_t *mask, int stride, |
577 | | BLOCK_SIZE plane_bsize, |
578 | 112 | INTERINTRA_MODE mode) { |
579 | 112 | int i, j; |
580 | 112 | const int bw = block_size_wide[plane_bsize]; |
581 | 112 | const int bh = block_size_high[plane_bsize]; |
582 | 112 | const int size_scale = ii_size_scales[plane_bsize]; |
583 | | |
584 | 112 | switch (mode) { |
585 | 28 | case II_V_PRED: |
586 | 436 | for (i = 0; i < bh; ++i) { |
587 | 408 | memset(mask, ii_weights1d[i * size_scale], bw * sizeof(mask[0])); |
588 | 408 | mask += stride; |
589 | 408 | } |
590 | 28 | break; |
591 | | |
592 | 28 | case II_H_PRED: |
593 | 436 | for (i = 0; i < bh; ++i) { |
594 | 7.09k | for (j = 0; j < bw; ++j) mask[j] = ii_weights1d[j * size_scale]; |
595 | 408 | mask += stride; |
596 | 408 | } |
597 | 28 | break; |
598 | | |
599 | 28 | case II_SMOOTH_PRED: |
600 | 436 | for (i = 0; i < bh; ++i) { |
601 | 7.09k | for (j = 0; j < bw; ++j) |
602 | 6.68k | mask[j] = ii_weights1d[(i < j ? i : j) * size_scale]; |
603 | 408 | mask += stride; |
604 | 408 | } |
605 | 28 | break; |
606 | | |
607 | 28 | case II_DC_PRED: |
608 | 28 | default: |
609 | 436 | for (i = 0; i < bh; ++i) { |
610 | 408 | memset(mask, 32, bw * sizeof(mask[0])); |
611 | 408 | mask += stride; |
612 | 408 | } |
613 | 28 | break; |
614 | 112 | } |
615 | 112 | } |
616 | | |
617 | 2 | static AOM_INLINE void init_smooth_interintra_masks() { |
618 | 10 | for (int m = 0; m < INTERINTRA_MODES; ++m) { |
619 | 184 | for (int bs = 0; bs < BLOCK_SIZES_ALL; ++bs) { |
620 | 176 | const int bw = block_size_wide[bs]; |
621 | 176 | const int bh = block_size_high[bs]; |
622 | 176 | if (bw > MAX_WEDGE_SIZE || bh > MAX_WEDGE_SIZE) continue; |
623 | 112 | build_smooth_interintra_mask(smooth_interintra_mask_buf[m][bs], bw, bs, |
624 | 112 | m); |
625 | 112 | } |
626 | 8 | } |
627 | 2 | } |
628 | | |
// One-time initialization of all wedge and interintra mask tables.  Wedge
// boundaries follow the line
//   f(x, y) = a[0]*(x - a[2]*w/8) + a[1]*(y - a[3]*h/8) = 0
// where a = {direction, x_offset, y_offset} is a codebook entry.
// Fix: declare the empty parameter list as (void) -- in pre-C23 C, "()"
// declares a function with no prototype.
void av1_init_wedge_masks(void) {
  init_wedge_master_masks();
  init_wedge_masks();
  init_smooth_interintra_masks();
}
635 | | |
// Blend two compound predictions (still in d16 intermediate precision) into
// `dst` using the mask selected by `comp_data`.
static AOM_INLINE void build_masked_compound_no_round(
    uint8_t *dst, int dst_stride, const CONV_BUF_TYPE *src0, int src0_stride,
    const CONV_BUF_TYPE *src1, int src1_stride,
    const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type, int h,
    int w, InterPredParams *inter_pred_params) {
  // Chroma subsampling factors are forwarded so the blend kernels can
  // subsample the mask, which is stored at the sb_type (luma) width.
  const int ssy = inter_pred_params->subsampling_y;
  const int ssx = inter_pred_params->subsampling_x;
  const uint8_t *mask = av1_get_compound_type_mask(comp_data, sb_type);
  const int mask_stride = block_size_wide[sb_type];
#if CONFIG_AV1_HIGHBITDEPTH
  if (inter_pred_params->use_hbd_buf) {
    aom_highbd_blend_a64_d16_mask(dst, dst_stride, src0, src0_stride, src1,
                                  src1_stride, mask, mask_stride, w, h, ssx,
                                  ssy, &inter_pred_params->conv_params,
                                  inter_pred_params->bit_depth);
  } else {
    aom_lowbd_blend_a64_d16_mask(dst, dst_stride, src0, src0_stride, src1,
                                 src1_stride, mask, mask_stride, w, h, ssx, ssy,
                                 &inter_pred_params->conv_params);
  }
#else
  aom_lowbd_blend_a64_d16_mask(dst, dst_stride, src0, src0_stride, src1,
                               src1_stride, mask, mask_stride, w, h, ssx, ssy,
                               &inter_pred_params->conv_params);
#endif
}
662 | | |
// Build a masked compound prediction: predict the second reference into a
// temporary d16 buffer, optionally derive the diffwtd mask from the two
// predictions, then blend with the first reference's prediction into `dst`.
static void make_masked_inter_predictor(const uint8_t *pre, int pre_stride,
                                        uint8_t *dst, int dst_stride,
                                        InterPredParams *inter_pred_params,
                                        const SubpelParams *subpel_params) {
  const INTERINTER_COMPOUND_DATA *comp_data = &inter_pred_params->mask_comp;
  BLOCK_SIZE sb_type = inter_pred_params->sb_type;

  // We're going to call av1_make_inter_predictor to generate a prediction into
  // a temporary buffer, then will blend that temporary buffer with that from
  // the other reference.
  DECLARE_ALIGNED(32, uint8_t, tmp_buf[2 * MAX_SB_SQUARE]);
  uint8_t *tmp_dst =
      inter_pred_params->use_hbd_buf ? CONVERT_TO_BYTEPTR(tmp_buf) : tmp_buf;

  // Redirect conv_params at the temporary buffer, remembering the original
  // destination (which holds the first reference's d16 prediction).
  const int tmp_buf_stride = MAX_SB_SIZE;
  CONV_BUF_TYPE *org_dst = inter_pred_params->conv_params.dst;
  int org_dst_stride = inter_pred_params->conv_params.dst_stride;
  CONV_BUF_TYPE *tmp_buf16 = (CONV_BUF_TYPE *)tmp_buf;
  inter_pred_params->conv_params.dst = tmp_buf16;
  inter_pred_params->conv_params.dst_stride = tmp_buf_stride;
  assert(inter_pred_params->conv_params.do_average == 0);

  // This will generate a prediction in tmp_buf for the second reference
  av1_make_inter_predictor(pre, pre_stride, tmp_dst, MAX_SB_SIZE,
                           inter_pred_params, subpel_params);

  // The diffwtd mask is computed once, on plane 0, and reused for chroma.
  if (!inter_pred_params->conv_params.plane &&
      comp_data->type == COMPOUND_DIFFWTD) {
    av1_build_compound_diffwtd_mask_d16(
        comp_data->seg_mask, comp_data->mask_type, org_dst, org_dst_stride,
        tmp_buf16, tmp_buf_stride, inter_pred_params->block_height,
        inter_pred_params->block_width, &inter_pred_params->conv_params,
        inter_pred_params->bit_depth);
  }
  build_masked_compound_no_round(
      dst, dst_stride, org_dst, org_dst_stride, tmp_buf16, tmp_buf_stride,
      comp_data, sb_type, inter_pred_params->block_height,
      inter_pred_params->block_width, inter_pred_params);
}
702 | | |
703 | | void av1_build_one_inter_predictor( |
704 | | uint8_t *dst, int dst_stride, const MV *const src_mv, |
705 | | InterPredParams *inter_pred_params, MACROBLOCKD *xd, int mi_x, int mi_y, |
706 | 24.6k | int ref, uint8_t **mc_buf, CalcSubpelParamsFunc calc_subpel_params_func) { |
707 | 24.6k | SubpelParams subpel_params; |
708 | 24.6k | uint8_t *src; |
709 | 24.6k | int src_stride; |
710 | 24.6k | calc_subpel_params_func(src_mv, inter_pred_params, xd, mi_x, mi_y, ref, |
711 | 24.6k | mc_buf, &src, &subpel_params, &src_stride); |
712 | | |
713 | 24.6k | if (inter_pred_params->comp_mode == UNIFORM_SINGLE || |
714 | 24.6k | inter_pred_params->comp_mode == UNIFORM_COMP) { |
715 | 24.6k | av1_make_inter_predictor(src, src_stride, dst, dst_stride, |
716 | 24.6k | inter_pred_params, &subpel_params); |
717 | 24.6k | } else { |
718 | 0 | make_masked_inter_predictor(src, src_stride, dst, dst_stride, |
719 | 0 | inter_pred_params, &subpel_params); |
720 | 0 | } |
721 | 24.6k | } |
722 | | |
// Computes the forward/backward blending weights for compound prediction.
// When distance-weighted averaging is not used (single reference, or
// compound_idx set), both offsets get the uniform weight 8.
void av1_dist_wtd_comp_weight_assign(const AV1_COMMON *cm,
                                     const MB_MODE_INFO *mbmi, int *fwd_offset,
                                     int *bck_offset,
                                     int *use_dist_wtd_comp_avg,
                                     int is_compound) {
  assert(fwd_offset != NULL && bck_offset != NULL);
  if (!is_compound || mbmi->compound_idx) {
    // Uniform averaging: equal weights, dist-wtd compound disabled.
    *fwd_offset = 8;
    *bck_offset = 8;
    *use_dist_wtd_comp_avg = 0;
    return;
  }

  *use_dist_wtd_comp_avg = 1;
  const RefCntBuffer *const bck_buf = get_ref_frame_buf(cm, mbmi->ref_frame[0]);
  const RefCntBuffer *const fwd_buf = get_ref_frame_buf(cm, mbmi->ref_frame[1]);
  const int cur_frame_index = cm->cur_frame->order_hint;
  int bck_frame_index = 0, fwd_frame_index = 0;

  if (bck_buf != NULL) bck_frame_index = bck_buf->order_hint;
  if (fwd_buf != NULL) fwd_frame_index = fwd_buf->order_hint;

  // Temporal distances of the two references from the current frame,
  // clamped to the supported range.
  int d0 = clamp(abs(get_relative_dist(&cm->seq_params->order_hint_info,
                                       fwd_frame_index, cur_frame_index)),
                 0, MAX_FRAME_DISTANCE);
  int d1 = clamp(abs(get_relative_dist(&cm->seq_params->order_hint_info,
                                       cur_frame_index, bck_frame_index)),
                 0, MAX_FRAME_DISTANCE);

  // order selects which table column corresponds to the nearer reference.
  const int order = d0 <= d1;

  if (d0 == 0 || d1 == 0) {
    // A zero distance gets the most lopsided weight pair (last table row).
    *fwd_offset = quant_dist_lookup_table[3][order];
    *bck_offset = quant_dist_lookup_table[3][1 - order];
    return;
  }

  // Find the first quantized distance-ratio bucket that (d0, d1) fits;
  // note i may legitimately exit the loop as 3 (the last table row).
  int i;
  for (i = 0; i < 3; ++i) {
    int c0 = quant_dist_weight[i][order];
    int c1 = quant_dist_weight[i][!order];
    int d0_c0 = d0 * c0;
    int d1_c1 = d1 * c1;
    if ((d0 > d1 && d0_c0 < d1_c1) || (d0 <= d1 && d0_c0 > d1_c1)) break;
  }

  *fwd_offset = quant_dist_lookup_table[i][order];
  *bck_offset = quant_dist_lookup_table[i][1 - order];
}
772 | | |
773 | | // True if the following hold: |
774 | | // 1. Not intrabc and not build_for_obmc |
775 | | // 2. At least one dimension is size 4 with subsampling |
776 | | // 3. If sub-sampled, none of the previous blocks around the sub-sample |
777 | | // are intrabc or inter-blocks |
778 | | static bool is_sub8x8_inter(const MACROBLOCKD *xd, int plane, BLOCK_SIZE bsize, |
779 | 24.6k | int is_intrabc, int build_for_obmc) { |
780 | 24.6k | if (is_intrabc || build_for_obmc) { |
781 | 24.6k | return false; |
782 | 24.6k | } |
783 | | |
784 | 0 | const struct macroblockd_plane *const pd = &xd->plane[plane]; |
785 | 0 | const int ss_x = pd->subsampling_x; |
786 | 0 | const int ss_y = pd->subsampling_y; |
787 | 0 | const int is_sub4_x = (block_size_wide[bsize] == 4) && ss_x; |
788 | 0 | const int is_sub4_y = (block_size_high[bsize] == 4) && ss_y; |
789 | 0 | if (!is_sub4_x && !is_sub4_y) { |
790 | 0 | return false; |
791 | 0 | } |
792 | | |
793 | | // For sub8x8 chroma blocks, we may be covering more than one luma block's |
794 | | // worth of pixels. Thus (mi_x, mi_y) may not be the correct coordinates for |
795 | | // the top-left corner of the prediction source - the correct top-left corner |
796 | | // is at (pre_x, pre_y). |
797 | 0 | const int row_start = is_sub4_y ? -1 : 0; |
798 | 0 | const int col_start = is_sub4_x ? -1 : 0; |
799 | |
|
800 | 0 | for (int row = row_start; row <= 0; ++row) { |
801 | 0 | for (int col = col_start; col <= 0; ++col) { |
802 | 0 | const MB_MODE_INFO *this_mbmi = xd->mi[row * xd->mi_stride + col]; |
803 | 0 | if (!is_inter_block(this_mbmi)) return false; |
804 | 0 | if (is_intrabc_block(this_mbmi)) return false; |
805 | 0 | } |
806 | 0 | } |
807 | 0 | return true; |
808 | 0 | } |
809 | | |
// Builds the prediction for a sub-8x8 (subsampled chroma) block one 4x4
// luma-unit at a time, using the motion vector and reference of each luma
// block that the chroma block covers.
static void build_inter_predictors_sub8x8(
    const AV1_COMMON *cm, MACROBLOCKD *xd, int plane, const MB_MODE_INFO *mi,
    int mi_x, int mi_y, uint8_t **mc_buf,
    CalcSubpelParamsFunc calc_subpel_params_func) {
  const BLOCK_SIZE bsize = mi->bsize;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const bool ss_x = pd->subsampling_x;
  const bool ss_y = pd->subsampling_y;
  // Size of one sub-prediction (one covered luma block's worth of pixels).
  const int b4_w = block_size_wide[bsize] >> ss_x;
  const int b4_h = block_size_high[bsize] >> ss_y;
  const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, ss_x, ss_y);
  // Full size of the plane block being predicted.
  const int b8_w = block_size_wide[plane_bsize];
  const int b8_h = block_size_high[plane_bsize];
  const int is_compound = has_second_ref(mi);
  assert(!is_compound);
  assert(!is_intrabc_block(mi));

  // For sub8x8 chroma blocks, we may be covering more than one luma block's
  // worth of pixels. Thus (mi_x, mi_y) may not be the correct coordinates for
  // the top-left corner of the prediction source - the correct top-left corner
  // is at (pre_x, pre_y).
  const int row_start = (block_size_high[bsize] == 4) && ss_y ? -1 : 0;
  const int col_start = (block_size_wide[bsize] == 4) && ss_x ? -1 : 0;
  const int pre_x = (mi_x + MI_SIZE * col_start) >> ss_x;
  const int pre_y = (mi_y + MI_SIZE * row_start) >> ss_y;

  int row = row_start;
  for (int y = 0; y < b8_h; y += b4_h) {
    int col = col_start;
    for (int x = 0; x < b8_w; x += b4_w) {
      // Mode info (and hence MV/reference) of the luma block covering this
      // sub-block.
      MB_MODE_INFO *this_mbmi = xd->mi[row * xd->mi_stride + col];
      struct buf_2d *const dst_buf = &pd->dst;
      uint8_t *dst = dst_buf->buf + dst_buf->stride * y + x;
      int ref = 0;
      const RefCntBuffer *ref_buf =
          get_ref_frame_buf(cm, this_mbmi->ref_frame[ref]);
      const struct scale_factors *ref_scale_factors =
          get_ref_scale_factors_const(cm, this_mbmi->ref_frame[ref]);
      const struct scale_factors *const sf = ref_scale_factors;
      // Source buffer for this chroma plane (plane 1 -> U, otherwise V).
      const struct buf_2d pre_buf = {
        NULL,
        (plane == 1) ? ref_buf->buf.u_buffer : ref_buf->buf.v_buffer,
        ref_buf->buf.uv_crop_width,
        ref_buf->buf.uv_crop_height,
        ref_buf->buf.uv_stride,
      };

      const MV mv = this_mbmi->mv[ref].as_mv;

      InterPredParams inter_pred_params;
      av1_init_inter_params(&inter_pred_params, b4_w, b4_h, pre_y + y,
                            pre_x + x, pd->subsampling_x, pd->subsampling_y,
                            xd->bd, is_cur_buf_hbd(xd), mi->use_intrabc, sf,
                            &pre_buf, this_mbmi->interp_filters);
      inter_pred_params.conv_params =
          get_conv_params_no_round(ref, plane, NULL, 0, is_compound, xd->bd);

      av1_build_one_inter_predictor(dst, dst_buf->stride, &mv,
                                    &inter_pred_params, xd, mi_x + x, mi_y + y,
                                    ref, mc_buf, calc_subpel_params_func);

      ++col;
    }
    ++row;
  }
}
876 | | |
// Builds the inter prediction for a block at normal (8x8-and-up) granularity,
// handling single/compound references, global motion, warped motion, and
// masked compound modes. Also used to build OBMC neighbor predictions
// (build_for_obmc != 0), which skip the warp setup.
static void build_inter_predictors_8x8_and_bigger(
    const AV1_COMMON *cm, MACROBLOCKD *xd, int plane, const MB_MODE_INFO *mi,
    int build_for_obmc, int bw, int bh, int mi_x, int mi_y, uint8_t **mc_buf,
    CalcSubpelParamsFunc calc_subpel_params_func) {
  const int is_compound = has_second_ref(mi);
  const int is_intrabc = is_intrabc_block(mi);
  assert(IMPLIES(is_intrabc, !is_compound));
  struct macroblockd_plane *const pd = &xd->plane[plane];
  struct buf_2d *const dst_buf = &pd->dst;
  uint8_t *const dst = dst_buf->buf;

  // Per reference: does this block use a global-motion prediction?
  int is_global[2] = { 0, 0 };
  for (int ref = 0; ref < 1 + is_compound; ++ref) {
    const WarpedMotionParams *const wm = &xd->global_motion[mi->ref_frame[ref]];
    is_global[ref] = is_global_mv_block(mi, wm->wmtype);
  }

  const BLOCK_SIZE bsize = mi->bsize;
  const int ss_x = pd->subsampling_x;
  const int ss_y = pd->subsampling_y;
  // 4-wide/4-high blocks with subsampling anchor the prediction source one
  // mi unit up/left (see the sub8x8 comment above); not done for OBMC.
  const int row_start =
      (block_size_high[bsize] == 4) && ss_y && !build_for_obmc ? -1 : 0;
  const int col_start =
      (block_size_wide[bsize] == 4) && ss_x && !build_for_obmc ? -1 : 0;
  const int pre_x = (mi_x + MI_SIZE * col_start) >> ss_x;
  const int pre_y = (mi_y + MI_SIZE * row_start) >> ss_y;

  for (int ref = 0; ref < 1 + is_compound; ++ref) {
    // Intrabc predicts from the current frame's own reconstruction, with
    // identity scaling.
    const struct scale_factors *const sf =
        is_intrabc ? &cm->sf_identity : xd->block_ref_scale_factors[ref];
    struct buf_2d *const pre_buf = is_intrabc ? dst_buf : &pd->pre[ref];
    const MV mv = mi->mv[ref].as_mv;
    const WarpTypesAllowed warp_types = { is_global[ref],
                                          mi->motion_mode == WARPED_CAUSAL };

    InterPredParams inter_pred_params;
    av1_init_inter_params(&inter_pred_params, bw, bh, pre_y, pre_x,
                          pd->subsampling_x, pd->subsampling_y, xd->bd,
                          is_cur_buf_hbd(xd), mi->use_intrabc, sf, pre_buf,
                          mi->interp_filters);
    if (is_compound) av1_init_comp_mode(&inter_pred_params);
    inter_pred_params.conv_params = get_conv_params_no_round(
        ref, plane, xd->tmp_conv_dst, MAX_SB_SIZE, is_compound, xd->bd);

    av1_dist_wtd_comp_weight_assign(
        cm, mi, &inter_pred_params.conv_params.fwd_offset,
        &inter_pred_params.conv_params.bck_offset,
        &inter_pred_params.conv_params.use_dist_wtd_comp_avg, is_compound);

    if (!build_for_obmc)
      av1_init_warp_params(&inter_pred_params, &warp_types, ref, xd, mi);

    if (is_masked_compound_type(mi->interinter_comp.type)) {
      inter_pred_params.sb_type = mi->bsize;
      inter_pred_params.mask_comp = mi->interinter_comp;
      if (ref == 1) {
        // The second reference is mask-blended with the first rather than
        // averaged into it.
        inter_pred_params.conv_params.do_average = 0;
        inter_pred_params.comp_mode = MASK_COMP;
      }
      // Assign physical buffer.
      inter_pred_params.mask_comp.seg_mask = xd->seg_mask;
    }

    av1_build_one_inter_predictor(dst, dst_buf->stride, &mv, &inter_pred_params,
                                  xd, mi_x, mi_y, ref, mc_buf,
                                  calc_subpel_params_func);
  }
}
945 | | |
946 | | void av1_build_inter_predictors(const AV1_COMMON *cm, MACROBLOCKD *xd, |
947 | | int plane, const MB_MODE_INFO *mi, |
948 | | int build_for_obmc, int bw, int bh, int mi_x, |
949 | | int mi_y, uint8_t **mc_buf, |
950 | 24.6k | CalcSubpelParamsFunc calc_subpel_params_func) { |
951 | 24.6k | if (is_sub8x8_inter(xd, plane, mi->bsize, is_intrabc_block(mi), |
952 | 24.6k | build_for_obmc)) { |
953 | 0 | assert(bw < 8 || bh < 8); |
954 | 0 | build_inter_predictors_sub8x8(cm, xd, plane, mi, mi_x, mi_y, mc_buf, |
955 | 0 | calc_subpel_params_func); |
956 | 24.6k | } else { |
957 | 24.6k | build_inter_predictors_8x8_and_bigger(cm, xd, plane, mi, build_for_obmc, bw, |
958 | 24.6k | bh, mi_x, mi_y, mc_buf, |
959 | 24.6k | calc_subpel_params_func); |
960 | 24.6k | } |
961 | 24.6k | } |
962 | | void av1_setup_dst_planes(struct macroblockd_plane *planes, BLOCK_SIZE bsize, |
963 | | const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col, |
964 | 4.69M | const int plane_start, const int plane_end) { |
965 | | // We use AOMMIN(num_planes, MAX_MB_PLANE) instead of num_planes to quiet |
966 | | // the static analysis warnings. |
967 | 18.6M | for (int i = plane_start; i < AOMMIN(plane_end, MAX_MB_PLANE); ++i) { |
968 | 13.9M | struct macroblockd_plane *const pd = &planes[i]; |
969 | 13.9M | const int is_uv = i > 0; |
970 | 13.9M | setup_pred_plane(&pd->dst, bsize, src->buffers[i], src->crop_widths[is_uv], |
971 | 13.9M | src->crop_heights[is_uv], src->strides[is_uv], mi_row, |
972 | 13.9M | mi_col, NULL, pd->subsampling_x, pd->subsampling_y); |
973 | 13.9M | } |
974 | 4.69M | } |
975 | | |
976 | | void av1_setup_pre_planes(MACROBLOCKD *xd, int idx, |
977 | | const YV12_BUFFER_CONFIG *src, int mi_row, int mi_col, |
978 | | const struct scale_factors *sf, |
979 | 0 | const int num_planes) { |
980 | 0 | if (src != NULL) { |
981 | | // We use AOMMIN(num_planes, MAX_MB_PLANE) instead of num_planes to quiet |
982 | | // the static analysis warnings. |
983 | 0 | for (int i = 0; i < AOMMIN(num_planes, MAX_MB_PLANE); ++i) { |
984 | 0 | struct macroblockd_plane *const pd = &xd->plane[i]; |
985 | 0 | const int is_uv = i > 0; |
986 | 0 | setup_pred_plane(&pd->pre[idx], xd->mi[0]->bsize, src->buffers[i], |
987 | 0 | src->crop_widths[is_uv], src->crop_heights[is_uv], |
988 | 0 | src->strides[is_uv], mi_row, mi_col, sf, |
989 | 0 | pd->subsampling_x, pd->subsampling_y); |
990 | 0 | } |
991 | 0 | } |
992 | 0 | } |
993 | | |
// obmc_mask_N[overlap_position]
// OBMC blending weight tables (alpha out of 64), one per supported overlap
// length. Values rise to 64 (full weight) with distance from the shared
// edge; they are passed as the mask argument of the a64 blend calls.
static const uint8_t obmc_mask_1[1] = { 64 };
DECLARE_ALIGNED(2, static const uint8_t, obmc_mask_2[2]) = { 45, 64 };

DECLARE_ALIGNED(4, static const uint8_t, obmc_mask_4[4]) = { 39, 50, 59, 64 };

static const uint8_t obmc_mask_8[8] = { 36, 42, 48, 53, 57, 61, 64, 64 };

static const uint8_t obmc_mask_16[16] = { 34, 37, 40, 43, 46, 49, 52, 54,
                                          56, 58, 60, 61, 64, 64, 64, 64 };

static const uint8_t obmc_mask_32[32] = { 33, 35, 36, 38, 40, 41, 43, 44,
                                          45, 47, 48, 50, 51, 52, 53, 55,
                                          56, 57, 58, 59, 60, 60, 61, 62,
                                          64, 64, 64, 64, 64, 64, 64, 64 };

static const uint8_t obmc_mask_64[64] = {
  33, 34, 35, 35, 36, 37, 38, 39, 40, 40, 41, 42, 43, 44, 44, 44,
  45, 46, 47, 47, 48, 49, 50, 51, 51, 51, 52, 52, 53, 54, 55, 56,
  56, 56, 57, 57, 58, 58, 59, 60, 60, 60, 60, 60, 61, 62, 62, 62,
  62, 62, 63, 63, 63, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
};
1016 | | |
1017 | 0 | const uint8_t *av1_get_obmc_mask(int length) { |
1018 | 0 | switch (length) { |
1019 | 0 | case 1: return obmc_mask_1; |
1020 | 0 | case 2: return obmc_mask_2; |
1021 | 0 | case 4: return obmc_mask_4; |
1022 | 0 | case 8: return obmc_mask_8; |
1023 | 0 | case 16: return obmc_mask_16; |
1024 | 0 | case 32: return obmc_mask_32; |
1025 | 0 | case 64: return obmc_mask_64; |
1026 | 0 | default: assert(0); return NULL; |
1027 | 0 | } |
1028 | 0 | } |
1029 | | |
1030 | | static INLINE void increment_int_ptr(MACROBLOCKD *xd, int rel_mi_row, |
1031 | | int rel_mi_col, uint8_t op_mi_size, |
1032 | | int dir, MB_MODE_INFO *mi, void *fun_ctxt, |
1033 | 0 | const int num_planes) { |
1034 | 0 | (void)xd; |
1035 | 0 | (void)rel_mi_row; |
1036 | 0 | (void)rel_mi_col; |
1037 | 0 | (void)op_mi_size; |
1038 | 0 | (void)dir; |
1039 | 0 | (void)mi; |
1040 | 0 | ++*(int *)fun_ctxt; |
1041 | 0 | (void)num_planes; |
1042 | 0 | } |
1043 | | |
1044 | 0 | void av1_count_overlappable_neighbors(const AV1_COMMON *cm, MACROBLOCKD *xd) { |
1045 | 0 | MB_MODE_INFO *mbmi = xd->mi[0]; |
1046 | |
|
1047 | 0 | mbmi->overlappable_neighbors = 0; |
1048 | |
|
1049 | 0 | if (!is_motion_variation_allowed_bsize(mbmi->bsize)) return; |
1050 | | |
1051 | 0 | foreach_overlappable_nb_above(cm, xd, INT_MAX, increment_int_ptr, |
1052 | 0 | &mbmi->overlappable_neighbors); |
1053 | 0 | if (mbmi->overlappable_neighbors) return; |
1054 | 0 | foreach_overlappable_nb_left(cm, xd, INT_MAX, increment_int_ptr, |
1055 | 0 | &mbmi->overlappable_neighbors); |
1056 | 0 | } |
1057 | | |
1058 | | // HW does not support < 4x4 prediction. To limit the bandwidth requirement, if |
1059 | | // block-size of current plane is smaller than 8x8, always only blend with the |
1060 | | // left neighbor(s) (skip blending with the above side). |
1061 | | #define DISABLE_CHROMA_U8X8_OBMC 0 // 0: one-sided obmc; 1: disable |
1062 | | |
1063 | | int av1_skip_u4x4_pred_in_obmc(BLOCK_SIZE bsize, |
1064 | 0 | const struct macroblockd_plane *pd, int dir) { |
1065 | 0 | assert(is_motion_variation_allowed_bsize(bsize)); |
1066 | |
|
1067 | 0 | const BLOCK_SIZE bsize_plane = |
1068 | 0 | get_plane_block_size(bsize, pd->subsampling_x, pd->subsampling_y); |
1069 | 0 | switch (bsize_plane) { |
1070 | | #if DISABLE_CHROMA_U8X8_OBMC |
1071 | | case BLOCK_4X4: |
1072 | | case BLOCK_8X4: |
1073 | | case BLOCK_4X8: return 1; break; |
1074 | | #else |
1075 | 0 | case BLOCK_4X4: |
1076 | 0 | case BLOCK_8X4: |
1077 | 0 | case BLOCK_4X8: return dir == 0; break; |
1078 | 0 | #endif |
1079 | 0 | default: return 0; |
1080 | 0 | } |
1081 | 0 | } |
1082 | | |
1083 | 0 | void av1_modify_neighbor_predictor_for_obmc(MB_MODE_INFO *mbmi) { |
1084 | 0 | mbmi->ref_frame[1] = NONE_FRAME; |
1085 | 0 | mbmi->interinter_comp.type = COMPOUND_AVERAGE; |
1086 | |
|
1087 | 0 | return; |
1088 | 0 | } |
1089 | | |
// Context handed to the per-neighbor OBMC blend callbacks via fun_ctxt.
struct obmc_inter_pred_ctxt {
  uint8_t **adjacent;    // per-plane prediction buffers of the neighbor
  int *adjacent_stride;  // per-plane strides for 'adjacent'
};
1094 | | |
// Callback: blends one above-neighbor's prediction (ctxt->adjacent) into the
// current block's prediction along the top edge, using a vertical OBMC mask
// over the overlap rows.
static INLINE void build_obmc_inter_pred_above(
    MACROBLOCKD *xd, int rel_mi_row, int rel_mi_col, uint8_t op_mi_size,
    int dir, MB_MODE_INFO *above_mi, void *fun_ctxt, const int num_planes) {
  (void)above_mi;
  (void)rel_mi_row;
  (void)dir;
  struct obmc_inter_pred_ctxt *ctxt = (struct obmc_inter_pred_ctxt *)fun_ctxt;
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
  // Overlap height: half the block height, capped at half of 64.
  const int overlap =
      AOMMIN(block_size_high[bsize], block_size_high[BLOCK_64X64]) >> 1;

  for (int plane = 0; plane < num_planes; ++plane) {
    const struct macroblockd_plane *pd = &xd->plane[plane];
    // Blend region size and column offset in this plane's pixels.
    const int bw = (op_mi_size * MI_SIZE) >> pd->subsampling_x;
    const int bh = overlap >> pd->subsampling_y;
    const int plane_col = (rel_mi_col * MI_SIZE) >> pd->subsampling_x;

    if (av1_skip_u4x4_pred_in_obmc(bsize, pd, 0)) continue;

    const int dst_stride = pd->dst.stride;
    uint8_t *const dst = &pd->dst.buf[plane_col];
    const int tmp_stride = ctxt->adjacent_stride[plane];
    const uint8_t *const tmp = &ctxt->adjacent[plane][plane_col];
    const uint8_t *const mask = av1_get_obmc_mask(bh);
#if CONFIG_AV1_HIGHBITDEPTH
    const int is_hbd = is_cur_buf_hbd(xd);
    if (is_hbd)
      aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp,
                                 tmp_stride, mask, bw, bh, xd->bd);
    else
      aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp, tmp_stride,
                          mask, bw, bh);
#else
    aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, tmp, tmp_stride, mask,
                        bw, bh);
#endif
  }
}
1133 | | |
// Callback: blends one left-neighbor's prediction (ctxt->adjacent) into the
// current block's prediction along the left edge, using a horizontal OBMC
// mask over the overlap columns.
static INLINE void build_obmc_inter_pred_left(
    MACROBLOCKD *xd, int rel_mi_row, int rel_mi_col, uint8_t op_mi_size,
    int dir, MB_MODE_INFO *left_mi, void *fun_ctxt, const int num_planes) {
  (void)left_mi;
  (void)rel_mi_col;
  (void)dir;
  struct obmc_inter_pred_ctxt *ctxt = (struct obmc_inter_pred_ctxt *)fun_ctxt;
  const BLOCK_SIZE bsize = xd->mi[0]->bsize;
  // Overlap width: half the block width, capped at half of 64.
  const int overlap =
      AOMMIN(block_size_wide[bsize], block_size_wide[BLOCK_64X64]) >> 1;

  for (int plane = 0; plane < num_planes; ++plane) {
    const struct macroblockd_plane *pd = &xd->plane[plane];
    // Blend region size and row offset in this plane's pixels.
    const int bw = overlap >> pd->subsampling_x;
    const int bh = (op_mi_size * MI_SIZE) >> pd->subsampling_y;
    const int plane_row = (rel_mi_row * MI_SIZE) >> pd->subsampling_y;

    if (av1_skip_u4x4_pred_in_obmc(bsize, pd, 1)) continue;

    const int dst_stride = pd->dst.stride;
    uint8_t *const dst = &pd->dst.buf[plane_row * dst_stride];
    const int tmp_stride = ctxt->adjacent_stride[plane];
    const uint8_t *const tmp = &ctxt->adjacent[plane][plane_row * tmp_stride];
    const uint8_t *const mask = av1_get_obmc_mask(bw);

#if CONFIG_AV1_HIGHBITDEPTH
    const int is_hbd = is_cur_buf_hbd(xd);
    if (is_hbd)
      aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp,
                                 tmp_stride, mask, bw, bh, xd->bd);
    else
      aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp, tmp_stride,
                          mask, bw, bh);
#else
    aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, tmp, tmp_stride, mask,
                        bw, bh);
#endif
  }
}
1173 | | |
1174 | | // This function combines motion compensated predictions that are generated by |
1175 | | // top/left neighboring blocks' inter predictors with the regular inter |
1176 | | // prediction. We assume the original prediction (bmc) is stored in |
1177 | | // xd->plane[].dst.buf |
1178 | | void av1_build_obmc_inter_prediction(const AV1_COMMON *cm, MACROBLOCKD *xd, |
1179 | | uint8_t *above[MAX_MB_PLANE], |
1180 | | int above_stride[MAX_MB_PLANE], |
1181 | | uint8_t *left[MAX_MB_PLANE], |
1182 | 0 | int left_stride[MAX_MB_PLANE]) { |
1183 | 0 | const BLOCK_SIZE bsize = xd->mi[0]->bsize; |
1184 | | |
1185 | | // handle above row |
1186 | 0 | struct obmc_inter_pred_ctxt ctxt_above = { above, above_stride }; |
1187 | 0 | foreach_overlappable_nb_above(cm, xd, |
1188 | 0 | max_neighbor_obmc[mi_size_wide_log2[bsize]], |
1189 | 0 | build_obmc_inter_pred_above, &ctxt_above); |
1190 | | |
1191 | | // handle left column |
1192 | 0 | struct obmc_inter_pred_ctxt ctxt_left = { left, left_stride }; |
1193 | 0 | foreach_overlappable_nb_left(cm, xd, |
1194 | 0 | max_neighbor_obmc[mi_size_high_log2[bsize]], |
1195 | 0 | build_obmc_inter_pred_left, &ctxt_left); |
1196 | 0 | } |
1197 | | |
1198 | | void av1_setup_obmc_dst_bufs(MACROBLOCKD *xd, uint8_t **dst_buf1, |
1199 | 0 | uint8_t **dst_buf2) { |
1200 | 0 | if (is_cur_buf_hbd(xd)) { |
1201 | 0 | int len = sizeof(uint16_t); |
1202 | 0 | dst_buf1[0] = CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[0]); |
1203 | 0 | dst_buf1[1] = |
1204 | 0 | CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[0] + MAX_SB_SQUARE * len); |
1205 | 0 | dst_buf1[2] = |
1206 | 0 | CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[0] + MAX_SB_SQUARE * 2 * len); |
1207 | 0 | dst_buf2[0] = CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[1]); |
1208 | 0 | dst_buf2[1] = |
1209 | 0 | CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[1] + MAX_SB_SQUARE * len); |
1210 | 0 | dst_buf2[2] = |
1211 | 0 | CONVERT_TO_BYTEPTR(xd->tmp_obmc_bufs[1] + MAX_SB_SQUARE * 2 * len); |
1212 | 0 | } else { |
1213 | 0 | dst_buf1[0] = xd->tmp_obmc_bufs[0]; |
1214 | 0 | dst_buf1[1] = xd->tmp_obmc_bufs[0] + MAX_SB_SQUARE; |
1215 | 0 | dst_buf1[2] = xd->tmp_obmc_bufs[0] + MAX_SB_SQUARE * 2; |
1216 | 0 | dst_buf2[0] = xd->tmp_obmc_bufs[1]; |
1217 | 0 | dst_buf2[1] = xd->tmp_obmc_bufs[1] + MAX_SB_SQUARE; |
1218 | 0 | dst_buf2[2] = xd->tmp_obmc_bufs[1] + MAX_SB_SQUARE * 2; |
1219 | 0 | } |
1220 | 0 | } |
1221 | | |
// Prepares xd for building the above neighbor's prediction into the
// temporary buffers in ctxt: rewrites the neighbor's mbmi to single-reference
// form, redirects the dst/pre planes, and updates the horizontal edge clamps
// to the neighbor's column.
void av1_setup_build_prediction_by_above_pred(
    MACROBLOCKD *xd, int rel_mi_col, uint8_t above_mi_width,
    MB_MODE_INFO *above_mbmi, struct build_prediction_ctxt *ctxt,
    const int num_planes) {
  // Neighbor predictions are built at a minimum granularity of 8x8.
  const BLOCK_SIZE a_bsize = AOMMAX(BLOCK_8X8, above_mbmi->bsize);
  const int above_mi_col = xd->mi_col + rel_mi_col;

  av1_modify_neighbor_predictor_for_obmc(above_mbmi);

  // Point the destination planes at the temporary prediction buffers.
  for (int j = 0; j < num_planes; ++j) {
    struct macroblockd_plane *const pd = &xd->plane[j];
    setup_pred_plane(&pd->dst, a_bsize, ctxt->tmp_buf[j], ctxt->tmp_width[j],
                     ctxt->tmp_height[j], ctxt->tmp_stride[j], 0, rel_mi_col,
                     NULL, pd->subsampling_x, pd->subsampling_y);
  }

  const int num_refs = 1 + has_second_ref(above_mbmi);

  // Set up reference planes and scale factors for the neighbor's refs.
  for (int ref = 0; ref < num_refs; ++ref) {
    const MV_REFERENCE_FRAME frame = above_mbmi->ref_frame[ref];

    const RefCntBuffer *const ref_buf = get_ref_frame_buf(ctxt->cm, frame);
    const struct scale_factors *const sf =
        get_ref_scale_factors_const(ctxt->cm, frame);
    xd->block_ref_scale_factors[ref] = sf;
    if ((!av1_is_valid_scale(sf)))
      aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                         "Reference frame has invalid dimensions");
    av1_setup_pre_planes(xd, ref, &ref_buf->buf, xd->mi_row, above_mi_col, sf,
                         num_planes);
  }

  // Horizontal edge clamps in 1/8-pel units, relative to the neighbor.
  xd->mb_to_left_edge = 8 * MI_SIZE * (-above_mi_col);
  xd->mb_to_right_edge =
      ctxt->mb_to_far_edge +
      (xd->width - rel_mi_col - above_mi_width) * MI_SIZE * 8;
}
1259 | | |
// Prepares xd for building the left neighbor's prediction into the temporary
// buffers in ctxt: rewrites the neighbor's mbmi to single-reference form,
// redirects the dst/pre planes, and updates the vertical edge clamps to the
// neighbor's row.
void av1_setup_build_prediction_by_left_pred(MACROBLOCKD *xd, int rel_mi_row,
                                             uint8_t left_mi_height,
                                             MB_MODE_INFO *left_mbmi,
                                             struct build_prediction_ctxt *ctxt,
                                             const int num_planes) {
  // Neighbor predictions are built at a minimum granularity of 8x8.
  const BLOCK_SIZE l_bsize = AOMMAX(BLOCK_8X8, left_mbmi->bsize);
  const int left_mi_row = xd->mi_row + rel_mi_row;

  av1_modify_neighbor_predictor_for_obmc(left_mbmi);

  // Point the destination planes at the temporary prediction buffers.
  for (int j = 0; j < num_planes; ++j) {
    struct macroblockd_plane *const pd = &xd->plane[j];
    setup_pred_plane(&pd->dst, l_bsize, ctxt->tmp_buf[j], ctxt->tmp_width[j],
                     ctxt->tmp_height[j], ctxt->tmp_stride[j], rel_mi_row, 0,
                     NULL, pd->subsampling_x, pd->subsampling_y);
  }

  const int num_refs = 1 + has_second_ref(left_mbmi);

  // Set up reference planes and scale factors for the neighbor's refs.
  for (int ref = 0; ref < num_refs; ++ref) {
    const MV_REFERENCE_FRAME frame = left_mbmi->ref_frame[ref];

    const RefCntBuffer *const ref_buf = get_ref_frame_buf(ctxt->cm, frame);
    const struct scale_factors *const ref_scale_factors =
        get_ref_scale_factors_const(ctxt->cm, frame);

    xd->block_ref_scale_factors[ref] = ref_scale_factors;
    if ((!av1_is_valid_scale(ref_scale_factors)))
      aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
                         "Reference frame has invalid dimensions");
    av1_setup_pre_planes(xd, ref, &ref_buf->buf, left_mi_row, xd->mi_col,
                         ref_scale_factors, num_planes);
  }

  // Vertical edge clamps in 1/8-pel units, relative to the neighbor.
  xd->mb_to_top_edge = GET_MV_SUBPEL(MI_SIZE * (-left_mi_row));
  xd->mb_to_bottom_edge =
      ctxt->mb_to_far_edge +
      GET_MV_SUBPEL((xd->height - rel_mi_row - left_mi_height) * MI_SIZE);
}
1299 | | |
// Blends an inter prediction with an intra prediction into comppred.
// Wedge mode blends with a block-shaped wedge mask; otherwise a precomputed
// smooth, mode-dependent mask is used.
static AOM_INLINE void combine_interintra(
    INTERINTRA_MODE mode, int8_t use_wedge_interintra, int8_t wedge_index,
    int8_t wedge_sign, BLOCK_SIZE bsize, BLOCK_SIZE plane_bsize,
    uint8_t *comppred, int compstride, const uint8_t *interpred,
    int interstride, const uint8_t *intrapred, int intrastride) {
  const int bw = block_size_wide[plane_bsize];
  const int bh = block_size_high[plane_bsize];

  if (use_wedge_interintra) {
    if (av1_is_wedge_used(bsize)) {
      const uint8_t *mask =
          av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
      // subw/subh signal that the mask (sized for bsize) must be subsampled
      // to match the plane block size.
      const int subw = 2 * mi_size_wide[bsize] == bw;
      const int subh = 2 * mi_size_high[bsize] == bh;
      aom_blend_a64_mask(comppred, compstride, intrapred, intrastride,
                         interpred, interstride, mask, block_size_wide[bsize],
                         bw, bh, subw, subh);
    }
    return;
  }

  // Smooth inter-intra: precomputed mask indexed by mode and plane size.
  const uint8_t *mask = smooth_interintra_mask_buf[mode][plane_bsize];
  aom_blend_a64_mask(comppred, compstride, intrapred, intrastride, interpred,
                     interstride, mask, bw, bw, bh, 0, 0);
}
1325 | | |
#if CONFIG_AV1_HIGHBITDEPTH
// High bit-depth counterpart of combine_interintra(). Unlike the low
// bit-depth path, the smooth mask is built on the stack here via
// build_smooth_interintra_mask() rather than read from a precomputed table.
static AOM_INLINE void combine_interintra_highbd(
    INTERINTRA_MODE mode, int8_t use_wedge_interintra, int8_t wedge_index,
    int8_t wedge_sign, BLOCK_SIZE bsize, BLOCK_SIZE plane_bsize,
    uint8_t *comppred8, int compstride, const uint8_t *interpred8,
    int interstride, const uint8_t *intrapred8, int intrastride, int bd) {
  const int bw = block_size_wide[plane_bsize];
  const int bh = block_size_high[plane_bsize];

  if (use_wedge_interintra) {
    if (av1_is_wedge_used(bsize)) {
      const uint8_t *mask =
          av1_get_contiguous_soft_mask(wedge_index, wedge_sign, bsize);
      // subw/subh signal that the mask (sized for bsize) must be subsampled
      // to match the plane block size.
      const int subh = 2 * mi_size_high[bsize] == bh;
      const int subw = 2 * mi_size_wide[bsize] == bw;
      aom_highbd_blend_a64_mask(comppred8, compstride, intrapred8, intrastride,
                                interpred8, interstride, mask,
                                block_size_wide[bsize], bw, bh, subw, subh, bd);
    }
    return;
  }

  uint8_t mask[MAX_SB_SQUARE];
  build_smooth_interintra_mask(mask, bw, plane_bsize, mode);
  aom_highbd_blend_a64_mask(comppred8, compstride, intrapred8, intrastride,
                            interpred8, interstride, mask, bw, bw, bh, 0, 0,
                            bd);
}
#endif  // CONFIG_AV1_HIGHBITDEPTH
1355 | | |
// Generates the intra part of an inter-intra prediction for one plane into
// dst, using the neighboring reconstructed pixels supplied in ctx.
void av1_build_intra_predictors_for_interintra(const AV1_COMMON *cm,
                                               MACROBLOCKD *xd,
                                               BLOCK_SIZE bsize, int plane,
                                               const BUFFER_SET *ctx,
                                               uint8_t *dst, int dst_stride) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  const int ssx = xd->plane[plane].subsampling_x;
  const int ssy = xd->plane[plane].subsampling_y;
  BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, ssx, ssy);
  // Map the signaled inter-intra mode to a regular intra prediction mode.
  PREDICTION_MODE mode = interintra_to_intra_mode[xd->mi[0]->interintra_mode];
  // Inter-intra blocks never carry angle deltas, filter-intra, or intrabc.
  assert(xd->mi[0]->angle_delta[PLANE_TYPE_Y] == 0);
  assert(xd->mi[0]->angle_delta[PLANE_TYPE_UV] == 0);
  assert(xd->mi[0]->filter_intra_mode_info.use_filter_intra == 0);
  assert(xd->mi[0]->use_intrabc == 0);
  const SequenceHeader *seq_params = cm->seq_params;

  av1_predict_intra_block(xd, seq_params->sb_size,
                          seq_params->enable_intra_edge_filter, pd->width,
                          pd->height, max_txsize_rect_lookup[plane_bsize], mode,
                          0, 0, FILTER_INTRA_MODES, ctx->plane[plane],
                          ctx->stride[plane], dst, dst_stride, 0, 0, plane);
}
1378 | | |
1379 | | void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane, |
1380 | | const uint8_t *inter_pred, int inter_stride, |
1381 | 0 | const uint8_t *intra_pred, int intra_stride) { |
1382 | 0 | const int ssx = xd->plane[plane].subsampling_x; |
1383 | 0 | const int ssy = xd->plane[plane].subsampling_y; |
1384 | 0 | const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, ssx, ssy); |
1385 | 0 | #if CONFIG_AV1_HIGHBITDEPTH |
1386 | 0 | if (is_cur_buf_hbd(xd)) { |
1387 | 0 | combine_interintra_highbd( |
1388 | 0 | xd->mi[0]->interintra_mode, xd->mi[0]->use_wedge_interintra, |
1389 | 0 | xd->mi[0]->interintra_wedge_index, INTERINTRA_WEDGE_SIGN, bsize, |
1390 | 0 | plane_bsize, xd->plane[plane].dst.buf, xd->plane[plane].dst.stride, |
1391 | 0 | inter_pred, inter_stride, intra_pred, intra_stride, xd->bd); |
1392 | 0 | return; |
1393 | 0 | } |
1394 | 0 | #endif |
1395 | 0 | combine_interintra( |
1396 | 0 | xd->mi[0]->interintra_mode, xd->mi[0]->use_wedge_interintra, |
1397 | 0 | xd->mi[0]->interintra_wedge_index, INTERINTRA_WEDGE_SIGN, bsize, |
1398 | 0 | plane_bsize, xd->plane[plane].dst.buf, xd->plane[plane].dst.stride, |
1399 | 0 | inter_pred, inter_stride, intra_pred, intra_stride); |
1400 | 0 | } |
1401 | | |
1402 | | // build interintra_predictors for one plane |
1403 | | void av1_build_interintra_predictor(const AV1_COMMON *cm, MACROBLOCKD *xd, |
1404 | | uint8_t *pred, int stride, |
1405 | | const BUFFER_SET *ctx, int plane, |
1406 | 0 | BLOCK_SIZE bsize) { |
1407 | 0 | assert(bsize < BLOCK_SIZES_ALL); |
1408 | 0 | if (is_cur_buf_hbd(xd)) { |
1409 | 0 | DECLARE_ALIGNED(16, uint16_t, intrapredictor[MAX_SB_SQUARE]); |
1410 | 0 | av1_build_intra_predictors_for_interintra( |
1411 | 0 | cm, xd, bsize, plane, ctx, CONVERT_TO_BYTEPTR(intrapredictor), |
1412 | 0 | MAX_SB_SIZE); |
1413 | 0 | av1_combine_interintra(xd, bsize, plane, pred, stride, |
1414 | 0 | CONVERT_TO_BYTEPTR(intrapredictor), MAX_SB_SIZE); |
1415 | 0 | } else { |
1416 | 0 | DECLARE_ALIGNED(16, uint8_t, intrapredictor[MAX_SB_SQUARE]); |
1417 | 0 | av1_build_intra_predictors_for_interintra(cm, xd, bsize, plane, ctx, |
1418 | 0 | intrapredictor, MAX_SB_SIZE); |
1419 | 0 | av1_combine_interintra(xd, bsize, plane, pred, stride, intrapredictor, |
1420 | 0 | MAX_SB_SIZE); |
1421 | 0 | } |
1422 | 0 | } |