/src/aom/av1/encoder/bitstream.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2016, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include <assert.h> |
13 | | #include <limits.h> |
14 | | #include <stdbool.h> |
15 | | #include <stdint.h> |
16 | | #include <stdio.h> |
17 | | #include <string.h> |
18 | | |
19 | | #include "aom/aom_encoder.h" |
20 | | #include "aom_dsp/aom_dsp_common.h" |
21 | | #include "aom_dsp/binary_codes_writer.h" |
22 | | #include "aom_dsp/bitwriter_buffer.h" |
23 | | #include "aom_mem/aom_mem.h" |
24 | | #include "aom_ports/bitops.h" |
25 | | #include "aom_ports/mem_ops.h" |
26 | | #if CONFIG_BITSTREAM_DEBUG |
27 | | #include "aom_util/debug_util.h" |
28 | | #endif // CONFIG_BITSTREAM_DEBUG |
29 | | |
30 | | #include "av1/common/cdef.h" |
31 | | #include "av1/common/cfl.h" |
32 | | #include "av1/common/debugmodes.h" |
33 | | #include "av1/common/entropy.h" |
34 | | #include "av1/common/entropymode.h" |
35 | | #include "av1/common/entropymv.h" |
36 | | #include "av1/common/mvref_common.h" |
37 | | #include "av1/common/pred_common.h" |
38 | | #include "av1/common/reconinter.h" |
39 | | #include "av1/common/reconintra.h" |
40 | | #include "av1/common/seg_common.h" |
41 | | #include "av1/common/tile_common.h" |
42 | | |
43 | | #include "av1/encoder/bitstream.h" |
44 | | #include "av1/encoder/cost.h" |
45 | | #include "av1/encoder/encodemv.h" |
46 | | #include "av1/encoder/encodetxb.h" |
47 | | #include "av1/encoder/ethread.h" |
48 | | #include "av1/encoder/mcomp.h" |
49 | | #include "av1/encoder/palette.h" |
50 | | #include "av1/encoder/pickrst.h" |
51 | | #include "av1/encoder/segmentation.h" |
52 | | #include "av1/encoder/tokenize.h" |
53 | | |
54 | | #define ENC_MISMATCH_DEBUG 0 |
55 | 0 | #define SETUP_TIME_OH_CONST 5 // Setup time overhead constant per worker |
56 | 0 | #define JOB_DISP_TIME_OH_CONST 1 // Job dispatch time overhead per tile |
57 | | |
58 | 0 | static inline void write_uniform(aom_writer *w, int n, int v) { |
59 | 0 | const int l = get_unsigned_bits(n); |
60 | 0 | const int m = (1 << l) - n; |
61 | 0 | if (l == 0) return; |
62 | 0 | if (v < m) { |
63 | 0 | aom_write_literal(w, v, l - 1); |
64 | 0 | } else { |
65 | 0 | aom_write_literal(w, m + ((v - m) >> 1), l - 1); |
66 | 0 | aom_write_literal(w, (v - m) & 1, 1); |
67 | 0 | } |
68 | 0 | } |
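write_uniform() is AV1's quasi-uniform code for a value v in [0, n): with l = get_unsigned_bits(n) and m = (1 << l) - n, the first m values receive (l - 1)-bit codewords and the remaining n - m values receive l bits, so no codeword is wasted when n is not a power of two. A minimal standalone sketch of the codeword-length arithmetic (helper names here are illustrative, not libaom APIs):

```c
#include <assert.h>

/* floor(log2(n)) + 1 for n > 0, mirroring get_unsigned_bits(). */
static int num_bits(int n) {
  int l = 0;
  while (n > 0) { ++l; n >>= 1; }
  return l;
}

/* Length in bits of the quasi-uniform codeword for v in [0, n). */
static int quasi_uniform_len(int n, int v) {
  const int l = num_bits(n);
  const int m = (1 << l) - n;
  if (l == 0) return 0;
  return v < m ? l - 1 : l;
}

int main(void) {
  /* n = 5: l = 3, m = 3, so values 0..2 cost 2 bits and 3..4 cost 3. */
  assert(quasi_uniform_len(5, 2) == 2);
  assert(quasi_uniform_len(5, 4) == 3);
  return 0;
}
```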
69 | | |
70 | | #if !CONFIG_REALTIME_ONLY |
71 | | static inline void loop_restoration_write_sb_coeffs( |
72 | | const AV1_COMMON *const cm, MACROBLOCKD *xd, int runit_idx, |
73 | | aom_writer *const w, int plane, FRAME_COUNTS *counts); |
74 | | #endif |
75 | | |
76 | | static inline void write_intra_y_mode_kf(FRAME_CONTEXT *frame_ctx, |
77 | | const MB_MODE_INFO *mi, |
78 | | const MB_MODE_INFO *above_mi, |
79 | | const MB_MODE_INFO *left_mi, |
80 | 0 | PREDICTION_MODE mode, aom_writer *w) { |
81 | 0 | assert(!is_intrabc_block(mi)); |
82 | 0 | (void)mi; |
83 | 0 | aom_write_symbol(w, mode, get_y_mode_cdf(frame_ctx, above_mi, left_mi), |
84 | 0 | INTRA_MODES); |
85 | 0 | } |
86 | | |
87 | | static inline void write_inter_mode(aom_writer *w, PREDICTION_MODE mode, |
88 | | FRAME_CONTEXT *ec_ctx, |
89 | 0 | const int16_t mode_ctx) { |
90 | 0 | const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK; |
91 | |
92 | 0 | aom_write_symbol(w, mode != NEWMV, ec_ctx->newmv_cdf[newmv_ctx], 2); |
93 | |
94 | 0 | if (mode != NEWMV) { |
95 | 0 | const int16_t zeromv_ctx = |
96 | 0 | (mode_ctx >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK; |
97 | 0 | aom_write_symbol(w, mode != GLOBALMV, ec_ctx->zeromv_cdf[zeromv_ctx], 2); |
98 | |
99 | 0 | if (mode != GLOBALMV) { |
100 | 0 | int16_t refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK; |
101 | 0 | aom_write_symbol(w, mode != NEARESTMV, ec_ctx->refmv_cdf[refmv_ctx], 2); |
102 | 0 | } |
103 | 0 | } |
104 | 0 | } |
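write_inter_mode() codes the single-reference inter mode as a cascade of up to three binary symbols — not-NEWMV, then not-GLOBALMV, then not-NEARESTMV — each with its own derived context, with NEARMV left as the remainder. A sketch of the resulting symbol sequence (enum ordering here is illustrative):

```c
typedef enum { S_NEWMV, S_GLOBALMV, S_NEARESTMV, S_NEARMV } SingleMode;

/* Fills out[] with the binary symbols the cascade emits and returns
 * how many are written. */
static int inter_mode_symbols(SingleMode m, int out[3]) {
  out[0] = (m != S_NEWMV);            /* coded with newmv_cdf */
  if (m == S_NEWMV) return 1;
  out[1] = (m != S_GLOBALMV);         /* coded with zeromv_cdf */
  if (m == S_GLOBALMV) return 2;
  out[2] = (m != S_NEARESTMV);        /* coded with refmv_cdf */
  return 3;                           /* NEARESTMV: 1,1,0; NEARMV: 1,1,1 */
}
```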
105 | | |
106 | | static inline void write_drl_idx(FRAME_CONTEXT *ec_ctx, |
107 | | const MB_MODE_INFO *mbmi, |
108 | | const MB_MODE_INFO_EXT_FRAME *mbmi_ext_frame, |
109 | 0 | aom_writer *w) { |
110 | 0 | assert(mbmi->ref_mv_idx < 3); |
111 | |
112 | 0 | const int new_mv = mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV; |
113 | 0 | if (new_mv) { |
114 | 0 | int idx; |
115 | 0 | for (idx = 0; idx < 2; ++idx) { |
116 | 0 | if (mbmi_ext_frame->ref_mv_count > idx + 1) { |
117 | 0 | uint8_t drl_ctx = av1_drl_ctx(mbmi_ext_frame->weight, idx); |
118 | |
119 | 0 | aom_write_symbol(w, mbmi->ref_mv_idx != idx, ec_ctx->drl_cdf[drl_ctx], |
120 | 0 | 2); |
121 | 0 | if (mbmi->ref_mv_idx == idx) return; |
122 | 0 | } |
123 | 0 | } |
124 | 0 | return; |
125 | 0 | } |
126 | | |
127 | 0 | if (have_nearmv_in_inter_mode(mbmi->mode)) { |
128 | 0 | int idx; |
129 | | // TODO(jingning): Temporary solution to compensate for the NEARESTMV offset. |
130 | 0 | for (idx = 1; idx < 3; ++idx) { |
131 | 0 | if (mbmi_ext_frame->ref_mv_count > idx + 1) { |
132 | 0 | uint8_t drl_ctx = av1_drl_ctx(mbmi_ext_frame->weight, idx); |
133 | 0 | aom_write_symbol(w, mbmi->ref_mv_idx != (idx - 1), |
134 | 0 | ec_ctx->drl_cdf[drl_ctx], 2); |
135 | 0 | if (mbmi->ref_mv_idx == (idx - 1)) return; |
136 | 0 | } |
137 | 0 | } |
138 | 0 | return; |
139 | 0 | } |
140 | 0 | } |
141 | | |
142 | | static inline void write_inter_compound_mode(MACROBLOCKD *xd, aom_writer *w, |
143 | | PREDICTION_MODE mode, |
144 | 0 | const int16_t mode_ctx) { |
145 | 0 | assert(is_inter_compound_mode(mode)); |
146 | 0 | aom_write_symbol(w, INTER_COMPOUND_OFFSET(mode), |
147 | 0 | xd->tile_ctx->inter_compound_mode_cdf[mode_ctx], |
148 | 0 | INTER_COMPOUND_MODES); |
149 | 0 | } |
150 | | |
151 | | static inline void write_tx_size_vartx(MACROBLOCKD *xd, |
152 | | const MB_MODE_INFO *mbmi, |
153 | | TX_SIZE tx_size, int depth, int blk_row, |
154 | 0 | int blk_col, aom_writer *w) { |
155 | 0 | FRAME_CONTEXT *const ec_ctx = xd->tile_ctx; |
156 | 0 | const int max_blocks_high = max_block_high(xd, mbmi->bsize, 0); |
157 | 0 | const int max_blocks_wide = max_block_wide(xd, mbmi->bsize, 0); |
158 | |
159 | 0 | if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return; |
160 | | |
161 | 0 | if (depth == MAX_VARTX_DEPTH) { |
162 | 0 | txfm_partition_update(xd->above_txfm_context + blk_col, |
163 | 0 | xd->left_txfm_context + blk_row, tx_size, tx_size); |
164 | 0 | return; |
165 | 0 | } |
166 | | |
167 | 0 | const int ctx = txfm_partition_context(xd->above_txfm_context + blk_col, |
168 | 0 | xd->left_txfm_context + blk_row, |
169 | 0 | mbmi->bsize, tx_size); |
170 | 0 | const int txb_size_index = |
171 | 0 | av1_get_txb_size_index(mbmi->bsize, blk_row, blk_col); |
172 | 0 | const int write_txfm_partition = |
173 | 0 | tx_size == mbmi->inter_tx_size[txb_size_index]; |
174 | 0 | if (write_txfm_partition) { |
175 | 0 | aom_write_symbol(w, 0, ec_ctx->txfm_partition_cdf[ctx], 2); |
176 | |
177 | 0 | txfm_partition_update(xd->above_txfm_context + blk_col, |
178 | 0 | xd->left_txfm_context + blk_row, tx_size, tx_size); |
179 | | // TODO(yuec): set correct txfm partition update for qttx |
180 | 0 | } else { |
181 | 0 | const TX_SIZE sub_txs = sub_tx_size_map[tx_size]; |
182 | 0 | const int bsw = tx_size_wide_unit[sub_txs]; |
183 | 0 | const int bsh = tx_size_high_unit[sub_txs]; |
184 | |
185 | 0 | aom_write_symbol(w, 1, ec_ctx->txfm_partition_cdf[ctx], 2); |
186 | |
187 | 0 | if (sub_txs == TX_4X4) { |
188 | 0 | txfm_partition_update(xd->above_txfm_context + blk_col, |
189 | 0 | xd->left_txfm_context + blk_row, sub_txs, tx_size); |
190 | 0 | return; |
191 | 0 | } |
192 | | |
193 | 0 | assert(bsw > 0 && bsh > 0); |
194 | 0 | for (int row = 0; row < tx_size_high_unit[tx_size]; row += bsh) { |
195 | 0 | const int offsetr = blk_row + row; |
196 | 0 | for (int col = 0; col < tx_size_wide_unit[tx_size]; col += bsw) { |
197 | 0 | const int offsetc = blk_col + col; |
198 | 0 | write_tx_size_vartx(xd, mbmi, sub_txs, depth + 1, offsetr, offsetc, w); |
199 | 0 | } |
200 | 0 | } |
201 | 0 | } |
202 | 0 | } |
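write_tx_size_vartx() descends the transform partition tree and spends one binary symbol per visited node: 0 keeps the node's transform size, 1 splits it into sub_tx_size_map[tx_size] and recurses, stopping unconditionally at MAX_VARTX_DEPTH or TX_4X4. For intuition, a square-only sketch of the symbol count, assuming each split halves both dimensions (the real sub_tx_size_map also produces rectangles):

```c
/* Symbols needed to split a square transform of side `cur` fully down
 * to side `target` (both powers of two, cur >= target). */
static int vartx_split_symbols(int cur, int target) {
  if (cur == target) return 1;        /* one "no split" symbol per leaf */
  return 1 + 4 * vartx_split_symbols(cur / 2, target);
}
/* vartx_split_symbols(32, 8) == 1 + 4 * (1 + 4) == 21. */
```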
203 | | |
204 | | static inline void write_selected_tx_size(const MACROBLOCKD *xd, |
205 | 0 | aom_writer *w) { |
206 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
207 | 0 | const BLOCK_SIZE bsize = mbmi->bsize; |
208 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
209 | 0 | if (block_signals_txsize(bsize)) { |
210 | 0 | const TX_SIZE tx_size = mbmi->tx_size; |
211 | 0 | const int tx_size_ctx = get_tx_size_context(xd); |
212 | 0 | const int depth = tx_size_to_depth(tx_size, bsize); |
213 | 0 | const int max_depths = bsize_to_max_depth(bsize); |
214 | 0 | const int32_t tx_size_cat = bsize_to_tx_size_cat(bsize); |
215 | |
216 | 0 | assert(depth >= 0 && depth <= max_depths); |
217 | 0 | assert(!is_inter_block(mbmi)); |
218 | 0 | assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(xd, mbmi))); |
219 | |
220 | 0 | aom_write_symbol(w, depth, ec_ctx->tx_size_cdf[tx_size_cat][tx_size_ctx], |
221 | 0 | max_depths + 1); |
222 | 0 | } |
223 | 0 | } |
224 | | |
225 | | static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd, |
226 | | uint8_t segment_id, const MB_MODE_INFO *mi, |
227 | 0 | aom_writer *w) { |
228 | 0 | if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) { |
229 | 0 | return 1; |
230 | 0 | } else { |
231 | 0 | const int skip_txfm = mi->skip_txfm; |
232 | 0 | const int ctx = av1_get_skip_txfm_context(xd); |
233 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
234 | 0 | aom_write_symbol(w, skip_txfm, ec_ctx->skip_txfm_cdfs[ctx], 2); |
235 | 0 | return skip_txfm; |
236 | 0 | } |
237 | 0 | } |
238 | | |
239 | | static int write_skip_mode(const AV1_COMMON *cm, const MACROBLOCKD *xd, |
240 | | uint8_t segment_id, const MB_MODE_INFO *mi, |
241 | 0 | aom_writer *w) { |
242 | 0 | if (!cm->current_frame.skip_mode_info.skip_mode_flag) return 0; |
243 | 0 | if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) { |
244 | 0 | return 0; |
245 | 0 | } |
246 | 0 | const int skip_mode = mi->skip_mode; |
247 | 0 | if (!is_comp_ref_allowed(mi->bsize)) { |
248 | 0 | assert(!skip_mode); |
249 | 0 | return 0; |
250 | 0 | } |
251 | 0 | if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME) || |
252 | 0 | segfeature_active(&cm->seg, segment_id, SEG_LVL_GLOBALMV)) { |
253 | | // These features imply single-reference mode, while skip mode implies |
254 | | // compound reference. Hence, the two are mutually exclusive. |
255 | | // In other words, skip_mode is implicitly 0 here. |
256 | 0 | assert(!skip_mode); |
257 | 0 | return 0; |
258 | 0 | } |
259 | 0 | const int ctx = av1_get_skip_mode_context(xd); |
260 | 0 | aom_write_symbol(w, skip_mode, xd->tile_ctx->skip_mode_cdfs[ctx], 2); |
261 | 0 | return skip_mode; |
262 | 0 | } |
263 | | |
264 | | static inline void write_is_inter(const AV1_COMMON *cm, const MACROBLOCKD *xd, |
265 | | uint8_t segment_id, aom_writer *w, |
266 | 0 | const int is_inter) { |
267 | 0 | if (!segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { |
268 | 0 | if (segfeature_active(&cm->seg, segment_id, SEG_LVL_GLOBALMV)) { |
269 | 0 | assert(is_inter); |
270 | 0 | return; |
271 | 0 | } |
272 | 0 | const int ctx = av1_get_intra_inter_context(xd); |
273 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
274 | 0 | aom_write_symbol(w, is_inter, ec_ctx->intra_inter_cdf[ctx], 2); |
275 | 0 | } |
276 | 0 | } |
277 | | |
278 | | static inline void write_motion_mode(const AV1_COMMON *cm, MACROBLOCKD *xd, |
279 | 0 | const MB_MODE_INFO *mbmi, aom_writer *w) { |
280 | 0 | MOTION_MODE last_motion_mode_allowed = |
281 | 0 | cm->features.switchable_motion_mode |
282 | 0 | ? motion_mode_allowed(cm->global_motion, xd, mbmi, |
283 | 0 | cm->features.allow_warped_motion) |
284 | 0 | : SIMPLE_TRANSLATION; |
285 | 0 | assert(mbmi->motion_mode <= last_motion_mode_allowed); |
286 | 0 | switch (last_motion_mode_allowed) { |
287 | 0 | case SIMPLE_TRANSLATION: break; |
288 | 0 | case OBMC_CAUSAL: |
289 | 0 | aom_write_symbol(w, mbmi->motion_mode == OBMC_CAUSAL, |
290 | 0 | xd->tile_ctx->obmc_cdf[mbmi->bsize], 2); |
291 | 0 | break; |
292 | 0 | default: |
293 | 0 | aom_write_symbol(w, mbmi->motion_mode, |
294 | 0 | xd->tile_ctx->motion_mode_cdf[mbmi->bsize], |
295 | 0 | MOTION_MODES); |
296 | 0 | } |
297 | 0 | } |
298 | | |
299 | | static inline void write_delta_qindex(const MACROBLOCKD *xd, int delta_qindex, |
300 | 0 | aom_writer *w) { |
301 | 0 | int sign = delta_qindex < 0; |
302 | 0 | int abs = sign ? -delta_qindex : delta_qindex; |
303 | 0 | int rem_bits, thr; |
304 | 0 | int smallval = abs < DELTA_Q_SMALL ? 1 : 0; |
305 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
306 | |
307 | 0 | aom_write_symbol(w, AOMMIN(abs, DELTA_Q_SMALL), ec_ctx->delta_q_cdf, |
308 | 0 | DELTA_Q_PROBS + 1); |
309 | |
310 | 0 | if (!smallval) { |
311 | 0 | rem_bits = get_msb(abs - 1); |
312 | 0 | thr = (1 << rem_bits) + 1; |
313 | 0 | aom_write_literal(w, rem_bits - 1, 3); |
314 | 0 | aom_write_literal(w, abs - thr, rem_bits); |
315 | 0 | } |
316 | 0 | if (abs > 0) { |
317 | 0 | aom_write_bit(w, sign); |
318 | 0 | } |
319 | 0 | } |
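Magnitudes below DELTA_Q_SMALL are carried entirely by the first symbol; anything larger escapes to an exponential-Golomb-style tail in which rem_bits = msb(abs - 1) is sent as a 3-bit length field (minus one) and abs - (2^rem_bits + 1) as the remainder, followed by the sign (write_delta_lflevel() below uses the same tail). A worked sketch of the tail cost (standalone, illustrative names):

```c
#include <assert.h>

/* Index of the highest set bit for v > 0, mirroring get_msb(). */
static int msb(unsigned int v) {
  int n = -1;
  while (v) { ++n; v >>= 1; }
  return n;
}

/* Bits spent after the escape symbol for a magnitude abs_val. */
static int delta_q_tail_bits(int abs_val) {
  const int rem_bits = msb(abs_val - 1);
  const int thr = (1 << rem_bits) + 1;
  assert(abs_val >= thr);             /* remainder abs_val - thr >= 0 */
  return 3 + rem_bits + 1;            /* length field + remainder + sign */
}

int main(void) {
  /* abs = 10: rem_bits = msb(9) = 3, thr = 9, remainder = 1 -> 7 bits. */
  assert(delta_q_tail_bits(10) == 7);
  return 0;
}
```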
320 | | |
321 | | static inline void write_delta_lflevel(const AV1_COMMON *cm, |
322 | | const MACROBLOCKD *xd, int lf_id, |
323 | | int delta_lflevel, int delta_lf_multi, |
324 | 0 | aom_writer *w) { |
325 | 0 | int sign = delta_lflevel < 0; |
326 | 0 | int abs = sign ? -delta_lflevel : delta_lflevel; |
327 | 0 | int rem_bits, thr; |
328 | 0 | int smallval = abs < DELTA_LF_SMALL ? 1 : 0; |
329 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
330 | 0 | (void)cm; |
331 | |
332 | 0 | if (delta_lf_multi) { |
333 | 0 | assert(lf_id >= 0 && lf_id < (av1_num_planes(cm) > 1 ? FRAME_LF_COUNT |
334 | 0 | : FRAME_LF_COUNT - 2)); |
335 | 0 | aom_write_symbol(w, AOMMIN(abs, DELTA_LF_SMALL), |
336 | 0 | ec_ctx->delta_lf_multi_cdf[lf_id], DELTA_LF_PROBS + 1); |
337 | 0 | } else { |
338 | 0 | aom_write_symbol(w, AOMMIN(abs, DELTA_LF_SMALL), ec_ctx->delta_lf_cdf, |
339 | 0 | DELTA_LF_PROBS + 1); |
340 | 0 | } |
341 | |
342 | 0 | if (!smallval) { |
343 | 0 | rem_bits = get_msb(abs - 1); |
344 | 0 | thr = (1 << rem_bits) + 1; |
345 | 0 | aom_write_literal(w, rem_bits - 1, 3); |
346 | 0 | aom_write_literal(w, abs - thr, rem_bits); |
347 | 0 | } |
348 | 0 | if (abs > 0) { |
349 | 0 | aom_write_bit(w, sign); |
350 | 0 | } |
351 | 0 | } |
352 | | |
353 | | static inline void pack_map_tokens(aom_writer *w, const TokenExtra **tp, int n, |
354 | 0 | int num, MapCdf map_pb_cdf) { |
355 | 0 | const TokenExtra *p = *tp; |
356 | 0 | const int palette_size_idx = n - PALETTE_MIN_SIZE; |
357 | 0 | write_uniform(w, n, p->token); // The first color index. |
358 | 0 | ++p; |
359 | 0 | --num; |
360 | 0 | for (int i = 0; i < num; ++i) { |
361 | 0 | assert((p->color_ctx >= 0) && |
362 | 0 | (p->color_ctx < PALETTE_COLOR_INDEX_CONTEXTS)); |
363 | 0 | aom_cdf_prob *color_map_cdf = map_pb_cdf[palette_size_idx][p->color_ctx]; |
364 | 0 | aom_write_symbol(w, p->token, color_map_cdf, n); |
365 | 0 | ++p; |
366 | 0 | } |
367 | 0 | *tp = p; |
368 | 0 | } |
369 | | |
370 | | static inline void pack_txb_tokens( |
371 | | aom_writer *w, AV1_COMMON *cm, MACROBLOCK *const x, const TokenExtra **tp, |
372 | | const TokenExtra *const tok_end, MACROBLOCKD *xd, MB_MODE_INFO *mbmi, |
373 | | int plane, BLOCK_SIZE plane_bsize, aom_bit_depth_t bit_depth, int block, |
374 | 0 | int blk_row, int blk_col, TX_SIZE tx_size, TOKEN_STATS *token_stats) { |
375 | 0 | const int max_blocks_high = max_block_high(xd, plane_bsize, plane); |
376 | 0 | const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane); |
377 | |
378 | 0 | if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return; |
379 | | |
380 | 0 | const struct macroblockd_plane *const pd = &xd->plane[plane]; |
381 | 0 | const TX_SIZE plane_tx_size = |
382 | 0 | plane ? av1_get_max_uv_txsize(mbmi->bsize, pd->subsampling_x, |
383 | 0 | pd->subsampling_y) |
384 | 0 | : mbmi->inter_tx_size[av1_get_txb_size_index(plane_bsize, blk_row, |
385 | 0 | blk_col)]; |
386 | |
387 | 0 | if (tx_size == plane_tx_size || plane) { |
388 | 0 | av1_write_coeffs_txb(cm, x, w, blk_row, blk_col, plane, block, tx_size); |
389 | | #if CONFIG_RD_DEBUG |
390 | | TOKEN_STATS tmp_token_stats; |
391 | | init_token_stats(&tmp_token_stats); |
392 | | token_stats->cost += tmp_token_stats.cost; |
393 | | #endif |
394 | 0 | } else { |
395 | 0 | const TX_SIZE sub_txs = sub_tx_size_map[tx_size]; |
396 | 0 | const int bsw = tx_size_wide_unit[sub_txs]; |
397 | 0 | const int bsh = tx_size_high_unit[sub_txs]; |
398 | 0 | const int step = bsh * bsw; |
399 | 0 | const int row_end = |
400 | 0 | AOMMIN(tx_size_high_unit[tx_size], max_blocks_high - blk_row); |
401 | 0 | const int col_end = |
402 | 0 | AOMMIN(tx_size_wide_unit[tx_size], max_blocks_wide - blk_col); |
403 | |
404 | 0 | assert(bsw > 0 && bsh > 0); |
405 | |
406 | 0 | for (int r = 0; r < row_end; r += bsh) { |
407 | 0 | const int offsetr = blk_row + r; |
408 | 0 | for (int c = 0; c < col_end; c += bsw) { |
409 | 0 | const int offsetc = blk_col + c; |
410 | 0 | pack_txb_tokens(w, cm, x, tp, tok_end, xd, mbmi, plane, plane_bsize, |
411 | 0 | bit_depth, block, offsetr, offsetc, sub_txs, |
412 | 0 | token_stats); |
413 | 0 | block += step; |
414 | 0 | } |
415 | 0 | } |
416 | 0 | } |
417 | 0 | } |
418 | | |
419 | | static inline void set_spatial_segment_id( |
420 | | const CommonModeInfoParams *const mi_params, uint8_t *segment_ids, |
421 | 0 | BLOCK_SIZE bsize, int mi_row, int mi_col, uint8_t segment_id) { |
422 | 0 | const int mi_offset = mi_row * mi_params->mi_cols + mi_col; |
423 | 0 | const int bw = mi_size_wide[bsize]; |
424 | 0 | const int bh = mi_size_high[bsize]; |
425 | 0 | const int xmis = AOMMIN(mi_params->mi_cols - mi_col, bw); |
426 | 0 | const int ymis = AOMMIN(mi_params->mi_rows - mi_row, bh); |
427 | |
428 | 0 | const int mi_stride = mi_params->mi_cols; |
429 | |
430 | 0 | set_segment_id(segment_ids, mi_offset, xmis, ymis, mi_stride, segment_id); |
431 | 0 | } |
432 | | |
433 | 0 | int av1_neg_interleave(int x, int ref, int max) { |
434 | 0 | assert(x < max); |
435 | 0 | const int diff = x - ref; |
436 | 0 | if (!ref) return x; |
437 | 0 | if (ref >= (max - 1)) return -x + max - 1; |
438 | 0 | if (2 * ref < max) { |
439 | 0 | if (abs(diff) <= ref) { |
440 | 0 | if (diff > 0) |
441 | 0 | return (diff << 1) - 1; |
442 | 0 | else |
443 | 0 | return ((-diff) << 1); |
444 | 0 | } |
445 | 0 | return x; |
446 | 0 | } else { |
447 | 0 | if (abs(diff) < (max - ref)) { |
448 | 0 | if (diff > 0) |
449 | 0 | return (diff << 1) - 1; |
450 | 0 | else |
451 | 0 | return ((-diff) << 1); |
452 | 0 | } |
453 | 0 | return (max - x) - 1; |
454 | 0 | } |
455 | 0 | } |
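av1_neg_interleave() is a bijection on [0, max) for any fixed predictor: the predicted value maps to code 0 and its neighbors alternate outward until one side of the range is exhausted, so segment IDs near the spatial prediction get the cheapest symbols. A brute-force check of the permutation and anchoring properties (test harness only, not libaom code):

```c
#include <assert.h>

int av1_neg_interleave(int x, int ref, int max);  /* as defined above */

int main(void) {
  for (int max = 1; max <= 8; ++max) {
    for (int ref = 0; ref < max; ++ref) {
      int seen[8] = { 0 };
      for (int x = 0; x < max; ++x) {
        const int code = av1_neg_interleave(x, ref, max);
        assert(code >= 0 && code < max && !seen[code]);
        seen[code] = 1;               /* a permutation of [0, max) */
      }
      assert(av1_neg_interleave(ref, ref, max) == 0);  /* predictor -> 0 */
    }
  }
  return 0;
}
```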
456 | | |
457 | | static inline void write_segment_id(AV1_COMP *cpi, MACROBLOCKD *const xd, |
458 | | const MB_MODE_INFO *const mbmi, |
459 | | aom_writer *w, |
460 | | const struct segmentation *seg, |
461 | | struct segmentation_probs *segp, |
462 | 0 | int skip_txfm) { |
463 | 0 | if (!seg->enabled || !seg->update_map) return; |
464 | | |
465 | 0 | AV1_COMMON *const cm = &cpi->common; |
466 | 0 | int cdf_num; |
467 | 0 | const uint8_t pred = av1_get_spatial_seg_pred( |
468 | 0 | cm, xd, &cdf_num, cpi->cyclic_refresh->skip_over4x4); |
469 | 0 | const int mi_row = xd->mi_row; |
470 | 0 | const int mi_col = xd->mi_col; |
471 | |
472 | 0 | if (skip_txfm) { |
473 | | // Still need to transmit tx size for intra blocks even if skip_txfm is |
474 | | // true. Changing segment_id may make the tx size invalid, e.g. |
475 | | // changing from lossless to lossy. |
476 | 0 | assert(is_inter_block(mbmi) || !cpi->enc_seg.has_lossless_segment); |
477 | |
478 | 0 | set_spatial_segment_id(&cm->mi_params, cm->cur_frame->seg_map, mbmi->bsize, |
479 | 0 | mi_row, mi_col, pred); |
480 | 0 | set_spatial_segment_id(&cm->mi_params, cpi->enc_seg.map, mbmi->bsize, |
481 | 0 | mi_row, mi_col, pred); |
482 | | /* mbmi is read only but we need to update segment_id */ |
483 | 0 | ((MB_MODE_INFO *)mbmi)->segment_id = pred; |
484 | 0 | return; |
485 | 0 | } |
486 | | |
487 | 0 | const int coded_id = |
488 | 0 | av1_neg_interleave(mbmi->segment_id, pred, seg->last_active_segid + 1); |
489 | 0 | aom_cdf_prob *pred_cdf = segp->spatial_pred_seg_cdf[cdf_num]; |
490 | 0 | aom_write_symbol(w, coded_id, pred_cdf, MAX_SEGMENTS); |
491 | 0 | set_spatial_segment_id(&cm->mi_params, cm->cur_frame->seg_map, mbmi->bsize, |
492 | 0 | mi_row, mi_col, mbmi->segment_id); |
493 | 0 | } |
494 | | |
495 | | #define WRITE_REF_BIT(bname, pname) \ |
496 | 0 | aom_write_symbol(w, bname, av1_get_pred_cdf_##pname(xd), 2) |
497 | | |
498 | | // This function encodes the reference frame |
499 | | static inline void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd, |
500 | 0 | aom_writer *w) { |
501 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
502 | 0 | const int is_compound = has_second_ref(mbmi); |
503 | 0 | const uint8_t segment_id = mbmi->segment_id; |
504 | | |
505 | | // If segment level coding of this signal is disabled... |
506 | | // or the segment allows multiple reference frame options |
507 | 0 | if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) { |
508 | 0 | assert(!is_compound); |
509 | 0 | assert(mbmi->ref_frame[0] == |
510 | 0 | get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME)); |
511 | 0 | } else if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP) || |
512 | 0 | segfeature_active(&cm->seg, segment_id, SEG_LVL_GLOBALMV)) { |
513 | 0 | assert(!is_compound); |
514 | 0 | assert(mbmi->ref_frame[0] == LAST_FRAME); |
515 | 0 | } else { |
516 | | // does the feature use compound prediction or not |
517 | | // (if not specified at the frame/segment level) |
518 | 0 | if (cm->current_frame.reference_mode == REFERENCE_MODE_SELECT) { |
519 | 0 | if (is_comp_ref_allowed(mbmi->bsize)) |
520 | 0 | aom_write_symbol(w, is_compound, av1_get_reference_mode_cdf(xd), 2); |
521 | 0 | } else { |
522 | 0 | assert((!is_compound) == |
523 | 0 | (cm->current_frame.reference_mode == SINGLE_REFERENCE)); |
524 | 0 | } |
525 | |
526 | 0 | if (is_compound) { |
527 | 0 | const COMP_REFERENCE_TYPE comp_ref_type = has_uni_comp_refs(mbmi) |
528 | 0 | ? UNIDIR_COMP_REFERENCE |
529 | 0 | : BIDIR_COMP_REFERENCE; |
530 | 0 | aom_write_symbol(w, comp_ref_type, av1_get_comp_reference_type_cdf(xd), |
531 | 0 | 2); |
532 | |
533 | 0 | if (comp_ref_type == UNIDIR_COMP_REFERENCE) { |
534 | 0 | const int bit = mbmi->ref_frame[0] == BWDREF_FRAME; |
535 | 0 | WRITE_REF_BIT(bit, uni_comp_ref_p); |
536 | |
537 | 0 | if (!bit) { |
538 | 0 | assert(mbmi->ref_frame[0] == LAST_FRAME); |
539 | 0 | const int bit1 = mbmi->ref_frame[1] == LAST3_FRAME || |
540 | 0 | mbmi->ref_frame[1] == GOLDEN_FRAME; |
541 | 0 | WRITE_REF_BIT(bit1, uni_comp_ref_p1); |
542 | 0 | if (bit1) { |
543 | 0 | const int bit2 = mbmi->ref_frame[1] == GOLDEN_FRAME; |
544 | 0 | WRITE_REF_BIT(bit2, uni_comp_ref_p2); |
545 | 0 | } |
546 | 0 | } else { |
547 | 0 | assert(mbmi->ref_frame[1] == ALTREF_FRAME); |
548 | 0 | } |
549 | |
550 | 0 | return; |
551 | 0 | } |
552 | | |
553 | 0 | assert(comp_ref_type == BIDIR_COMP_REFERENCE); |
554 | |
555 | 0 | const int bit = (mbmi->ref_frame[0] == GOLDEN_FRAME || |
556 | 0 | mbmi->ref_frame[0] == LAST3_FRAME); |
557 | 0 | WRITE_REF_BIT(bit, comp_ref_p); |
558 | |
559 | 0 | if (!bit) { |
560 | 0 | const int bit1 = mbmi->ref_frame[0] == LAST2_FRAME; |
561 | 0 | WRITE_REF_BIT(bit1, comp_ref_p1); |
562 | 0 | } else { |
563 | 0 | const int bit2 = mbmi->ref_frame[0] == GOLDEN_FRAME; |
564 | 0 | WRITE_REF_BIT(bit2, comp_ref_p2); |
565 | 0 | } |
566 | |
567 | 0 | const int bit_bwd = mbmi->ref_frame[1] == ALTREF_FRAME; |
568 | 0 | WRITE_REF_BIT(bit_bwd, comp_bwdref_p); |
569 | |
570 | 0 | if (!bit_bwd) { |
571 | 0 | WRITE_REF_BIT(mbmi->ref_frame[1] == ALTREF2_FRAME, comp_bwdref_p1); |
572 | 0 | } |
573 | |
574 | 0 | } else { |
575 | 0 | const int bit0 = (mbmi->ref_frame[0] <= ALTREF_FRAME && |
576 | 0 | mbmi->ref_frame[0] >= BWDREF_FRAME); |
577 | 0 | WRITE_REF_BIT(bit0, single_ref_p1); |
578 | |
579 | 0 | if (bit0) { |
580 | 0 | const int bit1 = mbmi->ref_frame[0] == ALTREF_FRAME; |
581 | 0 | WRITE_REF_BIT(bit1, single_ref_p2); |
582 | |
583 | 0 | if (!bit1) { |
584 | 0 | WRITE_REF_BIT(mbmi->ref_frame[0] == ALTREF2_FRAME, single_ref_p6); |
585 | 0 | } |
586 | 0 | } else { |
587 | 0 | const int bit2 = (mbmi->ref_frame[0] == LAST3_FRAME || |
588 | 0 | mbmi->ref_frame[0] == GOLDEN_FRAME); |
589 | 0 | WRITE_REF_BIT(bit2, single_ref_p3); |
590 | |
591 | 0 | if (!bit2) { |
592 | 0 | const int bit3 = mbmi->ref_frame[0] != LAST_FRAME; |
593 | 0 | WRITE_REF_BIT(bit3, single_ref_p4); |
594 | 0 | } else { |
595 | 0 | const int bit4 = mbmi->ref_frame[0] != LAST3_FRAME; |
596 | 0 | WRITE_REF_BIT(bit4, single_ref_p5); |
597 | 0 | } |
598 | 0 | } |
599 | 0 | } |
600 | 0 | } |
601 | 0 | } |
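For single prediction, write_ref_frames() walks a fixed decision tree over the seven references: single_ref_p1 separates the past side (LAST..GOLDEN) from the future side (BWDREF..ALTREF), and one or two further bits select the leaf, so every reference costs two or three binary symbols. A sketch of the per-reference symbol count (enum order mirrors LAST_FRAME..ALTREF_FRAME; names are illustrative):

```c
typedef enum { R_LAST, R_LAST2, R_LAST3, R_GOLDEN,
               R_BWDREF, R_ALTREF2, R_ALTREF } SingleRef;

/* Symbols written by the single-reference branch above. */
static int single_ref_symbols(SingleRef r) {
  if (r >= R_BWDREF)                  /* p1 == 1: future side */
    return (r == R_ALTREF) ? 2 : 3;   /* p2, then p6 for BWDREF/ALTREF2 */
  return 3;                           /* p1, p3, then p4 or p5 */
}
```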
602 | | |
603 | | static inline void write_filter_intra_mode_info(const AV1_COMMON *cm, |
604 | | const MACROBLOCKD *xd, |
605 | | const MB_MODE_INFO *const mbmi, |
606 | 0 | aom_writer *w) { |
607 | 0 | if (av1_filter_intra_allowed(cm, mbmi)) { |
608 | 0 | aom_write_symbol(w, mbmi->filter_intra_mode_info.use_filter_intra, |
609 | 0 | xd->tile_ctx->filter_intra_cdfs[mbmi->bsize], 2); |
610 | 0 | if (mbmi->filter_intra_mode_info.use_filter_intra) { |
611 | 0 | const FILTER_INTRA_MODE mode = |
612 | 0 | mbmi->filter_intra_mode_info.filter_intra_mode; |
613 | 0 | aom_write_symbol(w, mode, xd->tile_ctx->filter_intra_mode_cdf, |
614 | 0 | FILTER_INTRA_MODES); |
615 | 0 | } |
616 | 0 | } |
617 | 0 | } |
618 | | |
619 | | static inline void write_angle_delta(aom_writer *w, int angle_delta, |
620 | 0 | aom_cdf_prob *cdf) { |
621 | 0 | aom_write_symbol(w, angle_delta + MAX_ANGLE_DELTA, cdf, |
622 | 0 | 2 * MAX_ANGLE_DELTA + 1); |
623 | 0 | } |
624 | | |
625 | | static inline void write_mb_interp_filter(AV1_COMMON *const cm, ThreadData *td, |
626 | 0 | aom_writer *w) { |
627 | 0 | const MACROBLOCKD *xd = &td->mb.e_mbd; |
628 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
629 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
630 | |
631 | 0 | if (!av1_is_interp_needed(xd)) { |
632 | 0 | int_interpfilters filters = av1_broadcast_interp_filter( |
633 | 0 | av1_unswitchable_filter(cm->features.interp_filter)); |
634 | 0 | assert(mbmi->interp_filters.as_int == filters.as_int); |
635 | 0 | (void)filters; |
636 | 0 | return; |
637 | 0 | } |
638 | 0 | if (cm->features.interp_filter == SWITCHABLE) { |
639 | 0 | int dir; |
640 | 0 | for (dir = 0; dir < 2; ++dir) { |
641 | 0 | const int ctx = av1_get_pred_context_switchable_interp(xd, dir); |
642 | 0 | InterpFilter filter = |
643 | 0 | av1_extract_interp_filter(mbmi->interp_filters, dir); |
644 | 0 | aom_write_symbol(w, filter, ec_ctx->switchable_interp_cdf[ctx], |
645 | 0 | SWITCHABLE_FILTERS); |
646 | 0 | ++td->interp_filter_selected[filter]; |
647 | 0 | if (cm->seq_params->enable_dual_filter == 0) return; |
648 | 0 | } |
649 | 0 | } |
650 | 0 | } |
651 | | |
652 | | // Transmit color values with delta encoding. Write the first value as a |
653 | | // literal, and the deltas between each value and the previous one. "min_val" is |
654 | | // the smallest possible value of the deltas. |
655 | | static inline void delta_encode_palette_colors(const int *colors, int num, |
656 | | int bit_depth, int min_val, |
657 | 0 | aom_writer *w) { |
658 | 0 | if (num <= 0) return; |
659 | 0 | assert(colors[0] < (1 << bit_depth)); |
660 | 0 | aom_write_literal(w, colors[0], bit_depth); |
661 | 0 | if (num == 1) return; |
662 | 0 | int max_delta = 0; |
663 | 0 | int deltas[PALETTE_MAX_SIZE]; |
664 | 0 | memset(deltas, 0, sizeof(deltas)); |
665 | 0 | for (int i = 1; i < num; ++i) { |
666 | 0 | assert(colors[i] < (1 << bit_depth)); |
667 | 0 | const int delta = colors[i] - colors[i - 1]; |
668 | 0 | deltas[i - 1] = delta; |
669 | 0 | assert(delta >= min_val); |
670 | 0 | if (delta > max_delta) max_delta = delta; |
671 | 0 | } |
672 | 0 | const int min_bits = bit_depth - 3; |
673 | 0 | int bits = AOMMAX(av1_ceil_log2(max_delta + 1 - min_val), min_bits); |
674 | 0 | assert(bits <= bit_depth); |
675 | 0 | int range = (1 << bit_depth) - colors[0] - min_val; |
676 | 0 | aom_write_literal(w, bits - min_bits, 2); |
677 | 0 | for (int i = 0; i < num - 1; ++i) { |
678 | 0 | aom_write_literal(w, deltas[i] - min_val, bits); |
679 | 0 | range -= deltas[i]; |
680 | 0 | bits = AOMMIN(bits, av1_ceil_log2(range)); |
681 | 0 | } |
682 | 0 | } |
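The delta width starts at max(ceil_log2(max_delta + 1 - min_val), bit_depth - 3), is signaled relative to that floor in a 2-bit field, and can only shrink as the remaining value range narrows. A bit-cost sketch with a worked case (illustrative helpers, not libaom APIs):

```c
static int ceil_log2_i(int n) {       /* smallest l with (1 << l) >= n */
  int l = 0;
  while ((1 << l) < n) ++l;
  return l;
}

/* Total bits delta_encode_palette_colors() spends on an ascending list. */
static int palette_color_bits(const int *colors, int num, int bit_depth,
                              int min_val) {
  if (num <= 0) return 0;
  int total = bit_depth;              /* first color as a raw literal */
  if (num == 1) return total;
  int max_delta = 0;
  for (int i = 1; i < num; ++i) {
    const int d = colors[i] - colors[i - 1];
    if (d > max_delta) max_delta = d;
  }
  const int min_bits = bit_depth - 3;
  int bits = ceil_log2_i(max_delta + 1 - min_val);
  if (bits < min_bits) bits = min_bits;
  int range = (1 << bit_depth) - colors[0] - min_val;
  total += 2;                         /* the (bits - min_bits) field */
  for (int i = 1; i < num; ++i) {
    total += bits;
    range -= colors[i] - colors[i - 1];
    if (ceil_log2_i(range) < bits) bits = ceil_log2_i(range);
  }
  return total;
}
/* {10, 20, 50} at 8-bit depth with min_val 1: 8 + 2 + 5 + 5 == 20 bits. */
```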
683 | | |
684 | | // Transmit luma palette color values. First signal if each color in the color |
685 | | // cache is used. Those colors that are not in the cache are transmitted with |
686 | | // delta encoding. |
687 | | static inline void write_palette_colors_y(const MACROBLOCKD *const xd, |
688 | | const PALETTE_MODE_INFO *const pmi, |
689 | 0 | int bit_depth, aom_writer *w) { |
690 | 0 | const int n = pmi->palette_size[0]; |
691 | 0 | uint16_t color_cache[2 * PALETTE_MAX_SIZE]; |
692 | 0 | const int n_cache = av1_get_palette_cache(xd, 0, color_cache); |
693 | 0 | int out_cache_colors[PALETTE_MAX_SIZE]; |
694 | 0 | uint8_t cache_color_found[2 * PALETTE_MAX_SIZE]; |
695 | 0 | const int n_out_cache = |
696 | 0 | av1_index_color_cache(color_cache, n_cache, pmi->palette_colors, n, |
697 | 0 | cache_color_found, out_cache_colors); |
698 | 0 | int n_in_cache = 0; |
699 | 0 | for (int i = 0; i < n_cache && n_in_cache < n; ++i) { |
700 | 0 | const int found = cache_color_found[i]; |
701 | 0 | aom_write_bit(w, found); |
702 | 0 | n_in_cache += found; |
703 | 0 | } |
704 | 0 | assert(n_in_cache + n_out_cache == n); |
705 | 0 | delta_encode_palette_colors(out_cache_colors, n_out_cache, bit_depth, 1, w); |
706 | 0 | } |
707 | | |
708 | | // Write chroma palette color values. U channel is handled similarly to the luma |
709 | | // channel. For the V channel, either use delta encoding or transmit raw values |
710 | | // directly, whichever costs less. |
711 | | static inline void write_palette_colors_uv(const MACROBLOCKD *const xd, |
712 | | const PALETTE_MODE_INFO *const pmi, |
713 | 0 | int bit_depth, aom_writer *w) { |
714 | 0 | const int n = pmi->palette_size[1]; |
715 | 0 | const uint16_t *colors_u = pmi->palette_colors + PALETTE_MAX_SIZE; |
716 | 0 | const uint16_t *colors_v = pmi->palette_colors + 2 * PALETTE_MAX_SIZE; |
717 | | // U channel colors. |
718 | 0 | uint16_t color_cache[2 * PALETTE_MAX_SIZE]; |
719 | 0 | const int n_cache = av1_get_palette_cache(xd, 1, color_cache); |
720 | 0 | int out_cache_colors[PALETTE_MAX_SIZE]; |
721 | 0 | uint8_t cache_color_found[2 * PALETTE_MAX_SIZE]; |
722 | 0 | const int n_out_cache = av1_index_color_cache( |
723 | 0 | color_cache, n_cache, colors_u, n, cache_color_found, out_cache_colors); |
724 | 0 | int n_in_cache = 0; |
725 | 0 | for (int i = 0; i < n_cache && n_in_cache < n; ++i) { |
726 | 0 | const int found = cache_color_found[i]; |
727 | 0 | aom_write_bit(w, found); |
728 | 0 | n_in_cache += found; |
729 | 0 | } |
730 | 0 | delta_encode_palette_colors(out_cache_colors, n_out_cache, bit_depth, 0, w); |
731 | | |
732 | | // V channel colors. Don't use color cache as the colors are not sorted. |
733 | 0 | const int max_val = 1 << bit_depth; |
734 | 0 | int zero_count = 0, min_bits_v = 0; |
735 | 0 | int bits_v = |
736 | 0 | av1_get_palette_delta_bits_v(pmi, bit_depth, &zero_count, &min_bits_v); |
737 | 0 | const int rate_using_delta = |
738 | 0 | 2 + bit_depth + (bits_v + 1) * (n - 1) - zero_count; |
739 | 0 | const int rate_using_raw = bit_depth * n; |
740 | 0 | if (rate_using_delta < rate_using_raw) { // delta encoding |
741 | 0 | assert(colors_v[0] < (1 << bit_depth)); |
742 | 0 | aom_write_bit(w, 1); |
743 | 0 | aom_write_literal(w, bits_v - min_bits_v, 2); |
744 | 0 | aom_write_literal(w, colors_v[0], bit_depth); |
745 | 0 | for (int i = 1; i < n; ++i) { |
746 | 0 | assert(colors_v[i] < (1 << bit_depth)); |
747 | 0 | if (colors_v[i] == colors_v[i - 1]) { // No need to signal sign bit. |
748 | 0 | aom_write_literal(w, 0, bits_v); |
749 | 0 | continue; |
750 | 0 | } |
751 | 0 | const int delta = abs((int)colors_v[i] - colors_v[i - 1]); |
752 | 0 | const int sign_bit = colors_v[i] < colors_v[i - 1]; |
753 | 0 | if (delta <= max_val - delta) { |
754 | 0 | aom_write_literal(w, delta, bits_v); |
755 | 0 | aom_write_bit(w, sign_bit); |
756 | 0 | } else { |
757 | 0 | aom_write_literal(w, max_val - delta, bits_v); |
758 | 0 | aom_write_bit(w, !sign_bit); |
759 | 0 | } |
760 | 0 | } |
761 | 0 | } else { // Transmit raw values. |
762 | 0 | aom_write_bit(w, 0); |
763 | 0 | for (int i = 0; i < n; ++i) { |
764 | 0 | assert(colors_v[i] < (1 << bit_depth)); |
765 | 0 | aom_write_literal(w, colors_v[i], bit_depth); |
766 | 0 | } |
767 | 0 | } |
768 | 0 | } |
769 | | |
770 | | static inline void write_palette_mode_info(const AV1_COMMON *cm, |
771 | | const MACROBLOCKD *xd, |
772 | | const MB_MODE_INFO *const mbmi, |
773 | 0 | aom_writer *w) { |
774 | 0 | const int num_planes = av1_num_planes(cm); |
775 | 0 | const BLOCK_SIZE bsize = mbmi->bsize; |
776 | 0 | assert(av1_allow_palette(cm->features.allow_screen_content_tools, bsize)); |
777 | 0 | const PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info; |
778 | 0 | const int bsize_ctx = av1_get_palette_bsize_ctx(bsize); |
779 | |
780 | 0 | if (mbmi->mode == DC_PRED) { |
781 | 0 | const int n = pmi->palette_size[0]; |
782 | 0 | const int palette_y_mode_ctx = av1_get_palette_mode_ctx(xd); |
783 | 0 | aom_write_symbol( |
784 | 0 | w, n > 0, |
785 | 0 | xd->tile_ctx->palette_y_mode_cdf[bsize_ctx][palette_y_mode_ctx], 2); |
786 | 0 | if (n > 0) { |
787 | 0 | aom_write_symbol(w, n - PALETTE_MIN_SIZE, |
788 | 0 | xd->tile_ctx->palette_y_size_cdf[bsize_ctx], |
789 | 0 | PALETTE_SIZES); |
790 | 0 | write_palette_colors_y(xd, pmi, cm->seq_params->bit_depth, w); |
791 | 0 | } |
792 | 0 | } |
793 | |
794 | 0 | const int uv_dc_pred = |
795 | 0 | num_planes > 1 && mbmi->uv_mode == UV_DC_PRED && xd->is_chroma_ref; |
796 | 0 | if (uv_dc_pred) { |
797 | 0 | const int n = pmi->palette_size[1]; |
798 | 0 | const int palette_uv_mode_ctx = (pmi->palette_size[0] > 0); |
799 | 0 | aom_write_symbol(w, n > 0, |
800 | 0 | xd->tile_ctx->palette_uv_mode_cdf[palette_uv_mode_ctx], 2); |
801 | 0 | if (n > 0) { |
802 | 0 | aom_write_symbol(w, n - PALETTE_MIN_SIZE, |
803 | 0 | xd->tile_ctx->palette_uv_size_cdf[bsize_ctx], |
804 | 0 | PALETTE_SIZES); |
805 | 0 | write_palette_colors_uv(xd, pmi, cm->seq_params->bit_depth, w); |
806 | 0 | } |
807 | 0 | } |
808 | 0 | } |
809 | | |
810 | | void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd, |
811 | 0 | TX_TYPE tx_type, TX_SIZE tx_size, aom_writer *w) { |
812 | 0 | MB_MODE_INFO *mbmi = xd->mi[0]; |
813 | 0 | const FeatureFlags *const features = &cm->features; |
814 | 0 | const int is_inter = is_inter_block(mbmi); |
815 | 0 | if (get_ext_tx_types(tx_size, is_inter, features->reduced_tx_set_used) > 1 && |
816 | 0 | ((!cm->seg.enabled && cm->quant_params.base_qindex > 0) || |
817 | 0 | (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) && |
818 | 0 | !mbmi->skip_txfm && |
819 | 0 | !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { |
820 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
821 | 0 | const TX_SIZE square_tx_size = txsize_sqr_map[tx_size]; |
822 | 0 | const TxSetType tx_set_type = av1_get_ext_tx_set_type( |
823 | 0 | tx_size, is_inter, features->reduced_tx_set_used); |
824 | 0 | const int eset = |
825 | 0 | get_ext_tx_set(tx_size, is_inter, features->reduced_tx_set_used); |
826 | | // eset == 0 should correspond to a set with only DCT_DCT and there |
827 | | // is no need to send the tx_type |
828 | 0 | assert(eset > 0); |
829 | 0 | assert(av1_ext_tx_used[tx_set_type][tx_type]); |
830 | 0 | if (is_inter) { |
831 | 0 | aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][tx_type], |
832 | 0 | ec_ctx->inter_ext_tx_cdf[eset][square_tx_size], |
833 | 0 | av1_num_ext_tx_set[tx_set_type]); |
834 | 0 | } else { |
835 | 0 | PREDICTION_MODE intra_dir; |
836 | 0 | if (mbmi->filter_intra_mode_info.use_filter_intra) |
837 | 0 | intra_dir = |
838 | 0 | fimode_to_intradir[mbmi->filter_intra_mode_info.filter_intra_mode]; |
839 | 0 | else |
840 | 0 | intra_dir = mbmi->mode; |
841 | 0 | aom_write_symbol( |
842 | 0 | w, av1_ext_tx_ind[tx_set_type][tx_type], |
843 | 0 | ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][intra_dir], |
844 | 0 | av1_num_ext_tx_set[tx_set_type]); |
845 | 0 | } |
846 | 0 | } |
847 | 0 | } |
848 | | |
849 | | static inline void write_intra_y_mode_nonkf(FRAME_CONTEXT *frame_ctx, |
850 | | BLOCK_SIZE bsize, |
851 | | PREDICTION_MODE mode, |
852 | 0 | aom_writer *w) { |
853 | 0 | aom_write_symbol(w, mode, frame_ctx->y_mode_cdf[size_group_lookup[bsize]], |
854 | 0 | INTRA_MODES); |
855 | 0 | } |
856 | | |
857 | | static inline void write_intra_uv_mode(FRAME_CONTEXT *frame_ctx, |
858 | | UV_PREDICTION_MODE uv_mode, |
859 | | PREDICTION_MODE y_mode, |
860 | | CFL_ALLOWED_TYPE cfl_allowed, |
861 | 0 | aom_writer *w) { |
862 | 0 | aom_write_symbol(w, uv_mode, frame_ctx->uv_mode_cdf[cfl_allowed][y_mode], |
863 | 0 | UV_INTRA_MODES - !cfl_allowed); |
864 | 0 | } |
865 | | |
866 | | static inline void write_cfl_alphas(FRAME_CONTEXT *const ec_ctx, uint8_t idx, |
867 | 0 | int8_t joint_sign, aom_writer *w) { |
868 | 0 | aom_write_symbol(w, joint_sign, ec_ctx->cfl_sign_cdf, CFL_JOINT_SIGNS); |
869 | | // Magnitudes are only signaled for nonzero codes. |
870 | 0 | if (CFL_SIGN_U(joint_sign) != CFL_SIGN_ZERO) { |
871 | 0 | aom_cdf_prob *cdf_u = ec_ctx->cfl_alpha_cdf[CFL_CONTEXT_U(joint_sign)]; |
872 | 0 | aom_write_symbol(w, CFL_IDX_U(idx), cdf_u, CFL_ALPHABET_SIZE); |
873 | 0 | } |
874 | 0 | if (CFL_SIGN_V(joint_sign) != CFL_SIGN_ZERO) { |
875 | 0 | aom_cdf_prob *cdf_v = ec_ctx->cfl_alpha_cdf[CFL_CONTEXT_V(joint_sign)]; |
876 | 0 | aom_write_symbol(w, CFL_IDX_V(idx), cdf_v, CFL_ALPHABET_SIZE); |
877 | 0 | } |
878 | 0 | } |
879 | | |
880 | | static inline void write_cdef(AV1_COMMON *cm, MACROBLOCKD *const xd, |
881 | 0 | aom_writer *w, int skip) { |
882 | 0 | if (cm->features.coded_lossless || cm->features.allow_intrabc) return; |
883 | | |
884 | | // At the start of a superblock, mark that we haven't yet written CDEF |
885 | | // strengths for any of the CDEF units contained in this superblock. |
886 | 0 | const int sb_mask = (cm->seq_params->mib_size - 1); |
887 | 0 | const int mi_row_in_sb = (xd->mi_row & sb_mask); |
888 | 0 | const int mi_col_in_sb = (xd->mi_col & sb_mask); |
889 | 0 | if (mi_row_in_sb == 0 && mi_col_in_sb == 0) { |
890 | 0 | xd->cdef_transmitted[0] = xd->cdef_transmitted[1] = |
891 | 0 | xd->cdef_transmitted[2] = xd->cdef_transmitted[3] = false; |
892 | 0 | } |
893 | | |
894 | | // CDEF unit size is 64x64 irrespective of the superblock size. |
895 | 0 | const int cdef_size = 1 << (6 - MI_SIZE_LOG2); |
896 | | |
897 | | // Find index of this CDEF unit in this superblock. |
898 | 0 | const int index_mask = cdef_size; |
899 | 0 | const int cdef_unit_row_in_sb = ((xd->mi_row & index_mask) != 0); |
900 | 0 | const int cdef_unit_col_in_sb = ((xd->mi_col & index_mask) != 0); |
901 | 0 | const int index = (cm->seq_params->sb_size == BLOCK_128X128) |
902 | 0 | ? cdef_unit_col_in_sb + 2 * cdef_unit_row_in_sb |
903 | 0 | : 0; |
904 | | |
905 | | // Write CDEF strength to the first non-skip coding block in this CDEF unit. |
906 | 0 | if (!xd->cdef_transmitted[index] && !skip) { |
907 | | // CDEF strength for this CDEF unit needs to be stored in the MB_MODE_INFO |
908 | | // of the 1st block in this CDEF unit. |
909 | 0 | const int first_block_mask = ~(cdef_size - 1); |
910 | 0 | const CommonModeInfoParams *const mi_params = &cm->mi_params; |
911 | 0 | const int grid_idx = |
912 | 0 | get_mi_grid_idx(mi_params, xd->mi_row & first_block_mask, |
913 | 0 | xd->mi_col & first_block_mask); |
914 | 0 | const MB_MODE_INFO *const mbmi = mi_params->mi_grid_base[grid_idx]; |
915 | 0 | aom_write_literal(w, mbmi->cdef_strength, cm->cdef_info.cdef_bits); |
916 | 0 | xd->cdef_transmitted[index] = true; |
917 | 0 | } |
918 | 0 | } |
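CDEF strength is transmitted once per 64x64 CDEF unit, by the first non-skip block coded inside it, so a 128x128 superblock carries up to four strengths and a 64x64 superblock exactly one. A sketch of the unit-index arithmetic (assumes MI_SIZE_LOG2 == 2, i.e. 4x4 mode-info units; standalone helper):

```c
#include <stdbool.h>

#define MI_SIZE_LOG2 2

/* Index of the 64x64 CDEF unit holding mi (row, col) within its
 * superblock, matching the computation in write_cdef(). */
static int cdef_unit_index(int mi_row, int mi_col, bool sb_is_128x128) {
  const int cdef_size = 1 << (6 - MI_SIZE_LOG2);   /* 16 mi = 64 pels */
  const int unit_row = (mi_row & cdef_size) != 0;  /* lower 64-pel half? */
  const int unit_col = (mi_col & cdef_size) != 0;  /* right 64-pel half? */
  return sb_is_128x128 ? unit_col + 2 * unit_row : 0;
}
/* e.g. mi (16, 0) in a 128x128 superblock -> index 2, the bottom-left unit. */
```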
919 | | |
920 | | static inline void write_inter_segment_id(AV1_COMP *cpi, MACROBLOCKD *const xd, |
921 | | aom_writer *w, |
922 | | const struct segmentation *const seg, |
923 | | struct segmentation_probs *const segp, |
924 | 0 | int skip, int preskip) { |
925 | 0 | MB_MODE_INFO *const mbmi = xd->mi[0]; |
926 | 0 | AV1_COMMON *const cm = &cpi->common; |
927 | 0 | const int mi_row = xd->mi_row; |
928 | 0 | const int mi_col = xd->mi_col; |
929 | |
930 | 0 | if (seg->update_map) { |
931 | 0 | if (preskip) { |
932 | 0 | if (!seg->segid_preskip) return; |
933 | 0 | } else { |
934 | 0 | if (seg->segid_preskip) return; |
935 | 0 | if (skip) { |
936 | 0 | write_segment_id(cpi, xd, mbmi, w, seg, segp, 1); |
937 | 0 | if (seg->temporal_update) mbmi->seg_id_predicted = 0; |
938 | 0 | return; |
939 | 0 | } |
940 | 0 | } |
941 | 0 | if (seg->temporal_update) { |
942 | 0 | const int pred_flag = mbmi->seg_id_predicted; |
943 | 0 | aom_cdf_prob *pred_cdf = av1_get_pred_cdf_seg_id(segp, xd); |
944 | 0 | aom_write_symbol(w, pred_flag, pred_cdf, 2); |
945 | 0 | if (!pred_flag) { |
946 | 0 | write_segment_id(cpi, xd, mbmi, w, seg, segp, 0); |
947 | 0 | } |
948 | 0 | if (pred_flag) { |
949 | 0 | set_spatial_segment_id(&cm->mi_params, cm->cur_frame->seg_map, |
950 | 0 | mbmi->bsize, mi_row, mi_col, mbmi->segment_id); |
951 | 0 | } |
952 | 0 | } else { |
953 | 0 | write_segment_id(cpi, xd, mbmi, w, seg, segp, 0); |
954 | 0 | } |
955 | 0 | } |
956 | 0 | } |
957 | | |
958 | | // If delta q is present, writes the delta_q index. |
959 | | // Also writes the delta loop filter levels, if present. |
960 | | static inline void write_delta_q_params(AV1_COMMON *const cm, |
961 | | MACROBLOCKD *const xd, int skip, |
962 | 0 | aom_writer *w) { |
963 | 0 | const DeltaQInfo *const delta_q_info = &cm->delta_q_info; |
964 | |
965 | 0 | if (delta_q_info->delta_q_present_flag) { |
966 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
967 | 0 | const BLOCK_SIZE bsize = mbmi->bsize; |
968 | 0 | const int super_block_upper_left = |
969 | 0 | ((xd->mi_row & (cm->seq_params->mib_size - 1)) == 0) && |
970 | 0 | ((xd->mi_col & (cm->seq_params->mib_size - 1)) == 0); |
971 | |
972 | 0 | if ((bsize != cm->seq_params->sb_size || skip == 0) && |
973 | 0 | super_block_upper_left) { |
974 | 0 | assert(mbmi->current_qindex > 0); |
975 | 0 | const int reduced_delta_qindex = |
976 | 0 | (mbmi->current_qindex - xd->current_base_qindex) / |
977 | 0 | delta_q_info->delta_q_res; |
978 | 0 | write_delta_qindex(xd, reduced_delta_qindex, w); |
979 | 0 | xd->current_base_qindex = mbmi->current_qindex; |
980 | 0 | if (delta_q_info->delta_lf_present_flag) { |
981 | 0 | if (delta_q_info->delta_lf_multi) { |
982 | 0 | const int frame_lf_count = |
983 | 0 | av1_num_planes(cm) > 1 ? FRAME_LF_COUNT : FRAME_LF_COUNT - 2; |
984 | 0 | for (int lf_id = 0; lf_id < frame_lf_count; ++lf_id) { |
985 | 0 | int reduced_delta_lflevel = |
986 | 0 | (mbmi->delta_lf[lf_id] - xd->delta_lf[lf_id]) / |
987 | 0 | delta_q_info->delta_lf_res; |
988 | 0 | write_delta_lflevel(cm, xd, lf_id, reduced_delta_lflevel, 1, w); |
989 | 0 | xd->delta_lf[lf_id] = mbmi->delta_lf[lf_id]; |
990 | 0 | } |
991 | 0 | } else { |
992 | 0 | int reduced_delta_lflevel = |
993 | 0 | (mbmi->delta_lf_from_base - xd->delta_lf_from_base) / |
994 | 0 | delta_q_info->delta_lf_res; |
995 | 0 | write_delta_lflevel(cm, xd, -1, reduced_delta_lflevel, 0, w); |
996 | 0 | xd->delta_lf_from_base = mbmi->delta_lf_from_base; |
997 | 0 | } |
998 | 0 | } |
999 | 0 | } |
1000 | 0 | } |
1001 | 0 | } |
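Both deltas are signaled only at the top-left mode-info unit of a superblock and only in multiples of the configured resolution (delta_q_res, delta_lf_res), so just the quotient is entropy-coded. A one-line sketch of the quantization (illustrative helper):

```c
/* Quotient actually transmitted; e.g. qindex 120 -> 132 at res 4 sends 3. */
static int reduced_delta(int cur, int prev, int res) {
  return (cur - prev) / res;
}
```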
1002 | | |
1003 | | static inline void write_intra_prediction_modes(const AV1_COMMON *cm, |
1004 | | MACROBLOCKD *const xd, |
1005 | | int is_keyframe, |
1006 | 0 | aom_writer *w) { |
1007 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
1008 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
1009 | 0 | const PREDICTION_MODE mode = mbmi->mode; |
1010 | 0 | const BLOCK_SIZE bsize = mbmi->bsize; |
1011 | | |
1012 | | // Y mode. |
1013 | 0 | if (is_keyframe) { |
1014 | 0 | const MB_MODE_INFO *const above_mi = xd->above_mbmi; |
1015 | 0 | const MB_MODE_INFO *const left_mi = xd->left_mbmi; |
1016 | 0 | write_intra_y_mode_kf(ec_ctx, mbmi, above_mi, left_mi, mode, w); |
1017 | 0 | } else { |
1018 | 0 | write_intra_y_mode_nonkf(ec_ctx, bsize, mode, w); |
1019 | 0 | } |
1020 | | |
1021 | | // Y angle delta. |
1022 | 0 | const int use_angle_delta = av1_use_angle_delta(bsize); |
1023 | 0 | if (use_angle_delta && av1_is_directional_mode(mode)) { |
1024 | 0 | write_angle_delta(w, mbmi->angle_delta[PLANE_TYPE_Y], |
1025 | 0 | ec_ctx->angle_delta_cdf[mode - V_PRED]); |
1026 | 0 | } |
1027 | | |
1028 | | // UV mode and UV angle delta. |
1029 | 0 | if (!cm->seq_params->monochrome && xd->is_chroma_ref) { |
1030 | 0 | const UV_PREDICTION_MODE uv_mode = mbmi->uv_mode; |
1031 | 0 | write_intra_uv_mode(ec_ctx, uv_mode, mode, is_cfl_allowed(xd), w); |
1032 | 0 | if (uv_mode == UV_CFL_PRED) |
1033 | 0 | write_cfl_alphas(ec_ctx, mbmi->cfl_alpha_idx, mbmi->cfl_alpha_signs, w); |
1034 | 0 | const PREDICTION_MODE intra_mode = get_uv_mode(uv_mode); |
1035 | 0 | if (use_angle_delta && av1_is_directional_mode(intra_mode)) { |
1036 | 0 | write_angle_delta(w, mbmi->angle_delta[PLANE_TYPE_UV], |
1037 | 0 | ec_ctx->angle_delta_cdf[intra_mode - V_PRED]); |
1038 | 0 | } |
1039 | 0 | } |
1040 | | |
1041 | | // Palette. |
1042 | 0 | if (av1_allow_palette(cm->features.allow_screen_content_tools, bsize)) { |
1043 | 0 | write_palette_mode_info(cm, xd, mbmi, w); |
1044 | 0 | } |
1045 | | |
1046 | | // Filter intra. |
1047 | 0 | write_filter_intra_mode_info(cm, xd, mbmi, w); |
1048 | 0 | } |
1049 | | |
1050 | | static inline int16_t mode_context_analyzer( |
1051 | 0 | const int16_t mode_context, const MV_REFERENCE_FRAME *const rf) { |
1052 | 0 | if (rf[1] <= INTRA_FRAME) return mode_context; |
1053 | | |
1054 | 0 | const int16_t newmv_ctx = mode_context & NEWMV_CTX_MASK; |
1055 | 0 | const int16_t refmv_ctx = (mode_context >> REFMV_OFFSET) & REFMV_CTX_MASK; |
1056 | |
1057 | 0 | const int16_t comp_ctx = compound_mode_ctx_map[refmv_ctx >> 1][AOMMIN( |
1058 | 0 | newmv_ctx, COMP_NEWMV_CTXS - 1)]; |
1059 | 0 | return comp_ctx; |
1060 | 0 | } |
1061 | | |
1062 | | static inline int_mv get_ref_mv_from_stack( |
1063 | | int ref_idx, const MV_REFERENCE_FRAME *ref_frame, int ref_mv_idx, |
1064 | 0 | const MB_MODE_INFO_EXT_FRAME *mbmi_ext_frame) { |
1065 | 0 | const int8_t ref_frame_type = av1_ref_frame_type(ref_frame); |
1066 | 0 | const CANDIDATE_MV *curr_ref_mv_stack = mbmi_ext_frame->ref_mv_stack; |
1067 | |
1068 | 0 | if (ref_frame[1] > INTRA_FRAME) { |
1069 | 0 | assert(ref_idx == 0 || ref_idx == 1); |
1070 | 0 | return ref_idx ? curr_ref_mv_stack[ref_mv_idx].comp_mv |
1071 | 0 | : curr_ref_mv_stack[ref_mv_idx].this_mv; |
1072 | 0 | } |
1073 | | |
1074 | 0 | assert(ref_idx == 0); |
1075 | 0 | return ref_mv_idx < mbmi_ext_frame->ref_mv_count |
1076 | 0 | ? curr_ref_mv_stack[ref_mv_idx].this_mv |
1077 | 0 | : mbmi_ext_frame->global_mvs[ref_frame_type]; |
1078 | 0 | } |
1079 | | |
1080 | 0 | static inline int_mv get_ref_mv(const MACROBLOCK *x, int ref_idx) { |
1081 | 0 | const MACROBLOCKD *xd = &x->e_mbd; |
1082 | 0 | const MB_MODE_INFO *mbmi = xd->mi[0]; |
1083 | 0 | int ref_mv_idx = mbmi->ref_mv_idx; |
1084 | 0 | if (mbmi->mode == NEAR_NEWMV || mbmi->mode == NEW_NEARMV) { |
1085 | 0 | assert(has_second_ref(mbmi)); |
1086 | 0 | ref_mv_idx += 1; |
1087 | 0 | } |
1088 | 0 | return get_ref_mv_from_stack(ref_idx, mbmi->ref_frame, ref_mv_idx, |
1089 | 0 | x->mbmi_ext_frame); |
1090 | 0 | } |
1091 | | |
1092 | | static inline void pack_inter_mode_mvs(AV1_COMP *cpi, ThreadData *const td, |
1093 | 0 | aom_writer *w) { |
1094 | 0 | AV1_COMMON *const cm = &cpi->common; |
1095 | 0 | MACROBLOCK *const x = &td->mb; |
1096 | 0 | MACROBLOCKD *const xd = &x->e_mbd; |
1097 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
1098 | 0 | const struct segmentation *const seg = &cm->seg; |
1099 | 0 | struct segmentation_probs *const segp = &ec_ctx->seg; |
1100 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
1101 | 0 | const MB_MODE_INFO_EXT_FRAME *const mbmi_ext_frame = x->mbmi_ext_frame; |
1102 | 0 | const PREDICTION_MODE mode = mbmi->mode; |
1103 | 0 | const uint8_t segment_id = mbmi->segment_id; |
1104 | 0 | const BLOCK_SIZE bsize = mbmi->bsize; |
1105 | 0 | const int allow_hp = cm->features.allow_high_precision_mv; |
1106 | 0 | const int is_inter = is_inter_block(mbmi); |
1107 | 0 | const int is_compound = has_second_ref(mbmi); |
1108 | 0 | int ref; |
1109 | |
1110 | 0 | write_inter_segment_id(cpi, xd, w, seg, segp, 0, 1); |
1111 | |
1112 | 0 | write_skip_mode(cm, xd, segment_id, mbmi, w); |
1113 | |
1114 | 0 | assert(IMPLIES(mbmi->skip_mode, mbmi->skip_txfm)); |
1115 | 0 | const int skip = |
1116 | 0 | mbmi->skip_mode ? 1 : write_skip(cm, xd, segment_id, mbmi, w); |
1117 | |
1118 | 0 | write_inter_segment_id(cpi, xd, w, seg, segp, skip, 0); |
1119 | |
1120 | 0 | write_cdef(cm, xd, w, skip); |
1121 | |
1122 | 0 | write_delta_q_params(cm, xd, skip, w); |
1123 | |
1124 | 0 | if (!mbmi->skip_mode) write_is_inter(cm, xd, mbmi->segment_id, w, is_inter); |
1125 | |
1126 | 0 | if (mbmi->skip_mode) return; |
1127 | | |
1128 | 0 | if (!is_inter) { |
1129 | 0 | write_intra_prediction_modes(cm, xd, 0, w); |
1130 | 0 | } else { |
1131 | 0 | int16_t mode_ctx; |
1132 | |
1133 | 0 | av1_collect_neighbors_ref_counts(xd); |
1134 | |
1135 | 0 | write_ref_frames(cm, xd, w); |
1136 | |
1137 | 0 | mode_ctx = |
1138 | 0 | mode_context_analyzer(mbmi_ext_frame->mode_context, mbmi->ref_frame); |
1139 | | |
1140 | | // If segment skip is not enabled, code the mode. |
1141 | 0 | if (!segfeature_active(seg, segment_id, SEG_LVL_SKIP)) { |
1142 | 0 | if (is_inter_compound_mode(mode)) |
1143 | 0 | write_inter_compound_mode(xd, w, mode, mode_ctx); |
1144 | 0 | else if (is_inter_singleref_mode(mode)) |
1145 | 0 | write_inter_mode(w, mode, ec_ctx, mode_ctx); |
1146 | |
1147 | 0 | if (mode == NEWMV || mode == NEW_NEWMV || have_nearmv_in_inter_mode(mode)) |
1148 | 0 | write_drl_idx(ec_ctx, mbmi, mbmi_ext_frame, w); |
1149 | 0 | else |
1150 | 0 | assert(mbmi->ref_mv_idx == 0); |
1151 | 0 | } |
1152 | |
1153 | 0 | if (mode == NEWMV || mode == NEW_NEWMV) { |
1154 | 0 | for (ref = 0; ref < 1 + is_compound; ++ref) { |
1155 | 0 | nmv_context *nmvc = &ec_ctx->nmvc; |
1156 | 0 | const int_mv ref_mv = get_ref_mv(x, ref); |
1157 | 0 | av1_encode_mv(cpi, w, td, &mbmi->mv[ref].as_mv, &ref_mv.as_mv, nmvc, |
1158 | 0 | allow_hp); |
1159 | 0 | } |
1160 | 0 | } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) { |
1161 | 0 | nmv_context *nmvc = &ec_ctx->nmvc; |
1162 | 0 | const int_mv ref_mv = get_ref_mv(x, 1); |
1163 | 0 | av1_encode_mv(cpi, w, td, &mbmi->mv[1].as_mv, &ref_mv.as_mv, nmvc, |
1164 | 0 | allow_hp); |
1165 | 0 | } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) { |
1166 | 0 | nmv_context *nmvc = &ec_ctx->nmvc; |
1167 | 0 | const int_mv ref_mv = get_ref_mv(x, 0); |
1168 | 0 | av1_encode_mv(cpi, w, td, &mbmi->mv[0].as_mv, &ref_mv.as_mv, nmvc, |
1169 | 0 | allow_hp); |
1170 | 0 | } |
1171 | |
1172 | 0 | if (cpi->common.current_frame.reference_mode != COMPOUND_REFERENCE && |
1173 | 0 | cpi->common.seq_params->enable_interintra_compound && |
1174 | 0 | is_interintra_allowed(mbmi)) { |
1175 | 0 | const int interintra = mbmi->ref_frame[1] == INTRA_FRAME; |
1176 | 0 | const int bsize_group = size_group_lookup[bsize]; |
1177 | 0 | aom_write_symbol(w, interintra, ec_ctx->interintra_cdf[bsize_group], 2); |
1178 | 0 | if (interintra) { |
1179 | 0 | aom_write_symbol(w, mbmi->interintra_mode, |
1180 | 0 | ec_ctx->interintra_mode_cdf[bsize_group], |
1181 | 0 | INTERINTRA_MODES); |
1182 | 0 | if (av1_is_wedge_used(bsize)) { |
1183 | 0 | aom_write_symbol(w, mbmi->use_wedge_interintra, |
1184 | 0 | ec_ctx->wedge_interintra_cdf[bsize], 2); |
1185 | 0 | if (mbmi->use_wedge_interintra) { |
1186 | 0 | aom_write_symbol(w, mbmi->interintra_wedge_index, |
1187 | 0 | ec_ctx->wedge_idx_cdf[bsize], MAX_WEDGE_TYPES); |
1188 | 0 | } |
1189 | 0 | } |
1190 | 0 | } |
1191 | 0 | } |
1192 | |
1193 | 0 | if (mbmi->ref_frame[1] != INTRA_FRAME) write_motion_mode(cm, xd, mbmi, w); |
1194 | | |
1195 | | // First, write an index indicating the current compound inter prediction mode group |
1196 | | // Group A (0): dist_wtd_comp, compound_average |
1197 | | // Group B (1): interintra, compound_diffwtd, wedge |
1198 | 0 | if (has_second_ref(mbmi)) { |
1199 | 0 | const int masked_compound_used = is_any_masked_compound_used(bsize) && |
1200 | 0 | cm->seq_params->enable_masked_compound; |
1201 | |
1202 | 0 | if (masked_compound_used) { |
1203 | 0 | const int ctx_comp_group_idx = get_comp_group_idx_context(xd); |
1204 | 0 | aom_write_symbol(w, mbmi->comp_group_idx, |
1205 | 0 | ec_ctx->comp_group_idx_cdf[ctx_comp_group_idx], 2); |
1206 | 0 | } else { |
1207 | 0 | assert(mbmi->comp_group_idx == 0); |
1208 | 0 | } |
1209 | |
1210 | 0 | if (mbmi->comp_group_idx == 0) { |
1211 | 0 | if (mbmi->compound_idx) |
1212 | 0 | assert(mbmi->interinter_comp.type == COMPOUND_AVERAGE); |
1213 | |
1214 | 0 | if (cm->seq_params->order_hint_info.enable_dist_wtd_comp) { |
1215 | 0 | const int comp_index_ctx = get_comp_index_context(cm, xd); |
1216 | 0 | aom_write_symbol(w, mbmi->compound_idx, |
1217 | 0 | ec_ctx->compound_index_cdf[comp_index_ctx], 2); |
1218 | 0 | } else { |
1219 | 0 | assert(mbmi->compound_idx == 1); |
1220 | 0 | } |
1221 | 0 | } else { |
1222 | 0 | assert(cpi->common.current_frame.reference_mode != SINGLE_REFERENCE && |
1223 | 0 | is_inter_compound_mode(mbmi->mode) && |
1224 | 0 | mbmi->motion_mode == SIMPLE_TRANSLATION); |
1225 | 0 | assert(masked_compound_used); |
1226 | | // compound_diffwtd, wedge |
1227 | 0 | assert(mbmi->interinter_comp.type == COMPOUND_WEDGE || |
1228 | 0 | mbmi->interinter_comp.type == COMPOUND_DIFFWTD); |
1229 | |
1230 | 0 | if (is_interinter_compound_used(COMPOUND_WEDGE, bsize)) |
1231 | 0 | aom_write_symbol(w, mbmi->interinter_comp.type - COMPOUND_WEDGE, |
1232 | 0 | ec_ctx->compound_type_cdf[bsize], |
1233 | 0 | MASKED_COMPOUND_TYPES); |
1234 | |
1235 | 0 | if (mbmi->interinter_comp.type == COMPOUND_WEDGE) { |
1236 | 0 | assert(is_interinter_compound_used(COMPOUND_WEDGE, bsize)); |
1237 | 0 | aom_write_symbol(w, mbmi->interinter_comp.wedge_index, |
1238 | 0 | ec_ctx->wedge_idx_cdf[bsize], MAX_WEDGE_TYPES); |
1239 | 0 | aom_write_bit(w, mbmi->interinter_comp.wedge_sign); |
1240 | 0 | } else { |
1241 | 0 | assert(mbmi->interinter_comp.type == COMPOUND_DIFFWTD); |
1242 | 0 | aom_write_literal(w, mbmi->interinter_comp.mask_type, |
1243 | 0 | MAX_DIFFWTD_MASK_BITS); |
1244 | 0 | } |
1245 | 0 | } |
1246 | 0 | } |
1247 | 0 | write_mb_interp_filter(cm, td, w); |
1248 | 0 | } |
1249 | 0 | } |
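Within pack_inter_mode_mvs(), only the NEW components of a mode carry an explicitly coded motion vector; NEAREST/NEAR/GLOBAL components are reconstructed from the reference MV stack or the global motion model. A sketch of which reference slots get a coded MV per mode (enum subset, illustrative):

```c
typedef enum { M_NEWMV, M_NEW_NEWMV, M_NEAREST_NEWMV, M_NEAR_NEWMV,
               M_NEW_NEARESTMV, M_NEW_NEARMV, M_OTHER } PackedMode;

/* Bitmask of reference slots whose MV is written: bit 0 = first
 * reference, bit 1 = second; mirrors the mode checks above. */
static int coded_mv_mask(PackedMode m) {
  switch (m) {
    case M_NEWMV:         return 0x1;
    case M_NEW_NEWMV:     return 0x3;
    case M_NEAREST_NEWMV:
    case M_NEAR_NEWMV:    return 0x2;  /* only the second ref's MV */
    case M_NEW_NEARESTMV:
    case M_NEW_NEARMV:    return 0x1;  /* only the first ref's MV */
    default:              return 0x0;
  }
}
```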
1250 | | |
1251 | | static inline void write_intrabc_info( |
1252 | | MACROBLOCKD *xd, const MB_MODE_INFO_EXT_FRAME *mbmi_ext_frame, |
1253 | 0 | aom_writer *w) { |
1254 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
1255 | 0 | int use_intrabc = is_intrabc_block(mbmi); |
1256 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
1257 | 0 | aom_write_symbol(w, use_intrabc, ec_ctx->intrabc_cdf, 2); |
1258 | 0 | if (use_intrabc) { |
1259 | 0 | assert(mbmi->mode == DC_PRED); |
1260 | 0 | assert(mbmi->uv_mode == UV_DC_PRED); |
1261 | 0 | assert(mbmi->motion_mode == SIMPLE_TRANSLATION); |
1262 | 0 | int_mv dv_ref = mbmi_ext_frame->ref_mv_stack[0].this_mv; |
1263 | 0 | av1_encode_dv(w, &mbmi->mv[0].as_mv, &dv_ref.as_mv, &ec_ctx->ndvc); |
1264 | 0 | } |
1265 | 0 | } |
1266 | | |
1267 | | static inline void write_mb_modes_kf( |
1268 | | AV1_COMP *cpi, MACROBLOCKD *xd, |
1269 | 0 | const MB_MODE_INFO_EXT_FRAME *mbmi_ext_frame, aom_writer *w) { |
1270 | 0 | AV1_COMMON *const cm = &cpi->common; |
1271 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
1272 | 0 | const struct segmentation *const seg = &cm->seg; |
1273 | 0 | struct segmentation_probs *const segp = &ec_ctx->seg; |
1274 | 0 | const MB_MODE_INFO *const mbmi = xd->mi[0]; |
1275 | |
1276 | 0 | if (seg->segid_preskip && seg->update_map) |
1277 | 0 | write_segment_id(cpi, xd, mbmi, w, seg, segp, 0); |
1278 | |
1279 | 0 | const int skip = write_skip(cm, xd, mbmi->segment_id, mbmi, w); |
1280 | |
1281 | 0 | if (!seg->segid_preskip && seg->update_map) |
1282 | 0 | write_segment_id(cpi, xd, mbmi, w, seg, segp, skip); |
1283 | |
1284 | 0 | write_cdef(cm, xd, w, skip); |
1285 | |
1286 | 0 | write_delta_q_params(cm, xd, skip, w); |
1287 | |
|
1288 | 0 | if (av1_allow_intrabc(cm)) { |
1289 | 0 | write_intrabc_info(xd, mbmi_ext_frame, w); |
1290 | 0 | if (is_intrabc_block(mbmi)) return; |
1291 | 0 | } |
1292 | | |
1293 | 0 | write_intra_prediction_modes(cm, xd, 1, w); |
1294 | 0 | } |
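| |
| | // Summary of the keyframe signalling order above: segment id (when coded
| | // before skip), the skip flag, segment id for the post-skip case, CDEF
| | // strength, delta-q/delta-lf parameters, the intra-block-copy flag (plus
| | // its DV when set), and finally the intra prediction modes.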
1295 | | |
1296 | | #if CONFIG_RD_DEBUG |
1297 | | static inline void dump_mode_info(MB_MODE_INFO *mi) { |
1298 | | printf("\nmi->mi_row == %d\n", mi->mi_row); |
1299 | | printf("&& mi->mi_col == %d\n", mi->mi_col); |
1300 | | printf("&& mi->bsize == %d\n", mi->bsize); |
1301 | | printf("&& mi->tx_size == %d\n", mi->tx_size); |
1302 | | printf("&& mi->mode == %d\n", mi->mode); |
1303 | | } |
1304 | | |
1305 | | static int rd_token_stats_mismatch(RD_STATS *rd_stats, TOKEN_STATS *token_stats, |
1306 | | int plane) { |
1307 | | if (rd_stats->txb_coeff_cost[plane] != token_stats->cost) { |
1308 | | printf("\nplane %d rd_stats->txb_coeff_cost %d token_stats->cost %d\n", |
1309 | | plane, rd_stats->txb_coeff_cost[plane], token_stats->cost); |
1310 | | return 1; |
1311 | | } |
1312 | | return 0; |
1313 | | } |
1314 | | #endif |
1315 | | |
1316 | | #if ENC_MISMATCH_DEBUG |
1317 | | static inline void enc_dump_logs( |
1318 | | const AV1_COMMON *const cm, |
1319 | | const MBMIExtFrameBufferInfo *const mbmi_ext_info, int mi_row, int mi_col) { |
1320 | | const MB_MODE_INFO *const mbmi = *( |
1321 | | cm->mi_params.mi_grid_base + (mi_row * cm->mi_params.mi_stride + mi_col)); |
1322 | | const MB_MODE_INFO_EXT_FRAME *const mbmi_ext_frame = |
1323 | | mbmi_ext_info->frame_base + get_mi_ext_idx(mi_row, mi_col, |
1324 | | cm->mi_params.mi_alloc_bsize, |
1325 | | mbmi_ext_info->stride); |
1326 | | if (is_inter_block(mbmi)) { |
1327 | | #define FRAME_TO_CHECK 11 |
1328 | | if (cm->current_frame.frame_number == FRAME_TO_CHECK && |
1329 | | cm->show_frame == 1) { |
1330 | | const BLOCK_SIZE bsize = mbmi->bsize; |
1331 | | |
1332 | | int_mv mv[2] = { 0 }; |
1333 | | const int is_comp_ref = has_second_ref(mbmi); |
1334 | | |
1335 | | for (int ref = 0; ref < 1 + is_comp_ref; ++ref) |
1336 | | mv[ref].as_mv = mbmi->mv[ref].as_mv; |
1337 | | |
1338 | | if (!is_comp_ref) { |
1339 | | mv[1].as_int = 0; |
1340 | | } |
1341 | | |
1342 | | const int16_t mode_ctx = |
1343 | | is_comp_ref ? 0 |
1344 | | : mode_context_analyzer(mbmi_ext_frame->mode_context, |
1345 | | mbmi->ref_frame); |
1346 | | |
1347 | | const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK; |
1348 | | int16_t zeromv_ctx = -1; |
1349 | | int16_t refmv_ctx = -1; |
1350 | | |
1351 | | if (mbmi->mode != NEWMV) { |
1352 | | zeromv_ctx = (mode_ctx >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK; |
1353 | | if (mbmi->mode != GLOBALMV) |
1354 | | refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK; |
1355 | | } |
1356 | | |
1357 | | printf( |
1358 | | "=== ENCODER ===: " |
1359 | | "Frame=%d, (mi_row,mi_col)=(%d,%d), skip_mode=%d, mode=%d, bsize=%d, " |
1360 | | "show_frame=%d, mv[0]=(%d,%d), mv[1]=(%d,%d), ref[0]=%d, " |
1361 | | "ref[1]=%d, motion_mode=%d, mode_ctx=%d, " |
1362 | | "newmv_ctx=%d, zeromv_ctx=%d, refmv_ctx=%d, tx_size=%d\n", |
1363 | | cm->current_frame.frame_number, mi_row, mi_col, mbmi->skip_mode, |
1364 | | mbmi->mode, bsize, cm->show_frame, mv[0].as_mv.row, mv[0].as_mv.col, |
1365 | | mv[1].as_mv.row, mv[1].as_mv.col, mbmi->ref_frame[0], |
1366 | | mbmi->ref_frame[1], mbmi->motion_mode, mode_ctx, newmv_ctx, |
1367 | | zeromv_ctx, refmv_ctx, mbmi->tx_size); |
1368 | | } |
1369 | | } |
1370 | | } |
1371 | | #endif // ENC_MISMATCH_DEBUG |
1372 | | |
1373 | | static inline void write_mbmi_b(AV1_COMP *cpi, ThreadData *const td, |
1374 | 0 | aom_writer *w) { |
1375 | 0 | AV1_COMMON *const cm = &cpi->common; |
1376 | 0 | MACROBLOCKD *const xd = &td->mb.e_mbd; |
1377 | 0 | MB_MODE_INFO *m = xd->mi[0]; |
1378 | |
|
1379 | 0 | if (frame_is_intra_only(cm)) { |
1380 | 0 | write_mb_modes_kf(cpi, xd, td->mb.mbmi_ext_frame, w); |
1381 | 0 | } else { |
1382 | | // has_subpel_mv_component needs the ref frame buffers set up to look |
1383 | | // up if they are scaled. has_subpel_mv_component is in turn needed by |
1384 | | // write_switchable_interp_filter, which is called by pack_inter_mode_mvs. |
1385 | 0 | set_ref_ptrs(cm, xd, m->ref_frame[0], m->ref_frame[1]); |
1386 | |
|
1387 | | #if ENC_MISMATCH_DEBUG |
1388 | | enc_dump_logs(cm, &cpi->mbmi_ext_info, xd->mi_row, xd->mi_col); |
1389 | | #endif // ENC_MISMATCH_DEBUG |
1390 | |
|
1391 | 0 | pack_inter_mode_mvs(cpi, td, w); |
1392 | 0 | } |
1393 | 0 | } |
1394 | | |
1395 | | static inline void write_inter_txb_coeff( |
1396 | | AV1_COMMON *const cm, MACROBLOCK *const x, MB_MODE_INFO *const mbmi, |
1397 | | aom_writer *w, const TokenExtra **tok, const TokenExtra *const tok_end, |
1398 | | TOKEN_STATS *token_stats, const int row, const int col, int *block, |
1399 | 0 | const int plane) { |
1400 | 0 | MACROBLOCKD *const xd = &x->e_mbd; |
1401 | 0 | const struct macroblockd_plane *const pd = &xd->plane[plane]; |
1402 | 0 | const BLOCK_SIZE bsize = mbmi->bsize; |
1403 | 0 | assert(bsize < BLOCK_SIZES_ALL); |
1404 | 0 | const int ss_x = pd->subsampling_x; |
1405 | 0 | const int ss_y = pd->subsampling_y; |
1406 | 0 | const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, ss_x, ss_y); |
1407 | 0 | assert(plane_bsize < BLOCK_SIZES_ALL); |
1408 | 0 | const TX_SIZE max_tx_size = get_vartx_max_txsize(xd, plane_bsize, plane); |
1409 | 0 | const int step = |
1410 | 0 | tx_size_wide_unit[max_tx_size] * tx_size_high_unit[max_tx_size]; |
1411 | 0 | const int bkw = tx_size_wide_unit[max_tx_size]; |
1412 | 0 | const int bkh = tx_size_high_unit[max_tx_size]; |
1413 | 0 | const BLOCK_SIZE max_unit_bsize = |
1414 | 0 | get_plane_block_size(BLOCK_64X64, ss_x, ss_y); |
1415 | 0 | const int num_4x4_w = mi_size_wide[plane_bsize]; |
1416 | 0 | const int num_4x4_h = mi_size_high[plane_bsize]; |
1417 | 0 | const int mu_blocks_wide = mi_size_wide[max_unit_bsize]; |
1418 | 0 | const int mu_blocks_high = mi_size_high[max_unit_bsize]; |
1419 | 0 | const int unit_height = AOMMIN(mu_blocks_high + (row >> ss_y), num_4x4_h); |
1420 | 0 | const int unit_width = AOMMIN(mu_blocks_wide + (col >> ss_x), num_4x4_w); |
1421 | 0 | for (int blk_row = row >> ss_y; blk_row < unit_height; blk_row += bkh) { |
1422 | 0 | for (int blk_col = col >> ss_x; blk_col < unit_width; blk_col += bkw) { |
1423 | 0 | pack_txb_tokens(w, cm, x, tok, tok_end, xd, mbmi, plane, plane_bsize, |
1424 | 0 | cm->seq_params->bit_depth, *block, blk_row, blk_col, |
1425 | 0 | max_tx_size, token_stats); |
1426 | 0 | *block += step; |
1427 | 0 | } |
1428 | 0 | } |
1429 | 0 | } |
1430 | | |
1431 | | static inline void write_tokens_b(AV1_COMP *cpi, MACROBLOCK *const x, |
1432 | | aom_writer *w, const TokenExtra **tok, |
1433 | 0 | const TokenExtra *const tok_end) { |
1434 | 0 | AV1_COMMON *const cm = &cpi->common; |
1435 | 0 | MACROBLOCKD *const xd = &x->e_mbd; |
1436 | 0 | MB_MODE_INFO *const mbmi = xd->mi[0]; |
1437 | 0 | const BLOCK_SIZE bsize = mbmi->bsize; |
1438 | |
|
1439 | 0 | assert(!mbmi->skip_txfm); |
1440 | |
|
1441 | 0 | const int is_inter = is_inter_block(mbmi); |
1442 | 0 | if (!is_inter) { |
1443 | 0 | av1_write_intra_coeffs_mb(cm, x, w, bsize); |
1444 | 0 | } else { |
1445 | 0 | int block[MAX_MB_PLANE] = { 0 }; |
1446 | 0 | assert(bsize == get_plane_block_size(bsize, xd->plane[0].subsampling_x, |
1447 | 0 | xd->plane[0].subsampling_y)); |
1448 | 0 | const int num_4x4_w = mi_size_wide[bsize]; |
1449 | 0 | const int num_4x4_h = mi_size_high[bsize]; |
1450 | 0 | TOKEN_STATS token_stats; |
1451 | 0 | init_token_stats(&token_stats); |
1452 | |
|
1453 | 0 | const BLOCK_SIZE max_unit_bsize = BLOCK_64X64; |
1454 | 0 | assert(max_unit_bsize == get_plane_block_size(BLOCK_64X64, |
1455 | 0 | xd->plane[0].subsampling_x, |
1456 | 0 | xd->plane[0].subsampling_y)); |
1457 | 0 | int mu_blocks_wide = mi_size_wide[max_unit_bsize]; |
1458 | 0 | int mu_blocks_high = mi_size_high[max_unit_bsize]; |
1459 | 0 | mu_blocks_wide = AOMMIN(num_4x4_w, mu_blocks_wide); |
1460 | 0 | mu_blocks_high = AOMMIN(num_4x4_h, mu_blocks_high); |
1461 | |
|
1462 | 0 | const int num_planes = av1_num_planes(cm); |
1463 | 0 | for (int row = 0; row < num_4x4_h; row += mu_blocks_high) { |
1464 | 0 | for (int col = 0; col < num_4x4_w; col += mu_blocks_wide) { |
1465 | 0 | for (int plane = 0; plane < num_planes; ++plane) { |
1466 | 0 | if (plane && !xd->is_chroma_ref) break; |
1467 | 0 | write_inter_txb_coeff(cm, x, mbmi, w, tok, tok_end, &token_stats, row, |
1468 | 0 | col, &block[plane], plane); |
1469 | 0 | } |
1470 | 0 | } |
1471 | 0 | } |
1472 | | #if CONFIG_RD_DEBUG |
1473 | | for (int plane = 0; plane < num_planes; ++plane) { |
1474 | | if (mbmi->bsize >= BLOCK_8X8 && |
1475 | | rd_token_stats_mismatch(&mbmi->rd_stats, &token_stats, plane)) { |
1476 | | dump_mode_info(mbmi); |
1477 | | assert(0); |
1478 | | } |
1479 | | } |
1480 | | #endif // CONFIG_RD_DEBUG |
1481 | 0 | } |
1482 | 0 | } |
1483 | | |
1484 | | static inline void write_modes_b(AV1_COMP *cpi, ThreadData *const td, |
1485 | | const TileInfo *const tile, aom_writer *w, |
1486 | | const TokenExtra **tok, |
1487 | | const TokenExtra *const tok_end, int mi_row, |
1488 | 0 | int mi_col) { |
1489 | 0 | const AV1_COMMON *cm = &cpi->common; |
1490 | 0 | const CommonModeInfoParams *const mi_params = &cm->mi_params; |
1491 | 0 | MACROBLOCKD *xd = &td->mb.e_mbd; |
1492 | 0 | FRAME_CONTEXT *tile_ctx = xd->tile_ctx; |
1493 | 0 | const int grid_idx = mi_row * mi_params->mi_stride + mi_col; |
1494 | 0 | xd->mi = mi_params->mi_grid_base + grid_idx; |
1495 | 0 | td->mb.mbmi_ext_frame = |
1496 | 0 | cpi->mbmi_ext_info.frame_base + |
1497 | 0 | get_mi_ext_idx(mi_row, mi_col, cm->mi_params.mi_alloc_bsize, |
1498 | 0 | cpi->mbmi_ext_info.stride); |
1499 | 0 | xd->tx_type_map = mi_params->tx_type_map + grid_idx; |
1500 | 0 | xd->tx_type_map_stride = mi_params->mi_stride; |
1501 | |
|
1502 | 0 | const MB_MODE_INFO *mbmi = xd->mi[0]; |
1503 | 0 | const BLOCK_SIZE bsize = mbmi->bsize; |
1504 | 0 | assert(bsize <= cm->seq_params->sb_size || |
1505 | 0 | (bsize >= BLOCK_SIZES && bsize < BLOCK_SIZES_ALL)); |
1506 | |
|
1507 | 0 | const int bh = mi_size_high[bsize]; |
1508 | 0 | const int bw = mi_size_wide[bsize]; |
1509 | 0 | set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, mi_params->mi_rows, |
1510 | 0 | mi_params->mi_cols); |
1511 | |
|
1512 | 0 | xd->above_txfm_context = cm->above_contexts.txfm[tile->tile_row] + mi_col; |
1513 | 0 | xd->left_txfm_context = |
1514 | 0 | xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK); |
1515 | |
|
1516 | 0 | write_mbmi_b(cpi, td, w); |
1517 | |
|
1518 | 0 | for (int plane = 0; plane < AOMMIN(2, av1_num_planes(cm)); ++plane) { |
1519 | 0 | const uint8_t palette_size_plane = |
1520 | 0 | mbmi->palette_mode_info.palette_size[plane]; |
1521 | 0 | assert(!mbmi->skip_mode || !palette_size_plane); |
1522 | 0 | if (palette_size_plane > 0) { |
1523 | 0 | assert(mbmi->use_intrabc == 0); |
1524 | 0 | assert(av1_allow_palette(cm->features.allow_screen_content_tools, |
1525 | 0 | mbmi->bsize)); |
1526 | 0 | assert(!plane || xd->is_chroma_ref); |
1527 | 0 | int rows, cols; |
1528 | 0 | av1_get_block_dimensions(mbmi->bsize, plane, xd, NULL, NULL, &rows, |
1529 | 0 | &cols); |
1530 | 0 | assert(*tok < tok_end); |
1531 | 0 | MapCdf map_pb_cdf = plane ? tile_ctx->palette_uv_color_index_cdf |
1532 | 0 | : tile_ctx->palette_y_color_index_cdf; |
1533 | 0 | pack_map_tokens(w, tok, palette_size_plane, rows * cols, map_pb_cdf); |
1534 | 0 | } |
1535 | 0 | } |
1536 | |
|
1537 | 0 | const int is_inter_tx = is_inter_block(mbmi); |
1538 | 0 | const int skip_txfm = mbmi->skip_txfm; |
1539 | 0 | const uint8_t segment_id = mbmi->segment_id; |
1540 | 0 | if (cm->features.tx_mode == TX_MODE_SELECT && block_signals_txsize(bsize) && |
1541 | 0 | !(is_inter_tx && skip_txfm) && !xd->lossless[segment_id]) { |
1542 | 0 | if (is_inter_tx) { // This implies skip flag is 0. |
1543 | 0 | const TX_SIZE max_tx_size = get_vartx_max_txsize(xd, bsize, 0); |
1544 | 0 | const int txbh = tx_size_high_unit[max_tx_size]; |
1545 | 0 | const int txbw = tx_size_wide_unit[max_tx_size]; |
1546 | 0 | const int width = mi_size_wide[bsize]; |
1547 | 0 | const int height = mi_size_high[bsize]; |
1548 | 0 | for (int idy = 0; idy < height; idy += txbh) { |
1549 | 0 | for (int idx = 0; idx < width; idx += txbw) { |
1550 | 0 | write_tx_size_vartx(xd, mbmi, max_tx_size, 0, idy, idx, w); |
1551 | 0 | } |
1552 | 0 | } |
1553 | 0 | } else { |
1554 | 0 | write_selected_tx_size(xd, w); |
1555 | 0 | set_txfm_ctxs(mbmi->tx_size, xd->width, xd->height, 0, xd); |
1556 | 0 | } |
1557 | 0 | } else { |
1558 | 0 | set_txfm_ctxs(mbmi->tx_size, xd->width, xd->height, |
1559 | 0 | skip_txfm && is_inter_tx, xd); |
1560 | 0 | } |
1561 | |
|
1562 | 0 | if (!mbmi->skip_txfm) { |
1563 | 0 | int start = aom_tell_size(w); |
1564 | |
|
1565 | 0 | write_tokens_b(cpi, &td->mb, w, tok, tok_end); |
1566 | |
|
1567 | 0 | const int end = aom_tell_size(w); |
1568 | 0 | td->coefficient_size += end - start; |
1569 | 0 | } |
1570 | 0 | } |
1571 | | |
1572 | | static inline void write_partition(const AV1_COMMON *const cm, |
1573 | | const MACROBLOCKD *const xd, int hbs, |
1574 | | int mi_row, int mi_col, PARTITION_TYPE p, |
1575 | 0 | BLOCK_SIZE bsize, aom_writer *w) { |
1576 | 0 | const int is_partition_point = bsize >= BLOCK_8X8; |
1577 | |
|
1578 | 0 | if (!is_partition_point) return; |
1579 | | |
1580 | 0 | const int has_rows = (mi_row + hbs) < cm->mi_params.mi_rows; |
1581 | 0 | const int has_cols = (mi_col + hbs) < cm->mi_params.mi_cols; |
1582 | 0 | const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize); |
1583 | 0 | FRAME_CONTEXT *ec_ctx = xd->tile_ctx; |
1584 | |
|
1585 | 0 | if (!has_rows && !has_cols) { |
1586 | 0 | assert(p == PARTITION_SPLIT); |
1587 | 0 | return; |
1588 | 0 | } |
1589 | | |
1590 | 0 | if (has_rows && has_cols) { |
1591 | 0 | aom_write_symbol(w, p, ec_ctx->partition_cdf[ctx], |
1592 | 0 | partition_cdf_length(bsize)); |
1593 | 0 | } else if (!has_rows && has_cols) { |
1594 | 0 | assert(p == PARTITION_SPLIT || p == PARTITION_HORZ); |
1595 | 0 | assert(bsize > BLOCK_8X8); |
1596 | 0 | aom_cdf_prob cdf[2]; |
1597 | 0 | partition_gather_vert_alike(cdf, ec_ctx->partition_cdf[ctx], bsize); |
1598 | 0 | aom_write_cdf(w, p == PARTITION_SPLIT, cdf, 2); |
1599 | 0 | } else { |
1600 | 0 | assert(has_rows && !has_cols); |
1601 | 0 | assert(p == PARTITION_SPLIT || p == PARTITION_VERT); |
1602 | 0 | assert(bsize > BLOCK_8X8); |
1603 | 0 | aom_cdf_prob cdf[2]; |
1604 | 0 | partition_gather_horz_alike(cdf, ec_ctx->partition_cdf[ctx], bsize); |
1605 | 0 | aom_write_cdf(w, p == PARTITION_SPLIT, cdf, 2); |
1606 | 0 | } |
1607 | 0 | } |
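| |
| | // Decoder-side view of the edge handling above, as a minimal sketch (the
| | // enum and helper are hypothetical, not libaom API): which partition
| | // signal is present for a block.
| | typedef enum { SIG_NONE, SIG_FULL_SYMBOL, SIG_SPLIT_FLAG } PartitionSignal;
| | static PartitionSignal expected_partition_signal(int is_partition_point,
| |                                                  int has_rows, int has_cols) {
| |   if (!is_partition_point) return SIG_NONE;     // bsize below BLOCK_8X8
| |   if (!has_rows && !has_cols) return SIG_NONE;  // PARTITION_SPLIT implied
| |   if (has_rows && has_cols) return SIG_FULL_SYMBOL;  // full partition CDF
| |   return SIG_SPLIT_FLAG;  // one binary symbol from the gathered CDF
| | }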
1608 | | |
1609 | | static inline void write_modes_sb(AV1_COMP *const cpi, ThreadData *const td, |
1610 | | const TileInfo *const tile, |
1611 | | aom_writer *const w, const TokenExtra **tok, |
1612 | | const TokenExtra *const tok_end, int mi_row, |
1613 | 0 | int mi_col, BLOCK_SIZE bsize) { |
1614 | 0 | const AV1_COMMON *const cm = &cpi->common; |
1615 | 0 | const CommonModeInfoParams *const mi_params = &cm->mi_params; |
1616 | 0 | MACROBLOCKD *const xd = &td->mb.e_mbd; |
1617 | 0 | assert(bsize < BLOCK_SIZES_ALL); |
1618 | 0 | const int hbs = mi_size_wide[bsize] / 2; |
1619 | 0 | const int quarter_step = mi_size_wide[bsize] / 4; |
1620 | 0 | int i; |
1621 | 0 | const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize); |
1622 | 0 | const BLOCK_SIZE subsize = get_partition_subsize(bsize, partition); |
1623 | |
|
1624 | 0 | if (mi_row >= mi_params->mi_rows || mi_col >= mi_params->mi_cols) return; |
1625 | | |
1626 | 0 | #if !CONFIG_REALTIME_ONLY |
1627 | 0 | const int num_planes = av1_num_planes(cm); |
1628 | 0 | for (int plane = 0; plane < num_planes; ++plane) { |
1629 | 0 | int rcol0, rcol1, rrow0, rrow1; |
1630 | | |
1631 | | // Skip some unnecessary work if loop restoration is disabled |
1632 | 0 | if (cm->rst_info[plane].frame_restoration_type == RESTORE_NONE) continue; |
1633 | | |
1634 | 0 | if (av1_loop_restoration_corners_in_sb(cm, plane, mi_row, mi_col, bsize, |
1635 | 0 | &rcol0, &rcol1, &rrow0, &rrow1)) { |
1636 | 0 | const int rstride = cm->rst_info[plane].horz_units; |
1637 | 0 | for (int rrow = rrow0; rrow < rrow1; ++rrow) { |
1638 | 0 | for (int rcol = rcol0; rcol < rcol1; ++rcol) { |
1639 | 0 | const int runit_idx = rcol + rrow * rstride; |
1640 | 0 | loop_restoration_write_sb_coeffs(cm, xd, runit_idx, w, plane, |
1641 | 0 | td->counts); |
1642 | 0 | } |
1643 | 0 | } |
1644 | 0 | } |
1645 | 0 | } |
1646 | 0 | #endif |
1647 | |
|
1648 | 0 | write_partition(cm, xd, hbs, mi_row, mi_col, partition, bsize, w); |
1649 | 0 | switch (partition) { |
1650 | 0 | case PARTITION_NONE: |
1651 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col); |
1652 | 0 | break; |
1653 | 0 | case PARTITION_HORZ: |
1654 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col); |
1655 | 0 | if (mi_row + hbs < mi_params->mi_rows) |
1656 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row + hbs, mi_col); |
1657 | 0 | break; |
1658 | 0 | case PARTITION_VERT: |
1659 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col); |
1660 | 0 | if (mi_col + hbs < mi_params->mi_cols) |
1661 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col + hbs); |
1662 | 0 | break; |
1663 | 0 | case PARTITION_SPLIT: |
1664 | 0 | write_modes_sb(cpi, td, tile, w, tok, tok_end, mi_row, mi_col, subsize); |
1665 | 0 | write_modes_sb(cpi, td, tile, w, tok, tok_end, mi_row, mi_col + hbs, |
1666 | 0 | subsize); |
1667 | 0 | write_modes_sb(cpi, td, tile, w, tok, tok_end, mi_row + hbs, mi_col, |
1668 | 0 | subsize); |
1669 | 0 | write_modes_sb(cpi, td, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs, |
1670 | 0 | subsize); |
1671 | 0 | break; |
1672 | 0 | case PARTITION_HORZ_A: |
1673 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col); |
1674 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col + hbs); |
1675 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row + hbs, mi_col); |
1676 | 0 | break; |
1677 | 0 | case PARTITION_HORZ_B: |
1678 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col); |
1679 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row + hbs, mi_col); |
1680 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs); |
1681 | 0 | break; |
1682 | 0 | case PARTITION_VERT_A: |
1683 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col); |
1684 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row + hbs, mi_col); |
1685 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col + hbs); |
1686 | 0 | break; |
1687 | 0 | case PARTITION_VERT_B: |
1688 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col); |
1689 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, mi_col + hbs); |
1690 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row + hbs, mi_col + hbs); |
1691 | 0 | break; |
1692 | 0 | case PARTITION_HORZ_4: |
1693 | 0 | for (i = 0; i < 4; ++i) { |
1694 | 0 | int this_mi_row = mi_row + i * quarter_step; |
1695 | 0 | if (i > 0 && this_mi_row >= mi_params->mi_rows) break; |
1696 | | |
1697 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, this_mi_row, mi_col); |
1698 | 0 | } |
1699 | 0 | break; |
1700 | 0 | case PARTITION_VERT_4: |
1701 | 0 | for (i = 0; i < 4; ++i) { |
1702 | 0 | int this_mi_col = mi_col + i * quarter_step; |
1703 | 0 | if (i > 0 && this_mi_col >= mi_params->mi_cols) break; |
1704 | | |
1705 | 0 | write_modes_b(cpi, td, tile, w, tok, tok_end, mi_row, this_mi_col); |
1706 | 0 | } |
1707 | 0 | break; |
1708 | 0 | default: assert(0); |
1709 | 0 | } |
1710 | | |
1711 | | // update partition context |
1712 | 0 | update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition); |
1713 | 0 | } |
1714 | | |
1715 | | // Populate token pointers appropriately based on token_info. |
1716 | | static inline void get_token_pointers(const TokenInfo *token_info, |
1717 | | const int tile_row, int tile_col, |
1718 | | const int sb_row_in_tile, |
1719 | | const TokenExtra **tok, |
1720 | 0 | const TokenExtra **tok_end) { |
1721 | 0 | if (!is_token_info_allocated(token_info)) { |
1722 | 0 | *tok = NULL; |
1723 | 0 | *tok_end = NULL; |
1724 | 0 | return; |
1725 | 0 | } |
1726 | 0 | *tok = token_info->tplist[tile_row][tile_col][sb_row_in_tile].start; |
1727 | 0 | *tok_end = |
1728 | 0 | *tok + token_info->tplist[tile_row][tile_col][sb_row_in_tile].count; |
1729 | 0 | } |
1730 | | |
1731 | | static inline void write_modes(AV1_COMP *const cpi, ThreadData *const td, |
1732 | | const TileInfo *const tile, aom_writer *const w, |
1733 | 0 | int tile_row, int tile_col) { |
1734 | 0 | AV1_COMMON *const cm = &cpi->common; |
1735 | 0 | MACROBLOCKD *const xd = &td->mb.e_mbd; |
1736 | 0 | const int mi_row_start = tile->mi_row_start; |
1737 | 0 | const int mi_row_end = tile->mi_row_end; |
1738 | 0 | const int mi_col_start = tile->mi_col_start; |
1739 | 0 | const int mi_col_end = tile->mi_col_end; |
1740 | 0 | const int num_planes = av1_num_planes(cm); |
1741 | |
|
1742 | 0 | av1_zero_above_context(cm, xd, mi_col_start, mi_col_end, tile->tile_row); |
1743 | 0 | av1_init_above_context(&cm->above_contexts, num_planes, tile->tile_row, xd); |
1744 | |
|
1745 | 0 | if (cpi->common.delta_q_info.delta_q_present_flag) { |
1746 | 0 | xd->current_base_qindex = cpi->common.quant_params.base_qindex; |
1747 | 0 | if (cpi->common.delta_q_info.delta_lf_present_flag) { |
1748 | 0 | av1_reset_loop_filter_delta(xd, num_planes); |
1749 | 0 | } |
1750 | 0 | } |
1751 | |
|
1752 | 0 | for (int mi_row = mi_row_start; mi_row < mi_row_end; |
1753 | 0 | mi_row += cm->seq_params->mib_size) { |
1754 | 0 | const int sb_row_in_tile = |
1755 | 0 | (mi_row - tile->mi_row_start) >> cm->seq_params->mib_size_log2; |
1756 | 0 | const TokenInfo *token_info = &cpi->token_info; |
1757 | 0 | const TokenExtra *tok; |
1758 | 0 | const TokenExtra *tok_end; |
1759 | 0 | get_token_pointers(token_info, tile_row, tile_col, sb_row_in_tile, &tok, |
1760 | 0 | &tok_end); |
1761 | |
|
1762 | 0 | av1_zero_left_context(xd); |
1763 | |
|
1764 | 0 | for (int mi_col = mi_col_start; mi_col < mi_col_end; |
1765 | 0 | mi_col += cm->seq_params->mib_size) { |
1766 | 0 | td->mb.cb_coef_buff = av1_get_cb_coeff_buffer(cpi, mi_row, mi_col); |
1767 | 0 | write_modes_sb(cpi, td, tile, w, &tok, tok_end, mi_row, mi_col, |
1768 | 0 | cm->seq_params->sb_size); |
1769 | 0 | } |
1770 | 0 | assert(tok == tok_end); |
1771 | 0 | } |
1772 | 0 | } |
1773 | | |
1774 | | static inline void encode_restoration_mode(AV1_COMMON *cm, |
1775 | 0 | struct aom_write_bit_buffer *wb) { |
1776 | 0 | assert(!cm->features.all_lossless); |
1777 | 0 | if (!cm->seq_params->enable_restoration) return; |
1778 | 0 | if (cm->features.allow_intrabc) return; |
1779 | 0 | const int num_planes = av1_num_planes(cm); |
1780 | 0 | int all_none = 1, chroma_none = 1; |
1781 | 0 | for (int p = 0; p < num_planes; ++p) { |
1782 | 0 | RestorationInfo *rsi = &cm->rst_info[p]; |
1783 | 0 | if (rsi->frame_restoration_type != RESTORE_NONE) { |
1784 | 0 | all_none = 0; |
1785 | 0 | chroma_none &= p == 0; |
1786 | 0 | } |
1787 | 0 | switch (rsi->frame_restoration_type) { |
1788 | 0 | case RESTORE_NONE: |
1789 | 0 | aom_wb_write_bit(wb, 0); |
1790 | 0 | aom_wb_write_bit(wb, 0); |
1791 | 0 | break; |
1792 | 0 | case RESTORE_WIENER: |
1793 | 0 | aom_wb_write_bit(wb, 1); |
1794 | 0 | aom_wb_write_bit(wb, 0); |
1795 | 0 | break; |
1796 | 0 | case RESTORE_SGRPROJ: |
1797 | 0 | aom_wb_write_bit(wb, 1); |
1798 | 0 | aom_wb_write_bit(wb, 1); |
1799 | 0 | break; |
1800 | 0 | case RESTORE_SWITCHABLE: |
1801 | 0 | aom_wb_write_bit(wb, 0); |
1802 | 0 | aom_wb_write_bit(wb, 1); |
1803 | 0 | break; |
1804 | 0 | default: assert(0); |
1805 | 0 | } |
1806 | 0 | } |
1807 | 0 | if (!all_none) { |
1808 | 0 | assert(cm->seq_params->sb_size == BLOCK_64X64 || |
1809 | 0 | cm->seq_params->sb_size == BLOCK_128X128); |
1810 | 0 | const int sb_size = cm->seq_params->sb_size == BLOCK_128X128 ? 128 : 64; |
1811 | |
|
1812 | 0 | RestorationInfo *rsi = &cm->rst_info[0]; |
1813 | |
|
1814 | 0 | assert(rsi->restoration_unit_size >= sb_size); |
1815 | 0 | assert(RESTORATION_UNITSIZE_MAX == 256); |
1816 | |
|
1817 | 0 | if (sb_size == 64) { |
1818 | 0 | aom_wb_write_bit(wb, rsi->restoration_unit_size > 64); |
1819 | 0 | } |
1820 | 0 | if (rsi->restoration_unit_size > 64) { |
1821 | 0 | aom_wb_write_bit(wb, rsi->restoration_unit_size > 128); |
1822 | 0 | } |
1823 | 0 | } |
1824 | |
|
1825 | 0 | if (num_planes > 1) { |
1826 | 0 | int s = |
1827 | 0 | AOMMIN(cm->seq_params->subsampling_x, cm->seq_params->subsampling_y); |
1828 | 0 | if (s && !chroma_none) { |
1829 | 0 | aom_wb_write_bit(wb, cm->rst_info[1].restoration_unit_size != |
1830 | 0 | cm->rst_info[0].restoration_unit_size); |
1831 | 0 | assert(cm->rst_info[1].restoration_unit_size == |
1832 | 0 | cm->rst_info[0].restoration_unit_size || |
1833 | 0 | cm->rst_info[1].restoration_unit_size == |
1834 | 0 | (cm->rst_info[0].restoration_unit_size >> s)); |
1835 | 0 | assert(cm->rst_info[2].restoration_unit_size == |
1836 | 0 | cm->rst_info[1].restoration_unit_size); |
1837 | 0 | } else if (!s) { |
1838 | 0 | assert(cm->rst_info[1].restoration_unit_size == |
1839 | 0 | cm->rst_info[0].restoration_unit_size); |
1840 | 0 | assert(cm->rst_info[2].restoration_unit_size == |
1841 | 0 | cm->rst_info[1].restoration_unit_size); |
1842 | 0 | } |
1843 | 0 | } |
1844 | 0 | } |
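| |
| | // The per-plane 2-bit code written above, as a table plus a decoder-side
| | // sketch (hypothetical helper, not libaom API):
| | //   RESTORE_NONE       -> bits (0, 0)
| | //   RESTORE_WIENER     -> bits (1, 0)
| | //   RESTORE_SGRPROJ    -> bits (1, 1)
| | //   RESTORE_SWITCHABLE -> bits (0, 1)
| | static RestorationType sketch_frame_rtype_from_bits(int bit0, int bit1) {
| |   if (bit0) return bit1 ? RESTORE_SGRPROJ : RESTORE_WIENER;
| |   return bit1 ? RESTORE_SWITCHABLE : RESTORE_NONE;
| | }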
1845 | | |
1846 | | #if !CONFIG_REALTIME_ONLY |
1847 | | static inline void write_wiener_filter(int wiener_win, |
1848 | | const WienerInfo *wiener_info, |
1849 | | WienerInfo *ref_wiener_info, |
1850 | 0 | aom_writer *wb) { |
1851 | 0 | if (wiener_win == WIENER_WIN) |
1852 | 0 | aom_write_primitive_refsubexpfin( |
1853 | 0 | wb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1, |
1854 | 0 | WIENER_FILT_TAP0_SUBEXP_K, |
1855 | 0 | ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV, |
1856 | 0 | wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV); |
1857 | 0 | else |
1858 | 0 | assert(wiener_info->vfilter[0] == 0 && |
1859 | 0 | wiener_info->vfilter[WIENER_WIN - 1] == 0); |
1860 | 0 | aom_write_primitive_refsubexpfin( |
1861 | 0 | wb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1, |
1862 | 0 | WIENER_FILT_TAP1_SUBEXP_K, |
1863 | 0 | ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV, |
1864 | 0 | wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV); |
1865 | 0 | aom_write_primitive_refsubexpfin( |
1866 | 0 | wb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1, |
1867 | 0 | WIENER_FILT_TAP2_SUBEXP_K, |
1868 | 0 | ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV, |
1869 | 0 | wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV); |
1870 | 0 | if (wiener_win == WIENER_WIN) |
1871 | 0 | aom_write_primitive_refsubexpfin( |
1872 | 0 | wb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1, |
1873 | 0 | WIENER_FILT_TAP0_SUBEXP_K, |
1874 | 0 | ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV, |
1875 | 0 | wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV); |
1876 | 0 | else |
1877 | 0 | assert(wiener_info->hfilter[0] == 0 && |
1878 | 0 | wiener_info->hfilter[WIENER_WIN - 1] == 0); |
1879 | 0 | aom_write_primitive_refsubexpfin( |
1880 | 0 | wb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1, |
1881 | 0 | WIENER_FILT_TAP1_SUBEXP_K, |
1882 | 0 | ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV, |
1883 | 0 | wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV); |
1884 | 0 | aom_write_primitive_refsubexpfin( |
1885 | 0 | wb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1, |
1886 | 0 | WIENER_FILT_TAP2_SUBEXP_K, |
1887 | 0 | ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV, |
1888 | 0 | wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV); |
1889 | 0 | memcpy(ref_wiener_info, wiener_info, sizeof(*wiener_info)); |
1890 | 0 | } |
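| |
| | // Note (a sketch of the convention, not stated in this file): the Wiener
| | // filter taps are symmetric, so only taps 0..2 per direction are coded
| | // above, differentially against the previous unit; the mirrored taps and
| | // the centre tap are implied, the centre being chosen so the taps sum to
| | // the filter's fixed DC gain.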
1891 | | |
1892 | | static inline void write_sgrproj_filter(const SgrprojInfo *sgrproj_info, |
1893 | | SgrprojInfo *ref_sgrproj_info, |
1894 | 0 | aom_writer *wb) { |
1895 | 0 | aom_write_literal(wb, sgrproj_info->ep, SGRPROJ_PARAMS_BITS); |
1896 | 0 | const sgr_params_type *params = &av1_sgr_params[sgrproj_info->ep]; |
1897 | |
|
1898 | 0 | if (params->r[0] == 0) { |
1899 | 0 | assert(sgrproj_info->xqd[0] == 0); |
1900 | 0 | aom_write_primitive_refsubexpfin( |
1901 | 0 | wb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K, |
1902 | 0 | ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, |
1903 | 0 | sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1); |
1904 | 0 | } else if (params->r[1] == 0) { |
1905 | 0 | aom_write_primitive_refsubexpfin( |
1906 | 0 | wb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K, |
1907 | 0 | ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, |
1908 | 0 | sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0); |
1909 | 0 | } else { |
1910 | 0 | aom_write_primitive_refsubexpfin( |
1911 | 0 | wb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K, |
1912 | 0 | ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, |
1913 | 0 | sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0); |
1914 | 0 | aom_write_primitive_refsubexpfin( |
1915 | 0 | wb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K, |
1916 | 0 | ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, |
1917 | 0 | sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1); |
1918 | 0 | } |
1919 | |
|
1920 | 0 | memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info)); |
1921 | 0 | } |
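| |
| | // Note: ep selects a self-guided preset, and a zero radius r[0] or r[1]
| | // disables that pass, so the matching projection coefficient xqd[0] or
| | // xqd[1] is not signalled; whichever coefficients are sent are coded
| | // differentially against the previous unit, which is then updated via the
| | // memcpy above for the next unit in the plane.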
1922 | | |
1923 | | static inline void loop_restoration_write_sb_coeffs( |
1924 | | const AV1_COMMON *const cm, MACROBLOCKD *xd, int runit_idx, |
1925 | 0 | aom_writer *const w, int plane, FRAME_COUNTS *counts) { |
1926 | 0 | const RestorationUnitInfo *rui = &cm->rst_info[plane].unit_info[runit_idx]; |
1927 | 0 | const RestorationInfo *rsi = cm->rst_info + plane; |
1928 | 0 | RestorationType frame_rtype = rsi->frame_restoration_type; |
1929 | 0 | assert(frame_rtype != RESTORE_NONE); |
1930 | |
|
1931 | 0 | (void)counts; |
1932 | 0 | assert(!cm->features.all_lossless); |
1933 | |
|
1934 | 0 | const int wiener_win = (plane > 0) ? WIENER_WIN_CHROMA : WIENER_WIN; |
1935 | 0 | WienerInfo *ref_wiener_info = &xd->wiener_info[plane]; |
1936 | 0 | SgrprojInfo *ref_sgrproj_info = &xd->sgrproj_info[plane]; |
1937 | 0 | RestorationType unit_rtype = rui->restoration_type; |
1938 | |
|
1939 | 0 | if (frame_rtype == RESTORE_SWITCHABLE) { |
1940 | 0 | aom_write_symbol(w, unit_rtype, xd->tile_ctx->switchable_restore_cdf, |
1941 | 0 | RESTORE_SWITCHABLE_TYPES); |
1942 | | #if CONFIG_ENTROPY_STATS |
1943 | | ++counts->switchable_restore[unit_rtype]; |
1944 | | #endif |
1945 | 0 | switch (unit_rtype) { |
1946 | 0 | case RESTORE_WIENER: |
1947 | | #if DEBUG_LR_COSTING |
1948 | | assert(!memcmp( |
1949 | | ref_wiener_info, |
1950 | | &lr_ref_params[RESTORE_SWITCHABLE][plane][runit_idx].wiener_info, |
1951 | | sizeof(*ref_wiener_info))); |
1952 | | #endif |
1953 | 0 | write_wiener_filter(wiener_win, &rui->wiener_info, ref_wiener_info, w); |
1954 | 0 | break; |
1955 | 0 | case RESTORE_SGRPROJ: |
1956 | | #if DEBUG_LR_COSTING |
1957 | | assert(!memcmp(&ref_sgrproj_info->xqd, |
1958 | | &lr_ref_params[RESTORE_SWITCHABLE][plane][runit_idx] |
1959 | | .sgrproj_info.xqd, |
1960 | | sizeof(ref_sgrproj_info->xqd))); |
1961 | | #endif |
1962 | 0 | write_sgrproj_filter(&rui->sgrproj_info, ref_sgrproj_info, w); |
1963 | 0 | break; |
1964 | 0 | default: assert(unit_rtype == RESTORE_NONE); break; |
1965 | 0 | } |
1966 | 0 | } else if (frame_rtype == RESTORE_WIENER) { |
1967 | 0 | aom_write_symbol(w, unit_rtype != RESTORE_NONE, |
1968 | 0 | xd->tile_ctx->wiener_restore_cdf, 2); |
1969 | | #if CONFIG_ENTROPY_STATS |
1970 | | ++counts->wiener_restore[unit_rtype != RESTORE_NONE]; |
1971 | | #endif |
1972 | 0 | if (unit_rtype != RESTORE_NONE) { |
1973 | | #if DEBUG_LR_COSTING |
1974 | | assert( |
1975 | | !memcmp(ref_wiener_info, |
1976 | | &lr_ref_params[RESTORE_WIENER][plane][runit_idx].wiener_info, |
1977 | | sizeof(*ref_wiener_info))); |
1978 | | #endif |
1979 | 0 | write_wiener_filter(wiener_win, &rui->wiener_info, ref_wiener_info, w); |
1980 | 0 | } |
1981 | 0 | } else if (frame_rtype == RESTORE_SGRPROJ) { |
1982 | 0 | aom_write_symbol(w, unit_rtype != RESTORE_NONE, |
1983 | 0 | xd->tile_ctx->sgrproj_restore_cdf, 2); |
1984 | | #if CONFIG_ENTROPY_STATS |
1985 | | ++counts->sgrproj_restore[unit_rtype != RESTORE_NONE]; |
1986 | | #endif |
1987 | 0 | if (unit_rtype != RESTORE_NONE) { |
1988 | | #if DEBUG_LR_COSTING |
1989 | | assert(!memcmp( |
1990 | | &ref_sgrproj_info->xqd, |
1991 | | &lr_ref_params[RESTORE_SGRPROJ][plane][runit_idx].sgrproj_info.xqd, |
1992 | | sizeof(ref_sgrproj_info->xqd))); |
1993 | | #endif |
1994 | 0 | write_sgrproj_filter(&rui->sgrproj_info, ref_sgrproj_info, w); |
1995 | 0 | } |
1996 | 0 | } |
1997 | 0 | } |
1998 | | #endif // !CONFIG_REALTIME_ONLY |
1999 | | |
2000 | | // Only write out the ref delta section if any of the elements |
2001 | | // will signal a delta. |
2002 | 0 | static bool is_mode_ref_delta_meaningful(AV1_COMMON *cm) { |
2003 | 0 | struct loopfilter *lf = &cm->lf; |
2004 | 0 | if (!lf->mode_ref_delta_update) { |
2005 | 0 | return false;
2006 | 0 | } |
2007 | 0 | const RefCntBuffer *buf = get_primary_ref_frame_buf(cm); |
2008 | 0 | int8_t last_ref_deltas[REF_FRAMES]; |
2009 | 0 | int8_t last_mode_deltas[MAX_MODE_LF_DELTAS]; |
2010 | 0 | if (buf == NULL) { |
2011 | 0 | av1_set_default_ref_deltas(last_ref_deltas); |
2012 | 0 | av1_set_default_mode_deltas(last_mode_deltas); |
2013 | 0 | } else { |
2014 | 0 | memcpy(last_ref_deltas, buf->ref_deltas, REF_FRAMES); |
2015 | 0 | memcpy(last_mode_deltas, buf->mode_deltas, MAX_MODE_LF_DELTAS); |
2016 | 0 | } |
2017 | 0 | for (int i = 0; i < REF_FRAMES; i++) { |
2018 | 0 | if (lf->ref_deltas[i] != last_ref_deltas[i]) { |
2019 | 0 | return true; |
2020 | 0 | } |
2021 | 0 | } |
2022 | 0 | for (int i = 0; i < MAX_MODE_LF_DELTAS; i++) { |
2023 | 0 | if (lf->mode_deltas[i] != last_mode_deltas[i]) { |
2024 | 0 | return true; |
2025 | 0 | } |
2026 | 0 | } |
2027 | 0 | return false; |
2028 | 0 | } |
2029 | | |
2030 | | static inline void encode_loopfilter(AV1_COMMON *cm, |
2031 | 0 | struct aom_write_bit_buffer *wb) { |
2032 | 0 | assert(!cm->features.coded_lossless); |
2033 | 0 | if (cm->features.allow_intrabc) return; |
2034 | 0 | const int num_planes = av1_num_planes(cm); |
2035 | 0 | struct loopfilter *lf = &cm->lf; |
2036 | | |
2037 | | // Encode the loop filter level and type |
2038 | 0 | aom_wb_write_literal(wb, lf->filter_level[0], 6); |
2039 | 0 | aom_wb_write_literal(wb, lf->filter_level[1], 6); |
2040 | 0 | if (num_planes > 1) { |
2041 | 0 | if (lf->filter_level[0] || lf->filter_level[1]) { |
2042 | 0 | aom_wb_write_literal(wb, lf->filter_level_u, 6); |
2043 | 0 | aom_wb_write_literal(wb, lf->filter_level_v, 6); |
2044 | 0 | } |
2045 | 0 | } |
2046 | 0 | aom_wb_write_literal(wb, lf->sharpness_level, 3); |
2047 | |
|
2048 | 0 | aom_wb_write_bit(wb, lf->mode_ref_delta_enabled); |
2049 | | |
2050 | | // Write out loop filter deltas applied at the MB level based on mode or |
2051 | | // ref frame (if they are enabled), only if there is information to write. |
2052 | 0 | int meaningful = is_mode_ref_delta_meaningful(cm); |
2053 | 0 | aom_wb_write_bit(wb, meaningful); |
2054 | 0 | if (!meaningful) { |
2055 | 0 | return; |
2056 | 0 | } |
2057 | | |
2058 | 0 | const RefCntBuffer *buf = get_primary_ref_frame_buf(cm); |
2059 | 0 | int8_t last_ref_deltas[REF_FRAMES]; |
2060 | 0 | int8_t last_mode_deltas[MAX_MODE_LF_DELTAS]; |
2061 | 0 | if (buf == NULL) { |
2062 | 0 | av1_set_default_ref_deltas(last_ref_deltas); |
2063 | 0 | av1_set_default_mode_deltas(last_mode_deltas); |
2064 | 0 | } else { |
2065 | 0 | memcpy(last_ref_deltas, buf->ref_deltas, REF_FRAMES); |
2066 | 0 | memcpy(last_mode_deltas, buf->mode_deltas, MAX_MODE_LF_DELTAS); |
2067 | 0 | } |
2068 | 0 | for (int i = 0; i < REF_FRAMES; i++) { |
2069 | 0 | const int delta = lf->ref_deltas[i]; |
2070 | 0 | const int changed = delta != last_ref_deltas[i]; |
2071 | 0 | aom_wb_write_bit(wb, changed); |
2072 | 0 | if (changed) aom_wb_write_inv_signed_literal(wb, delta, 6); |
2073 | 0 | } |
2074 | 0 | for (int i = 0; i < MAX_MODE_LF_DELTAS; i++) { |
2075 | 0 | const int delta = lf->mode_deltas[i]; |
2076 | 0 | const int changed = delta != last_mode_deltas[i]; |
2077 | 0 | aom_wb_write_bit(wb, changed); |
2078 | 0 | if (changed) aom_wb_write_inv_signed_literal(wb, delta, 6); |
2079 | 0 | } |
2080 | 0 | } |
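| |
| | // Bit-cost sketch (illustrative): with chroma present and a nonzero luma
| | // level, the header above spends 6 + 6 (+ 6 + 6 chroma) level bits, 3
| | // sharpness bits and 2 flag bits; when deltas are meaningful it adds one
| | // change bit per entry (REF_FRAMES + MAX_MODE_LF_DELTAS of them, 8 + 2 in
| | // AV1) plus 7 bits for each delta that actually changed.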
2081 | | |
2082 | | static inline void encode_cdef(const AV1_COMMON *cm, |
2083 | 0 | struct aom_write_bit_buffer *wb) { |
2084 | 0 | assert(!cm->features.coded_lossless); |
2085 | 0 | if (!cm->seq_params->enable_cdef) return; |
2086 | 0 | if (cm->features.allow_intrabc) return; |
2087 | 0 | const int num_planes = av1_num_planes(cm); |
2088 | 0 | int i; |
2089 | 0 | aom_wb_write_literal(wb, cm->cdef_info.cdef_damping - 3, 2); |
2090 | 0 | aom_wb_write_literal(wb, cm->cdef_info.cdef_bits, 2); |
2091 | 0 | for (i = 0; i < cm->cdef_info.nb_cdef_strengths; i++) { |
2092 | 0 | aom_wb_write_literal(wb, cm->cdef_info.cdef_strengths[i], |
2093 | 0 | CDEF_STRENGTH_BITS); |
2094 | 0 | if (num_planes > 1) |
2095 | 0 | aom_wb_write_literal(wb, cm->cdef_info.cdef_uv_strengths[i], |
2096 | 0 | CDEF_STRENGTH_BITS); |
2097 | 0 | } |
2098 | 0 | } |
2099 | | |
2100 | 0 | static inline void write_delta_q(struct aom_write_bit_buffer *wb, int delta_q) { |
2101 | 0 | if (delta_q != 0) { |
2102 | 0 | aom_wb_write_bit(wb, 1); |
2103 | 0 | aom_wb_write_inv_signed_literal(wb, delta_q, 6); |
2104 | 0 | } else { |
2105 | 0 | aom_wb_write_bit(wb, 0); |
2106 | 0 | } |
2107 | 0 | } |
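| |
| | // Cost sketch (hypothetical helper, not libaom API): a zero delta is one
| | // flag bit; a nonzero one adds an inverse-signed literal of 6 magnitude
| | // bits plus a sign bit.
| | static int sketch_delta_q_bits(int delta_q) {
| |   return delta_q != 0 ? 1 + 6 + 1 : 1;
| | }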
2108 | | |
2109 | | static inline void encode_quantization( |
2110 | | const CommonQuantParams *const quant_params, int num_planes, |
2111 | 0 | bool separate_uv_delta_q, struct aom_write_bit_buffer *wb) { |
2112 | 0 | aom_wb_write_literal(wb, quant_params->base_qindex, QINDEX_BITS); |
2113 | 0 | write_delta_q(wb, quant_params->y_dc_delta_q); |
2114 | 0 | if (num_planes > 1) { |
2115 | 0 | int diff_uv_delta = |
2116 | 0 | (quant_params->u_dc_delta_q != quant_params->v_dc_delta_q) || |
2117 | 0 | (quant_params->u_ac_delta_q != quant_params->v_ac_delta_q); |
2118 | 0 | if (separate_uv_delta_q) aom_wb_write_bit(wb, diff_uv_delta); |
2119 | 0 | write_delta_q(wb, quant_params->u_dc_delta_q); |
2120 | 0 | write_delta_q(wb, quant_params->u_ac_delta_q); |
2121 | 0 | if (diff_uv_delta) { |
2122 | 0 | write_delta_q(wb, quant_params->v_dc_delta_q); |
2123 | 0 | write_delta_q(wb, quant_params->v_ac_delta_q); |
2124 | 0 | } |
2125 | 0 | } |
2126 | 0 | aom_wb_write_bit(wb, quant_params->using_qmatrix); |
2127 | 0 | if (quant_params->using_qmatrix) { |
2128 | 0 | aom_wb_write_literal(wb, quant_params->qmatrix_level_y, QM_LEVEL_BITS); |
2129 | 0 | aom_wb_write_literal(wb, quant_params->qmatrix_level_u, QM_LEVEL_BITS); |
2130 | 0 | if (!separate_uv_delta_q) |
2131 | 0 | assert(quant_params->qmatrix_level_u == quant_params->qmatrix_level_v); |
2132 | 0 | else |
2133 | 0 | aom_wb_write_literal(wb, quant_params->qmatrix_level_v, QM_LEVEL_BITS); |
2134 | 0 | } |
2135 | 0 | } |
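| |
| | // Example: with separate_uv_delta_q set but identical U/V deltas,
| | // diff_uv_delta is 0, so only the U deltas are written above and the
| | // decoder reuses them for V; without separate_uv_delta_q the flag itself
| | // is omitted and the U/V deltas must already match.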
2136 | | |
2137 | | static inline void encode_segmentation(AV1_COMMON *cm, |
2138 | 0 | struct aom_write_bit_buffer *wb) { |
2139 | 0 | int i, j; |
2140 | 0 | struct segmentation *seg = &cm->seg; |
2141 | |
|
2142 | 0 | aom_wb_write_bit(wb, seg->enabled); |
2143 | 0 | if (!seg->enabled) return; |
2144 | | |
2145 | | // Write update flags |
2146 | 0 | if (cm->features.primary_ref_frame != PRIMARY_REF_NONE) { |
2147 | 0 | aom_wb_write_bit(wb, seg->update_map); |
2148 | 0 | if (seg->update_map) aom_wb_write_bit(wb, seg->temporal_update); |
2149 | 0 | aom_wb_write_bit(wb, seg->update_data); |
2150 | 0 | } |
2151 | | |
2152 | | // Segmentation data |
2153 | 0 | if (seg->update_data) { |
2154 | 0 | for (i = 0; i < MAX_SEGMENTS; i++) { |
2155 | 0 | for (j = 0; j < SEG_LVL_MAX; j++) { |
2156 | 0 | const int active = segfeature_active(seg, i, j); |
2157 | 0 | aom_wb_write_bit(wb, active); |
2158 | 0 | if (active) { |
2159 | 0 | const int data_max = av1_seg_feature_data_max(j); |
2160 | 0 | const int data_min = -data_max; |
2161 | 0 | const int ubits = get_unsigned_bits(data_max); |
2162 | 0 | const int data = clamp(get_segdata(seg, i, j), data_min, data_max); |
2163 | |
|
2164 | 0 | if (av1_is_segfeature_signed(j)) { |
2165 | 0 | aom_wb_write_inv_signed_literal(wb, data, ubits); |
2166 | 0 | } else { |
2167 | 0 | aom_wb_write_literal(wb, data, ubits); |
2168 | 0 | } |
2169 | 0 | } |
2170 | 0 | } |
2171 | 0 | } |
2172 | 0 | } |
2173 | 0 | } |
2174 | | |
2175 | | static inline void write_frame_interp_filter(InterpFilter filter, |
2176 | 0 | struct aom_write_bit_buffer *wb) { |
2177 | 0 | aom_wb_write_bit(wb, filter == SWITCHABLE); |
2178 | 0 | if (filter != SWITCHABLE) |
2179 | 0 | aom_wb_write_literal(wb, filter, LOG_SWITCHABLE_FILTERS); |
2180 | 0 | } |
2181 | | |
2182 | | // Same function as write_uniform but writing to the uncompressed header wb
2183 | | static inline void wb_write_uniform(struct aom_write_bit_buffer *wb, int n, |
2184 | 0 | int v) { |
2185 | 0 | const int l = get_unsigned_bits(n); |
2186 | 0 | const int m = (1 << l) - n; |
2187 | 0 | if (l == 0) return; |
2188 | 0 | if (v < m) { |
2189 | 0 | aom_wb_write_literal(wb, v, l - 1); |
2190 | 0 | } else { |
2191 | 0 | aom_wb_write_literal(wb, m + ((v - m) >> 1), l - 1); |
2192 | 0 | aom_wb_write_literal(wb, (v - m) & 1, 1); |
2193 | 0 | } |
2194 | 0 | } |
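| |
| | // Worked example of the quasi-uniform code above (hypothetical helper, not
| | // libaom API): for n = 5, l = 3 and m = (1 << 3) - 5 = 3, so v in
| | // {0, 1, 2} costs l - 1 = 2 bits while v in {3, 4} costs the full l = 3.
| | static int sketch_wb_uniform_bits(int n, int v) {
| |   const int l = get_unsigned_bits(n);
| |   const int m = (1 << l) - n;
| |   if (l == 0) return 0;
| |   return v < m ? l - 1 : l;
| | }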
2195 | | |
2196 | | static inline void write_tile_info_max_tile(const AV1_COMMON *const cm, |
2197 | 0 | struct aom_write_bit_buffer *wb) { |
2198 | 0 | int width_sb = |
2199 | 0 | CEIL_POWER_OF_TWO(cm->mi_params.mi_cols, cm->seq_params->mib_size_log2); |
2200 | 0 | int height_sb = |
2201 | 0 | CEIL_POWER_OF_TWO(cm->mi_params.mi_rows, cm->seq_params->mib_size_log2); |
2202 | 0 | int size_sb, i; |
2203 | 0 | const CommonTileParams *const tiles = &cm->tiles; |
2204 | |
|
2205 | 0 | aom_wb_write_bit(wb, tiles->uniform_spacing); |
2206 | |
|
2207 | 0 | if (tiles->uniform_spacing) { |
2208 | 0 | int ones = tiles->log2_cols - tiles->min_log2_cols; |
2209 | 0 | while (ones--) { |
2210 | 0 | aom_wb_write_bit(wb, 1); |
2211 | 0 | } |
2212 | 0 | if (tiles->log2_cols < tiles->max_log2_cols) { |
2213 | 0 | aom_wb_write_bit(wb, 0); |
2214 | 0 | } |
2215 | | |
2216 | | // rows |
2217 | 0 | ones = tiles->log2_rows - tiles->min_log2_rows; |
2218 | 0 | while (ones--) { |
2219 | 0 | aom_wb_write_bit(wb, 1); |
2220 | 0 | } |
2221 | 0 | if (tiles->log2_rows < tiles->max_log2_rows) { |
2222 | 0 | aom_wb_write_bit(wb, 0); |
2223 | 0 | } |
2224 | 0 | } else { |
2225 | | // Explicit tiles with configurable tile widths and heights |
2226 | | // columns |
2227 | 0 | for (i = 0; i < tiles->cols; i++) { |
2228 | 0 | size_sb = tiles->col_start_sb[i + 1] - tiles->col_start_sb[i]; |
2229 | 0 | wb_write_uniform(wb, AOMMIN(width_sb, tiles->max_width_sb), size_sb - 1); |
2230 | 0 | width_sb -= size_sb; |
2231 | 0 | } |
2232 | 0 | assert(width_sb == 0); |
2233 | | |
2234 | | // rows |
2235 | 0 | for (i = 0; i < tiles->rows; i++) { |
2236 | 0 | size_sb = tiles->row_start_sb[i + 1] - tiles->row_start_sb[i]; |
2237 | 0 | wb_write_uniform(wb, AOMMIN(height_sb, tiles->max_height_sb), |
2238 | 0 | size_sb - 1); |
2239 | 0 | height_sb -= size_sb; |
2240 | 0 | } |
2241 | 0 | assert(height_sb == 0); |
2242 | 0 | } |
2243 | 0 | } |
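| |
| | // The uniform-spacing branch above writes each log2 tile count as a
| | // truncated unary code; a minimal sketch of that pattern (hypothetical
| | // helper, not libaom API):
| | static void sketch_write_tile_log2(struct aom_write_bit_buffer *wb,
| |                                    int log2_val, int min_log2,
| |                                    int max_log2) {
| |   for (int i = min_log2; i < log2_val; ++i) aom_wb_write_bit(wb, 1);
| |   if (log2_val < max_log2) aom_wb_write_bit(wb, 0);  // terminator if needed
| | }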
2244 | | |
2245 | | static inline void write_tile_info(const AV1_COMMON *const cm, |
2246 | | struct aom_write_bit_buffer *saved_wb, |
2247 | 0 | struct aom_write_bit_buffer *wb) { |
2248 | 0 | write_tile_info_max_tile(cm, wb); |
2249 | |
|
2250 | 0 | *saved_wb = *wb; |
2251 | 0 | if (cm->tiles.rows * cm->tiles.cols > 1) { |
2252 | | // tile id used for cdf update |
2253 | 0 | aom_wb_write_literal(wb, 0, cm->tiles.log2_cols + cm->tiles.log2_rows); |
2254 | | // Number of bytes in tile size - 1 |
2255 | 0 | aom_wb_write_literal(wb, 3, 2); |
2256 | 0 | } |
2257 | 0 | } |
2258 | | |
2259 | | static inline void write_ext_tile_info(const AV1_COMMON *const cm, |
2260 | | struct aom_write_bit_buffer *saved_wb, |
2261 | 0 | struct aom_write_bit_buffer *wb) { |
2262 | | // This information is stored as a separate byte. |
2263 | 0 | int mod = wb->bit_offset % CHAR_BIT; |
2264 | 0 | if (mod > 0) aom_wb_write_literal(wb, 0, CHAR_BIT - mod); |
2265 | 0 | assert(aom_wb_is_byte_aligned(wb)); |
2266 | |
|
2267 | 0 | *saved_wb = *wb; |
2268 | 0 | if (cm->tiles.rows * cm->tiles.cols > 1) { |
2269 | | // Note that the last item in the uncompressed header is the data |
2270 | | // describing tile configuration. |
2271 | | // Number of bytes in tile column size - 1 |
2272 | 0 | aom_wb_write_literal(wb, 0, 2); |
2273 | | // Number of bytes in tile size - 1 |
2274 | 0 | aom_wb_write_literal(wb, 0, 2); |
2275 | 0 | } |
2276 | 0 | } |
2277 | | |
2278 | | static inline int find_identical_tile( |
2279 | | const int tile_row, const int tile_col, |
2280 | 0 | TileBufferEnc (*const tile_buffers)[MAX_TILE_COLS]) { |
2281 | 0 | const MV32 candidate_offset[1] = { { 1, 0 } }; |
2282 | 0 | const uint8_t *const cur_tile_data = |
2283 | 0 | tile_buffers[tile_row][tile_col].data + 4; |
2284 | 0 | const size_t cur_tile_size = tile_buffers[tile_row][tile_col].size; |
2285 | |
|
2286 | 0 | int i; |
2287 | |
|
2288 | 0 | if (tile_row == 0) return 0; |
2289 | | |
2290 | | // TODO(yunqingwang): For now, only the above tile is checked and used.
2291 | | // More candidates, such as the left tile, can be added later.
2292 | 0 | for (i = 0; i < 1; i++) { |
2293 | 0 | int row_offset = candidate_offset[0].row; |
2294 | 0 | int col_offset = candidate_offset[0].col; |
2295 | 0 | int row = tile_row - row_offset; |
2296 | 0 | int col = tile_col - col_offset; |
2297 | 0 | const uint8_t *tile_data; |
2298 | 0 | TileBufferEnc *candidate; |
2299 | |
|
2300 | 0 | if (row < 0 || col < 0) continue; |
2301 | | |
2302 | 0 | const uint32_t tile_hdr = mem_get_le32(tile_buffers[row][col].data); |
2303 | | |
2304 | | // Read out tile-copy-mode bit: |
2305 | 0 | if ((tile_hdr >> 31) == 1) { |
2306 | | // The candidate is a copy tile itself: the offset is stored in bits |
2307 | | // 30 through 24 inclusive. |
2308 | 0 | row_offset += (tile_hdr >> 24) & 0x7f; |
2309 | 0 | row = tile_row - row_offset; |
2310 | 0 | } |
2311 | |
|
2312 | 0 | candidate = &tile_buffers[row][col]; |
2313 | |
|
2314 | 0 | if (row_offset >= 128 || candidate->size != cur_tile_size) continue; |
2315 | | |
2316 | 0 | tile_data = candidate->data + 4; |
2317 | |
|
2318 | 0 | if (memcmp(tile_data, cur_tile_data, cur_tile_size) != 0) continue; |
2319 | | |
2320 | | // Identical tile found |
2321 | 0 | assert(row_offset > 0); |
2322 | 0 | return row_offset; |
2323 | 0 | } |
2324 | | |
2325 | | // No identical tile found |
2326 | 0 | return 0; |
2327 | 0 | } |
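| |
| | // Layout of the 32-bit little-endian tile header probed above, as a sketch
| | // (hypothetical helpers, not libaom API): bit 31 flags a copy tile and
| | // bits 24..30 carry the row offset of the tile it copies.
| | static unsigned sketch_make_copy_tile_hdr(unsigned row_offset) {
| |   return (1u << 31) | ((row_offset & 0x7f) << 24);
| | }
| | static int sketch_copy_tile_row_offset(unsigned tile_hdr) {
| |   return (tile_hdr >> 31) ? (int)((tile_hdr >> 24) & 0x7f) : 0;
| | }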
2328 | | |
2329 | | static inline void write_render_size(const AV1_COMMON *cm, |
2330 | 0 | struct aom_write_bit_buffer *wb) { |
2331 | 0 | const int scaling_active = av1_resize_scaled(cm); |
2332 | 0 | aom_wb_write_bit(wb, scaling_active); |
2333 | 0 | if (scaling_active) { |
2334 | 0 | aom_wb_write_literal(wb, cm->render_width - 1, 16); |
2335 | 0 | aom_wb_write_literal(wb, cm->render_height - 1, 16); |
2336 | 0 | } |
2337 | 0 | } |
2338 | | |
2339 | | static inline void write_superres_scale(const AV1_COMMON *const cm, |
2340 | 0 | struct aom_write_bit_buffer *wb) { |
2341 | 0 | const SequenceHeader *const seq_params = cm->seq_params; |
2342 | 0 | if (!seq_params->enable_superres) { |
2343 | 0 | assert(cm->superres_scale_denominator == SCALE_NUMERATOR); |
2344 | 0 | return; |
2345 | 0 | } |
2346 | | |
2347 | | // First bit is whether to scale or not
2348 | 0 | if (cm->superres_scale_denominator == SCALE_NUMERATOR) { |
2349 | 0 | aom_wb_write_bit(wb, 0); // no scaling |
2350 | 0 | } else { |
2351 | 0 | aom_wb_write_bit(wb, 1); // scaling, write scale factor |
2352 | 0 | assert(cm->superres_scale_denominator >= SUPERRES_SCALE_DENOMINATOR_MIN); |
2353 | 0 | assert(cm->superres_scale_denominator < |
2354 | 0 | SUPERRES_SCALE_DENOMINATOR_MIN + (1 << SUPERRES_SCALE_BITS)); |
2355 | 0 | aom_wb_write_literal( |
2356 | 0 | wb, cm->superres_scale_denominator - SUPERRES_SCALE_DENOMINATOR_MIN, |
2357 | 0 | SUPERRES_SCALE_BITS); |
2358 | 0 | } |
2359 | 0 | } |
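| |
| | // Worked example (assuming libaom's SCALE_NUMERATOR == 8, denominators
| | // 9..16 and SUPERRES_SCALE_BITS == 3): a denominator of 8 means no scaling
| | // and costs one 0 bit; a denominator of 16 (half-width coding) is sent as
| | // a 1 bit followed by 16 - 9 = 7 in 3 bits.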
2360 | | |
2361 | | static inline void write_frame_size(const AV1_COMMON *cm, |
2362 | | int frame_size_override, |
2363 | 0 | struct aom_write_bit_buffer *wb) { |
2364 | 0 | const int coded_width = cm->superres_upscaled_width - 1; |
2365 | 0 | const int coded_height = cm->superres_upscaled_height - 1; |
2366 | |
|
2367 | 0 | if (frame_size_override) { |
2368 | 0 | const SequenceHeader *seq_params = cm->seq_params; |
2369 | 0 | int num_bits_width = seq_params->num_bits_width; |
2370 | 0 | int num_bits_height = seq_params->num_bits_height; |
2371 | 0 | aom_wb_write_literal(wb, coded_width, num_bits_width); |
2372 | 0 | aom_wb_write_literal(wb, coded_height, num_bits_height); |
2373 | 0 | } |
2374 | |
|
2375 | 0 | write_superres_scale(cm, wb); |
2376 | 0 | write_render_size(cm, wb); |
2377 | 0 | } |
2378 | | |
2379 | | static inline void write_frame_size_with_refs(const AV1_COMMON *const cm, |
2380 | 0 | struct aom_write_bit_buffer *wb) { |
2381 | 0 | int found = 0; |
2382 | |
|
2383 | 0 | MV_REFERENCE_FRAME ref_frame; |
2384 | 0 | for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { |
2385 | 0 | const YV12_BUFFER_CONFIG *cfg = get_ref_frame_yv12_buf(cm, ref_frame); |
2386 | |
|
2387 | 0 | if (cfg != NULL) { |
2388 | 0 | found = cm->superres_upscaled_width == cfg->y_crop_width && |
2389 | 0 | cm->superres_upscaled_height == cfg->y_crop_height; |
2390 | 0 | found &= cm->render_width == cfg->render_width && |
2391 | 0 | cm->render_height == cfg->render_height; |
2392 | 0 | } |
2393 | 0 | aom_wb_write_bit(wb, found); |
2394 | 0 | if (found) { |
2395 | 0 | write_superres_scale(cm, wb); |
2396 | 0 | break; |
2397 | 0 | } |
2398 | 0 | } |
2399 | |
|
2400 | 0 | if (!found) { |
2401 | 0 | int frame_size_override = 1; // Always equal to 1 in this function |
2402 | 0 | write_frame_size(cm, frame_size_override, wb); |
2403 | 0 | } |
2404 | 0 | } |
2405 | | |
2406 | | static inline void write_profile(BITSTREAM_PROFILE profile, |
2407 | 0 | struct aom_write_bit_buffer *wb) { |
2408 | 0 | assert(profile >= PROFILE_0 && profile < MAX_PROFILES); |
2409 | 0 | aom_wb_write_literal(wb, profile, PROFILE_BITS); |
2410 | 0 | } |
2411 | | |
2412 | | static inline void write_bitdepth(const SequenceHeader *const seq_params, |
2413 | 0 | struct aom_write_bit_buffer *wb) { |
2414 | | // Profile 0/1: [0] for 8-bit, [1] for 10-bit
2415 | | // Profile 2: [0] for 8-bit, [10] for 10-bit, [11] for 12-bit
2416 | 0 | aom_wb_write_bit(wb, seq_params->bit_depth == AOM_BITS_8 ? 0 : 1); |
2417 | 0 | if (seq_params->profile == PROFILE_2 && seq_params->bit_depth != AOM_BITS_8) { |
2418 | 0 | aom_wb_write_bit(wb, seq_params->bit_depth == AOM_BITS_10 ? 0 : 1); |
2419 | 0 | } |
2420 | 0 | } |
2421 | | |
2422 | | static inline void write_color_config(const SequenceHeader *const seq_params, |
2423 | 0 | struct aom_write_bit_buffer *wb) { |
2424 | 0 | write_bitdepth(seq_params, wb); |
2425 | 0 | const int is_monochrome = seq_params->monochrome; |
2426 | | // monochrome bit |
2427 | 0 | if (seq_params->profile != PROFILE_1) |
2428 | 0 | aom_wb_write_bit(wb, is_monochrome); |
2429 | 0 | else |
2430 | 0 | assert(!is_monochrome); |
2431 | 0 | if (seq_params->color_primaries == AOM_CICP_CP_UNSPECIFIED && |
2432 | 0 | seq_params->transfer_characteristics == AOM_CICP_TC_UNSPECIFIED && |
2433 | 0 | seq_params->matrix_coefficients == AOM_CICP_MC_UNSPECIFIED) { |
2434 | 0 | aom_wb_write_bit(wb, 0); // No color description present |
2435 | 0 | } else { |
2436 | 0 | aom_wb_write_bit(wb, 1); // Color description present |
2437 | 0 | aom_wb_write_literal(wb, seq_params->color_primaries, 8); |
2438 | 0 | aom_wb_write_literal(wb, seq_params->transfer_characteristics, 8); |
2439 | 0 | aom_wb_write_literal(wb, seq_params->matrix_coefficients, 8); |
2440 | 0 | } |
2441 | 0 | if (is_monochrome) { |
2442 | | // 0: [16, 235] (i.e. xvYCC), 1: [0, 255] |
2443 | 0 | aom_wb_write_bit(wb, seq_params->color_range); |
2444 | 0 | return; |
2445 | 0 | } |
2446 | 0 | if (seq_params->color_primaries == AOM_CICP_CP_BT_709 && |
2447 | 0 | seq_params->transfer_characteristics == AOM_CICP_TC_SRGB && |
2448 | 0 | seq_params->matrix_coefficients == AOM_CICP_MC_IDENTITY) { |
2449 | 0 | assert(seq_params->subsampling_x == 0 && seq_params->subsampling_y == 0); |
2450 | 0 | assert(seq_params->profile == PROFILE_1 || |
2451 | 0 | (seq_params->profile == PROFILE_2 && |
2452 | 0 | seq_params->bit_depth == AOM_BITS_12)); |
2453 | 0 | } else { |
2454 | | // 0: [16, 235] (i.e. xvYCC), 1: [0, 255] |
2455 | 0 | aom_wb_write_bit(wb, seq_params->color_range); |
2456 | 0 | if (seq_params->profile == PROFILE_0) { |
2457 | | // 420 only |
2458 | 0 | assert(seq_params->subsampling_x == 1 && seq_params->subsampling_y == 1); |
2459 | 0 | } else if (seq_params->profile == PROFILE_1) { |
2460 | | // 444 only |
2461 | 0 | assert(seq_params->subsampling_x == 0 && seq_params->subsampling_y == 0); |
2462 | 0 | } else if (seq_params->profile == PROFILE_2) { |
2463 | 0 | if (seq_params->bit_depth == AOM_BITS_12) { |
2464 | | // 420, 444 or 422 |
2465 | 0 | aom_wb_write_bit(wb, seq_params->subsampling_x); |
2466 | 0 | if (seq_params->subsampling_x == 0) { |
2467 | 0 | assert(seq_params->subsampling_y == 0 && |
2468 | 0 | "4:4:0 subsampling not allowed in AV1"); |
2469 | 0 | } else { |
2470 | 0 | aom_wb_write_bit(wb, seq_params->subsampling_y); |
2471 | 0 | } |
2472 | 0 | } else { |
2473 | | // 422 only |
2474 | 0 | assert(seq_params->subsampling_x == 1 && |
2475 | 0 | seq_params->subsampling_y == 0); |
2476 | 0 | } |
2477 | 0 | } |
2478 | 0 | if (seq_params->matrix_coefficients == AOM_CICP_MC_IDENTITY) { |
2479 | 0 | assert(seq_params->subsampling_x == 0 && seq_params->subsampling_y == 0); |
2480 | 0 | } |
2481 | 0 | if (seq_params->subsampling_x == 1 && seq_params->subsampling_y == 1) { |
2482 | 0 | aom_wb_write_literal(wb, seq_params->chroma_sample_position, 2); |
2483 | 0 | } |
2484 | 0 | } |
2485 | 0 | aom_wb_write_bit(wb, seq_params->separate_uv_delta_q); |
2486 | 0 | } |
2487 | | |
2488 | | static inline void write_timing_info_header( |
2489 | | const aom_timing_info_t *const timing_info, |
2490 | 0 | struct aom_write_bit_buffer *wb) { |
2491 | 0 | aom_wb_write_unsigned_literal(wb, timing_info->num_units_in_display_tick, 32); |
2492 | 0 | aom_wb_write_unsigned_literal(wb, timing_info->time_scale, 32); |
2493 | 0 | aom_wb_write_bit(wb, timing_info->equal_picture_interval); |
2494 | 0 | if (timing_info->equal_picture_interval) { |
2495 | 0 | aom_wb_write_uvlc(wb, timing_info->num_ticks_per_picture - 1); |
2496 | 0 | } |
2497 | 0 | } |
2498 | | |
2499 | | static inline void write_decoder_model_info( |
2500 | | const aom_dec_model_info_t *const decoder_model_info, |
2501 | 0 | struct aom_write_bit_buffer *wb) { |
2502 | 0 | aom_wb_write_literal( |
2503 | 0 | wb, decoder_model_info->encoder_decoder_buffer_delay_length - 1, 5); |
2504 | 0 | aom_wb_write_unsigned_literal( |
2505 | 0 | wb, decoder_model_info->num_units_in_decoding_tick, 32); |
2506 | 0 | aom_wb_write_literal(wb, decoder_model_info->buffer_removal_time_length - 1, |
2507 | 0 | 5); |
2508 | 0 | aom_wb_write_literal( |
2509 | 0 | wb, decoder_model_info->frame_presentation_time_length - 1, 5); |
2510 | 0 | } |
2511 | | |
2512 | | static inline void write_dec_model_op_parameters( |
2513 | | const aom_dec_model_op_parameters_t *op_params, int buffer_delay_length, |
2514 | 0 | struct aom_write_bit_buffer *wb) { |
2515 | 0 | aom_wb_write_unsigned_literal(wb, op_params->decoder_buffer_delay, |
2516 | 0 | buffer_delay_length); |
2517 | 0 | aom_wb_write_unsigned_literal(wb, op_params->encoder_buffer_delay, |
2518 | 0 | buffer_delay_length); |
2519 | 0 | aom_wb_write_bit(wb, op_params->low_delay_mode_flag); |
2520 | 0 | } |
2521 | | |
2522 | | static inline void write_tu_pts_info(AV1_COMMON *const cm, |
2523 | 0 | struct aom_write_bit_buffer *wb) { |
2524 | 0 | aom_wb_write_unsigned_literal( |
2525 | 0 | wb, cm->frame_presentation_time, |
2526 | 0 | cm->seq_params->decoder_model_info.frame_presentation_time_length); |
2527 | 0 | } |
2528 | | |
2529 | | static inline void write_film_grain_params(const AV1_COMP *const cpi, |
2530 | 0 | struct aom_write_bit_buffer *wb) { |
2531 | 0 | const AV1_COMMON *const cm = &cpi->common; |
2532 | 0 | const aom_film_grain_t *const pars = &cm->cur_frame->film_grain_params; |
2533 | 0 | aom_wb_write_bit(wb, pars->apply_grain); |
2534 | 0 | if (!pars->apply_grain) return; |
2535 | | |
2536 | 0 | aom_wb_write_literal(wb, pars->random_seed, 16); |
2537 | |
|
2538 | 0 | if (cm->current_frame.frame_type == INTER_FRAME) |
2539 | 0 | aom_wb_write_bit(wb, pars->update_parameters); |
2540 | |
|
2541 | 0 | if (!pars->update_parameters) { |
2542 | 0 | int ref_frame, ref_idx; |
2543 | 0 | for (ref_frame = LAST_FRAME; ref_frame < REF_FRAMES; ref_frame++) { |
2544 | 0 | ref_idx = get_ref_frame_map_idx(cm, ref_frame); |
2545 | 0 | assert(ref_idx != INVALID_IDX); |
2546 | 0 | const RefCntBuffer *const buf = cm->ref_frame_map[ref_idx]; |
2547 | 0 | if (buf->film_grain_params_present && |
2548 | 0 | aom_check_grain_params_equiv(pars, &buf->film_grain_params)) { |
2549 | 0 | break; |
2550 | 0 | } |
2551 | 0 | } |
2552 | 0 | assert(ref_frame < REF_FRAMES); |
2553 | 0 | aom_wb_write_literal(wb, ref_idx, 3); |
2554 | 0 | return; |
2555 | 0 | } |
2556 | | |
2557 | | // Scaling function parameters
2558 | 0 | aom_wb_write_literal(wb, pars->num_y_points, 4); // max 14 |
2559 | 0 | for (int i = 0; i < pars->num_y_points; i++) { |
2560 | 0 | aom_wb_write_literal(wb, pars->scaling_points_y[i][0], 8); |
2561 | 0 | aom_wb_write_literal(wb, pars->scaling_points_y[i][1], 8); |
2562 | 0 | } |
2563 | 0 |
2564 | 0 | if (!cm->seq_params->monochrome) { |
2565 | 0 | aom_wb_write_bit(wb, pars->chroma_scaling_from_luma); |
2566 | 0 | } else { |
2567 | 0 | assert(!pars->chroma_scaling_from_luma); |
2568 | 0 | } |
2569 | 0 |
2570 | 0 | if (cm->seq_params->monochrome || pars->chroma_scaling_from_luma || |
2571 | 0 | ((cm->seq_params->subsampling_x == 1) && |
2572 | 0 | (cm->seq_params->subsampling_y == 1) && (pars->num_y_points == 0))) { |
2573 | 0 | assert(pars->num_cb_points == 0 && pars->num_cr_points == 0); |
2574 | 0 | } else { |
2575 | 0 | aom_wb_write_literal(wb, pars->num_cb_points, 4); // max 10 |
2576 | 0 | for (int i = 0; i < pars->num_cb_points; i++) { |
2577 | 0 | aom_wb_write_literal(wb, pars->scaling_points_cb[i][0], 8); |
2578 | 0 | aom_wb_write_literal(wb, pars->scaling_points_cb[i][1], 8); |
2579 | 0 | } |
2580 | 0 |
2581 | 0 | aom_wb_write_literal(wb, pars->num_cr_points, 4); // max 10 |
2582 | 0 | for (int i = 0; i < pars->num_cr_points; i++) { |
2583 | 0 | aom_wb_write_literal(wb, pars->scaling_points_cr[i][0], 8); |
2584 | 0 | aom_wb_write_literal(wb, pars->scaling_points_cr[i][1], 8); |
2585 | 0 | } |
2586 | 0 | } |
2587 | 0 |
2588 | 0 | aom_wb_write_literal(wb, pars->scaling_shift - 8, 2); // 8 + value |
2589 | | |
2590 | | // AR coefficients |
2591 | | // Only sent if the corresponding scaling function has
2592 | | // more than 0 points |
2593 | 0 |
2594 | 0 | aom_wb_write_literal(wb, pars->ar_coeff_lag, 2); |
2595 | 0 |
2596 | 0 | int num_pos_luma = 2 * pars->ar_coeff_lag * (pars->ar_coeff_lag + 1); |
2597 | 0 | int num_pos_chroma = num_pos_luma; |
2598 | 0 | if (pars->num_y_points > 0) ++num_pos_chroma; |
2599 | 0 |
2600 | 0 | if (pars->num_y_points) |
2601 | 0 | for (int i = 0; i < num_pos_luma; i++) |
2602 | 0 | aom_wb_write_literal(wb, pars->ar_coeffs_y[i] + 128, 8); |
2603 | 0 |
2604 | 0 | if (pars->num_cb_points || pars->chroma_scaling_from_luma) |
2605 | 0 | for (int i = 0; i < num_pos_chroma; i++) |
2606 | 0 | aom_wb_write_literal(wb, pars->ar_coeffs_cb[i] + 128, 8); |
2607 | 0 |
2608 | 0 | if (pars->num_cr_points || pars->chroma_scaling_from_luma) |
2609 | 0 | for (int i = 0; i < num_pos_chroma; i++) |
2610 | 0 | aom_wb_write_literal(wb, pars->ar_coeffs_cr[i] + 128, 8); |
2611 | 0 |
2612 | 0 | aom_wb_write_literal(wb, pars->ar_coeff_shift - 6, 2); // 6 + value
2613 | 0 |
2614 | 0 | aom_wb_write_literal(wb, pars->grain_scale_shift, 2); |
2615 | 0 |
2616 | 0 | if (pars->num_cb_points) { |
2617 | 0 | aom_wb_write_literal(wb, pars->cb_mult, 8); |
2618 | 0 | aom_wb_write_literal(wb, pars->cb_luma_mult, 8); |
2619 | 0 | aom_wb_write_literal(wb, pars->cb_offset, 9); |
2620 | 0 | } |
2621 | 0 |
2622 | 0 | if (pars->num_cr_points) { |
2623 | 0 | aom_wb_write_literal(wb, pars->cr_mult, 8); |
2624 | 0 | aom_wb_write_literal(wb, pars->cr_luma_mult, 8); |
2625 | 0 | aom_wb_write_literal(wb, pars->cr_offset, 9); |
2626 | 0 | } |
2627 | 0 |
2628 | 0 | aom_wb_write_bit(wb, pars->overlap_flag); |
2629 | 0 |
2630 | 0 | aom_wb_write_bit(wb, pars->clip_to_restricted_range); |
2631 | 0 | } |
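// A standalone sketch of the AR coefficient counts implied by the
// ar_coeff_lag field written above (helper names are illustrative): the
// luma filter covers a causal neighborhood of 2 * lag * (lag + 1) taps,
// and chroma gains one extra tap for the co-located luma sample whenever
// a luma scaling function is present.
static int num_ar_coeffs_luma_sketch(int ar_coeff_lag) {
  return 2 * ar_coeff_lag * (ar_coeff_lag + 1);  // lag 0..3 -> 0, 4, 12, 24
}
static int num_ar_coeffs_chroma_sketch(int ar_coeff_lag, int num_y_points) {
  return num_ar_coeffs_luma_sketch(ar_coeff_lag) + (num_y_points > 0 ? 1 : 0);
}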
2632 | | |
2633 | | static inline void write_sb_size(const SequenceHeader *const seq_params, |
2634 | 0 | struct aom_write_bit_buffer *wb) { |
2635 | 0 | (void)seq_params; |
2636 | 0 | (void)wb; |
2637 | 0 | assert(seq_params->mib_size == mi_size_wide[seq_params->sb_size]); |
2638 | 0 | assert(seq_params->mib_size == 1 << seq_params->mib_size_log2); |
2639 | 0 | assert(seq_params->sb_size == BLOCK_128X128 || |
2640 | 0 | seq_params->sb_size == BLOCK_64X64); |
2641 | 0 | aom_wb_write_bit(wb, seq_params->sb_size == BLOCK_128X128 ? 1 : 0); |
2642 | 0 | } |
2643 | | |
2644 | | static inline void write_sequence_header(const SequenceHeader *const seq_params, |
2645 | 0 | struct aom_write_bit_buffer *wb) { |
2646 | 0 | aom_wb_write_literal(wb, seq_params->num_bits_width - 1, 4); |
2647 | 0 | aom_wb_write_literal(wb, seq_params->num_bits_height - 1, 4); |
2648 | 0 | aom_wb_write_literal(wb, seq_params->max_frame_width - 1, |
2649 | 0 | seq_params->num_bits_width); |
2650 | 0 | aom_wb_write_literal(wb, seq_params->max_frame_height - 1, |
2651 | 0 | seq_params->num_bits_height); |
2652 | 0 |
2653 | 0 | if (!seq_params->reduced_still_picture_hdr) { |
2654 | 0 | aom_wb_write_bit(wb, seq_params->frame_id_numbers_present_flag); |
2655 | 0 | if (seq_params->frame_id_numbers_present_flag) { |
2656 | | // We must always have delta_frame_id_length < frame_id_length, |
2657 | | // in order for a frame to be referenced with a unique delta. |
2658 | | // Avoid wasting bits by using a coding that enforces this restriction. |
2659 | 0 | aom_wb_write_literal(wb, seq_params->delta_frame_id_length - 2, 4); |
2660 | 0 | aom_wb_write_literal( |
2661 | 0 | wb, |
2662 | 0 | seq_params->frame_id_length - seq_params->delta_frame_id_length - 1, |
2663 | 0 | 3); |
2664 | 0 | } |
2665 | 0 | } |
2666 | 0 |
2667 | 0 | write_sb_size(seq_params, wb); |
2668 | 0 |
2669 | 0 | aom_wb_write_bit(wb, seq_params->enable_filter_intra); |
2670 | 0 | aom_wb_write_bit(wb, seq_params->enable_intra_edge_filter); |
2671 | 0 |
2672 | 0 | if (!seq_params->reduced_still_picture_hdr) { |
2673 | 0 | aom_wb_write_bit(wb, seq_params->enable_interintra_compound); |
2674 | 0 | aom_wb_write_bit(wb, seq_params->enable_masked_compound); |
2675 | 0 | aom_wb_write_bit(wb, seq_params->enable_warped_motion); |
2676 | 0 | aom_wb_write_bit(wb, seq_params->enable_dual_filter); |
2677 | 0 |
2678 | 0 | aom_wb_write_bit(wb, seq_params->order_hint_info.enable_order_hint); |
2679 | 0 |
2680 | 0 | if (seq_params->order_hint_info.enable_order_hint) { |
2681 | 0 | aom_wb_write_bit(wb, seq_params->order_hint_info.enable_dist_wtd_comp); |
2682 | 0 | aom_wb_write_bit(wb, seq_params->order_hint_info.enable_ref_frame_mvs); |
2683 | 0 | } |
2684 | 0 | if (seq_params->force_screen_content_tools == 2) { |
2685 | 0 | aom_wb_write_bit(wb, 1); |
2686 | 0 | } else { |
2687 | 0 | aom_wb_write_bit(wb, 0); |
2688 | 0 | aom_wb_write_bit(wb, seq_params->force_screen_content_tools); |
2689 | 0 | } |
2690 | 0 | if (seq_params->force_screen_content_tools > 0) { |
2691 | 0 | if (seq_params->force_integer_mv == 2) { |
2692 | 0 | aom_wb_write_bit(wb, 1); |
2693 | 0 | } else { |
2694 | 0 | aom_wb_write_bit(wb, 0); |
2695 | 0 | aom_wb_write_bit(wb, seq_params->force_integer_mv); |
2696 | 0 | } |
2697 | 0 | } else { |
2698 | 0 | assert(seq_params->force_integer_mv == 2); |
2699 | 0 | } |
2700 | 0 | if (seq_params->order_hint_info.enable_order_hint) |
2701 | 0 | aom_wb_write_literal( |
2702 | 0 | wb, seq_params->order_hint_info.order_hint_bits_minus_1, 3); |
2703 | 0 | } |
2704 | 0 |
2705 | 0 | aom_wb_write_bit(wb, seq_params->enable_superres); |
2706 | 0 | aom_wb_write_bit(wb, seq_params->enable_cdef); |
2707 | 0 | aom_wb_write_bit(wb, seq_params->enable_restoration); |
2708 | 0 | } |
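// A worked example (hypothetical but typical values) of the frame-id
// length coding in write_sequence_header() above: coding
// delta_frame_id_length - 2 in 4 bits and the nonnegative difference
// frame_id_length - delta_frame_id_length - 1 in 3 bits makes
// delta_frame_id_length >= frame_id_length unrepresentable.
//   delta_frame_id_length = 14  ->  write 14 - 2 = 12      (4 bits)
//   frame_id_length       = 15  ->  write 15 - 14 - 1 = 0  (3 bits)
// The decoder recovers delta = 12 + 2 and frame_id_len = delta + 0 + 1,
// so frame_id_len > delta always holds.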
2709 | | |
2710 | | static inline void write_global_motion_params( |
2711 | | const WarpedMotionParams *params, const WarpedMotionParams *ref_params, |
2712 | 0 | struct aom_write_bit_buffer *wb, int allow_hp) { |
2713 | 0 | const TransformationType type = params->wmtype; |
2714 | | |
2715 | | // As a workaround for an AV1 spec bug, we avoid choosing TRANSLATION |
2716 | | // type models. Check here that we don't accidentally pick one somehow. |
2717 | | // See comments in gm_get_motion_vector() for details on the bug we're |
2718 | | // working around here |
2719 | 0 | assert(type != TRANSLATION); |
2720 | 0 |
2721 | 0 | aom_wb_write_bit(wb, type != IDENTITY); |
2722 | 0 | if (type != IDENTITY) { |
2723 | 0 | aom_wb_write_bit(wb, type == ROTZOOM); |
2724 | 0 | if (type != ROTZOOM) aom_wb_write_bit(wb, type == TRANSLATION); |
2725 | 0 | } |
2726 | 0 |
2727 | 0 | if (type >= ROTZOOM) { |
2728 | 0 | aom_wb_write_signed_primitive_refsubexpfin( |
2729 | 0 | wb, GM_ALPHA_MAX + 1, SUBEXPFIN_K, |
2730 | 0 | (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) - |
2731 | 0 | (1 << GM_ALPHA_PREC_BITS), |
2732 | 0 | (params->wmmat[2] >> GM_ALPHA_PREC_DIFF) - (1 << GM_ALPHA_PREC_BITS)); |
2733 | 0 | aom_wb_write_signed_primitive_refsubexpfin( |
2734 | 0 | wb, GM_ALPHA_MAX + 1, SUBEXPFIN_K, |
2735 | 0 | (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF), |
2736 | 0 | (params->wmmat[3] >> GM_ALPHA_PREC_DIFF)); |
2737 | 0 | } |
2738 | 0 |
2739 | 0 | if (type >= AFFINE) { |
2740 | 0 | aom_wb_write_signed_primitive_refsubexpfin( |
2741 | 0 | wb, GM_ALPHA_MAX + 1, SUBEXPFIN_K, |
2742 | 0 | (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF), |
2743 | 0 | (params->wmmat[4] >> GM_ALPHA_PREC_DIFF)); |
2744 | 0 | aom_wb_write_signed_primitive_refsubexpfin( |
2745 | 0 | wb, GM_ALPHA_MAX + 1, SUBEXPFIN_K, |
2746 | 0 | (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) - |
2747 | 0 | (1 << GM_ALPHA_PREC_BITS), |
2748 | 0 | (params->wmmat[5] >> GM_ALPHA_PREC_DIFF) - (1 << GM_ALPHA_PREC_BITS)); |
2749 | 0 | } |
2750 | 0 |
2751 | 0 | if (type >= TRANSLATION) { |
2752 | 0 | const int trans_bits = (type == TRANSLATION) |
2753 | 0 | ? GM_ABS_TRANS_ONLY_BITS - !allow_hp |
2754 | 0 | : GM_ABS_TRANS_BITS; |
2755 | 0 | const int trans_prec_diff = (type == TRANSLATION) |
2756 | 0 | ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp |
2757 | 0 | : GM_TRANS_PREC_DIFF; |
2758 | 0 | aom_wb_write_signed_primitive_refsubexpfin( |
2759 | 0 | wb, (1 << trans_bits) + 1, SUBEXPFIN_K, |
2760 | 0 | (ref_params->wmmat[0] >> trans_prec_diff), |
2761 | 0 | (params->wmmat[0] >> trans_prec_diff)); |
2762 | 0 | aom_wb_write_signed_primitive_refsubexpfin( |
2763 | 0 | wb, (1 << trans_bits) + 1, SUBEXPFIN_K, |
2764 | 0 | (ref_params->wmmat[1] >> trans_prec_diff), |
2765 | 0 | (params->wmmat[1] >> trans_prec_diff)); |
2766 | 0 | } |
2767 | 0 | } |
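// A sketch of the diagonal-term conditioning used above (the helper is
// illustrative; GM_ALPHA_PREC_DIFF and GM_ALPHA_PREC_BITS are the macros
// already referenced): diagonal warp terms are coded relative to 1.0, so
// a near-identity model maps to values near 0, the cheapest symbols for
// aom_wb_write_signed_primitive_refsubexpfin().
static int gm_code_diag_term_sketch(int wmmat, int prec_diff, int prec_bits) {
  // A pure 1.0 scale stored as (1 << (prec_diff + prec_bits)) becomes 0.
  return (wmmat >> prec_diff) - (1 << prec_bits);
}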
2768 | | |
2769 | | static inline void write_global_motion(AV1_COMP *cpi, |
2770 | 0 | struct aom_write_bit_buffer *wb) { |
2771 | 0 | AV1_COMMON *const cm = &cpi->common; |
2772 | 0 | int frame; |
2773 | 0 | for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) { |
2774 | 0 | const WarpedMotionParams *ref_params = |
2775 | 0 | cm->prev_frame ? &cm->prev_frame->global_motion[frame] |
2776 | 0 | : &default_warp_params; |
2777 | 0 | write_global_motion_params(&cm->global_motion[frame], ref_params, wb, |
2778 | 0 | cm->features.allow_high_precision_mv); |
2779 | | // TODO(sarahparker, debargha): The logic in the commented out code below |
2780 | | // does not work currently and causes mismatches when resize is on. |
2781 | | // Fix it before turning the optimization back on. |
2782 | | /* |
2783 | | YV12_BUFFER_CONFIG *ref_buf = get_ref_frame_yv12_buf(cpi, frame); |
2784 | | if (cpi->source->y_crop_width == ref_buf->y_crop_width && |
2785 | | cpi->source->y_crop_height == ref_buf->y_crop_height) { |
2786 | | write_global_motion_params(&cm->global_motion[frame], |
2787 | | &cm->prev_frame->global_motion[frame], wb, |
2788 | | cm->features.allow_high_precision_mv); |
2789 | | } else { |
2790 | | assert(cm->global_motion[frame].wmtype == IDENTITY && |
2791 | | "Invalid warp type for frames of different resolutions"); |
2792 | | } |
2793 | | */ |
2794 | | /* |
2795 | | printf("Frame %d/%d: Enc Ref %d: %d %d %d %d\n", |
2796 | | cm->current_frame.frame_number, cm->show_frame, frame, |
2797 | | cm->global_motion[frame].wmmat[0], |
2798 | | cm->global_motion[frame].wmmat[1], cm->global_motion[frame].wmmat[2], |
2799 | | cm->global_motion[frame].wmmat[3]); |
2800 | | */ |
2801 | 0 | } |
2802 | 0 | } |
2803 | | |
2804 | | static int check_frame_refs_short_signaling(AV1_COMMON *const cm, |
2805 | 0 | bool enable_ref_short_signaling) { |
2806 | | // In the RTC case, when res < 360p and speed >= 9, we turn on
2807 | | // frame_refs_short_signaling if it won't break the decoder. |
2808 | 0 | if (enable_ref_short_signaling) { |
2809 | 0 | const int gld_map_idx = get_ref_frame_map_idx(cm, GOLDEN_FRAME); |
2810 | 0 | const int base = |
2811 | 0 | 1 << (cm->seq_params->order_hint_info.order_hint_bits_minus_1 + 1); |
2812 | 0 |
2813 | 0 | const int order_hint_group_cur = |
2814 | 0 | cm->current_frame.display_order_hint / base; |
2815 | 0 | const int order_hint_group_gld = |
2816 | 0 | cm->ref_frame_map[gld_map_idx]->display_order_hint / base; |
2817 | 0 | const int relative_dist = cm->current_frame.order_hint - |
2818 | 0 | cm->ref_frame_map[gld_map_idx]->order_hint; |
2819 | | |
2820 | | // If current frame and GOLDEN frame are in the same order_hint group, and |
2821 | | // they are not far apart (i.e., at most 64 frames), then return 1.
2822 | 0 | if (order_hint_group_cur == order_hint_group_gld && relative_dist >= 0 && |
2823 | 0 | relative_dist <= 64) { |
2824 | 0 | return 1; |
2825 | 0 | } |
2826 | 0 | return 0; |
2827 | 0 | } |
2828 | | |
2829 | | // Check whether all references are distinct frames. |
2830 | 0 | const RefCntBuffer *seen_bufs[INTER_REFS_PER_FRAME] = { NULL }; |
2831 | 0 | int num_refs = 0; |
2832 | 0 | for (int ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { |
2833 | 0 | const RefCntBuffer *const buf = get_ref_frame_buf(cm, ref_frame); |
2834 | 0 | if (buf != NULL) { |
2835 | 0 | int seen = 0; |
2836 | 0 | for (int i = 0; i < num_refs; i++) { |
2837 | 0 | if (seen_bufs[i] == buf) { |
2838 | 0 | seen = 1; |
2839 | 0 | break; |
2840 | 0 | } |
2841 | 0 | } |
2842 | 0 | if (!seen) seen_bufs[num_refs++] = buf; |
2843 | 0 | } |
2844 | 0 | } |
2845 | | |
2846 | | // We only turn on frame_refs_short_signaling when all references are |
2847 | | // distinct. |
2848 | 0 | if (num_refs < INTER_REFS_PER_FRAME) { |
2849 | | // This indicates that more than one reference frame points to the same
2850 | | // reference buffer, i.e. two or more references are duplicates.
2851 | 0 | return 0; |
2852 | 0 | } |
2853 | | |
2854 | | // Check whether the encoder-side ref frame choices match those that will
2855 | | // be derived at the decoder side.
2856 | 0 | int remapped_ref_idx_decoder[REF_FRAMES]; |
2857 | 0 |
2858 | 0 | const int lst_map_idx = get_ref_frame_map_idx(cm, LAST_FRAME); |
2859 | 0 | const int gld_map_idx = get_ref_frame_map_idx(cm, GOLDEN_FRAME); |
2860 | | |
2861 | | // Set up the frame refs mapping indexes according to the |
2862 | | // frame_refs_short_signaling policy. |
2863 | 0 | av1_set_frame_refs(cm, remapped_ref_idx_decoder, lst_map_idx, gld_map_idx); |
2864 | | |
2865 | | // We only turn on frame_refs_short_signaling when the encoder side decision |
2866 | | // on ref frames is identical to that at the decoder side. |
2867 | 0 | int frame_refs_short_signaling = 1; |
2868 | 0 | for (int ref_idx = 0; ref_idx < INTER_REFS_PER_FRAME; ++ref_idx) { |
2869 | | // Compare the buffer index between two reference frames indexed |
2870 | | // respectively by the encoder and the decoder side decisions. |
2871 | 0 | RefCntBuffer *ref_frame_buf_new = NULL; |
2872 | 0 | if (remapped_ref_idx_decoder[ref_idx] != INVALID_IDX) { |
2873 | 0 | ref_frame_buf_new = cm->ref_frame_map[remapped_ref_idx_decoder[ref_idx]]; |
2874 | 0 | } |
2875 | 0 | if (get_ref_frame_buf(cm, LAST_FRAME + ref_idx) != ref_frame_buf_new) { |
2876 | 0 | frame_refs_short_signaling = 0; |
2877 | 0 | break; |
2878 | 0 | } |
2879 | 0 | } |
2880 | 0 |
2881 | | #if 0 // For debug |
2882 | | printf("\nFrame=%d: \n", cm->current_frame.frame_number); |
2883 | | printf("***frame_refs_short_signaling=%d\n", frame_refs_short_signaling); |
2884 | | for (int ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { |
2885 | | printf("enc_ref(map_idx=%d)=%d, vs. " |
2886 | | "dec_ref(map_idx=%d)=%d\n", |
2887 | | get_ref_frame_map_idx(cm, ref_frame), ref_frame, |
2888 | | cm->remapped_ref_idx[ref_frame - LAST_FRAME], |
2889 | | ref_frame); |
2890 | | } |
2891 | | #endif // 0 |
2892 | 0 |
2893 | 0 | return frame_refs_short_signaling; |
2894 | 0 | } |
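// A standalone numeric check of the order-hint grouping test above,
// assuming 8-bit order hints (order_hint_bits_minus_1 == 7, base == 256);
// the helper is illustrative, not part of libaom.
static int in_same_group_and_close_sketch(unsigned cur_display_hint,
                                          unsigned gld_display_hint,
                                          int relative_dist) {
  const int base = 1 << 8;
  // e.g. display hints 300 and 260 both land in group 1 (256..511), and a
  // relative distance of 40 is within the 64-frame limit, so this is 1.
  return cur_display_hint / base == gld_display_hint / base &&
         relative_dist >= 0 && relative_dist <= 64;
}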
2895 | | |
2896 | | // New function based on HLS R18 |
2897 | | static inline void write_uncompressed_header_obu( |
2898 | | AV1_COMP *cpi, MACROBLOCKD *const xd, struct aom_write_bit_buffer *saved_wb, |
2899 | 0 | struct aom_write_bit_buffer *wb) { |
2900 | 0 | AV1_COMMON *const cm = &cpi->common; |
2901 | 0 | const SequenceHeader *const seq_params = cm->seq_params; |
2902 | 0 | const CommonQuantParams *quant_params = &cm->quant_params; |
2903 | 0 | CurrentFrame *const current_frame = &cm->current_frame; |
2904 | 0 | FeatureFlags *const features = &cm->features; |
2905 | 0 |
2906 | 0 | if (!cpi->sf.rt_sf.enable_ref_short_signaling || |
2907 | 0 | !seq_params->order_hint_info.enable_order_hint || |
2908 | 0 | seq_params->order_hint_info.enable_ref_frame_mvs) { |
2909 | 0 | current_frame->frame_refs_short_signaling = 0; |
2910 | 0 | } else { |
2911 | 0 | current_frame->frame_refs_short_signaling = 1; |
2912 | 0 | } |
2913 | 0 |
2914 | 0 | if (seq_params->still_picture) { |
2915 | 0 | assert(cm->show_existing_frame == 0); |
2916 | 0 | assert(cm->show_frame == 1); |
2917 | 0 | assert(current_frame->frame_type == KEY_FRAME); |
2918 | 0 | } |
2919 | 0 | if (!seq_params->reduced_still_picture_hdr) { |
2920 | 0 | if (encode_show_existing_frame(cm)) { |
2921 | 0 | aom_wb_write_bit(wb, 1); // show_existing_frame |
2922 | 0 | aom_wb_write_literal(wb, cpi->existing_fb_idx_to_show, 3); |
2923 | 0 |
2924 | 0 | if (seq_params->decoder_model_info_present_flag && |
2925 | 0 | seq_params->timing_info.equal_picture_interval == 0) { |
2926 | 0 | write_tu_pts_info(cm, wb); |
2927 | 0 | } |
2928 | 0 | if (seq_params->frame_id_numbers_present_flag) { |
2929 | 0 | int frame_id_len = seq_params->frame_id_length; |
2930 | 0 | int display_frame_id = cm->ref_frame_id[cpi->existing_fb_idx_to_show]; |
2931 | 0 | aom_wb_write_literal(wb, display_frame_id, frame_id_len); |
2932 | 0 | } |
2933 | 0 | return; |
2934 | 0 | } else { |
2935 | 0 | aom_wb_write_bit(wb, 0); // show_existing_frame |
2936 | 0 | } |
2937 | | |
2938 | 0 | aom_wb_write_literal(wb, current_frame->frame_type, 2); |
2939 | 0 |
2940 | 0 | aom_wb_write_bit(wb, cm->show_frame); |
2941 | 0 | if (cm->show_frame) { |
2942 | 0 | if (seq_params->decoder_model_info_present_flag && |
2943 | 0 | seq_params->timing_info.equal_picture_interval == 0) |
2944 | 0 | write_tu_pts_info(cm, wb); |
2945 | 0 | } else { |
2946 | 0 | aom_wb_write_bit(wb, cm->showable_frame); |
2947 | 0 | } |
2948 | 0 | if (frame_is_sframe(cm)) { |
2949 | 0 | assert(features->error_resilient_mode); |
2950 | 0 | } else if (!(current_frame->frame_type == KEY_FRAME && cm->show_frame)) { |
2951 | 0 | aom_wb_write_bit(wb, features->error_resilient_mode); |
2952 | 0 | } |
2953 | 0 | } |
2954 | 0 | aom_wb_write_bit(wb, features->disable_cdf_update); |
2955 | 0 |
2956 | 0 | if (seq_params->force_screen_content_tools == 2) { |
2957 | 0 | aom_wb_write_bit(wb, features->allow_screen_content_tools); |
2958 | 0 | } else { |
2959 | 0 | assert(features->allow_screen_content_tools == |
2960 | 0 | seq_params->force_screen_content_tools); |
2961 | 0 | } |
2962 | 0 |
2963 | 0 | if (features->allow_screen_content_tools) { |
2964 | 0 | if (seq_params->force_integer_mv == 2) { |
2965 | 0 | aom_wb_write_bit(wb, features->cur_frame_force_integer_mv); |
2966 | 0 | } else { |
2967 | 0 | assert(features->cur_frame_force_integer_mv == |
2968 | 0 | seq_params->force_integer_mv); |
2969 | 0 | } |
2970 | 0 | } else { |
2971 | 0 | assert(features->cur_frame_force_integer_mv == 0); |
2972 | 0 | } |
2973 | 0 |
2974 | 0 | int frame_size_override_flag = 0; |
2975 | 0 |
2976 | 0 | if (seq_params->reduced_still_picture_hdr) { |
2977 | 0 | assert(cm->superres_upscaled_width == seq_params->max_frame_width && |
2978 | 0 | cm->superres_upscaled_height == seq_params->max_frame_height); |
2979 | 0 | } else { |
2980 | 0 | if (seq_params->frame_id_numbers_present_flag) { |
2981 | 0 | int frame_id_len = seq_params->frame_id_length; |
2982 | 0 | aom_wb_write_literal(wb, cm->current_frame_id, frame_id_len); |
2983 | 0 | } |
2984 | 0 |
2985 | 0 | if (cm->superres_upscaled_width > seq_params->max_frame_width || |
2986 | 0 | cm->superres_upscaled_height > seq_params->max_frame_height) { |
2987 | 0 | aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM, |
2988 | 0 | "Frame dimensions are larger than the maximum values"); |
2989 | 0 | } |
2990 | 0 |
2991 | 0 | frame_size_override_flag = |
2992 | 0 | frame_is_sframe(cm) |
2993 | 0 | ? 1 |
2994 | 0 | : (cm->superres_upscaled_width != seq_params->max_frame_width || |
2995 | 0 | cm->superres_upscaled_height != seq_params->max_frame_height); |
2996 | 0 | if (!frame_is_sframe(cm)) aom_wb_write_bit(wb, frame_size_override_flag); |
2997 | 0 |
2998 | 0 | if (seq_params->order_hint_info.enable_order_hint) |
2999 | 0 | aom_wb_write_literal( |
3000 | 0 | wb, current_frame->order_hint, |
3001 | 0 | seq_params->order_hint_info.order_hint_bits_minus_1 + 1); |
3002 | 0 |
3003 | 0 | if (!features->error_resilient_mode && !frame_is_intra_only(cm)) { |
3004 | 0 | aom_wb_write_literal(wb, features->primary_ref_frame, PRIMARY_REF_BITS); |
3005 | 0 | } |
3006 | 0 | } |
3007 | 0 |
3008 | 0 | if (seq_params->decoder_model_info_present_flag) { |
3009 | 0 | aom_wb_write_bit(wb, cpi->ppi->buffer_removal_time_present); |
3010 | 0 | if (cpi->ppi->buffer_removal_time_present) { |
3011 | 0 | for (int op_num = 0; |
3012 | 0 | op_num < seq_params->operating_points_cnt_minus_1 + 1; op_num++) { |
3013 | 0 | if (seq_params->op_params[op_num].decoder_model_param_present_flag) { |
3014 | 0 | if (seq_params->operating_point_idc[op_num] == 0 || |
3015 | 0 | ((seq_params->operating_point_idc[op_num] >> |
3016 | 0 | cm->temporal_layer_id) & |
3017 | 0 | 0x1 && |
3018 | 0 | (seq_params->operating_point_idc[op_num] >> |
3019 | 0 | (cm->spatial_layer_id + 8)) & |
3020 | 0 | 0x1)) { |
3021 | 0 | aom_wb_write_unsigned_literal( |
3022 | 0 | wb, cm->buffer_removal_times[op_num], |
3023 | 0 | seq_params->decoder_model_info.buffer_removal_time_length); |
3024 | 0 | cm->buffer_removal_times[op_num]++; |
3025 | 0 | if (cm->buffer_removal_times[op_num] == 0) { |
3026 | 0 | aom_internal_error(cm->error, AOM_CODEC_UNSUP_BITSTREAM, |
3027 | 0 | "buffer_removal_time overflowed"); |
3028 | 0 | } |
3029 | 0 | } |
3030 | 0 | } |
3031 | 0 | } |
3032 | 0 | } |
3033 | 0 | } |
3034 | | |
3035 | | // Shown keyframes and switch-frames automatically refresh all reference
3036 | | // frames. For all other frame types, we need to write refresh_frame_flags. |
3037 | 0 | if ((current_frame->frame_type == KEY_FRAME && !cm->show_frame) || |
3038 | 0 | current_frame->frame_type == INTER_FRAME || |
3039 | 0 | current_frame->frame_type == INTRA_ONLY_FRAME) |
3040 | 0 | aom_wb_write_literal(wb, current_frame->refresh_frame_flags, REF_FRAMES); |
3041 | 0 |
3042 | 0 | if (!frame_is_intra_only(cm) || current_frame->refresh_frame_flags != 0xff) { |
3043 | | // Write all ref frame order hints if error_resilient_mode == 1 |
3044 | 0 | if (features->error_resilient_mode && |
3045 | 0 | seq_params->order_hint_info.enable_order_hint) { |
3046 | 0 | for (int ref_idx = 0; ref_idx < REF_FRAMES; ref_idx++) { |
3047 | 0 | aom_wb_write_literal( |
3048 | 0 | wb, cm->ref_frame_map[ref_idx]->order_hint, |
3049 | 0 | seq_params->order_hint_info.order_hint_bits_minus_1 + 1); |
3050 | 0 | } |
3051 | 0 | } |
3052 | 0 | } |
3053 | 0 |
3054 | 0 | if (current_frame->frame_type == KEY_FRAME) { |
3055 | 0 | write_frame_size(cm, frame_size_override_flag, wb); |
3056 | 0 | assert(!av1_superres_scaled(cm) || !features->allow_intrabc); |
3057 | 0 | if (features->allow_screen_content_tools && !av1_superres_scaled(cm)) |
3058 | 0 | aom_wb_write_bit(wb, features->allow_intrabc); |
3059 | 0 | } else { |
3060 | 0 | if (current_frame->frame_type == INTRA_ONLY_FRAME) { |
3061 | 0 | write_frame_size(cm, frame_size_override_flag, wb); |
3062 | 0 | assert(!av1_superres_scaled(cm) || !features->allow_intrabc); |
3063 | 0 | if (features->allow_screen_content_tools && !av1_superres_scaled(cm)) |
3064 | 0 | aom_wb_write_bit(wb, features->allow_intrabc); |
3065 | 0 | } else if (current_frame->frame_type == INTER_FRAME || |
3066 | 0 | frame_is_sframe(cm)) { |
3067 | 0 | MV_REFERENCE_FRAME ref_frame; |
3068 | | |
3069 | | // NOTE: Error resilient mode turns off frame_refs_short_signaling |
3070 | | // automatically. |
3071 | 0 | #define FRAME_REFS_SHORT_SIGNALING 0 |
3072 | | #if FRAME_REFS_SHORT_SIGNALING |
3073 | | current_frame->frame_refs_short_signaling = |
3074 | | seq_params->order_hint_info.enable_order_hint; |
3075 | | #endif // FRAME_REFS_SHORT_SIGNALING |
3076 | 0 |
3077 | 0 | if (current_frame->frame_refs_short_signaling) { |
3078 | | // In the RTC case, when cpi->sf.rt_sf.enable_ref_short_signaling is true,
3079 | | // we turn on frame_refs_short_signaling when the current frame and |
3080 | | // golden frame are in the same order_hint group, and their relative |
3081 | | // distance is <= 64 (in order to be decodable). |
3082 | | |
3083 | | // For other cases, an example solution for encoder-side |
3084 | | // implementation on frame_refs_short_signaling is also provided in |
3085 | | // this function, where frame_refs_short_signaling is only turned on |
3086 | | // when the encoder side decision on ref frames is identical to that |
3087 | | // at the decoder side. |
3088 | 0 |
3089 | 0 | current_frame->frame_refs_short_signaling = |
3090 | 0 | check_frame_refs_short_signaling( |
3091 | 0 | cm, cpi->sf.rt_sf.enable_ref_short_signaling); |
3092 | 0 | } |
3093 | 0 |
3094 | 0 | if (seq_params->order_hint_info.enable_order_hint) |
3095 | 0 | aom_wb_write_bit(wb, current_frame->frame_refs_short_signaling); |
3096 | 0 |
3097 | 0 | if (current_frame->frame_refs_short_signaling) { |
3098 | 0 | const int lst_ref = get_ref_frame_map_idx(cm, LAST_FRAME); |
3099 | 0 | aom_wb_write_literal(wb, lst_ref, REF_FRAMES_LOG2); |
3100 | 0 |
3101 | 0 | const int gld_ref = get_ref_frame_map_idx(cm, GOLDEN_FRAME); |
3102 | 0 | aom_wb_write_literal(wb, gld_ref, REF_FRAMES_LOG2); |
3103 | 0 | } |
3104 | 0 | int first_ref_map_idx = INVALID_IDX; |
3105 | 0 | if (cpi->ppi->rtc_ref.set_ref_frame_config) { |
3106 | 0 | for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { |
3107 | 0 | if (cpi->ppi->rtc_ref.reference[ref_frame - 1] == 1) { |
3108 | 0 | first_ref_map_idx = cpi->ppi->rtc_ref.ref_idx[ref_frame - 1]; |
3109 | 0 | break; |
3110 | 0 | } |
3111 | 0 | } |
3112 | 0 | } |
3113 | 0 | for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { |
3114 | 0 | assert(get_ref_frame_map_idx(cm, ref_frame) != INVALID_IDX); |
3115 | 0 | if (!current_frame->frame_refs_short_signaling) { |
3116 | 0 | if (cpi->ppi->rtc_ref.set_ref_frame_config && |
3117 | 0 | first_ref_map_idx != INVALID_IDX && |
3118 | 0 | cpi->svc.number_spatial_layers == 1 && |
3119 | 0 | !seq_params->order_hint_info.enable_order_hint) { |
3120 | | // For the usage of set_ref_frame_config: |
3121 | | // for any reference not used set their ref_map_idx |
3122 | | // to the first used reference. |
3123 | 0 | const int map_idx = cpi->ppi->rtc_ref.reference[ref_frame - 1] |
3124 | 0 | ? get_ref_frame_map_idx(cm, ref_frame) |
3125 | 0 | : first_ref_map_idx; |
3126 | 0 | aom_wb_write_literal(wb, map_idx, REF_FRAMES_LOG2); |
3127 | 0 | } else { |
3128 | 0 | aom_wb_write_literal(wb, get_ref_frame_map_idx(cm, ref_frame), |
3129 | 0 | REF_FRAMES_LOG2); |
3130 | 0 | } |
3131 | 0 | } |
3132 | 0 | if (seq_params->frame_id_numbers_present_flag) { |
3133 | 0 | int i = get_ref_frame_map_idx(cm, ref_frame); |
3134 | 0 | int frame_id_len = seq_params->frame_id_length; |
3135 | 0 | int diff_len = seq_params->delta_frame_id_length; |
3136 | 0 | int delta_frame_id_minus_1 = |
3137 | 0 | ((cm->current_frame_id - cm->ref_frame_id[i] + |
3138 | 0 | (1 << frame_id_len)) % |
3139 | 0 | (1 << frame_id_len)) - |
3140 | 0 | 1; |
3141 | 0 | if (delta_frame_id_minus_1 < 0 || |
3142 | 0 | delta_frame_id_minus_1 >= (1 << diff_len)) { |
3143 | 0 | aom_internal_error(cm->error, AOM_CODEC_ERROR, |
3144 | 0 | "Invalid delta_frame_id_minus_1"); |
3145 | 0 | } |
3146 | 0 | aom_wb_write_literal(wb, delta_frame_id_minus_1, diff_len); |
3147 | 0 | } |
3148 | 0 | } |
3149 | 0 |
3150 | 0 | if (!features->error_resilient_mode && frame_size_override_flag) { |
3151 | 0 | write_frame_size_with_refs(cm, wb); |
3152 | 0 | } else { |
3153 | 0 | write_frame_size(cm, frame_size_override_flag, wb); |
3154 | 0 | } |
3155 | 0 |
3156 | 0 | if (!features->cur_frame_force_integer_mv) |
3157 | 0 | aom_wb_write_bit(wb, features->allow_high_precision_mv); |
3158 | 0 | write_frame_interp_filter(features->interp_filter, wb); |
3159 | 0 | aom_wb_write_bit(wb, features->switchable_motion_mode); |
3160 | 0 | if (frame_might_allow_ref_frame_mvs(cm)) { |
3161 | 0 | aom_wb_write_bit(wb, features->allow_ref_frame_mvs); |
3162 | 0 | } else { |
3163 | 0 | assert(features->allow_ref_frame_mvs == 0); |
3164 | 0 | } |
3165 | 0 | } |
3166 | 0 | } |
3167 | 0 |
3168 | 0 | const int might_bwd_adapt = !(seq_params->reduced_still_picture_hdr) && |
3169 | 0 | !(features->disable_cdf_update); |
3170 | 0 | if (cm->tiles.large_scale) |
3171 | 0 | assert(features->refresh_frame_context == REFRESH_FRAME_CONTEXT_DISABLED); |
3172 | 0 |
3173 | 0 | if (might_bwd_adapt) { |
3174 | 0 | aom_wb_write_bit( |
3175 | 0 | wb, features->refresh_frame_context == REFRESH_FRAME_CONTEXT_DISABLED); |
3176 | 0 | } |
3177 | 0 |
3178 | 0 | write_tile_info(cm, saved_wb, wb); |
3179 | 0 | encode_quantization(quant_params, av1_num_planes(cm), |
3180 | 0 | cm->seq_params->separate_uv_delta_q, wb); |
3181 | 0 | encode_segmentation(cm, wb); |
3182 | 0 |
3183 | 0 | const DeltaQInfo *const delta_q_info = &cm->delta_q_info; |
3184 | 0 | if (delta_q_info->delta_q_present_flag) assert(quant_params->base_qindex > 0); |
3185 | 0 | if (quant_params->base_qindex > 0) { |
3186 | 0 | aom_wb_write_bit(wb, delta_q_info->delta_q_present_flag); |
3187 | 0 | if (delta_q_info->delta_q_present_flag) { |
3188 | 0 | aom_wb_write_literal(wb, get_msb(delta_q_info->delta_q_res), 2); |
3189 | 0 | xd->current_base_qindex = quant_params->base_qindex; |
3190 | 0 | if (features->allow_intrabc) |
3191 | 0 | assert(delta_q_info->delta_lf_present_flag == 0); |
3192 | 0 | else |
3193 | 0 | aom_wb_write_bit(wb, delta_q_info->delta_lf_present_flag); |
3194 | 0 | if (delta_q_info->delta_lf_present_flag) { |
3195 | 0 | aom_wb_write_literal(wb, get_msb(delta_q_info->delta_lf_res), 2); |
3196 | 0 | aom_wb_write_bit(wb, delta_q_info->delta_lf_multi); |
3197 | 0 | av1_reset_loop_filter_delta(xd, av1_num_planes(cm)); |
3198 | 0 | } |
3199 | 0 | } |
3200 | 0 | } |
3201 | 0 |
3202 | 0 | if (features->all_lossless) { |
3203 | 0 | assert(!av1_superres_scaled(cm)); |
3204 | 0 | } else { |
3205 | 0 | if (!features->coded_lossless) { |
3206 | 0 | encode_loopfilter(cm, wb); |
3207 | 0 | encode_cdef(cm, wb); |
3208 | 0 | } |
3209 | 0 | encode_restoration_mode(cm, wb); |
3210 | 0 | } |
3211 | | |
3212 | | // Write TX mode |
3213 | 0 | if (features->coded_lossless) |
3214 | 0 | assert(features->tx_mode == ONLY_4X4); |
3215 | 0 | else |
3216 | 0 | aom_wb_write_bit(wb, features->tx_mode == TX_MODE_SELECT); |
3217 | 0 |
3218 | 0 | if (!frame_is_intra_only(cm)) { |
3219 | 0 | const int use_hybrid_pred = |
3220 | 0 | current_frame->reference_mode == REFERENCE_MODE_SELECT; |
3221 | 0 |
3222 | 0 | aom_wb_write_bit(wb, use_hybrid_pred); |
3223 | 0 | } |
3224 | 0 |
3225 | 0 | if (current_frame->skip_mode_info.skip_mode_allowed) |
3226 | 0 | aom_wb_write_bit(wb, current_frame->skip_mode_info.skip_mode_flag); |
3227 | 0 |
3228 | 0 | if (frame_might_allow_warped_motion(cm)) |
3229 | 0 | aom_wb_write_bit(wb, features->allow_warped_motion); |
3230 | 0 | else |
3231 | 0 | assert(!features->allow_warped_motion); |
3232 | 0 |
3233 | 0 | aom_wb_write_bit(wb, features->reduced_tx_set_used); |
3234 | 0 |
3235 | 0 | if (!frame_is_intra_only(cm)) write_global_motion(cpi, wb); |
3236 | 0 |
3237 | 0 | if (seq_params->film_grain_params_present && |
3238 | 0 | (cm->show_frame || cm->showable_frame)) |
3239 | 0 | write_film_grain_params(cpi, wb); |
3240 | 0 |
3241 | 0 | if (cm->tiles.large_scale) write_ext_tile_info(cm, saved_wb, wb); |
3242 | 0 | } |
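// A standalone check of the modular reference-frame-id delta computed in
// the ref frame loop above, assuming a typical frame_id_length of 15 (the
// helper is illustrative): ids wrap modulo 1 << frame_id_length, so a
// current id numerically smaller than the reference id still yields a
// small positive delta.
static int delta_frame_id_minus_1_sketch(int cur_id, int ref_id) {
  const int wrap = 1 << 15;
  // e.g. cur_id = 3, ref_id = 32766:
  //   ((3 - 32766 + 32768) % 32768) - 1 == 4.
  return ((cur_id - ref_id + wrap) % wrap) - 1;
}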
3243 | | |
3244 | 0 | static int choose_size_bytes(uint32_t size, int spare_msbs) { |
3245 | | // Choose the number of bytes required to represent size, without |
3246 | | // using the 'spare_msbs' number of most significant bits. |
3247 | | |
3248 | | // Make sure we will fit in 4 bytes to start with.
3249 | 0 | if (spare_msbs > 0 && size >> (32 - spare_msbs) != 0) return -1; |
3250 | | |
3251 | | // Normalise to 32 bits |
3252 | 0 | size <<= spare_msbs; |
3253 | 0 |
3254 | 0 | if (size >> 24 != 0) |
3255 | 0 | return 4; |
3256 | 0 | else if (size >> 16 != 0) |
3257 | 0 | return 3; |
3258 | 0 | else if (size >> 8 != 0) |
3259 | 0 | return 2; |
3260 | 0 | else |
3261 | 0 | return 1; |
3262 | 0 | } |
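// Worked checks for choose_size_bytes() above (values verified by hand):
// reserving spare MSBs shifts the size left before the byte count is
// chosen, so a value that just fits in one byte needs two once a copy
// bit is reserved.
static void choose_size_bytes_checks(void) {
  assert(choose_size_bytes(0x12345, 0) == 3);  // 17 significant bits
  assert(choose_size_bytes(0x80, 0) == 1);     // fits in 8 bits
  assert(choose_size_bytes(0x80, 1) == 2);     // 0x80 << 1 needs 9 bits
}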
3263 | | |
3264 | | static inline void mem_put_varsize(uint8_t *const dst, const int sz, |
3265 | 0 | const int val) { |
3266 | 0 | switch (sz) { |
3267 | 0 | case 1: dst[0] = (uint8_t)(val & 0xff); break; |
3268 | 0 | case 2: mem_put_le16(dst, val); break; |
3269 | 0 | case 3: mem_put_le24(dst, val); break; |
3270 | 0 | case 4: mem_put_le32(dst, val); break; |
3271 | 0 | default: assert(0 && "Invalid size"); break; |
3272 | 0 | } |
3273 | 0 | } |
3274 | | |
3275 | | static int remux_tiles(const CommonTileParams *const tiles, uint8_t *dst, |
3276 | | const uint32_t data_size, const uint32_t max_tile_size, |
3277 | | const uint32_t max_tile_col_size, |
3278 | | int *const tile_size_bytes, |
3279 | 0 | int *const tile_col_size_bytes) { |
3280 | | // Choose the tile size bytes (tsb) and tile column size bytes (tcsb) |
3281 | 0 | int tsb; |
3282 | 0 | int tcsb; |
3283 | 0 |
3284 | 0 | if (tiles->large_scale) { |
3285 | | // The top bit in the tile size field indicates tile copy mode, so we |
3286 | | // have 1 less bit to code the tile size |
3287 | 0 | tsb = choose_size_bytes(max_tile_size, 1); |
3288 | 0 | tcsb = choose_size_bytes(max_tile_col_size, 0); |
3289 | 0 | } else { |
3290 | 0 | tsb = choose_size_bytes(max_tile_size, 0); |
3291 | 0 | tcsb = 4; // This is ignored |
3292 | 0 | (void)max_tile_col_size; |
3293 | 0 | } |
3294 | 0 |
3295 | 0 | assert(tsb > 0); |
3296 | 0 | assert(tcsb > 0); |
3297 | 0 |
3298 | 0 | *tile_size_bytes = tsb; |
3299 | 0 | *tile_col_size_bytes = tcsb; |
3300 | 0 | if (tsb == 4 && tcsb == 4) return data_size; |
3301 | | |
3302 | 0 | uint32_t wpos = 0; |
3303 | 0 | uint32_t rpos = 0; |
3304 | 0 |
3305 | 0 | if (tiles->large_scale) { |
3306 | 0 | int tile_row; |
3307 | 0 | int tile_col; |
3308 | 0 |
3309 | 0 | for (tile_col = 0; tile_col < tiles->cols; tile_col++) { |
3310 | | // Every column but the last has a column header
3311 | 0 | if (tile_col < tiles->cols - 1) { |
3312 | 0 | uint32_t tile_col_size = mem_get_le32(dst + rpos); |
3313 | 0 | rpos += 4; |
3314 | | |
3315 | | // Adjust the tile column size by the number of bytes removed |
3316 | | // from the tile size fields. |
3317 | 0 | tile_col_size -= (4 - tsb) * tiles->rows; |
3318 | 0 |
3319 | 0 | mem_put_varsize(dst + wpos, tcsb, tile_col_size); |
3320 | 0 | wpos += tcsb; |
3321 | 0 | } |
3322 | 0 |
3323 | 0 | for (tile_row = 0; tile_row < tiles->rows; tile_row++) { |
3324 | | // All rows, including the last, have a header
3325 | 0 | uint32_t tile_header = mem_get_le32(dst + rpos); |
3326 | 0 | rpos += 4; |
3327 | | |
3328 | | // If this is a copy tile, we need to shift the MSB to the |
3329 | | // top bit of the new width, and there is no data to copy. |
3330 | 0 | if (tile_header >> 31 != 0) { |
3331 | 0 | if (tsb < 4) tile_header >>= 32 - 8 * tsb; |
3332 | 0 | mem_put_varsize(dst + wpos, tsb, tile_header); |
3333 | 0 | wpos += tsb; |
3334 | 0 | } else { |
3335 | 0 | mem_put_varsize(dst + wpos, tsb, tile_header); |
3336 | 0 | wpos += tsb; |
3337 | 0 |
3338 | 0 | tile_header += AV1_MIN_TILE_SIZE_BYTES; |
3339 | 0 | memmove(dst + wpos, dst + rpos, tile_header); |
3340 | 0 | rpos += tile_header; |
3341 | 0 | wpos += tile_header; |
3342 | 0 | } |
3343 | 0 | } |
3344 | 0 | } |
3345 | 0 |
3346 | 0 | assert(rpos > wpos); |
3347 | 0 | assert(rpos == data_size); |
3348 | 0 |
3349 | 0 | return wpos; |
3350 | 0 | } |
3351 | 0 | const int n_tiles = tiles->cols * tiles->rows; |
3352 | 0 | int n; |
3353 | 0 |
3354 | 0 | for (n = 0; n < n_tiles; n++) { |
3355 | 0 | int tile_size; |
3356 | 0 |
3357 | 0 | if (n == n_tiles - 1) { |
3358 | 0 | tile_size = data_size - rpos; |
3359 | 0 | } else { |
3360 | 0 | tile_size = mem_get_le32(dst + rpos); |
3361 | 0 | rpos += 4; |
3362 | 0 | mem_put_varsize(dst + wpos, tsb, tile_size); |
3363 | 0 | tile_size += AV1_MIN_TILE_SIZE_BYTES; |
3364 | 0 | wpos += tsb; |
3365 | 0 | } |
3366 | 0 |
3367 | 0 | memmove(dst + wpos, dst + rpos, tile_size); |
3368 | 0 |
3369 | 0 | rpos += tile_size; |
3370 | 0 | wpos += tile_size; |
3371 | 0 | } |
3372 | 0 |
3373 | 0 | assert(rpos > wpos); |
3374 | 0 | assert(rpos == data_size); |
3375 | 0 |
3376 | 0 | return wpos; |
3377 | 0 | } |
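// A sketch of the copy-tile header rewrite performed in remux_tiles()
// above (inputs are illustrative): in the 4-byte staging layout, bit 31
// flags a copy tile and the copy offset sits in the top byte, so
// shrinking the field to tsb bytes means shifting the flag down to bit
// 8 * tsb - 1.
static uint32_t shrink_copy_tile_header_sketch(uint32_t tile_header,
                                               int tsb) {
  assert(tile_header >> 31);  // only meaningful for copy tiles
  if (tsb < 4) tile_header >>= 32 - 8 * tsb;
  return tile_header;  // e.g. 0x81000000 with tsb == 1 becomes 0x81
}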
3378 | | |
3379 | | uint32_t av1_write_obu_header(AV1LevelParams *const level_params, |
3380 | | int *frame_header_count, OBU_TYPE obu_type, |
3381 | | bool has_nonzero_operating_point_idc, |
3382 | | bool is_layer_specific_obu, int obu_extension, |
3383 | 0 | uint8_t *const dst) { |
3384 | 0 | assert(IMPLIES(!has_nonzero_operating_point_idc, obu_extension == 0)); |
3385 | 0 |
3386 | 0 | if (level_params->keep_level_stats && |
3387 | 0 | (obu_type == OBU_FRAME || obu_type == OBU_FRAME_HEADER)) |
3388 | 0 | ++(*frame_header_count); |
3389 | 0 |
3390 | 0 | uint32_t size = 0; |
3391 | | |
3392 | | // The AV1 spec draft version (as of git commit 5e04f) |
3393 | | // has the following requirements on the OBU extension header: |
3394 | | // |
3395 | | // 6.4.1. General sequence header OBU semantics: |
3396 | | // If operating_point_idc[ op ] is not equal to 0 for any value of op from 0 |
3397 | | // to operating_points_cnt_minus_1, it is a requirement of bitstream |
3398 | | // conformance that obu_extension_flag is equal to 1 for all layer-specific |
3399 | | // OBUs in the coded video sequence. |
3400 | | // (...) |
3401 | | // It is a requirement of bitstream conformance that if OperatingPointIdc |
3402 | | // is equal to 0, then obu_extension_flag is equal to 0 for all OBUs that |
3403 | | // follow this sequence header until the next sequence header. |
3404 | | // |
3405 | | // Set obu_extension_flag to satisfy these requirements. |
3406 | 0 | const int obu_extension_flag = |
3407 | 0 | has_nonzero_operating_point_idc && is_layer_specific_obu; |
3408 | 0 | const int obu_has_size_field = 1; |
3409 | 0 |
3410 | 0 | dst[0] = ((int)obu_type << 3) | (obu_extension_flag << 2) | |
3411 | 0 | (obu_has_size_field << 1); |
3412 | 0 | size++; |
3413 | 0 |
3414 | 0 | if (obu_extension_flag) { |
3415 | 0 | dst[1] = obu_extension & 0xFF; |
3416 | 0 | size++; |
3417 | 0 | } |
3418 | 0 |
3419 | 0 | return size; |
3420 | 0 | } |
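// A worked example of the first header byte produced above, using the
// AV1 spec value OBU_FRAME == 6: with no extension byte and the size
// field present,
//   dst[0] == (6 << 3) | (0 << 2) | (1 << 1) == 0x32.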
3421 | | |
3422 | | int av1_write_uleb_obu_size(size_t obu_payload_size, uint8_t *dest, |
3423 | 0 | size_t dest_size) { |
3424 | 0 | size_t coded_obu_size = 0; |
3425 | 0 |
3426 | 0 | if (aom_uleb_encode(obu_payload_size, dest_size, dest, &coded_obu_size) != |
3427 | 0 | 0) { |
3428 | 0 | return AOM_CODEC_ERROR; |
3429 | 0 | } |
3430 | 0 | if (coded_obu_size != dest_size) { |
3431 | 0 | return AOM_CODEC_ERROR; |
3432 | 0 | } |
3433 | | |
3434 | 0 | return AOM_CODEC_OK; |
3435 | 0 | } |
3436 | | |
3437 | | // Deprecated. Use av1_write_uleb_obu_size() instead. |
3438 | | static int av1_write_uleb_obu_size_unsafe(size_t obu_payload_size, |
3439 | 0 | uint8_t *dest) { |
3440 | 0 | size_t coded_obu_size = 0; |
3441 | 0 |
3442 | 0 | if (aom_uleb_encode(obu_payload_size, sizeof(uint32_t), dest, |
3443 | 0 | &coded_obu_size) != 0) { |
3444 | 0 | return AOM_CODEC_ERROR; |
3445 | 0 | } |
3446 | | |
3447 | 0 | return AOM_CODEC_OK; |
3448 | 0 | } |
3449 | | |
3450 | | // Returns 0 on failure. |
3451 | | static size_t obu_memmove(size_t obu_header_size, size_t obu_payload_size, |
3452 | 0 | uint8_t *data, size_t data_size) { |
3453 | 0 | const size_t length_field_size = aom_uleb_size_in_bytes(obu_payload_size); |
3454 | 0 | const size_t move_dst_offset = obu_header_size + length_field_size; |
3455 | 0 | const size_t move_src_offset = obu_header_size; |
3456 | 0 | const size_t move_size = obu_payload_size; |
3457 | 0 | if (move_size > data_size || move_src_offset > data_size - move_size) { |
3458 | 0 | assert(0 && "obu_memmove: output buffer overflow"); |
3459 | 0 | return 0; |
3460 | 0 | } |
3461 | 0 | if (move_dst_offset > data_size - move_size) { |
3462 | | // Buffer full. |
3463 | 0 | return 0; |
3464 | 0 | } |
3465 | 0 | memmove(data + move_dst_offset, data + move_src_offset, move_size); |
3466 | 0 | return length_field_size; |
3467 | 0 | } |
3468 | | |
3469 | | // Deprecated. Use obu_memmove() instead. |
3470 | | static size_t obu_memmove_unsafe(size_t obu_header_size, |
3471 | 0 | size_t obu_payload_size, uint8_t *data) { |
3472 | 0 | const size_t length_field_size = aom_uleb_size_in_bytes(obu_payload_size); |
3473 | 0 | const size_t move_dst_offset = obu_header_size + length_field_size; |
3474 | 0 | const size_t move_src_offset = obu_header_size; |
3475 | 0 | const size_t move_size = obu_payload_size; |
3476 | 0 | memmove(data + move_dst_offset, data + move_src_offset, move_size); |
3477 | 0 | return length_field_size; |
3478 | 0 | } |
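// A standalone re-derivation (for illustration; aom_uleb_size_in_bytes()
// is the real helper) of the length-field math used by both obu_memmove
// variants above: ULEB128 stores 7 payload bits per byte, so the payload
// is shifted forward by ceil(bits / 7) bytes to make room for its own
// size field:
//   [header][payload]             before
//   [header][uleb(size)][payload] after
static size_t uleb_size_in_bytes_sketch(uint64_t value) {
  size_t size = 1;
  while (value >= 0x80) {  // 127 -> 1 byte, 128 -> 2 bytes, ...
    value >>= 7;
    ++size;
  }
  return size;
}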
3479 | | |
3480 | 0 | static inline void add_trailing_bits(struct aom_write_bit_buffer *wb) { |
3481 | 0 | if (aom_wb_is_byte_aligned(wb)) { |
3482 | 0 | aom_wb_write_literal(wb, 0x80, 8); |
3483 | 0 | } else { |
3484 | | // assumes that the other bits are already 0s |
3485 | 0 | aom_wb_write_bit(wb, 1); |
3486 | 0 | } |
3487 | 0 | } |
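// A bit-level sketch of the trailing-bits rule applied above (the helper
// is illustrative): a stop bit 1 followed by zero padding to the next
// byte boundary. With a byte-aligned buffer the whole byte 0x80 is
// emitted, matching the first branch.
static uint8_t trailing_bits_final_byte_sketch(uint8_t partial,
                                               int bits_used) {
  assert(bits_used >= 0 && bits_used < 8);
  // e.g. partial 0b101 with 3 bits used -> 0b10110000 == 0xB0.
  return (uint8_t)((partial << (8 - bits_used)) | (1u << (7 - bits_used)));
}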
3488 | | |
3489 | | static inline void write_bitstream_level(AV1_LEVEL seq_level_idx, |
3490 | 0 | struct aom_write_bit_buffer *wb) { |
3491 | 0 | assert(is_valid_seq_level_idx(seq_level_idx)); |
3492 | 0 | aom_wb_write_literal(wb, seq_level_idx, LEVEL_BITS); |
3493 | 0 | } |
3494 | | |
3495 | | uint32_t av1_write_sequence_header_obu(const SequenceHeader *seq_params, |
3496 | 0 | uint8_t *const dst, size_t dst_size) { |
3497 | | // TODO: bug 42302568 - Use dst_size. |
3498 | 0 | (void)dst_size; |
3499 | 0 | struct aom_write_bit_buffer wb = { dst, 0 }; |
3500 | 0 | uint32_t size = 0; |
3501 | 0 |
3502 | 0 | write_profile(seq_params->profile, &wb); |
3503 | | |
3504 | | // Still picture or not |
3505 | 0 | aom_wb_write_bit(&wb, seq_params->still_picture); |
3506 | 0 | assert(IMPLIES(!seq_params->still_picture, |
3507 | 0 | !seq_params->reduced_still_picture_hdr)); |
3508 | | // whether to use reduced still picture header |
3509 | 0 | aom_wb_write_bit(&wb, seq_params->reduced_still_picture_hdr); |
3510 | 0 |
3511 | 0 | if (seq_params->reduced_still_picture_hdr) { |
3512 | 0 | assert(seq_params->timing_info_present == 0); |
3513 | 0 | assert(seq_params->decoder_model_info_present_flag == 0); |
3514 | 0 | assert(seq_params->display_model_info_present_flag == 0); |
3515 | 0 | write_bitstream_level(seq_params->seq_level_idx[0], &wb); |
3516 | 0 | } else { |
3517 | 0 | aom_wb_write_bit( |
3518 | 0 | &wb, seq_params->timing_info_present); // timing info present flag |
3519 | 0 |
3520 | 0 | if (seq_params->timing_info_present) { |
3521 | | // timing_info |
3522 | 0 | write_timing_info_header(&seq_params->timing_info, &wb); |
3523 | 0 | aom_wb_write_bit(&wb, seq_params->decoder_model_info_present_flag); |
3524 | 0 | if (seq_params->decoder_model_info_present_flag) { |
3525 | 0 | write_decoder_model_info(&seq_params->decoder_model_info, &wb); |
3526 | 0 | } |
3527 | 0 | } |
3528 | 0 | aom_wb_write_bit(&wb, seq_params->display_model_info_present_flag); |
3529 | 0 | aom_wb_write_literal(&wb, seq_params->operating_points_cnt_minus_1, |
3530 | 0 | OP_POINTS_CNT_MINUS_1_BITS); |
3531 | 0 | int i; |
3532 | 0 | for (i = 0; i < seq_params->operating_points_cnt_minus_1 + 1; i++) { |
3533 | 0 | aom_wb_write_literal(&wb, seq_params->operating_point_idc[i], |
3534 | 0 | OP_POINTS_IDC_BITS); |
3535 | 0 | write_bitstream_level(seq_params->seq_level_idx[i], &wb); |
3536 | 0 | if (seq_params->seq_level_idx[i] >= SEQ_LEVEL_4_0) |
3537 | 0 | aom_wb_write_bit(&wb, seq_params->tier[i]); |
3538 | 0 | if (seq_params->decoder_model_info_present_flag) { |
3539 | 0 | aom_wb_write_bit( |
3540 | 0 | &wb, seq_params->op_params[i].decoder_model_param_present_flag); |
3541 | 0 | if (seq_params->op_params[i].decoder_model_param_present_flag) { |
3542 | 0 | write_dec_model_op_parameters( |
3543 | 0 | &seq_params->op_params[i], |
3544 | 0 | seq_params->decoder_model_info |
3545 | 0 | .encoder_decoder_buffer_delay_length, |
3546 | 0 | &wb); |
3547 | 0 | } |
3548 | 0 | } |
3549 | 0 | if (seq_params->display_model_info_present_flag) { |
3550 | 0 | aom_wb_write_bit( |
3551 | 0 | &wb, seq_params->op_params[i].display_model_param_present_flag); |
3552 | 0 | if (seq_params->op_params[i].display_model_param_present_flag) { |
3553 | 0 | assert(seq_params->op_params[i].initial_display_delay >= 1); |
3554 | 0 | assert(seq_params->op_params[i].initial_display_delay <= 10); |
3555 | 0 | aom_wb_write_literal( |
3556 | 0 | &wb, seq_params->op_params[i].initial_display_delay - 1, 4); |
3557 | 0 | } |
3558 | 0 | } |
3559 | 0 | } |
3560 | 0 | } |
3561 | 0 | write_sequence_header(seq_params, &wb); |
3562 | 0 |
3563 | 0 | write_color_config(seq_params, &wb); |
3564 | 0 |
3565 | 0 | aom_wb_write_bit(&wb, seq_params->film_grain_params_present); |
3566 | 0 |
3567 | 0 | add_trailing_bits(&wb); |
3568 | 0 |
3569 | 0 | size = aom_wb_bytes_written(&wb); |
3570 | 0 | return size; |
3571 | 0 | } |
3572 | | |
3573 | | static uint32_t write_frame_header_obu(AV1_COMP *cpi, MACROBLOCKD *const xd, |
3574 | | struct aom_write_bit_buffer *saved_wb, |
3575 | | uint8_t *const dst, |
3576 | 0 | int append_trailing_bits) { |
3577 | 0 | struct aom_write_bit_buffer wb = { dst, 0 }; |
3578 | 0 | write_uncompressed_header_obu(cpi, xd, saved_wb, &wb); |
3579 | 0 | if (append_trailing_bits) add_trailing_bits(&wb); |
3580 | 0 | return aom_wb_bytes_written(&wb); |
3581 | 0 | } |
3582 | | |
3583 | | static uint32_t write_tile_group_header(uint8_t *const dst, int start_tile, |
3584 | | int end_tile, int tiles_log2, |
3585 | 0 | int tile_start_and_end_present_flag) { |
3586 | 0 | struct aom_write_bit_buffer wb = { dst, 0 }; |
3587 | 0 | uint32_t size = 0; |
3588 | 0 |
3589 | 0 | if (!tiles_log2) return size; |
3590 | | |
3591 | 0 | aom_wb_write_bit(&wb, tile_start_and_end_present_flag); |
3592 | 0 |
3593 | 0 | if (tile_start_and_end_present_flag) { |
3594 | 0 | aom_wb_write_literal(&wb, start_tile, tiles_log2); |
3595 | 0 | aom_wb_write_literal(&wb, end_tile, tiles_log2); |
3596 | 0 | } |
3597 | 0 |
3598 | 0 | size = aom_wb_bytes_written(&wb); |
3599 | 0 | return size; |
3600 | 0 | } |
3601 | | |
3602 | | typedef struct { |
3603 | | uint32_t tg_hdr_size; |
3604 | | uint32_t frame_header_size; |
3605 | | } LargeTileFrameOBU; |
3606 | | |
3607 | | // Initialize OBU header for large scale tile case. |
3608 | | static uint32_t init_large_scale_tile_obu_header( |
3609 | | AV1_COMP *const cpi, uint8_t **data, struct aom_write_bit_buffer *saved_wb, |
3610 | 0 | uint8_t obu_extension_header, LargeTileFrameOBU *lst_obu) { |
3611 | 0 | AV1LevelParams *const level_params = &cpi->ppi->level_params; |
3612 | 0 | CurrentFrame *const current_frame = &cpi->common.current_frame; |
3613 | | // For large_scale_tile case, we always have only one tile group, so it can |
3614 | | // be written as an OBU_FRAME. |
3615 | 0 | const OBU_TYPE obu_type = OBU_FRAME; |
3616 | 0 | lst_obu->tg_hdr_size = av1_write_obu_header( |
3617 | 0 | level_params, &cpi->frame_header_count, obu_type, |
3618 | 0 | cpi->common.seq_params->has_nonzero_operating_point_idc, |
3619 | | /*is_layer_specific_obu=*/true, obu_extension_header, *data); |
3620 | 0 | *data += lst_obu->tg_hdr_size; |
3621 | 0 |
3622 | 0 | const uint32_t frame_header_size = |
3623 | 0 | write_frame_header_obu(cpi, &cpi->td.mb.e_mbd, saved_wb, *data, 0); |
3624 | 0 | *data += frame_header_size; |
3625 | 0 | lst_obu->frame_header_size = frame_header_size; |
3626 | | // (yunqing) This test ensures the correctness of large scale tile coding. |
3627 | 0 | if (cpi->oxcf.tile_cfg.enable_ext_tile_debug) { |
3628 | 0 | char fn[20] = "./fh"; |
3629 | 0 | fn[4] = current_frame->frame_number / 100 + '0'; |
3630 | 0 | fn[5] = (current_frame->frame_number % 100) / 10 + '0'; |
3631 | 0 | fn[6] = (current_frame->frame_number % 10) + '0'; |
3632 | 0 | fn[7] = '\0'; |
3633 | 0 | av1_print_uncompressed_frame_header(*data - frame_header_size, |
3634 | 0 | frame_header_size, fn); |
3635 | 0 | } |
3636 | 0 | return frame_header_size; |
3637 | 0 | } |
3638 | | |
3639 | | // Write total buffer size and related information into the OBU header for large |
3640 | | // scale tile case. |
3641 | | static void write_large_scale_tile_obu_size( |
3642 | | const CommonTileParams *const tiles, uint8_t *const dst, uint8_t *data, |
3643 | | struct aom_write_bit_buffer *saved_wb, LargeTileFrameOBU *const lst_obu, |
3644 | | int have_tiles, uint32_t *total_size, int max_tile_size, |
3645 | 0 | int max_tile_col_size) { |
3646 | 0 | int tile_size_bytes = 0; |
3647 | 0 | int tile_col_size_bytes = 0; |
3648 | 0 | if (have_tiles) { |
3649 | 0 | *total_size = remux_tiles( |
3650 | 0 | tiles, data, *total_size - lst_obu->frame_header_size, max_tile_size, |
3651 | 0 | max_tile_col_size, &tile_size_bytes, &tile_col_size_bytes); |
3652 | 0 | *total_size += lst_obu->frame_header_size; |
3653 | 0 | } |
3654 | | |
3655 | | // In the EXT_TILE case, only one tile group is used. Following the OBU
3656 | | // syntax, write the current tile group size before the tile data (which
3657 | | // includes the tile column header); the size excludes the bytes storing it.
3658 | 0 | *total_size += lst_obu->tg_hdr_size; |
3659 | 0 | const uint32_t obu_payload_size = *total_size - lst_obu->tg_hdr_size; |
3660 | 0 | const size_t length_field_size = |
3661 | 0 | obu_memmove_unsafe(lst_obu->tg_hdr_size, obu_payload_size, dst); |
3662 | 0 | if (av1_write_uleb_obu_size_unsafe( |
3663 | 0 | obu_payload_size, dst + lst_obu->tg_hdr_size) != AOM_CODEC_OK) |
3664 | 0 | assert(0); |
3665 | 0 |
3666 | 0 | *total_size += (uint32_t)length_field_size; |
3667 | 0 | saved_wb->bit_buffer += length_field_size; |
3668 | | |
3669 | | // Now fill in the gaps in the uncompressed header. |
3670 | 0 | if (have_tiles) { |
3671 | 0 | assert(tile_col_size_bytes >= 1 && tile_col_size_bytes <= 4); |
3672 | 0 | aom_wb_overwrite_literal(saved_wb, tile_col_size_bytes - 1, 2); |
3673 | 0 |
3674 | 0 | assert(tile_size_bytes >= 1 && tile_size_bytes <= 4); |
3675 | 0 | aom_wb_overwrite_literal(saved_wb, tile_size_bytes - 1, 2); |
3676 | 0 | } |
3677 | 0 | } |
3678 | | |
3679 | | // Store information on each large scale tile in the OBU header. |
3680 | | static void write_large_scale_tile_obu( |
3681 | | AV1_COMP *const cpi, uint8_t *const dst, LargeTileFrameOBU *const lst_obu, |
3682 | | int *const largest_tile_id, uint32_t *total_size, const int have_tiles, |
3683 | 0 | unsigned int *const max_tile_size, unsigned int *const max_tile_col_size) { |
3684 | 0 | AV1_COMMON *const cm = &cpi->common; |
3685 | 0 | const CommonTileParams *const tiles = &cm->tiles; |
3686 | 0 |
3687 | 0 | TileBufferEnc tile_buffers[MAX_TILE_ROWS][MAX_TILE_COLS]; |
3688 | 0 | const int tile_cols = tiles->cols; |
3689 | 0 | const int tile_rows = tiles->rows; |
3690 | 0 | unsigned int tile_size = 0; |
3691 | 0 |
3692 | 0 | av1_reset_pack_bs_thread_data(&cpi->td); |
3693 | 0 | for (int tile_col = 0; tile_col < tile_cols; tile_col++) { |
3694 | 0 | TileInfo tile_info; |
3695 | 0 | const int is_last_col = (tile_col == tile_cols - 1); |
3696 | 0 | const uint32_t col_offset = *total_size; |
3697 | 0 |
3698 | 0 | av1_tile_set_col(&tile_info, cm, tile_col); |
3699 | | |
3700 | | // The last column does not have a column header |
3701 | 0 | if (!is_last_col) *total_size += 4; |
3702 | 0 |
3703 | 0 | for (int tile_row = 0; tile_row < tile_rows; tile_row++) { |
3704 | 0 | TileBufferEnc *const buf = &tile_buffers[tile_row][tile_col]; |
3705 | 0 | const int data_offset = have_tiles ? 4 : 0; |
3706 | 0 | const int tile_idx = tile_row * tile_cols + tile_col; |
3707 | 0 | TileDataEnc *this_tile = &cpi->tile_data[tile_idx]; |
3708 | 0 | av1_tile_set_row(&tile_info, cm, tile_row); |
3709 | 0 | aom_writer mode_bc; |
3710 | 0 |
3711 | 0 | buf->data = dst + *total_size + lst_obu->tg_hdr_size; |
3712 | | |
3713 | | // If CONFIG_EXT_TILE = 1, every tile in the row has a header,
3714 | | // even for the last one, unless no tiling is used at all. |
3715 | 0 | *total_size += data_offset; |
3716 | 0 | cpi->td.mb.e_mbd.tile_ctx = &this_tile->tctx; |
3717 | 0 | mode_bc.allow_update_cdf = !tiles->large_scale; |
3718 | 0 | mode_bc.allow_update_cdf = |
3719 | 0 | mode_bc.allow_update_cdf && !cm->features.disable_cdf_update; |
3720 | 0 | aom_start_encode(&mode_bc, buf->data + data_offset); |
3721 | 0 | write_modes(cpi, &cpi->td, &tile_info, &mode_bc, tile_row, tile_col); |
3722 | 0 | if (aom_stop_encode(&mode_bc) < 0) { |
3723 | 0 | aom_internal_error(cm->error, AOM_CODEC_ERROR, "Error writing modes"); |
3724 | 0 | } |
3725 | 0 | tile_size = mode_bc.pos; |
3726 | 0 | buf->size = tile_size; |
3727 | | |
3728 | | // Record the maximum tile size we see, so we can compact headers later. |
3729 | 0 | if (tile_size > *max_tile_size) { |
3730 | 0 | *max_tile_size = tile_size; |
3731 | 0 | *largest_tile_id = tile_cols * tile_row + tile_col; |
3732 | 0 | } |
3733 | 0 |
3734 | 0 | if (have_tiles) { |
3735 | | // tile header: size of this tile, or copy offset |
3736 | 0 | uint32_t tile_header = tile_size - AV1_MIN_TILE_SIZE_BYTES; |
3737 | 0 | const int tile_copy_mode = |
3738 | 0 | ((AOMMAX(tiles->width, tiles->height) << MI_SIZE_LOG2) <= 256) ? 1 |
3739 | 0 | : 0; |
3740 | | |
3741 | | // If tile_copy_mode = 1, check if this tile is a copy tile. |
3742 | | // Copy tiles are very unlikely on key frames, so skip the search on
3743 | | // key frames to avoid unnecessary work.
3744 | 0 | if (cm->current_frame.frame_type != KEY_FRAME && tile_copy_mode) { |
3745 | 0 | const int identical_tile_offset = |
3746 | 0 | find_identical_tile(tile_row, tile_col, tile_buffers); |
3747 | | |
3748 | | // Indicate a copy-tile by setting the most significant bit. |
3749 | | // The row-offset to copy from is stored in the highest byte. |
3750 | | // remux_tiles will move these around later |
3751 | 0 | if (identical_tile_offset > 0) { |
3752 | 0 | tile_size = 0; |
3753 | 0 | tile_header = identical_tile_offset | 0x80; |
3754 | 0 | tile_header <<= 24; |
3755 | 0 | } |
3756 | 0 | } |
3757 | 0 |
3758 | 0 | mem_put_le32(buf->data, (MEM_VALUE_T)tile_header); |
3759 | 0 | } |
3760 | 0 |
3761 | 0 | *total_size += tile_size; |
3762 | 0 | } |
3763 | 0 | if (!is_last_col) { |
3764 | 0 | uint32_t col_size = *total_size - col_offset - 4; |
3765 | 0 | mem_put_le32(dst + col_offset + lst_obu->tg_hdr_size, col_size); |
3766 | | |
3767 | | // Record the maximum tile column size we see. |
3768 | 0 | *max_tile_col_size = AOMMAX(*max_tile_col_size, col_size); |
3769 | 0 | } |
3770 | 0 | } |
3771 | 0 | av1_accumulate_pack_bs_thread_data(cpi, &cpi->td); |
3772 | 0 | } |
3773 | | |
3774 | | // Packs information in the obu header for large scale tiles. |
3775 | | static inline uint32_t pack_large_scale_tiles_in_tg_obus( |
3776 | | AV1_COMP *const cpi, uint8_t *const dst, |
3777 | | struct aom_write_bit_buffer *saved_wb, uint8_t obu_extension_header, |
3778 | 0 | int *const largest_tile_id) { |
3779 | 0 | AV1_COMMON *const cm = &cpi->common; |
3780 | 0 | const CommonTileParams *const tiles = &cm->tiles; |
3781 | 0 | uint32_t total_size = 0; |
3782 | 0 | unsigned int max_tile_size = 0; |
3783 | 0 | unsigned int max_tile_col_size = 0; |
3784 | 0 | const int have_tiles = tiles->cols * tiles->rows > 1; |
3785 | 0 | uint8_t *data = dst; |
3786 | 0 |
3787 | 0 | LargeTileFrameOBU lst_obu; |
3788 | 0 |
3789 | 0 | total_size += init_large_scale_tile_obu_header( |
3790 | 0 | cpi, &data, saved_wb, obu_extension_header, &lst_obu); |
3791 | 0 |
3792 | 0 | write_large_scale_tile_obu(cpi, dst, &lst_obu, largest_tile_id, &total_size, |
3793 | 0 | have_tiles, &max_tile_size, &max_tile_col_size); |
3794 | |
3795 | 0 | write_large_scale_tile_obu_size(tiles, dst, data, saved_wb, &lst_obu, |
3796 | 0 | have_tiles, &total_size, max_tile_size, |
3797 | 0 | max_tile_col_size); |
3798 | |
3799 | 0 | return total_size; |
3800 | 0 | } |
3801 | | |
3802 | | // Writes obu, tile group and uncompressed headers to bitstream. |
3803 | | void av1_write_obu_tg_tile_headers(AV1_COMP *const cpi, MACROBLOCKD *const xd, |
3804 | | PackBSParams *const pack_bs_params, |
3805 | 0 | const int tile_idx) { |
3806 | 0 | AV1_COMMON *const cm = &cpi->common; |
3807 | 0 | const CommonTileParams *const tiles = &cm->tiles; |
3808 | 0 | int *const curr_tg_hdr_size = &pack_bs_params->curr_tg_hdr_size; |
3809 | 0 | const int tg_size = |
3810 | 0 | (tiles->rows * tiles->cols + cpi->num_tg - 1) / cpi->num_tg; |
3811 | | |
3812 | | // Write the tile group, frame and OBU headers.
3813 | | // A new tile group begins at this tile. Write the obu header and
3814 | | // tile group header.
3815 | 0 | const OBU_TYPE obu_type = (cpi->num_tg == 1) ? OBU_FRAME : OBU_TILE_GROUP; |
3816 | 0 | *curr_tg_hdr_size = av1_write_obu_header( |
3817 | 0 | &cpi->ppi->level_params, &cpi->frame_header_count, obu_type, |
3818 | 0 | cm->seq_params->has_nonzero_operating_point_idc, |
3819 | | /*is_layer_specific_obu=*/true, pack_bs_params->obu_extn_header, |
3820 | 0 | pack_bs_params->tile_data_curr); |
3821 | 0 | pack_bs_params->obu_header_size = *curr_tg_hdr_size; |
3822 | |
3823 | 0 | if (cpi->num_tg == 1) |
3824 | 0 | *curr_tg_hdr_size += write_frame_header_obu( |
3825 | 0 | cpi, xd, pack_bs_params->saved_wb, |
3826 | 0 | pack_bs_params->tile_data_curr + *curr_tg_hdr_size, 0); |
3827 | 0 | *curr_tg_hdr_size += write_tile_group_header( |
3828 | 0 | pack_bs_params->tile_data_curr + *curr_tg_hdr_size, tile_idx, |
3829 | 0 | AOMMIN(tile_idx + tg_size - 1, tiles->cols * tiles->rows - 1), |
3830 | 0 | (tiles->log2_rows + tiles->log2_cols), cpi->num_tg > 1); |
3831 | 0 | *pack_bs_params->total_size += *curr_tg_hdr_size; |
3832 | 0 | } |
3833 | | |
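The tg_size expression above is a ceiling division, so the fixed-size tile groups differ only by the remainder in the last group: with, say, 8 tiles and num_tg = 3, tg_size = (8 + 3 - 1) / 3 = 3 and the groups hold 3, 3 and 2 tiles, the AOMMIN() clamp covering the short final group. A small sketch of the same arithmetic (hypothetical helper, not libaom code):

static int tiles_in_tile_group(int tg_idx, int num_tiles, int num_tg) {
  const int tg_size = (num_tiles + num_tg - 1) / num_tg;  // ceiling division
  const int first_tile = tg_idx * tg_size;
  const int last_tile = AOMMIN(first_tile + tg_size - 1, num_tiles - 1);
  return last_tile - first_tile + 1;  // e.g. 8 tiles, 3 groups -> 3, 3, 2
}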
3834 | | // Pack tile data in the bitstream with tile_group, frame |
3835 | | // and OBU header. |
3836 | | void av1_pack_tile_info(AV1_COMP *const cpi, ThreadData *const td, |
3837 | 0 | PackBSParams *const pack_bs_params) { |
3838 | 0 | aom_writer mode_bc; |
3839 | 0 | AV1_COMMON *const cm = &cpi->common; |
3840 | 0 | int tile_row = pack_bs_params->tile_row; |
3841 | 0 | int tile_col = pack_bs_params->tile_col; |
3842 | 0 | uint32_t *const total_size = pack_bs_params->total_size; |
3843 | 0 | TileInfo tile_info; |
3844 | 0 | av1_tile_set_col(&tile_info, cm, tile_col); |
3845 | 0 | av1_tile_set_row(&tile_info, cm, tile_row); |
3846 | 0 | mode_bc.allow_update_cdf = 1; |
3847 | 0 | mode_bc.allow_update_cdf = |
3848 | 0 | mode_bc.allow_update_cdf && !cm->features.disable_cdf_update; |
3849 | |
3850 | 0 | unsigned int tile_size; |
3851 | |
3852 | 0 | const int num_planes = av1_num_planes(cm); |
3853 | 0 | av1_reset_loop_restoration(&td->mb.e_mbd, num_planes); |
3854 | |
3855 | 0 | pack_bs_params->buf.data = pack_bs_params->dst + *total_size; |
3856 | | |
3857 | | // The last tile of the tile group does not have a header. |
3858 | 0 | if (!pack_bs_params->is_last_tile_in_tg) *total_size += 4; |
3859 | | |
3860 | | // Pack tile data |
3861 | 0 | aom_start_encode(&mode_bc, pack_bs_params->dst + *total_size); |
3862 | 0 | write_modes(cpi, td, &tile_info, &mode_bc, tile_row, tile_col); |
3863 | 0 | if (aom_stop_encode(&mode_bc) < 0) { |
3864 | 0 | aom_internal_error(td->mb.e_mbd.error_info, AOM_CODEC_ERROR, |
3865 | 0 | "Error writing modes"); |
3866 | 0 | } |
3867 | 0 | tile_size = mode_bc.pos; |
3868 | 0 | assert(tile_size >= AV1_MIN_TILE_SIZE_BYTES); |
3869 | |
3870 | 0 | pack_bs_params->buf.size = tile_size; |
3871 | | |
3872 | | // Write tile size |
3873 | 0 | if (!pack_bs_params->is_last_tile_in_tg) { |
3874 | | // size of this tile |
3875 | 0 | mem_put_le32(pack_bs_params->buf.data, tile_size - AV1_MIN_TILE_SIZE_BYTES); |
3876 | 0 | } |
3877 | 0 | } |
3878 | | |
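For reference, the read side of this framing, as a sketch (not libaom code, and assuming the initial 4-byte size fields before write_tile_obu_size() possibly narrows them): every tile except the last in its tile group is preceded by a little-endian field holding tile_size - AV1_MIN_TILE_SIZE_BYTES, and the last tile runs to the end of the tile group payload.

static const uint8_t *next_tile(const uint8_t *p, const uint8_t *end,
                                int is_last_in_tg, size_t *tile_size) {
  if (is_last_in_tg) {
    *tile_size = (size_t)(end - p);  // last tile: the rest of the payload
    return p;
  }
  *tile_size = (size_t)mem_get_le32(p) + AV1_MIN_TILE_SIZE_BYTES;
  return p + 4;  // skip the size field; the tile payload follows
}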
3879 | | void av1_write_last_tile_info( |
3880 | | AV1_COMP *const cpi, const FrameHeaderInfo *fh_info, |
3881 | | struct aom_write_bit_buffer *saved_wb, size_t *curr_tg_data_size, |
3882 | | uint8_t *curr_tg_start, uint32_t *const total_size, |
3883 | | uint8_t **tile_data_start, int *const largest_tile_id, |
3884 | 0 | int *const is_first_tg, uint32_t obu_header_size, uint8_t obu_extn_header) { |
3885 | | // write current tile group size |
3886 | 0 | const size_t obu_payload_size = *curr_tg_data_size - obu_header_size; |
3887 | 0 | const size_t length_field_size = |
3888 | 0 | obu_memmove_unsafe(obu_header_size, obu_payload_size, curr_tg_start); |
3889 | 0 | if (av1_write_uleb_obu_size_unsafe( |
3890 | 0 | obu_payload_size, curr_tg_start + obu_header_size) != AOM_CODEC_OK) { |
3891 | 0 | aom_internal_error(cpi->common.error, AOM_CODEC_ERROR, |
3892 | 0 | "av1_write_last_tile_info: output buffer full"); |
3893 | 0 | } |
3894 | 0 | *curr_tg_data_size += length_field_size; |
3895 | 0 | *total_size += (uint32_t)length_field_size; |
3896 | 0 | *tile_data_start += length_field_size; |
3897 | 0 | if (cpi->num_tg == 1) { |
3898 | | // if this tg is combined with the frame header then update saved |
3899 | | // frame header base offset according to length field size |
3900 | 0 | saved_wb->bit_buffer += length_field_size; |
3901 | 0 | } |
3902 | |
3903 | 0 | if (!(*is_first_tg) && cpi->common.features.error_resilient_mode) { |
3904 | | // Make room for a duplicate Frame Header OBU. |
3905 | 0 | memmove(curr_tg_start + fh_info->total_length, curr_tg_start, |
3906 | 0 | *curr_tg_data_size); |
3907 | | |
3908 | | // Insert a copy of the Frame Header OBU. |
3909 | 0 | memcpy(curr_tg_start, fh_info->frame_header, fh_info->total_length); |
3910 | | |
3911 | | // Force context update tile to be the first tile in error |
3912 | | // resilient mode as the duplicate frame headers will have |
3913 | | // context_update_tile_id set to 0 |
3914 | 0 | *largest_tile_id = 0; |
3915 | | |
3916 | | // Rewrite the OBU header to change the OBU type to Redundant Frame |
3917 | | // Header. |
3918 | 0 | av1_write_obu_header( |
3919 | 0 | &cpi->ppi->level_params, &cpi->frame_header_count, |
3920 | 0 | OBU_REDUNDANT_FRAME_HEADER, |
3921 | 0 | cpi->common.seq_params->has_nonzero_operating_point_idc, |
3922 | | /*is_layer_specific_obu=*/true, obu_extn_header, |
3923 | 0 | &curr_tg_start[fh_info->obu_header_byte_offset]); |
3924 | |
3925 | 0 | *curr_tg_data_size += fh_info->total_length; |
3926 | 0 | *total_size += (uint32_t)fh_info->total_length; |
3927 | 0 | } |
3928 | 0 | *is_first_tg = 0; |
3929 | 0 | } |
3930 | | |
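The obu_memmove_unsafe()/av1_write_uleb_obu_size_unsafe() pair above is the usual "insert a length field after the fact" step: shift the payload right by the width of the ULEB128-encoded size, then write that size into the gap. A self-contained sketch of the pattern, not libaom code, omitting the capacity checks the real helpers perform (<string.h> and <stdint.h> are already included at the top of this file):

static size_t insert_uleb_length_field(uint8_t *obu, size_t header_size,
                                       size_t payload_size) {
  size_t len = 0;
  for (uint64_t v = payload_size;; v >>= 7) {  // ULEB128 width of the size
    ++len;
    if (v < 0x80) break;
  }
  memmove(obu + header_size + len, obu + header_size, payload_size);
  uint64_t v = payload_size;
  for (size_t i = 0; i < len; ++i) {  // 7 bits per byte, LSB first
    obu[header_size + i] = (uint8_t)((v & 0x7f) | (i + 1 < len ? 0x80 : 0));
    v >>= 7;
  }
  return len;  // the caller adds this to its running totals
}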
3931 | 0 | void av1_reset_pack_bs_thread_data(ThreadData *const td) { |
3932 | 0 | td->coefficient_size = 0; |
3933 | 0 | td->max_mv_magnitude = 0; |
3934 | 0 | av1_zero(td->interp_filter_selected); |
3935 | 0 | } |
3936 | | |
3937 | | void av1_accumulate_pack_bs_thread_data(AV1_COMP *const cpi, |
3938 | 0 | ThreadData const *td) { |
3939 | 0 | int do_max_mv_magnitude_update = 1; |
3940 | 0 | cpi->rc.coefficient_size += td->coefficient_size; |
3941 | | |
3942 | | // Disable max_mv_magnitude update for parallel frames based on update flag. |
3943 | 0 | if (!cpi->do_frame_data_update) do_max_mv_magnitude_update = 0; |
3944 | |
3945 | 0 | if (cpi->sf.mv_sf.auto_mv_step_size && do_max_mv_magnitude_update) |
3946 | 0 | cpi->mv_search_params.max_mv_magnitude = |
3947 | 0 | AOMMAX(cpi->mv_search_params.max_mv_magnitude, td->max_mv_magnitude); |
3948 | |
3949 | 0 | for (InterpFilter filter = EIGHTTAP_REGULAR; filter < SWITCHABLE; filter++) |
3950 | 0 | cpi->common.cur_frame->interp_filter_selected[filter] += |
3951 | 0 | td->interp_filter_selected[filter]; |
3952 | 0 | } |
3953 | | |
3954 | | // Store information related to each default tile in the OBU header. |
3955 | | static void write_tile_obu( |
3956 | | AV1_COMP *const cpi, uint8_t *const dst, uint32_t *total_size, |
3957 | | struct aom_write_bit_buffer *saved_wb, uint8_t obu_extn_header, |
3958 | | const FrameHeaderInfo *fh_info, int *const largest_tile_id, |
3959 | | unsigned int *max_tile_size, uint32_t *const obu_header_size, |
3960 | 0 | uint8_t **tile_data_start) { |
3961 | 0 | AV1_COMMON *const cm = &cpi->common; |
3962 | 0 | MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; |
3963 | 0 | const CommonTileParams *const tiles = &cm->tiles; |
3964 | 0 | const int tile_cols = tiles->cols; |
3965 | 0 | const int tile_rows = tiles->rows; |
3966 | | // Fixed size tile groups for the moment |
3967 | 0 | const int num_tg_hdrs = cpi->num_tg; |
3968 | 0 | const int tg_size = (tile_rows * tile_cols + num_tg_hdrs - 1) / num_tg_hdrs; |
3969 | 0 | int tile_count = 0; |
3970 | 0 | size_t curr_tg_data_size = 0; |
3971 | 0 | uint8_t *tile_data_curr = dst; |
3972 | 0 | int new_tg = 1; |
3973 | 0 | int is_first_tg = 1; |
3974 | |
3975 | 0 | av1_reset_pack_bs_thread_data(&cpi->td); |
3976 | 0 | for (int tile_row = 0; tile_row < tile_rows; tile_row++) { |
3977 | 0 | for (int tile_col = 0; tile_col < tile_cols; tile_col++) { |
3978 | 0 | const int tile_idx = tile_row * tile_cols + tile_col; |
3979 | 0 | TileDataEnc *this_tile = &cpi->tile_data[tile_idx]; |
3980 | |
3981 | 0 | int is_last_tile_in_tg = 0; |
3982 | 0 | if (new_tg) { |
3983 | 0 | tile_data_curr = dst + *total_size; |
3984 | 0 | tile_count = 0; |
3985 | 0 | } |
3986 | 0 | tile_count++; |
3987 | |
3988 | 0 | if (tile_count == tg_size || tile_idx == (tile_cols * tile_rows - 1)) |
3989 | 0 | is_last_tile_in_tg = 1; |
3990 | |
|
3991 | 0 | xd->tile_ctx = &this_tile->tctx; |
3992 | | |
3993 | | // PackBSParams stores all parameters required to pack tile and header |
3994 | | // info. |
3995 | 0 | PackBSParams pack_bs_params; |
3996 | 0 | pack_bs_params.dst = dst; |
3997 | 0 | pack_bs_params.curr_tg_hdr_size = 0; |
3998 | 0 | pack_bs_params.is_last_tile_in_tg = is_last_tile_in_tg; |
3999 | 0 | pack_bs_params.new_tg = new_tg; |
4000 | 0 | pack_bs_params.obu_extn_header = obu_extn_header; |
4001 | 0 | pack_bs_params.obu_header_size = 0; |
4002 | 0 | pack_bs_params.saved_wb = saved_wb; |
4003 | 0 | pack_bs_params.tile_col = tile_col; |
4004 | 0 | pack_bs_params.tile_row = tile_row; |
4005 | 0 | pack_bs_params.tile_data_curr = tile_data_curr; |
4006 | 0 | pack_bs_params.total_size = total_size; |
4007 | |
4008 | 0 | if (new_tg) |
4009 | 0 | av1_write_obu_tg_tile_headers(cpi, xd, &pack_bs_params, tile_idx); |
4010 | |
4011 | 0 | av1_pack_tile_info(cpi, &cpi->td, &pack_bs_params); |
4012 | |
4013 | 0 | if (new_tg) { |
4014 | 0 | curr_tg_data_size = pack_bs_params.curr_tg_hdr_size; |
4015 | 0 | *tile_data_start += pack_bs_params.curr_tg_hdr_size; |
4016 | 0 | *obu_header_size = pack_bs_params.obu_header_size; |
4017 | 0 | new_tg = 0; |
4018 | 0 | } |
4019 | 0 | if (is_last_tile_in_tg) new_tg = 1; |
4020 | |
4021 | 0 | curr_tg_data_size += |
4022 | 0 | (pack_bs_params.buf.size + (is_last_tile_in_tg ? 0 : 4)); |
4023 | |
4024 | 0 | if (pack_bs_params.buf.size > *max_tile_size) { |
4025 | 0 | *largest_tile_id = tile_idx; |
4026 | 0 | *max_tile_size = (unsigned int)pack_bs_params.buf.size; |
4027 | 0 | } |
4028 | |
4029 | 0 | if (is_last_tile_in_tg) |
4030 | 0 | av1_write_last_tile_info(cpi, fh_info, saved_wb, &curr_tg_data_size, |
4031 | 0 | tile_data_curr, total_size, tile_data_start, |
4032 | 0 | largest_tile_id, &is_first_tg, |
4033 | 0 | *obu_header_size, obu_extn_header); |
4034 | 0 | *total_size += (uint32_t)pack_bs_params.buf.size; |
4035 | 0 | } |
4036 | 0 | } |
4037 | 0 | av1_accumulate_pack_bs_thread_data(cpi, &cpi->td); |
4038 | 0 | } |
4039 | | |
4040 | | // Write total buffer size and related information into the OBU header
4041 | | // for the default tile case.
4042 | | static void write_tile_obu_size(AV1_COMP *const cpi, uint8_t *const dst, |
4043 | | struct aom_write_bit_buffer *saved_wb, |
4044 | | int largest_tile_id, uint32_t *const total_size, |
4045 | | unsigned int max_tile_size, |
4046 | | uint32_t obu_header_size, |
4047 | 0 | uint8_t *tile_data_start) { |
4048 | 0 | const CommonTileParams *const tiles = &cpi->common.tiles; |
4049 | | |
4050 | | // Fill in context_update_tile_id, indicating the tile to use for the
4051 | | // cdf update. The encoder currently sets it to the largest tile
4052 | | // (but the choice is up to the encoder).
4053 | 0 | aom_wb_overwrite_literal(saved_wb, largest_tile_id, |
4054 | 0 | (tiles->log2_cols + tiles->log2_rows)); |
4055 | | // If there is more than one tile group, tile_size_bytes takes the default
4056 | | // value 4 and does not need to be set. For a single tile group it is set
4057 | | // in the section below.
4058 | 0 | if (cpi->num_tg != 1) return; |
4059 | 0 | int tile_size_bytes = 4, unused; |
4060 | 0 | const uint32_t tile_data_offset = (uint32_t)(tile_data_start - dst); |
4061 | 0 | const uint32_t tile_data_size = *total_size - tile_data_offset; |
4062 | |
4063 | 0 | *total_size = remux_tiles(tiles, tile_data_start, tile_data_size, |
4064 | 0 | max_tile_size, 0, &tile_size_bytes, &unused); |
4065 | 0 | *total_size += tile_data_offset; |
4066 | 0 | assert(tile_size_bytes >= 1 && tile_size_bytes <= 4); |
4067 | |
4068 | 0 | aom_wb_overwrite_literal(saved_wb, tile_size_bytes - 1, 2); |
4069 | | |
4070 | | // Update the OBU length if remux_tiles() reduced the size. |
4071 | 0 | uint64_t payload_size; |
4072 | 0 | size_t length_field_size; |
4073 | 0 | int res = |
4074 | 0 | aom_uleb_decode(dst + obu_header_size, *total_size - obu_header_size, |
4075 | 0 | &payload_size, &length_field_size); |
4076 | 0 | assert(res == 0); |
4077 | 0 | (void)res; |
4078 | |
4079 | 0 | const uint64_t new_payload_size = |
4080 | 0 | *total_size - obu_header_size - length_field_size; |
4081 | 0 | if (new_payload_size != payload_size) { |
4082 | 0 | size_t new_length_field_size; |
4083 | 0 | res = aom_uleb_encode(new_payload_size, length_field_size, |
4084 | 0 | dst + obu_header_size, &new_length_field_size); |
4085 | 0 | assert(res == 0); |
4086 | 0 | if (new_length_field_size < length_field_size) { |
4087 | 0 | const size_t src_offset = obu_header_size + length_field_size; |
4088 | 0 | const size_t dst_offset = obu_header_size + new_length_field_size; |
4089 | 0 | memmove(dst + dst_offset, dst + src_offset, (size_t)payload_size); |
4090 | 0 | *total_size -= (int)(length_field_size - new_length_field_size); |
4091 | 0 | } |
4092 | 0 | } |
4093 | 0 | } |
4094 | | |
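A worked example of the shrink path above, with hypothetical numbers: aom_uleb_encode() re-encodes the new payload size minimally, and when the field gets narrower the payload is slid down to close the gap.

static void uleb_shrink_example(void) {
  uint8_t field[8];
  size_t coded = 0;
  aom_uleb_encode(130, sizeof(field), field, &coded);  // {0x82, 0x01}, coded = 2
  aom_uleb_encode(100, sizeof(field), field, &coded);  // {0x64}, coded = 1
  // The field shrank by one byte, so write_tile_obu_size() moves the
  // payload down with memmove() and reduces *total_size by 1.
}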
4095 | | // As per the experiments, single-threaded bitstream packing is better for
4096 | | // frames with a smaller bitstream size, because the setup overhead of the
4097 | | // multithreaded path exceeds the time required to pack such a small
4098 | | // bitstream. This function computes the required number of workers from the
4099 | | // setup time overhead and the job dispatch time overhead for the given
4100 | | // tiles and available workers.
4101 | | static int calc_pack_bs_mt_workers(const TileDataEnc *tile_data, int num_tiles, |
4102 | 0 | int avail_workers, bool pack_bs_mt_enabled) { |
4103 | 0 | if (!pack_bs_mt_enabled) return 1; |
4104 | | |
4105 | 0 | uint64_t frame_abs_sum_level = 0; |
4106 | |
|
4107 | 0 | for (int idx = 0; idx < num_tiles; idx++) |
4108 | 0 | frame_abs_sum_level += tile_data[idx].abs_sum_level; |
4109 | |
4110 | 0 | int ideal_num_workers = 1; |
4111 | 0 | const float job_disp_time_const = (float)num_tiles * JOB_DISP_TIME_OH_CONST; |
4112 | 0 | float max_sum = 0.0; |
4113 | |
4114 | 0 | for (int num_workers = avail_workers; num_workers > 1; num_workers--) { |
4115 | 0 | const float fas_per_worker_const = |
4116 | 0 | ((float)(num_workers - 1) / num_workers) * frame_abs_sum_level; |
4117 | 0 | const float setup_time_const = (float)num_workers * SETUP_TIME_OH_CONST; |
4118 | 0 | const float this_sum = fas_per_worker_const - setup_time_const - |
4119 | 0 | job_disp_time_const / num_workers; |
4120 | |
4121 | 0 | if (this_sum > max_sum) { |
4122 | 0 | max_sum = this_sum; |
4123 | 0 | ideal_num_workers = num_workers; |
4124 | 0 | } |
4125 | 0 | } |
4126 | 0 | return ideal_num_workers; |
4127 | 0 | } |
4128 | | |
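To make the heuristic concrete with hypothetical numbers: for num_tiles = 4, frame_abs_sum_level = 100 and 4 available workers, the scores are (3/4)*100 - 4*5 - 4/4 = 54 for 4 workers, roughly 50.3 for 3 and 38 for 2, so all 4 workers are used. With frame_abs_sum_level = 20 every score is negative, max_sum stays at 0, and the function returns 1, i.e. it falls back to single-threaded packing.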
4129 | | static inline uint32_t pack_tiles_in_tg_obus( |
4130 | | AV1_COMP *const cpi, uint8_t *const dst, |
4131 | | struct aom_write_bit_buffer *saved_wb, uint8_t obu_extension_header, |
4132 | 0 | const FrameHeaderInfo *fh_info, int *const largest_tile_id) { |
4133 | 0 | const CommonTileParams *const tiles = &cpi->common.tiles; |
4134 | 0 | uint32_t total_size = 0; |
4135 | 0 | unsigned int max_tile_size = 0; |
4136 | 0 | uint32_t obu_header_size = 0; |
4137 | 0 | uint8_t *tile_data_start = dst; |
4138 | 0 | const int tile_cols = tiles->cols; |
4139 | 0 | const int tile_rows = tiles->rows; |
4140 | 0 | const int num_tiles = tile_rows * tile_cols; |
4141 | |
4142 | 0 | const int num_workers = calc_pack_bs_mt_workers( |
4143 | 0 | cpi->tile_data, num_tiles, cpi->mt_info.num_mod_workers[MOD_PACK_BS], |
4144 | 0 | cpi->mt_info.pack_bs_mt_enabled); |
4145 | |
4146 | 0 | if (num_workers > 1) { |
4147 | 0 | av1_write_tile_obu_mt(cpi, dst, &total_size, saved_wb, obu_extension_header, |
4148 | 0 | fh_info, largest_tile_id, &max_tile_size, |
4149 | 0 | &obu_header_size, &tile_data_start, num_workers); |
4150 | 0 | } else { |
4151 | 0 | write_tile_obu(cpi, dst, &total_size, saved_wb, obu_extension_header, |
4152 | 0 | fh_info, largest_tile_id, &max_tile_size, &obu_header_size, |
4153 | 0 | &tile_data_start); |
4154 | 0 | } |
4155 | |
4156 | 0 | if (num_tiles > 1) |
4157 | 0 | write_tile_obu_size(cpi, dst, saved_wb, *largest_tile_id, &total_size, |
4158 | 0 | max_tile_size, obu_header_size, tile_data_start); |
4159 | 0 | return total_size; |
4160 | 0 | } |
4161 | | |
4162 | | static uint32_t write_tiles_in_tg_obus(AV1_COMP *const cpi, uint8_t *const dst, |
4163 | | size_t dst_size, |
4164 | | struct aom_write_bit_buffer *saved_wb, |
4165 | | uint8_t obu_extension_header, |
4166 | | const FrameHeaderInfo *fh_info, |
4167 | 0 | int *const largest_tile_id) { |
4168 | | // TODO: bug 42302568 - Use dst_size. |
4169 | 0 | (void)dst_size; |
4170 | 0 | AV1_COMMON *const cm = &cpi->common; |
4171 | 0 | const CommonTileParams *const tiles = &cm->tiles; |
4172 | 0 | *largest_tile_id = 0; |
4173 | | |
4174 | | // Select the coding strategy (temporal or spatial) |
4175 | 0 | if (cm->seg.enabled && cm->seg.update_map) { |
4176 | 0 | if (cm->features.primary_ref_frame == PRIMARY_REF_NONE) { |
4177 | 0 | cm->seg.temporal_update = 0; |
4178 | 0 | } else { |
4179 | 0 | cm->seg.temporal_update = 1; |
4180 | 0 | if (cpi->td.rd_counts.seg_tmp_pred_cost[0] < |
4181 | 0 | cpi->td.rd_counts.seg_tmp_pred_cost[1]) |
4182 | 0 | cm->seg.temporal_update = 0; |
4183 | 0 | } |
4184 | 0 | } |
4185 | |
4186 | 0 | if (tiles->large_scale) |
4187 | 0 | return pack_large_scale_tiles_in_tg_obus( |
4188 | 0 | cpi, dst, saved_wb, obu_extension_header, largest_tile_id); |
4189 | | |
4190 | 0 | return pack_tiles_in_tg_obus(cpi, dst, saved_wb, obu_extension_header, |
4191 | 0 | fh_info, largest_tile_id); |
4192 | 0 | } |
4193 | | |
4194 | | // Returns the number of bytes written on success. Returns 0 on failure. |
4195 | | static size_t av1_write_metadata_obu(const aom_metadata_t *metadata, |
4196 | 0 | uint8_t *const dst, size_t dst_size) { |
4197 | 0 | size_t coded_metadata_size = 0; |
4198 | 0 | const uint64_t metadata_type = (uint64_t)metadata->type; |
4199 | 0 | if (aom_uleb_encode(metadata_type, dst_size, dst, &coded_metadata_size) != |
4200 | 0 | 0) { |
4201 | 0 | return 0; |
4202 | 0 | } |
4203 | 0 | if (coded_metadata_size + metadata->sz + 1 > dst_size) { |
4204 | 0 | return 0; |
4205 | 0 | } |
4206 | 0 | memcpy(dst + coded_metadata_size, metadata->payload, metadata->sz); |
4207 | | // Add trailing bits. |
4208 | 0 | dst[coded_metadata_size + metadata->sz] = 0x80; |
4209 | 0 | return coded_metadata_size + metadata->sz + 1; |
4210 | 0 | } |
4211 | | |
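For reference, the byte layout av1_write_metadata_obu() produces for a hypothetical two-byte payload of type OBU_METADATA_TYPE_ITUT_T35 (metadata_type 4) would be:

  0x04        ULEB128 metadata_type
  0xB5 0x01   payload bytes, copied verbatim (hypothetical values)
  0x80        trailing bits: a single 1 bit, then zero padding

and the function would return 1 + 2 + 1 = 4 bytes.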
4212 | | static size_t av1_write_metadata_array(AV1_COMP *const cpi, uint8_t *dst, |
4213 | 0 | size_t dst_size) { |
4214 | 0 | if (!cpi->source) return 0; |
4215 | 0 | AV1_COMMON *const cm = &cpi->common; |
4216 | 0 | aom_metadata_array_t *arr = cpi->source->metadata; |
4217 | 0 | if (!arr) return 0; |
4218 | 0 | size_t obu_header_size = 0; |
4219 | 0 | size_t obu_payload_size = 0; |
4220 | 0 | size_t total_bytes_written = 0; |
4221 | 0 | size_t length_field_size = 0; |
4222 | 0 | for (size_t i = 0; i < arr->sz; i++) { |
4223 | 0 | aom_metadata_t *current_metadata = arr->metadata_array[i]; |
4224 | 0 | if (current_metadata && current_metadata->payload) { |
4225 | 0 | if ((cm->current_frame.frame_type == KEY_FRAME && |
4226 | 0 | current_metadata->insert_flag == AOM_MIF_KEY_FRAME) || |
4227 | 0 | (cm->current_frame.frame_type != KEY_FRAME && |
4228 | 0 | current_metadata->insert_flag == AOM_MIF_NON_KEY_FRAME) || |
4229 | 0 | current_metadata->insert_flag == AOM_MIF_ANY_FRAME) { |
4230 | | // OBU header is either one or two bytes. |
4231 | 0 | if (dst_size < 2) { |
4232 | 0 | aom_internal_error(cm->error, AOM_CODEC_ERROR, |
4233 | 0 | "av1_write_metadata_array: output buffer full"); |
4234 | 0 | } |
4235 | | // According to the AV1 spec draft version (as of git commit 5e04f) |
4236 | | // Section 6.7.1, some metadata types can be layer specific, but we |
4237 | | // currently only support non-layer specific metadata. |
4238 | 0 | obu_header_size = av1_write_obu_header( |
4239 | 0 | &cpi->ppi->level_params, &cpi->frame_header_count, OBU_METADATA, |
4240 | 0 | cm->seq_params->has_nonzero_operating_point_idc, |
4241 | | /*is_layer_specific_obu=*/false, 0, dst); |
4242 | 0 | assert(obu_header_size <= 2); |
4243 | 0 | obu_payload_size = |
4244 | 0 | av1_write_metadata_obu(current_metadata, dst + obu_header_size, |
4245 | 0 | dst_size - obu_header_size); |
4246 | 0 | if (obu_payload_size == 0) { |
4247 | 0 | aom_internal_error(cm->error, AOM_CODEC_ERROR, |
4248 | 0 | "av1_write_metadata_array: output buffer full"); |
4249 | 0 | } |
4250 | 0 | length_field_size = |
4251 | 0 | obu_memmove(obu_header_size, obu_payload_size, dst, dst_size); |
4252 | 0 | if (length_field_size == 0) { |
4253 | 0 | aom_internal_error(cm->error, AOM_CODEC_ERROR, |
4254 | 0 | "av1_write_metadata_array: output buffer full"); |
4255 | 0 | } |
4256 | 0 | if (av1_write_uleb_obu_size(obu_payload_size, dst + obu_header_size, |
4257 | 0 | length_field_size) == AOM_CODEC_OK) { |
4258 | 0 | const size_t obu_size = |
4259 | 0 | obu_header_size + length_field_size + obu_payload_size; |
4260 | 0 | dst += obu_size; |
4261 | 0 | dst_size -= obu_size; |
4262 | 0 | total_bytes_written += obu_size; |
4263 | 0 | } else { |
4264 | 0 | aom_internal_error(cpi->common.error, AOM_CODEC_ERROR, |
4265 | 0 | "av1_write_metadata_array: output buffer full"); |
4266 | 0 | } |
4267 | 0 | } |
4268 | 0 | } |
4269 | 0 | } |
4270 | 0 | return total_bytes_written; |
4271 | 0 | } |
4272 | | |
4273 | | int av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dst, size_t dst_size, |
4274 | 0 | size_t *size, int *const largest_tile_id) { |
4275 | 0 | uint8_t *data = dst; |
4276 | 0 | size_t data_size = dst_size; |
4277 | 0 | AV1_COMMON *const cm = &cpi->common; |
4278 | 0 | AV1LevelParams *const level_params = &cpi->ppi->level_params; |
4279 | 0 | uint32_t obu_header_size = 0; |
4280 | 0 | uint32_t obu_payload_size = 0; |
4281 | 0 | FrameHeaderInfo fh_info = { NULL, 0, 0 }; |
4282 | 0 | const uint8_t obu_extension_header = |
4283 | 0 | cm->temporal_layer_id << 5 | cm->spatial_layer_id << 3 | 0; |
4284 | | |
4285 | | // If no non-zero delta_q has been used, reset delta_q_present_flag |
4286 | 0 | if (cm->delta_q_info.delta_q_present_flag && cpi->deltaq_used == 0) { |
4287 | 0 | cm->delta_q_info.delta_q_present_flag = 0; |
4288 | 0 | } |
4289 | |
4290 | | #if CONFIG_BITSTREAM_DEBUG |
4291 | | bitstream_queue_reset_write(); |
4292 | | #endif |
4293 | |
4294 | 0 | cpi->frame_header_count = 0; |
4295 | | |
4296 | | // The TD is now written outside the frame encode loop |
4297 | | |
4298 | | // write sequence header obu at each key frame or intra_only frame,
4299 | | // preceded by its uleb-encoded size
4300 | 0 | if (cm->current_frame.frame_type == INTRA_ONLY_FRAME || |
4301 | 0 | cm->current_frame.frame_type == KEY_FRAME) { |
4302 | | // OBU header is either one or two bytes. |
4303 | 0 | if (data_size < 2) { |
4304 | 0 | return AOM_CODEC_ERROR; |
4305 | 0 | } |
4306 | 0 | obu_header_size = av1_write_obu_header( |
4307 | 0 | level_params, &cpi->frame_header_count, OBU_SEQUENCE_HEADER, |
4308 | 0 | cm->seq_params->has_nonzero_operating_point_idc, |
4309 | | /*is_layer_specific_obu=*/false, 0, data); |
4310 | 0 | assert(obu_header_size <= 2); |
4311 | 0 | obu_payload_size = av1_write_sequence_header_obu( |
4312 | 0 | cm->seq_params, data + obu_header_size, data_size - obu_header_size); |
4313 | 0 | const size_t length_field_size = |
4314 | 0 | obu_memmove(obu_header_size, obu_payload_size, data, data_size); |
4315 | 0 | if (length_field_size == 0) { |
4316 | 0 | return AOM_CODEC_ERROR; |
4317 | 0 | } |
4318 | 0 | if (av1_write_uleb_obu_size(obu_payload_size, data + obu_header_size, |
4319 | 0 | length_field_size) != AOM_CODEC_OK) { |
4320 | 0 | return AOM_CODEC_ERROR; |
4321 | 0 | } |
4322 | | |
4323 | 0 | const size_t bytes_written = |
4324 | 0 | obu_header_size + length_field_size + obu_payload_size; |
4325 | 0 | data += bytes_written; |
4326 | 0 | data_size -= bytes_written; |
4327 | 0 | } |
4328 | | |
4329 | | // write metadata obus before the frame obu that has the show_frame flag set |
4330 | 0 | if (cm->show_frame) { |
4331 | 0 | const size_t bytes_written = av1_write_metadata_array(cpi, data, data_size); |
4332 | 0 | data += bytes_written; |
4333 | 0 | data_size -= bytes_written; |
4334 | 0 | } |
4335 | |
4336 | 0 | const int write_frame_header = |
4337 | 0 | (cpi->num_tg > 1 || encode_show_existing_frame(cm)); |
4338 | 0 | struct aom_write_bit_buffer saved_wb = { NULL, 0 }; |
4339 | 0 | size_t length_field = 0; |
4340 | 0 | if (write_frame_header) { |
4341 | | // Write Frame Header OBU. |
4342 | 0 | fh_info.frame_header = data; |
4343 | | // OBU header is either one or two bytes. |
4344 | 0 | if (data_size < 2) { |
4345 | 0 | return AOM_CODEC_ERROR; |
4346 | 0 | } |
4347 | 0 | obu_header_size = av1_write_obu_header( |
4348 | 0 | level_params, &cpi->frame_header_count, OBU_FRAME_HEADER, |
4349 | 0 | cm->seq_params->has_nonzero_operating_point_idc, |
4350 | | /*is_layer_specific_obu=*/true, obu_extension_header, data); |
4351 | | // TODO: bug 42302568 - Pass data_size - obu_header_size to |
4352 | | // write_frame_header_obu(). |
4353 | 0 | obu_payload_size = write_frame_header_obu(cpi, &cpi->td.mb.e_mbd, &saved_wb, |
4354 | 0 | data + obu_header_size, 1); |
4355 | |
4356 | 0 | length_field = |
4357 | 0 | obu_memmove(obu_header_size, obu_payload_size, data, data_size); |
4358 | 0 | if (length_field == 0) { |
4359 | 0 | return AOM_CODEC_ERROR; |
4360 | 0 | } |
4361 | 0 | if (av1_write_uleb_obu_size(obu_payload_size, data + obu_header_size, |
4362 | 0 | length_field) != AOM_CODEC_OK) { |
4363 | 0 | return AOM_CODEC_ERROR; |
4364 | 0 | } |
4365 | | |
4366 | 0 | fh_info.obu_header_byte_offset = 0; |
4367 | 0 | fh_info.total_length = obu_header_size + length_field + obu_payload_size; |
4368 | | // Make sure it is safe to cast fh_info.total_length to uint32_t. |
4369 | 0 | if (fh_info.total_length > UINT32_MAX) { |
4370 | 0 | return AOM_CODEC_ERROR; |
4371 | 0 | } |
4372 | 0 | data += fh_info.total_length; |
4373 | 0 | data_size -= fh_info.total_length; |
4374 | 0 | } |
4375 | | |
4376 | 0 | if (!encode_show_existing_frame(cm)) { |
4377 | | // Since length_field is determined adaptively after frame header |
4378 | | // encoding, saved_wb must be adjusted accordingly. |
4379 | 0 | if (saved_wb.bit_buffer != NULL) { |
4380 | 0 | saved_wb.bit_buffer += length_field; |
4381 | 0 | } |
4382 | | |
4383 | | // Each tile group obu will be preceded by the uleb-encoded size of the
4384 | | // tile group obu
4385 | 0 | const size_t bytes_written = |
4386 | 0 | write_tiles_in_tg_obus(cpi, data, data_size, &saved_wb, |
4387 | 0 | obu_extension_header, &fh_info, largest_tile_id); |
4388 | 0 | data += bytes_written; |
4389 | 0 | data_size -= bytes_written; |
4390 | 0 | } |
4391 | 0 | *size = data - dst; |
4392 | 0 | (void)data_size; |
4393 | 0 | return AOM_CODEC_OK; |
4394 | 0 | } |
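Putting av1_pack_bitstream() together: the buffer it emits is a sequence of OBUs, each with a ULEB128 length field after its header. A sketch of the layout for a shown key frame with num_tg > 1 (field widths vary):

  [ OBU_SEQUENCE_HEADER hdr ][ uleb size ][ sequence header payload ]
  [ OBU_METADATA hdr ][ uleb size ][ metadata payload ]   (show_frame only)
  [ OBU_FRAME_HEADER hdr ][ uleb size ][ frame header payload ]
  [ OBU_TILE_GROUP hdr ][ uleb size ][ tg header + tile sizes + tile data ] ...

With num_tg == 1 the frame header and tile data travel in a single OBU_FRAME OBU instead; *size reports the total bytes written, and largest_tile_id names the tile recorded as context_update_tile_id.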