/src/aom/av1/encoder/level.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2019, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include "av1/encoder/encoder.h" |
13 | | #include "av1/encoder/level.h" |
14 | | |
15 | | #define UNDEFINED_LEVEL \ |
16 | | { \ |
17 | | .level = SEQ_LEVEL_MAX, .max_picture_size = 0, .max_h_size = 0, \ |
18 | | .max_v_size = 0, .max_display_rate = 0, .max_decode_rate = 0, \ |
19 | | .max_header_rate = 0, .main_mbps = 0, .high_mbps = 0, .main_cr = 0, \ |
20 | | .high_cr = 0, .max_tiles = 0, .max_tile_cols = 0 \ |
21 | | } |
22 | | |
23 | | static const AV1LevelSpec av1_level_defs[SEQ_LEVELS] = { |
24 | | { .level = SEQ_LEVEL_2_0, |
25 | | .max_picture_size = 147456, |
26 | | .max_h_size = 2048, |
27 | | .max_v_size = 1152, |
28 | | .max_display_rate = 4423680L, |
29 | | .max_decode_rate = 5529600L, |
30 | | .max_header_rate = 150, |
31 | | .main_mbps = 1.5, |
32 | | .high_mbps = 0, |
33 | | .main_cr = 2.0, |
34 | | .high_cr = 0, |
35 | | .max_tiles = 8, |
36 | | .max_tile_cols = 4 }, |
37 | | { .level = SEQ_LEVEL_2_1, |
38 | | .max_picture_size = 278784, |
39 | | .max_h_size = 2816, |
40 | | .max_v_size = 1584, |
41 | | .max_display_rate = 8363520L, |
42 | | .max_decode_rate = 10454400L, |
43 | | .max_header_rate = 150, |
44 | | .main_mbps = 3.0, |
45 | | .high_mbps = 0, |
46 | | .main_cr = 2.0, |
47 | | .high_cr = 0, |
48 | | .max_tiles = 8, |
49 | | .max_tile_cols = 4 }, |
50 | | UNDEFINED_LEVEL, |
51 | | UNDEFINED_LEVEL, |
52 | | { .level = SEQ_LEVEL_3_0, |
53 | | .max_picture_size = 665856, |
54 | | .max_h_size = 4352, |
55 | | .max_v_size = 2448, |
56 | | .max_display_rate = 19975680L, |
57 | | .max_decode_rate = 24969600L, |
58 | | .max_header_rate = 150, |
59 | | .main_mbps = 6.0, |
60 | | .high_mbps = 0, |
61 | | .main_cr = 2.0, |
62 | | .high_cr = 0, |
63 | | .max_tiles = 16, |
64 | | .max_tile_cols = 6 }, |
65 | | { .level = SEQ_LEVEL_3_1, |
66 | | .max_picture_size = 1065024, |
67 | | .max_h_size = 5504, |
68 | | .max_v_size = 3096, |
69 | | .max_display_rate = 31950720L, |
70 | | .max_decode_rate = 39938400L, |
71 | | .max_header_rate = 150, |
72 | | .main_mbps = 10.0, |
73 | | .high_mbps = 0, |
74 | | .main_cr = 2.0, |
75 | | .high_cr = 0, |
76 | | .max_tiles = 16, |
77 | | .max_tile_cols = 6 }, |
78 | | UNDEFINED_LEVEL, |
79 | | UNDEFINED_LEVEL, |
80 | | { .level = SEQ_LEVEL_4_0, |
81 | | .max_picture_size = 2359296, |
82 | | .max_h_size = 6144, |
83 | | .max_v_size = 3456, |
84 | | .max_display_rate = 70778880L, |
85 | | .max_decode_rate = 77856768L, |
86 | | .max_header_rate = 300, |
87 | | .main_mbps = 12.0, |
88 | | .high_mbps = 30.0, |
89 | | .main_cr = 4.0, |
90 | | .high_cr = 4.0, |
91 | | .max_tiles = 32, |
92 | | .max_tile_cols = 8 }, |
93 | | { .level = SEQ_LEVEL_4_1, |
94 | | .max_picture_size = 2359296, |
95 | | .max_h_size = 6144, |
96 | | .max_v_size = 3456, |
97 | | .max_display_rate = 141557760L, |
98 | | .max_decode_rate = 155713536L, |
99 | | .max_header_rate = 300, |
100 | | .main_mbps = 20.0, |
101 | | .high_mbps = 50.0, |
102 | | .main_cr = 4.0, |
103 | | .high_cr = 4.0, |
104 | | .max_tiles = 32, |
105 | | .max_tile_cols = 8 }, |
106 | | UNDEFINED_LEVEL, |
107 | | UNDEFINED_LEVEL, |
108 | | { .level = SEQ_LEVEL_5_0, |
109 | | .max_picture_size = 8912896, |
110 | | .max_h_size = 8192, |
111 | | .max_v_size = 4352, |
112 | | .max_display_rate = 267386880L, |
113 | | .max_decode_rate = 273715200L, |
114 | | .max_header_rate = 300, |
115 | | .main_mbps = 30.0, |
116 | | .high_mbps = 100.0, |
117 | | .main_cr = 6.0, |
118 | | .high_cr = 4.0, |
119 | | .max_tiles = 64, |
120 | | .max_tile_cols = 8 }, |
121 | | { .level = SEQ_LEVEL_5_1, |
122 | | .max_picture_size = 8912896, |
123 | | .max_h_size = 8192, |
124 | | .max_v_size = 4352, |
125 | | .max_display_rate = 534773760L, |
126 | | .max_decode_rate = 547430400L, |
127 | | .max_header_rate = 300, |
128 | | .main_mbps = 40.0, |
129 | | .high_mbps = 160.0, |
130 | | .main_cr = 8.0, |
131 | | .high_cr = 4.0, |
132 | | .max_tiles = 64, |
133 | | .max_tile_cols = 8 }, |
134 | | { .level = SEQ_LEVEL_5_2, |
135 | | .max_picture_size = 8912896, |
136 | | .max_h_size = 8192, |
137 | | .max_v_size = 4352, |
138 | | .max_display_rate = 1069547520L, |
139 | | .max_decode_rate = 1094860800L, |
140 | | .max_header_rate = 300, |
141 | | .main_mbps = 60.0, |
142 | | .high_mbps = 240.0, |
143 | | .main_cr = 8.0, |
144 | | .high_cr = 4.0, |
145 | | .max_tiles = 64, |
146 | | .max_tile_cols = 8 }, |
147 | | { .level = SEQ_LEVEL_5_3, |
148 | | .max_picture_size = 8912896, |
149 | | .max_h_size = 8192, |
150 | | .max_v_size = 4352, |
151 | | .max_display_rate = 1069547520L, |
152 | | .max_decode_rate = 1176502272L, |
153 | | .max_header_rate = 300, |
154 | | .main_mbps = 60.0, |
155 | | .high_mbps = 240.0, |
156 | | .main_cr = 8.0, |
157 | | .high_cr = 4.0, |
158 | | .max_tiles = 64, |
159 | | .max_tile_cols = 8 }, |
160 | | { .level = SEQ_LEVEL_6_0, |
161 | | .max_picture_size = 35651584, |
162 | | .max_h_size = 16384, |
163 | | .max_v_size = 8704, |
164 | | .max_display_rate = 1069547520L, |
165 | | .max_decode_rate = 1176502272L, |
166 | | .max_header_rate = 300, |
167 | | .main_mbps = 60.0, |
168 | | .high_mbps = 240.0, |
169 | | .main_cr = 8.0, |
170 | | .high_cr = 4.0, |
171 | | .max_tiles = 128, |
172 | | .max_tile_cols = 16 }, |
173 | | { .level = SEQ_LEVEL_6_1, |
174 | | .max_picture_size = 35651584, |
175 | | .max_h_size = 16384, |
176 | | .max_v_size = 8704, |
177 | | .max_display_rate = 2139095040L, |
178 | | .max_decode_rate = 2189721600L, |
179 | | .max_header_rate = 300, |
180 | | .main_mbps = 100.0, |
181 | | .high_mbps = 480.0, |
182 | | .main_cr = 8.0, |
183 | | .high_cr = 4.0, |
184 | | .max_tiles = 128, |
185 | | .max_tile_cols = 16 }, |
186 | | { .level = SEQ_LEVEL_6_2, |
187 | | .max_picture_size = 35651584, |
188 | | .max_h_size = 16384, |
189 | | .max_v_size = 8704, |
190 | | .max_display_rate = 4278190080L, |
191 | | .max_decode_rate = 4379443200L, |
192 | | .max_header_rate = 300, |
193 | | .main_mbps = 160.0, |
194 | | .high_mbps = 800.0, |
195 | | .main_cr = 8.0, |
196 | | .high_cr = 4.0, |
197 | | .max_tiles = 128, |
198 | | .max_tile_cols = 16 }, |
199 | | { .level = SEQ_LEVEL_6_3, |
200 | | .max_picture_size = 35651584, |
201 | | .max_h_size = 16384, |
202 | | .max_v_size = 8704, |
203 | | .max_display_rate = 4278190080L, |
204 | | .max_decode_rate = 4706009088L, |
205 | | .max_header_rate = 300, |
206 | | .main_mbps = 160.0, |
207 | | .high_mbps = 800.0, |
208 | | .main_cr = 8.0, |
209 | | .high_cr = 4.0, |
210 | | .max_tiles = 128, |
211 | | .max_tile_cols = 16 }, |
212 | | #if CONFIG_CWG_C013 |
213 | | { .level = SEQ_LEVEL_7_0, |
214 | | .max_picture_size = 142606336, |
215 | | .max_h_size = 32768, |
216 | | .max_v_size = 17408, |
217 | | .max_display_rate = 4278190080L, |
218 | | .max_decode_rate = 4706009088L, |
219 | | .max_header_rate = 300, |
220 | | .main_mbps = 160.0, |
221 | | .high_mbps = 800.0, |
222 | | .main_cr = 8.0, |
223 | | .high_cr = 4.0, |
224 | | .max_tiles = 256, |
225 | | .max_tile_cols = 32 }, |
226 | | { .level = SEQ_LEVEL_7_1, |
227 | | .max_picture_size = 142606336, |
228 | | .max_h_size = 32768, |
229 | | .max_v_size = 17408, |
230 | | .max_display_rate = 8556380160L, |
231 | | .max_decode_rate = 8758886400L, |
232 | | .max_header_rate = 300, |
233 | | .main_mbps = 200.0, |
234 | | .high_mbps = 960.0, |
235 | | .main_cr = 8.0, |
236 | | .high_cr = 4.0, |
237 | | .max_tiles = 256, |
238 | | .max_tile_cols = 32 }, |
239 | | { .level = SEQ_LEVEL_7_2, |
240 | | .max_picture_size = 142606336, |
241 | | .max_h_size = 32768, |
242 | | .max_v_size = 17408, |
243 | | .max_display_rate = 17112760320L, |
244 | | .max_decode_rate = 17517772800L, |
245 | | .max_header_rate = 300, |
246 | | .main_mbps = 320.0, |
247 | | .high_mbps = 1600.0, |
248 | | .main_cr = 8.0, |
249 | | .high_cr = 4.0, |
250 | | .max_tiles = 256, |
251 | | .max_tile_cols = 32 }, |
252 | | { .level = SEQ_LEVEL_7_3, |
253 | | .max_picture_size = 142606336, |
254 | | .max_h_size = 32768, |
255 | | .max_v_size = 17408, |
256 | | .max_display_rate = 17112760320L, |
257 | | .max_decode_rate = 18824036352L, |
258 | | .max_header_rate = 300, |
259 | | .main_mbps = 320.0, |
260 | | .high_mbps = 1600.0, |
261 | | .main_cr = 8.0, |
262 | | .high_cr = 4.0, |
263 | | .max_tiles = 256, |
264 | | .max_tile_cols = 32 }, |
265 | | { .level = SEQ_LEVEL_8_0, |
266 | | .max_picture_size = 530841600, |
267 | | .max_h_size = 65536, |
268 | | .max_v_size = 34816, |
269 | | .max_display_rate = 17112760320L, |
270 | | .max_decode_rate = 18824036352L, |
271 | | .max_header_rate = 300, |
272 | | .main_mbps = 320.0, |
273 | | .high_mbps = 1600.0, |
274 | | .main_cr = 8.0, |
275 | | .high_cr = 4.0, |
276 | | .max_tiles = 512, |
277 | | .max_tile_cols = 64 }, |
278 | | { .level = SEQ_LEVEL_8_1, |
279 | | .max_picture_size = 530841600, |
280 | | .max_h_size = 65536, |
281 | | .max_v_size = 34816, |
282 | | .max_display_rate = 34225520640L, |
283 | | .max_decode_rate = 34910031052L, |
284 | | .max_header_rate = 300, |
285 | | .main_mbps = 400.0, |
286 | | .high_mbps = 1920.0, |
287 | | .main_cr = 8.0, |
288 | | .high_cr = 4.0, |
289 | | .max_tiles = 512, |
290 | | .max_tile_cols = 64 }, |
291 | | { .level = SEQ_LEVEL_8_2, |
292 | | .max_picture_size = 530841600, |
293 | | .max_h_size = 65536, |
294 | | .max_v_size = 34816, |
295 | | .max_display_rate = 68451041280L, |
296 | | .max_decode_rate = 69820062105L, |
297 | | .max_header_rate = 300, |
298 | | .main_mbps = 640.0, |
299 | | .high_mbps = 3200.0, |
300 | | .main_cr = 8.0, |
301 | | .high_cr = 4.0, |
302 | | .max_tiles = 512, |
303 | | .max_tile_cols = 64 }, |
304 | | { .level = SEQ_LEVEL_8_3, |
305 | | .max_picture_size = 530841600, |
306 | | .max_h_size = 65536, |
307 | | .max_v_size = 34816, |
308 | | .max_display_rate = 68451041280L, |
309 | | .max_decode_rate = 75296145408L, |
310 | | .max_header_rate = 300, |
311 | | .main_mbps = 640.0, |
312 | | .high_mbps = 3200.0, |
313 | | .main_cr = 8.0, |
314 | | .high_cr = 4.0, |
315 | | .max_tiles = 512, |
316 | | .max_tile_cols = 64 }, |
317 | | #else // !CONFIG_CWG_C013 |
318 | | UNDEFINED_LEVEL, |
319 | | UNDEFINED_LEVEL, |
320 | | UNDEFINED_LEVEL, |
321 | | UNDEFINED_LEVEL, |
322 | | UNDEFINED_LEVEL, |
323 | | UNDEFINED_LEVEL, |
324 | | UNDEFINED_LEVEL, |
325 | | UNDEFINED_LEVEL, |
326 | | #endif // CONFIG_CWG_C013 |
327 | | }; |
328 | | |
329 | | typedef enum { |
330 | | LUMA_PIC_SIZE_TOO_LARGE, |
331 | | LUMA_PIC_H_SIZE_TOO_LARGE, |
332 | | LUMA_PIC_V_SIZE_TOO_LARGE, |
333 | | LUMA_PIC_H_SIZE_TOO_SMALL, |
334 | | LUMA_PIC_V_SIZE_TOO_SMALL, |
335 | | TOO_MANY_TILE_COLUMNS, |
336 | | TOO_MANY_TILES, |
337 | | TILE_RATE_TOO_HIGH, |
338 | | TILE_TOO_LARGE, |
339 | | SUPERRES_TILE_WIDTH_TOO_LARGE, |
340 | | CROPPED_TILE_WIDTH_TOO_SMALL, |
341 | | CROPPED_TILE_HEIGHT_TOO_SMALL, |
342 | | TILE_WIDTH_INVALID, |
343 | | FRAME_HEADER_RATE_TOO_HIGH, |
344 | | DISPLAY_RATE_TOO_HIGH, |
345 | | DECODE_RATE_TOO_HIGH, |
346 | | CR_TOO_SMALL, |
347 | | TILE_SIZE_HEADER_RATE_TOO_HIGH, |
348 | | BITRATE_TOO_HIGH, |
349 | | DECODER_MODEL_FAIL, |
350 | | |
351 | | TARGET_LEVEL_FAIL_IDS, |
352 | | TARGET_LEVEL_OK, |
353 | | } TARGET_LEVEL_FAIL_ID; |
354 | | |
355 | | static const char *level_fail_messages[TARGET_LEVEL_FAIL_IDS] = { |
356 | | "The picture size is too large.", |
357 | | "The picture width is too large.", |
358 | | "The picture height is too large.", |
359 | | "The picture width is too small.", |
360 | | "The picture height is too small.", |
361 | | "Too many tile columns are used.", |
362 | | "Too many tiles are used.", |
363 | | "The tile rate is too high.", |
364 | | "The tile size is too large.", |
365 | | "The superres tile width is too large.", |
366 | | "The cropped tile width is less than 8.", |
367 | | "The cropped tile height is less than 8.", |
368 | | "The tile width is invalid.", |
369 | | "The frame header rate is too high.", |
370 | | "The display luma sample rate is too high.", |
371 | | "The decoded luma sample rate is too high.", |
372 | | "The compression ratio is too small.", |
373 | | "The product of max tile size and header rate is too high.", |
374 | | "The bitrate is too high.", |
375 | | "The decoder model fails.", |
376 | | }; |
377 | | |
378 | | static double get_max_bitrate(const AV1LevelSpec *const level_spec, int tier, |
379 | 0 | BITSTREAM_PROFILE profile) { |
380 | 0 | if (level_spec->level < SEQ_LEVEL_4_0) tier = 0; |
381 | 0 | const double bitrate_basis = |
382 | 0 | (tier ? level_spec->high_mbps : level_spec->main_mbps) * 1e6; |
383 | 0 | const double bitrate_profile_factor = |
384 | 0 | profile == PROFILE_0 ? 1.0 : (profile == PROFILE_1 ? 2.0 : 3.0); |
385 | 0 | return bitrate_basis * bitrate_profile_factor; |
386 | 0 | } |
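
A reading aid, not part of level.c: get_max_bitrate() picks the MainMbps or HighMbps column from the level table (tier is forced to Main below level 4.0) and scales it by a profile factor of 1.0, 2.0, or 3.0 for profile 0, 1, or anything higher. A minimal standalone sketch of the same arithmetic, using the level 4.1 figures from the table above; the helper name and constants here are illustrative only:

/* Illustrative sketch only; mirrors the arithmetic of get_max_bitrate(). */
#include <stdio.h>

static double example_max_bitrate(double main_mbps, double high_mbps, int tier,
                                  double profile_factor) {
  const double mbps = tier ? high_mbps : main_mbps; /* tier picks the basis */
  return mbps * 1e6 * profile_factor;               /* profile scales it */
}

int main(void) {
  /* Level 4.1, high tier (50 Mbps), profile 2 (factor 3.0) -> 150 Mbps. */
  printf("%.0f bps\n", example_max_bitrate(20.0, 50.0, 1, 3.0));
  return 0;
}
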
387 | | |
388 | | double av1_get_max_bitrate_for_level(AV1_LEVEL level_index, int tier, |
389 | 0 | BITSTREAM_PROFILE profile) { |
390 | 0 | assert(is_valid_seq_level_idx(level_index)); |
391 | 0 | return get_max_bitrate(&av1_level_defs[level_index], tier, profile); |
392 | 0 | } |
393 | | |
394 | | void av1_get_max_tiles_for_level(AV1_LEVEL level_index, int *const max_tiles, |
395 | 0 | int *const max_tile_cols) { |
396 | 0 | assert(is_valid_seq_level_idx(level_index)); |
397 | 0 | const AV1LevelSpec *const level_spec = &av1_level_defs[level_index]; |
398 | 0 | *max_tiles = level_spec->max_tiles; |
399 | 0 | *max_tile_cols = level_spec->max_tile_cols; |
400 | 0 | } |
401 | | |
402 | | // We assume time t to be valid if and only if t >= 0.0. |
403 | | // So INVALID_TIME can be defined as anything less than 0. |
404 | 0 | #define INVALID_TIME (-1.0) |
405 | | |
406 | | // This corresponds to "free_buffer" in the spec. |
407 | 0 | static void release_buffer(DECODER_MODEL *const decoder_model, int idx) { |
408 | 0 | assert(idx >= 0 && idx < BUFFER_POOL_MAX_SIZE); |
409 | 0 | FRAME_BUFFER *const this_buffer = &decoder_model->frame_buffer_pool[idx]; |
410 | 0 | this_buffer->decoder_ref_count = 0; |
411 | 0 | this_buffer->player_ref_count = 0; |
412 | 0 | this_buffer->display_index = -1; |
413 | 0 | this_buffer->presentation_time = INVALID_TIME; |
414 | 0 | } |
415 | | |
416 | 0 | static void initialize_buffer_pool(DECODER_MODEL *const decoder_model) { |
417 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
418 | 0 | release_buffer(decoder_model, i); |
419 | 0 | } |
420 | 0 | for (int i = 0; i < REF_FRAMES; ++i) { |
421 | 0 | decoder_model->vbi[i] = -1; |
422 | 0 | } |
423 | 0 | } |
424 | | |
425 | 0 | static int get_free_buffer(DECODER_MODEL *const decoder_model) { |
426 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
427 | 0 | const FRAME_BUFFER *const this_buffer = |
428 | 0 | &decoder_model->frame_buffer_pool[i]; |
429 | 0 | if (this_buffer->decoder_ref_count == 0 && |
430 | 0 | this_buffer->player_ref_count == 0) |
431 | 0 | return i; |
432 | 0 | } |
433 | 0 | return -1; |
434 | 0 | } |
435 | | |
436 | | static void update_ref_buffers(DECODER_MODEL *const decoder_model, int idx, |
437 | 0 | int refresh_frame_flags) { |
438 | 0 | FRAME_BUFFER *const this_buffer = &decoder_model->frame_buffer_pool[idx]; |
439 | 0 | for (int i = 0; i < REF_FRAMES; ++i) { |
440 | 0 | if (refresh_frame_flags & (1 << i)) { |
441 | 0 | const int pre_idx = decoder_model->vbi[i]; |
442 | 0 | if (pre_idx != -1) { |
443 | 0 | --decoder_model->frame_buffer_pool[pre_idx].decoder_ref_count; |
444 | 0 | } |
445 | 0 | decoder_model->vbi[i] = idx; |
446 | 0 | ++this_buffer->decoder_ref_count; |
447 | 0 | } |
448 | 0 | } |
449 | 0 | } |
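
As an aside (not part of level.c): update_ref_buffers() walks the 8-bit refresh_frame_flags mask; every set bit i releases the frame currently referenced by vbi[i] and points that slot at the new buffer, so a mask of 0xFF (used for shown key frames further below) refreshes all eight slots. A tiny standalone sketch of the mask semantics, with a hypothetical mask value:

/* Illustrative only: which virtual buffer slots a refresh mask touches. */
#include <stdio.h>

int main(void) {
  const int refresh_frame_flags = 0x21; /* hypothetical: slots 0 and 5 */
  for (int i = 0; i < 8; ++i) {         /* REF_FRAMES is 8 in AV1 */
    if (refresh_frame_flags & (1 << i)) printf("refresh slot %d\n", i);
  }
  return 0;
}
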
450 | | |
451 | | // The time (in seconds) required to decode a frame. |
452 | | static double time_to_decode_frame(const AV1_COMMON *const cm, |
453 | 0 | int64_t max_decode_rate) { |
454 | 0 | if (cm->show_existing_frame) return 0.0; |
455 | | |
456 | 0 | const FRAME_TYPE frame_type = cm->current_frame.frame_type; |
457 | 0 | int luma_samples = 0; |
458 | 0 | if (frame_type == KEY_FRAME || frame_type == INTRA_ONLY_FRAME) { |
459 | 0 | luma_samples = cm->superres_upscaled_width * cm->height; |
460 | 0 | } else { |
461 | 0 | const int spatial_layer_dimensions_present_flag = 0; |
462 | 0 | if (spatial_layer_dimensions_present_flag) { |
463 | 0 | assert(0 && "Spatial layer dimensions not supported yet."); |
464 | 0 | } else { |
465 | 0 | const SequenceHeader *const seq_params = cm->seq_params; |
466 | 0 | const int max_frame_width = seq_params->max_frame_width; |
467 | 0 | const int max_frame_height = seq_params->max_frame_height; |
468 | 0 | luma_samples = max_frame_width * max_frame_height; |
469 | 0 | } |
470 | 0 | } |
471 | | 
472 | 0 | return luma_samples / (double)max_decode_rate; |
473 | 0 | } |
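
Worked example (not part of level.c): at level 5.1 the table above gives MaxDecodeRate = 547430400 luma samples/s, so a 3840x2160 frame (8294400 samples) costs 8294400 / 547430400 = 1/66 s, roughly 15.2 ms, of decode time. The same division as a standalone snippet:

#include <stdio.h>

int main(void) {
  const double luma_samples = 3840.0 * 2160.0;         /* 8294400 */
  const double max_decode_rate = 547430400.0;          /* level 5.1 table value */
  printf("%.4f s\n", luma_samples / max_decode_rate);  /* ~0.0152 s */
  return 0;
}
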
474 | | |
475 | | // Release frame buffers that are no longer needed for decode or display. |
476 | | // It corresponds to "start_decode_at_removal_time" in the spec. |
477 | | static void release_processed_frames(DECODER_MODEL *const decoder_model, |
478 | 0 | double removal_time) { |
479 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
480 | 0 | FRAME_BUFFER *const this_buffer = &decoder_model->frame_buffer_pool[i]; |
481 | 0 | if (this_buffer->player_ref_count > 0) { |
482 | 0 | if (this_buffer->presentation_time >= 0.0 && |
483 | 0 | this_buffer->presentation_time <= removal_time) { |
484 | 0 | this_buffer->player_ref_count = 0; |
485 | 0 | if (this_buffer->decoder_ref_count == 0) { |
486 | 0 | release_buffer(decoder_model, i); |
487 | 0 | } |
488 | 0 | } |
489 | 0 | } |
490 | 0 | } |
491 | 0 | } |
492 | | |
493 | 0 | static int frames_in_buffer_pool(const DECODER_MODEL *const decoder_model) { |
494 | 0 | int frames_in_pool = 0; |
495 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
496 | 0 | const FRAME_BUFFER *const this_buffer = |
497 | 0 | &decoder_model->frame_buffer_pool[i]; |
498 | 0 | if (this_buffer->decoder_ref_count > 0 || |
499 | 0 | this_buffer->player_ref_count > 0) { |
500 | 0 | ++frames_in_pool; |
501 | 0 | } |
502 | 0 | } |
503 | 0 | return frames_in_pool; |
504 | 0 | } |
505 | | |
506 | | static double get_presentation_time(const DECODER_MODEL *const decoder_model, |
507 | 0 | int display_index) { |
508 | 0 | if (decoder_model->mode == SCHEDULE_MODE) { |
509 | 0 | assert(0 && "SCHEDULE_MODE NOT SUPPORTED"); |
510 | 0 | return INVALID_TIME; |
511 | 0 | } else { |
512 | 0 | const double initial_presentation_delay = |
513 | 0 | decoder_model->initial_presentation_delay; |
514 | | // Can't decide presentation time until the initial presentation delay is |
515 | | // known. |
516 | 0 | if (initial_presentation_delay < 0.0) return INVALID_TIME; |
517 | | |
518 | 0 | return initial_presentation_delay + |
519 | 0 | display_index * decoder_model->num_ticks_per_picture * |
520 | 0 | decoder_model->display_clock_tick; |
521 | 0 | } |
522 | 0 | } |
523 | | |
524 | 0 | #define MAX_TIME 1e16 |
525 | | static double time_next_buffer_is_free(int num_decoded_frame, |
526 | | int decoder_buffer_delay, |
527 | | const FRAME_BUFFER *frame_buffer_pool, |
528 | 0 | double current_time) { |
529 | 0 | if (num_decoded_frame == 0) { |
530 | 0 | return (double)decoder_buffer_delay / 90000.0; |
531 | 0 | } |
532 | | |
533 | 0 | double buf_free_time = MAX_TIME; |
534 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
535 | 0 | const FRAME_BUFFER *const this_buffer = &frame_buffer_pool[i]; |
536 | 0 | if (this_buffer->decoder_ref_count == 0) { |
537 | 0 | if (this_buffer->player_ref_count == 0) { |
538 | 0 | return current_time; |
539 | 0 | } |
540 | 0 | const double presentation_time = this_buffer->presentation_time; |
541 | 0 | if (presentation_time >= 0.0 && presentation_time < buf_free_time) { |
542 | 0 | buf_free_time = presentation_time; |
543 | 0 | } |
544 | 0 | } |
545 | 0 | } |
546 | 0 | return buf_free_time < MAX_TIME ? buf_free_time : INVALID_TIME; |
547 | 0 | } |
548 | | #undef MAX_TIME |
549 | | |
550 | | static double get_removal_time(int mode, int num_decoded_frame, |
551 | | int decoder_buffer_delay, |
552 | | const FRAME_BUFFER *frame_buffer_pool, |
553 | 0 | double current_time) { |
554 | 0 | if (mode == SCHEDULE_MODE) { |
555 | 0 | assert(0 && "SCHEDULE_MODE IS NOT SUPPORTED YET"); |
556 | 0 | return INVALID_TIME; |
557 | 0 | } else { |
558 | 0 | return time_next_buffer_is_free(num_decoded_frame, decoder_buffer_delay, |
559 | 0 | frame_buffer_pool, current_time); |
560 | 0 | } |
561 | 0 | } |
562 | | |
563 | | #if 0 |
564 | | // Print the status of the decoder model (for debugging). |
565 | | void av1_decoder_model_print_status(const DECODER_MODEL *const decoder_model) { |
566 | | printf( |
567 | | "\n status %d, num_frame %3d, num_decoded_frame %3d, " |
568 | | "num_shown_frame %3d, current time %6.2f, frames in buffer %2d, " |
569 | | "presentation delay %6.2f, total interval %6.2f\n", |
570 | | decoder_model->status, decoder_model->num_frame, |
571 | | decoder_model->num_decoded_frame, decoder_model->num_shown_frame, |
572 | | decoder_model->current_time, frames_in_buffer_pool(decoder_model), |
573 | | decoder_model->initial_presentation_delay, |
574 | | decoder_model->dfg_interval_queue.total_interval); |
575 | | for (int i = 0; i < 10; ++i) { |
576 | | const FRAME_BUFFER *const this_buffer = |
577 | | &decoder_model->frame_buffer_pool[i]; |
578 | | printf("buffer %d, decode count %d, display count %d, present time %6.4f\n", |
579 | | i, this_buffer->decoder_ref_count, this_buffer->player_ref_count, |
580 | | this_buffer->presentation_time); |
581 | | } |
582 | | } |
583 | | #endif |
584 | | |
585 | | // op_index is the operating point index. |
586 | | static void decoder_model_init(const AV1_COMP *const cpi, AV1_LEVEL level, |
587 | | int op_index, |
588 | 0 | DECODER_MODEL *const decoder_model) { |
589 | 0 | decoder_model->status = DECODER_MODEL_OK; |
590 | 0 | decoder_model->level = level; |
591 | | 
592 | 0 | const AV1_COMMON *const cm = &cpi->common; |
593 | 0 | const SequenceHeader *const seq_params = cm->seq_params; |
594 | 0 | decoder_model->bit_rate = get_max_bitrate( |
595 | 0 | av1_level_defs + level, seq_params->tier[op_index], seq_params->profile); |
596 | | |
597 | | // TODO(huisu or anyone): implement SCHEDULE_MODE. |
598 | 0 | decoder_model->mode = RESOURCE_MODE; |
599 | 0 | decoder_model->encoder_buffer_delay = 20000; |
600 | 0 | decoder_model->decoder_buffer_delay = 70000; |
601 | 0 | decoder_model->is_low_delay_mode = false; |
602 | | 
603 | 0 | decoder_model->first_bit_arrival_time = 0.0; |
604 | 0 | decoder_model->last_bit_arrival_time = 0.0; |
605 | 0 | decoder_model->coded_bits = 0; |
606 | | 
607 | 0 | decoder_model->removal_time = INVALID_TIME; |
608 | 0 | decoder_model->presentation_time = INVALID_TIME; |
609 | 0 | decoder_model->decode_samples = 0; |
610 | 0 | decoder_model->display_samples = 0; |
611 | 0 | decoder_model->max_decode_rate = 0.0; |
612 | 0 | decoder_model->max_display_rate = 0.0; |
613 | | 
614 | 0 | decoder_model->num_frame = -1; |
615 | 0 | decoder_model->num_decoded_frame = -1; |
616 | 0 | decoder_model->num_shown_frame = -1; |
617 | 0 | decoder_model->current_time = 0.0; |
618 | | 
619 | 0 | initialize_buffer_pool(decoder_model); |
620 | | 
621 | 0 | DFG_INTERVAL_QUEUE *const dfg_interval_queue = |
622 | 0 | &decoder_model->dfg_interval_queue; |
623 | 0 | dfg_interval_queue->total_interval = 0.0; |
624 | 0 | dfg_interval_queue->head = 0; |
625 | 0 | dfg_interval_queue->size = 0; |
626 | | 
627 | 0 | if (seq_params->timing_info_present) { |
628 | 0 | decoder_model->num_ticks_per_picture = |
629 | 0 | seq_params->timing_info.num_ticks_per_picture; |
630 | 0 | decoder_model->display_clock_tick = |
631 | 0 | seq_params->timing_info.num_units_in_display_tick / |
632 | 0 | seq_params->timing_info.time_scale; |
633 | 0 | } else { |
634 | 0 | decoder_model->num_ticks_per_picture = 1; |
635 | 0 | decoder_model->display_clock_tick = 1.0 / cpi->framerate; |
636 | 0 | } |
637 | | 
638 | 0 | decoder_model->initial_display_delay = |
639 | 0 | seq_params->op_params[op_index].initial_display_delay; |
640 | 0 | decoder_model->initial_presentation_delay = INVALID_TIME; |
641 | 0 | decoder_model->decode_rate = av1_level_defs[level].max_decode_rate; |
642 | 0 | } |
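
For orientation (not part of level.c): decoder_model_init() above derives the display clock tick either from the sequence timing info (num_units_in_display_tick / time_scale) or, when timing info is absent, from 1 / framerate. A worked example with hypothetical numbers:

#include <stdio.h>

int main(void) {
  /* Timing info present: e.g. 3750 units at a 90000 Hz time scale = 1/24 s. */
  printf("from timing info: %.6f s per tick\n", 3750.0 / 90000.0);
  /* Timing info absent: fall back to the encoder frame rate, e.g. 30 fps. */
  printf("from framerate:   %.6f s per tick\n", 1.0 / 30.0);
  return 0;
}
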
643 | | |
644 | | DECODER_MODEL_STATUS av1_decoder_model_try_smooth_buf( |
645 | | const AV1_COMP *const cpi, size_t coded_bits, |
646 | 0 | const DECODER_MODEL *const decoder_model) { |
647 | 0 | DECODER_MODEL_STATUS status = DECODER_MODEL_OK; |
648 | | 
649 | 0 | if (!decoder_model || decoder_model->status != DECODER_MODEL_OK) { |
650 | 0 | return status; |
651 | 0 | } |
652 | | |
653 | 0 | const AV1_COMMON *const cm = &cpi->common; |
654 | 0 | const int show_existing_frame = cm->show_existing_frame; |
655 | | 
656 | 0 | size_t cur_coded_bits = decoder_model->coded_bits + coded_bits; |
657 | 0 | int num_decoded_frame = decoder_model->num_decoded_frame; |
658 | 0 | if (!show_existing_frame) ++num_decoded_frame; |
659 | | 
660 | 0 | if (show_existing_frame) { |
661 | 0 | return status; |
662 | 0 | } else { |
663 | 0 | const double removal_time = get_removal_time( |
664 | 0 | decoder_model->mode, num_decoded_frame, |
665 | 0 | decoder_model->decoder_buffer_delay, decoder_model->frame_buffer_pool, |
666 | 0 | decoder_model->current_time); |
667 | 0 | if (removal_time < 0.0) { |
668 | 0 | status = DECODE_FRAME_BUF_UNAVAILABLE; |
669 | 0 | return status; |
670 | 0 | } |
671 | | |
672 | | // A frame with show_existing_frame being false indicates the end of a DFG. |
673 | | // Update the bits arrival time of this DFG. |
674 | 0 | const double buffer_delay = (decoder_model->encoder_buffer_delay + |
675 | 0 | decoder_model->decoder_buffer_delay) / |
676 | 0 | 90000.0; |
677 | 0 | const double latest_arrival_time = removal_time - buffer_delay; |
678 | 0 | const double first_bit_arrival_time = |
679 | 0 | AOMMAX(decoder_model->last_bit_arrival_time, latest_arrival_time); |
680 | 0 | const double last_bit_arrival_time = |
681 | 0 | first_bit_arrival_time + |
682 | 0 | (double)cur_coded_bits / decoder_model->bit_rate; |
683 | | // Smoothing buffer underflows if the last bit arrives after the removal |
684 | | // time. |
685 | 0 | if (last_bit_arrival_time > removal_time && |
686 | 0 | !decoder_model->is_low_delay_mode) { |
687 | 0 | status = SMOOTHING_BUFFER_UNDERFLOW; |
688 | 0 | return status; |
689 | 0 | } |
690 | | |
691 | | // Check if the smoothing buffer overflows. |
692 | 0 | const DFG_INTERVAL_QUEUE *const queue = &decoder_model->dfg_interval_queue; |
693 | 0 | if (queue->size >= DFG_INTERVAL_QUEUE_SIZE) { |
694 | 0 | assert(0); |
695 | 0 | } |
696 | | 
697 | 0 | double total_interval = queue->total_interval; |
698 | 0 | int qhead = queue->head; |
699 | 0 | int qsize = queue->size; |
700 | | // Remove the DFGs with removal time earlier than last_bit_arrival_time. |
701 | 0 | while (queue->buf[qhead].removal_time <= last_bit_arrival_time && |
702 | 0 | qsize > 0) { |
703 | 0 | if (queue->buf[qhead].removal_time - first_bit_arrival_time + |
704 | 0 | total_interval > |
705 | 0 | 1.0) { |
706 | 0 | status = SMOOTHING_BUFFER_OVERFLOW; |
707 | 0 | return status; |
708 | 0 | } |
709 | 0 | total_interval -= queue->buf[qhead].last_bit_arrival_time - |
710 | 0 | queue->buf[qhead].first_bit_arrival_time; |
711 | 0 | qhead = (qhead + 1) % DFG_INTERVAL_QUEUE_SIZE; |
712 | 0 | --qsize; |
713 | 0 | } |
714 | 0 | total_interval += last_bit_arrival_time - first_bit_arrival_time; |
715 | | // The smoothing buffer can hold at most "bit_rate" bits, which is |
716 | | // equivalent to 1 second of total interval. |
717 | 0 | if (total_interval > 1.0) { |
718 | 0 | status = SMOOTHING_BUFFER_OVERFLOW; |
719 | 0 | return status; |
720 | 0 | } |
721 | | |
722 | 0 | return status; |
723 | 0 | } |
724 | 0 | } |
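
A rough sketch (not part of level.c) of the bound checked above: each decodable frame group occupies coded_bits / bit_rate seconds of arrival interval, and since the smoothing buffer holds at most bit_rate bits, the intervals still resident must sum to at most 1.0 s. The sketch below ignores the removal of already-consumed DFGs that the queue handles; the sizes and bitrate are hypothetical:

#include <stdio.h>

int main(void) {
  const double bit_rate = 12e6;                 /* e.g. level 4.0 main tier */
  const double dfg_bits[3] = { 3e6, 5e6, 6e6 }; /* hypothetical DFG sizes */
  double total_interval = 0.0;
  for (int i = 0; i < 3; ++i) {
    total_interval += dfg_bits[i] / bit_rate;   /* arrival interval of DFG i */
    printf("after DFG %d: %.3f s%s\n", i, total_interval,
           total_interval > 1.0 ? " -> SMOOTHING_BUFFER_OVERFLOW" : "");
  }
  return 0;
}
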
725 | | |
726 | | static void decoder_model_process_frame(const AV1_COMP *const cpi, |
727 | | size_t coded_bits, |
728 | 0 | DECODER_MODEL *const decoder_model) { |
729 | 0 | if (!decoder_model || decoder_model->status != DECODER_MODEL_OK) return; |
730 | | |
731 | 0 | const AV1_COMMON *const cm = &cpi->common; |
732 | 0 | const int luma_pic_size = cm->superres_upscaled_width * cm->height; |
733 | 0 | const int show_existing_frame = cm->show_existing_frame; |
734 | 0 | const int show_frame = cm->show_frame || show_existing_frame; |
735 | 0 | ++decoder_model->num_frame; |
736 | 0 | if (!show_existing_frame) ++decoder_model->num_decoded_frame; |
737 | 0 | if (show_frame) ++decoder_model->num_shown_frame; |
738 | 0 | decoder_model->coded_bits += coded_bits; |
739 | | 
740 | 0 | int display_idx = -1; |
741 | 0 | if (show_existing_frame) { |
742 | 0 | display_idx = decoder_model->vbi[cpi->existing_fb_idx_to_show]; |
743 | 0 | if (display_idx < 0) { |
744 | 0 | decoder_model->status = DECODE_EXISTING_FRAME_BUF_EMPTY; |
745 | 0 | return; |
746 | 0 | } |
747 | 0 | if (decoder_model->frame_buffer_pool[display_idx].frame_type == KEY_FRAME) { |
748 | 0 | update_ref_buffers(decoder_model, display_idx, 0xFF); |
749 | 0 | } |
750 | 0 | } else { |
751 | 0 | const double removal_time = get_removal_time( |
752 | 0 | decoder_model->mode, decoder_model->num_decoded_frame, |
753 | 0 | decoder_model->decoder_buffer_delay, decoder_model->frame_buffer_pool, |
754 | 0 | decoder_model->current_time); |
755 | 0 | if (removal_time < 0.0) { |
756 | 0 | decoder_model->status = DECODE_FRAME_BUF_UNAVAILABLE; |
757 | 0 | return; |
758 | 0 | } |
759 | | |
760 | 0 | const int previous_decode_samples = decoder_model->decode_samples; |
761 | 0 | const double previous_removal_time = decoder_model->removal_time; |
762 | 0 | assert(previous_removal_time < removal_time); |
763 | 0 | decoder_model->removal_time = removal_time; |
764 | 0 | decoder_model->decode_samples = luma_pic_size; |
765 | 0 | const double this_decode_rate = |
766 | 0 | previous_decode_samples / (removal_time - previous_removal_time); |
767 | 0 | decoder_model->max_decode_rate = |
768 | 0 | AOMMAX(decoder_model->max_decode_rate, this_decode_rate); |
769 | | |
770 | | // A frame with show_existing_frame being false indicates the end of a DFG. |
771 | | // Update the bits arrival time of this DFG. |
772 | 0 | const double buffer_delay = (decoder_model->encoder_buffer_delay + |
773 | 0 | decoder_model->decoder_buffer_delay) / |
774 | 0 | 90000.0; |
775 | 0 | const double latest_arrival_time = removal_time - buffer_delay; |
776 | 0 | decoder_model->first_bit_arrival_time = |
777 | 0 | AOMMAX(decoder_model->last_bit_arrival_time, latest_arrival_time); |
778 | 0 | decoder_model->last_bit_arrival_time = |
779 | 0 | decoder_model->first_bit_arrival_time + |
780 | 0 | (double)decoder_model->coded_bits / decoder_model->bit_rate; |
781 | | // Smoothing buffer underflows if the last bit arrives after the removal |
782 | | // time. |
783 | 0 | if (decoder_model->last_bit_arrival_time > removal_time && |
784 | 0 | !decoder_model->is_low_delay_mode) { |
785 | 0 | decoder_model->status = SMOOTHING_BUFFER_UNDERFLOW; |
786 | 0 | return; |
787 | 0 | } |
788 | | // Reset the coded bits for the next DFG. |
789 | 0 | decoder_model->coded_bits = 0; |
790 | | |
791 | | // Check if the smoothing buffer overflows. |
792 | 0 | DFG_INTERVAL_QUEUE *const queue = &decoder_model->dfg_interval_queue; |
793 | 0 | if (queue->size >= DFG_INTERVAL_QUEUE_SIZE) { |
794 | 0 | assert(0); |
795 | 0 | } |
796 | 0 | const double first_bit_arrival_time = decoder_model->first_bit_arrival_time; |
797 | 0 | const double last_bit_arrival_time = decoder_model->last_bit_arrival_time; |
798 | | // Remove the DFGs with removal time earlier than last_bit_arrival_time. |
799 | 0 | while (queue->buf[queue->head].removal_time <= last_bit_arrival_time && |
800 | 0 | queue->size > 0) { |
801 | 0 | if (queue->buf[queue->head].removal_time - first_bit_arrival_time + |
802 | 0 | queue->total_interval > |
803 | 0 | 1.0) { |
804 | 0 | decoder_model->status = SMOOTHING_BUFFER_OVERFLOW; |
805 | 0 | return; |
806 | 0 | } |
807 | 0 | queue->total_interval -= queue->buf[queue->head].last_bit_arrival_time - |
808 | 0 | queue->buf[queue->head].first_bit_arrival_time; |
809 | 0 | queue->head = (queue->head + 1) % DFG_INTERVAL_QUEUE_SIZE; |
810 | 0 | --queue->size; |
811 | 0 | } |
812 | | // Push current DFG into the queue. |
813 | 0 | const int queue_index = |
814 | 0 | (queue->head + queue->size++) % DFG_INTERVAL_QUEUE_SIZE; |
815 | 0 | queue->buf[queue_index].first_bit_arrival_time = first_bit_arrival_time; |
816 | 0 | queue->buf[queue_index].last_bit_arrival_time = last_bit_arrival_time; |
817 | 0 | queue->buf[queue_index].removal_time = removal_time; |
818 | 0 | queue->total_interval += last_bit_arrival_time - first_bit_arrival_time; |
819 | | // The smoothing buffer can hold at most "bit_rate" bits, which is |
820 | | // equivalent to 1 second of total interval. |
821 | 0 | if (queue->total_interval > 1.0) { |
822 | 0 | decoder_model->status = SMOOTHING_BUFFER_OVERFLOW; |
823 | 0 | return; |
824 | 0 | } |
825 | | |
826 | 0 | release_processed_frames(decoder_model, removal_time); |
827 | 0 | decoder_model->current_time = |
828 | 0 | removal_time + time_to_decode_frame(cm, decoder_model->decode_rate); |
829 | | 
830 | 0 | const int cfbi = get_free_buffer(decoder_model); |
831 | 0 | if (cfbi < 0) { |
832 | 0 | decoder_model->status = DECODE_FRAME_BUF_UNAVAILABLE; |
833 | 0 | return; |
834 | 0 | } |
835 | 0 | const CurrentFrame *const current_frame = &cm->current_frame; |
836 | 0 | decoder_model->frame_buffer_pool[cfbi].frame_type = |
837 | 0 | cm->current_frame.frame_type; |
838 | 0 | display_idx = cfbi; |
839 | 0 | update_ref_buffers(decoder_model, cfbi, current_frame->refresh_frame_flags); |
840 | | 
841 | 0 | if (decoder_model->initial_presentation_delay < 0.0) { |
842 | | // Display can begin after the required number of frames have been buffered. |
843 | 0 | if (frames_in_buffer_pool(decoder_model) >= |
844 | 0 | decoder_model->initial_display_delay - 1) { |
845 | 0 | decoder_model->initial_presentation_delay = decoder_model->current_time; |
846 | | // Update presentation time for each shown frame in the frame buffer. |
847 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
848 | 0 | FRAME_BUFFER *const this_buffer = |
849 | 0 | &decoder_model->frame_buffer_pool[i]; |
850 | 0 | if (this_buffer->player_ref_count == 0) continue; |
851 | 0 | assert(this_buffer->display_index >= 0); |
852 | 0 | this_buffer->presentation_time = |
853 | 0 | get_presentation_time(decoder_model, this_buffer->display_index); |
854 | 0 | } |
855 | 0 | } |
856 | 0 | } |
857 | 0 | } |
858 | | |
859 | | // Display. |
860 | 0 | if (show_frame) { |
861 | 0 | assert(display_idx >= 0 && display_idx < BUFFER_POOL_MAX_SIZE); |
862 | 0 | FRAME_BUFFER *const this_buffer = |
863 | 0 | &decoder_model->frame_buffer_pool[display_idx]; |
864 | 0 | ++this_buffer->player_ref_count; |
865 | 0 | this_buffer->display_index = decoder_model->num_shown_frame; |
866 | 0 | const double presentation_time = |
867 | 0 | get_presentation_time(decoder_model, this_buffer->display_index); |
868 | 0 | this_buffer->presentation_time = presentation_time; |
869 | 0 | if (presentation_time >= 0.0 && |
870 | 0 | decoder_model->current_time > presentation_time) { |
871 | 0 | decoder_model->status = DISPLAY_FRAME_LATE; |
872 | 0 | return; |
873 | 0 | } |
874 | | |
875 | 0 | const int previous_display_samples = decoder_model->display_samples; |
876 | 0 | const double previous_presentation_time = decoder_model->presentation_time; |
877 | 0 | decoder_model->display_samples = luma_pic_size; |
878 | 0 | decoder_model->presentation_time = presentation_time; |
879 | 0 | if (presentation_time >= 0.0 && previous_presentation_time >= 0.0) { |
880 | 0 | assert(previous_presentation_time < presentation_time); |
881 | 0 | const double this_display_rate = |
882 | 0 | previous_display_samples / |
883 | 0 | (presentation_time - previous_presentation_time); |
884 | 0 | decoder_model->max_display_rate = |
885 | 0 | AOMMAX(decoder_model->max_display_rate, this_display_rate); |
886 | 0 | } |
887 | 0 | } |
888 | 0 | } |
889 | | |
890 | 0 | void av1_init_level_info(AV1_COMP *cpi) { |
891 | 0 | for (int op_index = 0; op_index < MAX_NUM_OPERATING_POINTS; ++op_index) { |
892 | 0 | AV1LevelInfo *const this_level_info = |
893 | 0 | cpi->ppi->level_params.level_info[op_index]; |
894 | 0 | if (!this_level_info) continue; |
895 | 0 | memset(this_level_info, 0, sizeof(*this_level_info)); |
896 | 0 | AV1LevelSpec *const level_spec = &this_level_info->level_spec; |
897 | 0 | level_spec->level = SEQ_LEVEL_MAX; |
898 | 0 | AV1LevelStats *const level_stats = &this_level_info->level_stats; |
899 | 0 | level_stats->min_cropped_tile_width = INT_MAX; |
900 | 0 | level_stats->min_cropped_tile_height = INT_MAX; |
901 | 0 | level_stats->min_frame_width = INT_MAX; |
902 | 0 | level_stats->min_frame_height = INT_MAX; |
903 | 0 | level_stats->tile_width_is_valid = 1; |
904 | 0 | level_stats->min_cr = 1e8; |
905 | | 
906 | 0 | FrameWindowBuffer *const frame_window_buffer = |
907 | 0 | &this_level_info->frame_window_buffer; |
908 | 0 | frame_window_buffer->num = 0; |
909 | 0 | frame_window_buffer->start = 0; |
910 | | 
911 | 0 | const AV1_COMMON *const cm = &cpi->common; |
912 | 0 | const int upscaled_width = cm->superres_upscaled_width; |
913 | 0 | const int height = cm->height; |
914 | 0 | const int pic_size = upscaled_width * height; |
915 | 0 | for (AV1_LEVEL level = SEQ_LEVEL_2_0; level < SEQ_LEVELS; ++level) { |
916 | 0 | DECODER_MODEL *const this_model = &this_level_info->decoder_models[level]; |
917 | 0 | const AV1LevelSpec *const spec = &av1_level_defs[level]; |
918 | 0 | if (upscaled_width > spec->max_h_size || height > spec->max_v_size || |
919 | 0 | pic_size > spec->max_picture_size) { |
920 | | // Turn off decoder model for this level as the frame size already |
921 | | // exceeds level constraints. |
922 | 0 | this_model->status = DECODER_MODEL_DISABLED; |
923 | 0 | } else { |
924 | 0 | decoder_model_init(cpi, level, op_index, this_model); |
925 | 0 | } |
926 | 0 | } |
927 | 0 | } |
928 | 0 | } |
929 | | |
930 | | static double get_min_cr(const AV1LevelSpec *const level_spec, int tier, |
931 | 0 | int is_still_picture, int64_t decoded_sample_rate) { |
932 | 0 | if (is_still_picture) return 0.8; |
933 | 0 | if (level_spec->level < SEQ_LEVEL_4_0) tier = 0; |
934 | 0 | const double min_cr_basis = tier ? level_spec->high_cr : level_spec->main_cr; |
935 | 0 | const double speed_adj = |
936 | 0 | (double)decoded_sample_rate / level_spec->max_display_rate; |
937 | 0 | return AOMMAX(min_cr_basis * speed_adj, 0.8); |
938 | 0 | } |
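
Worked example (not part of level.c): for level 5.1, main tier, MainCR is 8.0 and MaxDisplayRate is 534773760; a stream decoded at 3840x2160, 30 fps has a sample rate of 248832000, so the required minimum compression ratio is 8.0 * 248832000 / 534773760, about 3.72, still above the 0.8 floor. The same arithmetic as a snippet:

#include <stdio.h>

int main(void) {
  const double main_cr = 8.0;                  /* level 5.1 main tier */
  const double max_display_rate = 534773760.0; /* level 5.1 table value */
  const double decoded_sample_rate = 3840.0 * 2160.0 * 30.0;
  const double min_cr = main_cr * decoded_sample_rate / max_display_rate;
  printf("min CR = %.2f\n", min_cr > 0.8 ? min_cr : 0.8); /* ~3.72 */
  return 0;
}
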
939 | | |
940 | | double av1_get_min_cr_for_level(AV1_LEVEL level_index, int tier, |
941 | 0 | int is_still_picture) { |
942 | 0 | assert(is_valid_seq_level_idx(level_index)); |
943 | 0 | const AV1LevelSpec *const level_spec = &av1_level_defs[level_index]; |
944 | 0 | return get_min_cr(level_spec, tier, is_still_picture, |
945 | 0 | level_spec->max_decode_rate); |
946 | 0 | } |
947 | | |
948 | | static void get_temporal_parallel_params(int scalability_mode_idc, |
949 | | int *temporal_parallel_num, |
950 | 0 | int *temporal_parallel_denom) { |
951 | 0 | if (scalability_mode_idc < 0) { |
952 | 0 | *temporal_parallel_num = 1; |
953 | 0 | *temporal_parallel_denom = 1; |
954 | 0 | return; |
955 | 0 | } |
956 | | |
957 | | // TODO(huisu@): handle scalability cases. |
958 | 0 | if (scalability_mode_idc == SCALABILITY_SS) { |
959 | 0 | (void)scalability_mode_idc; |
960 | 0 | } else { |
961 | 0 | (void)scalability_mode_idc; |
962 | 0 | } |
963 | 0 | } |
964 | | |
965 | 0 | #define MIN_CROPPED_TILE_WIDTH 8 |
966 | 0 | #define MIN_CROPPED_TILE_HEIGHT 8 |
967 | 0 | #define MIN_FRAME_WIDTH 16 |
968 | 0 | #define MIN_FRAME_HEIGHT 16 |
969 | 0 | #define MAX_TILE_SIZE_HEADER_RATE_PRODUCT 588251136 |
970 | | |
971 | | static TARGET_LEVEL_FAIL_ID check_level_constraints( |
972 | | const AV1LevelInfo *const level_info, AV1_LEVEL level, int tier, |
973 | 0 | int is_still_picture, BITSTREAM_PROFILE profile, int check_bitrate) { |
974 | 0 | const DECODER_MODEL *const decoder_model = &level_info->decoder_models[level]; |
975 | 0 | const DECODER_MODEL_STATUS decoder_model_status = decoder_model->status; |
976 | 0 | if (decoder_model_status != DECODER_MODEL_OK && |
977 | 0 | decoder_model_status != DECODER_MODEL_DISABLED) { |
978 | 0 | return DECODER_MODEL_FAIL; |
979 | 0 | } |
980 | | |
981 | 0 | const AV1LevelSpec *const level_spec = &level_info->level_spec; |
982 | 0 | const AV1LevelSpec *const target_level_spec = &av1_level_defs[level]; |
983 | 0 | const AV1LevelStats *const level_stats = &level_info->level_stats; |
984 | 0 | TARGET_LEVEL_FAIL_ID fail_id = TARGET_LEVEL_OK; |
985 | 0 | do { |
986 | 0 | if (level_spec->max_picture_size > target_level_spec->max_picture_size) { |
987 | 0 | fail_id = LUMA_PIC_SIZE_TOO_LARGE; |
988 | 0 | break; |
989 | 0 | } |
990 | | |
991 | 0 | if (level_spec->max_h_size > target_level_spec->max_h_size) { |
992 | 0 | fail_id = LUMA_PIC_H_SIZE_TOO_LARGE; |
993 | 0 | break; |
994 | 0 | } |
995 | | |
996 | 0 | if (level_spec->max_v_size > target_level_spec->max_v_size) { |
997 | 0 | fail_id = LUMA_PIC_V_SIZE_TOO_LARGE; |
998 | 0 | break; |
999 | 0 | } |
1000 | | |
1001 | 0 | if (level_spec->max_tile_cols > target_level_spec->max_tile_cols) { |
1002 | 0 | fail_id = TOO_MANY_TILE_COLUMNS; |
1003 | 0 | break; |
1004 | 0 | } |
1005 | | |
1006 | 0 | if (level_spec->max_tiles > target_level_spec->max_tiles) { |
1007 | 0 | fail_id = TOO_MANY_TILES; |
1008 | 0 | break; |
1009 | 0 | } |
1010 | | |
1011 | 0 | if (level_spec->max_header_rate > target_level_spec->max_header_rate) { |
1012 | 0 | fail_id = FRAME_HEADER_RATE_TOO_HIGH; |
1013 | 0 | break; |
1014 | 0 | } |
1015 | | |
1016 | 0 | if (decoder_model->max_display_rate > |
1017 | 0 | (double)target_level_spec->max_display_rate) { |
1018 | 0 | fail_id = DISPLAY_RATE_TOO_HIGH; |
1019 | 0 | break; |
1020 | 0 | } |
1021 | | |
1022 | | // TODO(huisu): we are not using max decode rate calculated by the decoder |
1023 | | // model because the model in resource availability mode always returns |
1024 | | // MaxDecodeRate (as in the level definitions) as the max decode rate. |
1025 | 0 | if (level_spec->max_decode_rate > target_level_spec->max_decode_rate) { |
1026 | 0 | fail_id = DECODE_RATE_TOO_HIGH; |
1027 | 0 | break; |
1028 | 0 | } |
1029 | | |
1030 | 0 | if (level_spec->max_tile_rate > target_level_spec->max_tiles * 120) { |
1031 | 0 | fail_id = TILE_RATE_TOO_HIGH; |
1032 | 0 | break; |
1033 | 0 | } |
1034 | | |
1035 | | #if CONFIG_CWG_C013 |
1036 | | const int max_tile_size = (level >= SEQ_LEVEL_7_0 && level <= SEQ_LEVEL_8_3) |
1037 | | ? MAX_TILE_AREA_LEVEL_7_AND_ABOVE |
1038 | | : MAX_TILE_AREA; |
1039 | | #else |
1040 | 0 | const int max_tile_size = MAX_TILE_AREA; |
1041 | 0 | #endif |
1042 | 0 | if (level_stats->max_tile_size > max_tile_size) { |
1043 | 0 | fail_id = TILE_TOO_LARGE; |
1044 | 0 | break; |
1045 | 0 | } |
1046 | | |
1047 | 0 | if (level_stats->max_superres_tile_width > MAX_TILE_WIDTH) { |
1048 | 0 | fail_id = SUPERRES_TILE_WIDTH_TOO_LARGE; |
1049 | 0 | break; |
1050 | 0 | } |
1051 | | |
1052 | 0 | if (level_stats->min_cropped_tile_width < MIN_CROPPED_TILE_WIDTH) { |
1053 | 0 | fail_id = CROPPED_TILE_WIDTH_TOO_SMALL; |
1054 | 0 | break; |
1055 | 0 | } |
1056 | | |
1057 | 0 | if (level_stats->min_cropped_tile_height < MIN_CROPPED_TILE_HEIGHT) { |
1058 | 0 | fail_id = CROPPED_TILE_HEIGHT_TOO_SMALL; |
1059 | 0 | break; |
1060 | 0 | } |
1061 | | |
1062 | 0 | if (level_stats->min_frame_width < MIN_FRAME_WIDTH) { |
1063 | 0 | fail_id = LUMA_PIC_H_SIZE_TOO_SMALL; |
1064 | 0 | break; |
1065 | 0 | } |
1066 | | |
1067 | 0 | if (level_stats->min_frame_height < MIN_FRAME_HEIGHT) { |
1068 | 0 | fail_id = LUMA_PIC_V_SIZE_TOO_SMALL; |
1069 | 0 | break; |
1070 | 0 | } |
1071 | | |
1072 | 0 | if (!level_stats->tile_width_is_valid) { |
1073 | 0 | fail_id = TILE_WIDTH_INVALID; |
1074 | 0 | break; |
1075 | 0 | } |
1076 | | |
1077 | 0 | const double min_cr = get_min_cr(target_level_spec, tier, is_still_picture, |
1078 | 0 | level_spec->max_decode_rate); |
1079 | 0 | if (level_stats->min_cr < min_cr) { |
1080 | 0 | fail_id = CR_TOO_SMALL; |
1081 | 0 | break; |
1082 | 0 | } |
1083 | | |
1084 | 0 | if (check_bitrate) { |
1085 | | // Check average bitrate instead of max_bitrate. |
1086 | 0 | const double bitrate_limit = |
1087 | 0 | get_max_bitrate(target_level_spec, tier, profile); |
1088 | 0 | const double avg_bitrate = level_stats->total_compressed_size * 8.0 / |
1089 | 0 | level_stats->total_time_encoded; |
1090 | 0 | if (avg_bitrate > bitrate_limit) { |
1091 | 0 | fail_id = BITRATE_TOO_HIGH; |
1092 | 0 | break; |
1093 | 0 | } |
1094 | 0 | } |
1095 | | |
1096 | 0 | if (target_level_spec->level > SEQ_LEVEL_5_1) { |
1097 | 0 | int temporal_parallel_num; |
1098 | 0 | int temporal_parallel_denom; |
1099 | 0 | const int scalability_mode_idc = -1; |
1100 | 0 | get_temporal_parallel_params(scalability_mode_idc, &temporal_parallel_num, |
1101 | 0 | &temporal_parallel_denom); |
1102 | 0 | const int val = level_stats->max_tile_size * level_spec->max_header_rate * |
1103 | 0 | temporal_parallel_denom / temporal_parallel_num; |
1104 | 0 | if (val > MAX_TILE_SIZE_HEADER_RATE_PRODUCT) { |
1105 | 0 | fail_id = TILE_SIZE_HEADER_RATE_TOO_HIGH; |
1106 | 0 | break; |
1107 | 0 | } |
1108 | 0 | } |
1109 | 0 | } while (0); |
1110 | | |
1111 | 0 | return fail_id; |
1112 | 0 | } |
1113 | | |
1114 | | static void get_tile_stats(const AV1_COMMON *const cm, |
1115 | | const TileDataEnc *const tile_data, |
1116 | | int *max_tile_size, int *max_superres_tile_width, |
1117 | | int *min_cropped_tile_width, |
1118 | | int *min_cropped_tile_height, |
1119 | 0 | int *tile_width_valid) { |
1120 | 0 | const int tile_cols = cm->tiles.cols; |
1121 | 0 | const int tile_rows = cm->tiles.rows; |
1122 | 0 | const int superres_scale_denominator = cm->superres_scale_denominator; |
1123 | | 
1124 | 0 | *max_tile_size = 0; |
1125 | 0 | *max_superres_tile_width = 0; |
1126 | 0 | *min_cropped_tile_width = INT_MAX; |
1127 | 0 | *min_cropped_tile_height = INT_MAX; |
1128 | 0 | *tile_width_valid = 1; |
1129 | |
|
1130 | 0 | for (int tile_row = 0; tile_row < tile_rows; ++tile_row) { |
1131 | 0 | for (int tile_col = 0; tile_col < tile_cols; ++tile_col) { |
1132 | 0 | const TileInfo *const tile_info = |
1133 | 0 | &tile_data[tile_row * cm->tiles.cols + tile_col].tile_info; |
1134 | 0 | const int tile_width = |
1135 | 0 | (tile_info->mi_col_end - tile_info->mi_col_start) * MI_SIZE; |
1136 | 0 | const int tile_height = |
1137 | 0 | (tile_info->mi_row_end - tile_info->mi_row_start) * MI_SIZE; |
1138 | 0 | const int tile_size = tile_width * tile_height; |
1139 | 0 | *max_tile_size = AOMMAX(*max_tile_size, tile_size); |
1140 | | 
1141 | 0 | const int superres_tile_width = |
1142 | 0 | tile_width * superres_scale_denominator / SCALE_NUMERATOR; |
1143 | 0 | *max_superres_tile_width = |
1144 | 0 | AOMMAX(*max_superres_tile_width, superres_tile_width); |
1145 | | 
1146 | 0 | const int cropped_tile_width = |
1147 | 0 | cm->width - tile_info->mi_col_start * MI_SIZE; |
1148 | 0 | const int cropped_tile_height = |
1149 | 0 | cm->height - tile_info->mi_row_start * MI_SIZE; |
1150 | 0 | *min_cropped_tile_width = |
1151 | 0 | AOMMIN(*min_cropped_tile_width, cropped_tile_width); |
1152 | 0 | *min_cropped_tile_height = |
1153 | 0 | AOMMIN(*min_cropped_tile_height, cropped_tile_height); |
1154 | | 
1155 | 0 | const int is_right_most_tile = |
1156 | 0 | tile_info->mi_col_end == cm->mi_params.mi_cols; |
1157 | 0 | if (!is_right_most_tile) { |
1158 | 0 | if (av1_superres_scaled(cm)) |
1159 | 0 | *tile_width_valid &= tile_width >= 128; |
1160 | 0 | else |
1161 | 0 | *tile_width_valid &= tile_width >= 64; |
1162 | 0 | } |
1163 | 0 | } |
1164 | 0 | } |
1165 | 0 | } |
1166 | | |
1167 | | static int store_frame_record(int64_t ts_start, int64_t ts_end, |
1168 | | size_t encoded_size, int pic_size, |
1169 | | int frame_header_count, int tiles, int show_frame, |
1170 | | int show_existing_frame, |
1171 | 0 | FrameWindowBuffer *const buffer) { |
1172 | 0 | if (buffer->num < FRAME_WINDOW_SIZE) { |
1173 | 0 | ++buffer->num; |
1174 | 0 | } else { |
1175 | 0 | buffer->start = (buffer->start + 1) % FRAME_WINDOW_SIZE; |
1176 | 0 | } |
1177 | 0 | const int new_idx = (buffer->start + buffer->num - 1) % FRAME_WINDOW_SIZE; |
1178 | 0 | FrameRecord *const record = &buffer->buf[new_idx]; |
1179 | 0 | record->ts_start = ts_start; |
1180 | 0 | record->ts_end = ts_end; |
1181 | 0 | record->encoded_size_in_bytes = encoded_size; |
1182 | 0 | record->pic_size = pic_size; |
1183 | 0 | record->frame_header_count = frame_header_count; |
1184 | 0 | record->tiles = tiles; |
1185 | 0 | record->show_frame = show_frame; |
1186 | 0 | record->show_existing_frame = show_existing_frame; |
1187 | | 
1188 | 0 | return new_idx; |
1189 | 0 | } |
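
Illustrative only (not part of level.c): store_frame_record() keeps a fixed-size ring of frame records; once the window is full, start advances so the newest record overwrites the oldest slot. The same indexing with a hypothetical 4-slot window standing in for FRAME_WINDOW_SIZE:

#include <stdio.h>

#define WINDOW 4 /* stand-in for FRAME_WINDOW_SIZE */

int main(void) {
  int start = 0, num = 0;
  for (int frame = 0; frame < 6; ++frame) {
    if (num < WINDOW) ++num;
    else start = (start + 1) % WINDOW;   /* overwrite the oldest slot */
    const int new_idx = (start + num - 1) % WINDOW;
    printf("frame %d stored in slot %d\n", frame, new_idx);
  }
  return 0;
}
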
1190 | | |
1191 | | // Count the number of frames encoded in the last "duration" ticks, in display |
1192 | | // time. |
1193 | | static int count_frames(const FrameWindowBuffer *const buffer, |
1194 | 0 | int64_t duration) { |
1195 | 0 | const int current_idx = (buffer->start + buffer->num - 1) % FRAME_WINDOW_SIZE; |
1196 | | // Assume current frame is shown frame. |
1197 | 0 | assert(buffer->buf[current_idx].show_frame); |
1198 | | 
1199 | 0 | const int64_t current_time = buffer->buf[current_idx].ts_end; |
1200 | 0 | const int64_t time_limit = AOMMAX(current_time - duration, 0); |
1201 | 0 | int num_frames = 1; |
1202 | 0 | int index = current_idx - 1; |
1203 | 0 | for (int i = buffer->num - 2; i >= 0; --i, --index, ++num_frames) { |
1204 | 0 | if (index < 0) index = FRAME_WINDOW_SIZE - 1; |
1205 | 0 | const FrameRecord *const record = &buffer->buf[index]; |
1206 | 0 | if (!record->show_frame) continue; |
1207 | 0 | const int64_t ts_start = record->ts_start; |
1208 | 0 | if (ts_start < time_limit) break; |
1209 | 0 | } |
1210 | | 
1211 | 0 | return num_frames; |
1212 | 0 | } |
1213 | | |
1214 | | // Scan previously encoded frames and update level metrics accordingly. |
1215 | | static void scan_past_frames(const FrameWindowBuffer *const buffer, |
1216 | | int num_frames_to_scan, |
1217 | | AV1LevelSpec *const level_spec, |
1218 | 0 | AV1LevelStats *const level_stats) { |
1219 | 0 | const int num_frames_in_buffer = buffer->num; |
1220 | 0 | int index = (buffer->start + num_frames_in_buffer - 1) % FRAME_WINDOW_SIZE; |
1221 | 0 | int frame_headers = 0; |
1222 | 0 | int tiles = 0; |
1223 | 0 | int64_t display_samples = 0; |
1224 | 0 | int64_t decoded_samples = 0; |
1225 | 0 | size_t encoded_size_in_bytes = 0; |
1226 | 0 | for (int i = 0; i < AOMMIN(num_frames_in_buffer, num_frames_to_scan); ++i) { |
1227 | 0 | const FrameRecord *const record = &buffer->buf[index]; |
1228 | 0 | if (!record->show_existing_frame) { |
1229 | 0 | frame_headers += record->frame_header_count; |
1230 | 0 | decoded_samples += record->pic_size; |
1231 | 0 | } |
1232 | 0 | if (record->show_frame) { |
1233 | 0 | display_samples += record->pic_size; |
1234 | 0 | } |
1235 | 0 | tiles += record->tiles; |
1236 | 0 | encoded_size_in_bytes += record->encoded_size_in_bytes; |
1237 | 0 | --index; |
1238 | 0 | if (index < 0) index = FRAME_WINDOW_SIZE - 1; |
1239 | 0 | } |
1240 | 0 | level_spec->max_header_rate = |
1241 | 0 | AOMMAX(level_spec->max_header_rate, frame_headers); |
1242 | | // TODO(huisu): we can now compute max display rate with the decoder model, so |
1243 | | // these couple of lines can be removed. Keep them here for a while for |
1244 | | // debugging purposes. |
1245 | 0 | level_spec->max_display_rate = |
1246 | 0 | AOMMAX(level_spec->max_display_rate, display_samples); |
1247 | 0 | level_spec->max_decode_rate = |
1248 | 0 | AOMMAX(level_spec->max_decode_rate, decoded_samples); |
1249 | 0 | level_spec->max_tile_rate = AOMMAX(level_spec->max_tile_rate, tiles); |
1250 | 0 | level_stats->max_bitrate = |
1251 | 0 | AOMMAX(level_stats->max_bitrate, |
1252 | 0 | (int)AOMMIN(encoded_size_in_bytes * 8, (size_t)INT_MAX)); |
1253 | 0 | } |
1254 | | |
1255 | | void av1_update_level_info(AV1_COMP *cpi, size_t size, int64_t ts_start, |
1256 | 0 | int64_t ts_end) { |
1257 | 0 | AV1_COMMON *const cm = &cpi->common; |
1258 | 0 | const AV1LevelParams *const level_params = &cpi->ppi->level_params; |
1259 | | 
1260 | 0 | const int upscaled_width = cm->superres_upscaled_width; |
1261 | 0 | const int width = cm->width; |
1262 | 0 | const int height = cm->height; |
1263 | 0 | const int tile_cols = cm->tiles.cols; |
1264 | 0 | const int tile_rows = cm->tiles.rows; |
1265 | 0 | const int tiles = tile_cols * tile_rows; |
1266 | 0 | const int luma_pic_size = upscaled_width * height; |
1267 | 0 | const int frame_header_count = cpi->frame_header_count; |
1268 | 0 | const int show_frame = cm->show_frame; |
1269 | 0 | const int show_existing_frame = cm->show_existing_frame; |
1270 | | 
1271 | 0 | int max_tile_size; |
1272 | 0 | int min_cropped_tile_width; |
1273 | 0 | int min_cropped_tile_height; |
1274 | 0 | int max_superres_tile_width; |
1275 | 0 | int tile_width_is_valid; |
1276 | 0 | get_tile_stats(cm, cpi->tile_data, &max_tile_size, &max_superres_tile_width, |
1277 | 0 | &min_cropped_tile_width, &min_cropped_tile_height, |
1278 | 0 | &tile_width_is_valid); |
1279 | | 
1280 | 0 | const double compression_ratio = av1_get_compression_ratio(cm, size); |
1281 | | 
1282 | 0 | const int temporal_layer_id = cm->temporal_layer_id; |
1283 | 0 | const int spatial_layer_id = cm->spatial_layer_id; |
1284 | 0 | const SequenceHeader *const seq_params = cm->seq_params; |
1285 | 0 | const BITSTREAM_PROFILE profile = seq_params->profile; |
1286 | 0 | const int is_still_picture = seq_params->still_picture; |
1287 | | // update level_stats |
1288 | | // TODO(kyslov@) fix the implementation according to buffer model |
1289 | 0 | for (int i = 0; i < seq_params->operating_points_cnt_minus_1 + 1; ++i) { |
1290 | 0 | if (!is_in_operating_point(seq_params->operating_point_idc[i], |
1291 | 0 | temporal_layer_id, spatial_layer_id) || |
1292 | 0 | !((level_params->keep_level_stats >> i) & 1)) { |
1293 | 0 | continue; |
1294 | 0 | } |
1295 | | |
1296 | 0 | AV1LevelInfo *const level_info = level_params->level_info[i]; |
1297 | 0 | assert(level_info != NULL); |
1298 | 0 | AV1LevelStats *const level_stats = &level_info->level_stats; |
1299 | | 
1300 | 0 | level_stats->max_tile_size = |
1301 | 0 | AOMMAX(level_stats->max_tile_size, max_tile_size); |
1302 | 0 | level_stats->max_superres_tile_width = |
1303 | 0 | AOMMAX(level_stats->max_superres_tile_width, max_superres_tile_width); |
1304 | 0 | level_stats->min_cropped_tile_width = |
1305 | 0 | AOMMIN(level_stats->min_cropped_tile_width, min_cropped_tile_width); |
1306 | 0 | level_stats->min_cropped_tile_height = |
1307 | 0 | AOMMIN(level_stats->min_cropped_tile_height, min_cropped_tile_height); |
1308 | 0 | level_stats->tile_width_is_valid &= tile_width_is_valid; |
1309 | 0 | level_stats->min_frame_width = AOMMIN(level_stats->min_frame_width, width); |
1310 | 0 | level_stats->min_frame_height = |
1311 | 0 | AOMMIN(level_stats->min_frame_height, height); |
1312 | 0 | level_stats->min_cr = AOMMIN(level_stats->min_cr, compression_ratio); |
1313 | 0 | level_stats->total_compressed_size += (double)size; |
1314 | | |
1315 | | // update level_spec |
1316 | | // TODO(kyslov@) update all spec fields |
1317 | 0 | AV1LevelSpec *const level_spec = &level_info->level_spec; |
1318 | 0 | level_spec->max_picture_size = |
1319 | 0 | AOMMAX(level_spec->max_picture_size, luma_pic_size); |
1320 | 0 | level_spec->max_h_size = |
1321 | 0 | AOMMAX(level_spec->max_h_size, cm->superres_upscaled_width); |
1322 | 0 | level_spec->max_v_size = AOMMAX(level_spec->max_v_size, height); |
1323 | 0 | level_spec->max_tile_cols = AOMMAX(level_spec->max_tile_cols, tile_cols); |
1324 | 0 | level_spec->max_tiles = AOMMAX(level_spec->max_tiles, tiles); |
1325 | | |
1326 | | // Store info. of current frame into FrameWindowBuffer. |
1327 | 0 | FrameWindowBuffer *const buffer = &level_info->frame_window_buffer; |
1328 | 0 | store_frame_record(ts_start, ts_end, size, luma_pic_size, |
1329 | 0 | frame_header_count, tiles, show_frame, |
1330 | 0 | show_existing_frame, buffer); |
1331 | 0 | if (show_frame) { |
1332 | | // Count the number of frames encoded in the past 1 second. |
1333 | 0 | const int encoded_frames_in_last_second = |
1334 | 0 | show_frame ? count_frames(buffer, TICKS_PER_SEC) : 0; |
1335 | 0 | scan_past_frames(buffer, encoded_frames_in_last_second, level_spec, |
1336 | 0 | level_stats); |
1337 | 0 | level_stats->total_time_encoded += |
1338 | 0 | (cpi->time_stamps.prev_ts_end - cpi->time_stamps.prev_ts_start) / |
1339 | 0 | (double)TICKS_PER_SEC; |
1340 | 0 | } |
1341 | | 
1342 | 0 | DECODER_MODEL *const decoder_models = level_info->decoder_models; |
1343 | 0 | for (AV1_LEVEL level = SEQ_LEVEL_2_0; level < SEQ_LEVELS; ++level) { |
1344 | 0 | decoder_model_process_frame(cpi, size << 3, &decoder_models[level]); |
1345 | 0 | } |
1346 | | |
1347 | | // Check whether target level is met. |
1348 | 0 | const AV1_LEVEL target_level = level_params->target_seq_level_idx[i]; |
1349 | 0 | if (target_level < SEQ_LEVELS && cpi->oxcf.strict_level_conformance) { |
1350 | 0 | assert(is_valid_seq_level_idx(target_level)); |
1351 | 0 | const int tier = seq_params->tier[i]; |
1352 | 0 | const TARGET_LEVEL_FAIL_ID fail_id = check_level_constraints( |
1353 | 0 | level_info, target_level, tier, is_still_picture, profile, 0); |
1354 | 0 | if (fail_id != TARGET_LEVEL_OK) { |
1355 | 0 | const int target_level_major = 2 + (target_level >> 2); |
1356 | 0 | const int target_level_minor = target_level & 3; |
1357 | 0 | aom_internal_error(cm->error, AOM_CODEC_ERROR, |
1358 | 0 | "Failed to encode to the target level %d_%d. %s", |
1359 | 0 | target_level_major, target_level_minor, |
1360 | 0 | level_fail_messages[fail_id]); |
1361 | 0 | } |
1362 | 0 | } |
1363 | 0 | } |
1364 | 0 | } |
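
Worked example (not part of level.c): the error message above derives the level name from the sequence level index with major = 2 + (index >> 2) and minor = index & 3, so index 9 reports as 4_1 and index 13 as 5_1. A quick check:

#include <stdio.h>

int main(void) {
  const int indices[4] = { 0, 9, 13, 19 };
  for (int i = 0; i < 4; ++i) {
    const int idx = indices[i];
    printf("level index %2d -> %d_%d\n", idx, 2 + (idx >> 2), idx & 3);
  }
  return 0;
}
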
1365 | | |
1366 | | aom_codec_err_t av1_get_seq_level_idx(const SequenceHeader *seq_params, |
1367 | | const AV1LevelParams *level_params, |
1368 | 0 | int *seq_level_idx) { |
1369 | 0 | const int is_still_picture = seq_params->still_picture; |
1370 | 0 | const BITSTREAM_PROFILE profile = seq_params->profile; |
1371 | 0 | for (int op = 0; op < seq_params->operating_points_cnt_minus_1 + 1; ++op) { |
1372 | 0 | seq_level_idx[op] = (int)SEQ_LEVEL_MAX; |
1373 | 0 | if (!((level_params->keep_level_stats >> op) & 1)) continue; |
1374 | 0 | const int tier = seq_params->tier[op]; |
1375 | 0 | const AV1LevelInfo *const level_info = level_params->level_info[op]; |
1376 | 0 | assert(level_info != NULL); |
1377 | 0 | for (int level = 0; level < SEQ_LEVELS; ++level) { |
1378 | 0 | if (!is_valid_seq_level_idx(level)) continue; |
1379 | 0 | const TARGET_LEVEL_FAIL_ID fail_id = check_level_constraints( |
1380 | 0 | level_info, level, tier, is_still_picture, profile, 1); |
1381 | 0 | if (fail_id == TARGET_LEVEL_OK) { |
1382 | 0 | seq_level_idx[op] = level; |
1383 | 0 | break; |
1384 | 0 | } |
1385 | 0 | } |
1386 | 0 | } |
1387 | | 
1388 | 0 | return AOM_CODEC_OK; |
1389 | 0 | } |
1390 | | |
1391 | | aom_codec_err_t av1_get_target_seq_level_idx(const SequenceHeader *seq_params, |
1392 | | const AV1LevelParams *level_params, |
1393 | 0 | int *target_seq_level_idx) { |
1394 | 0 | for (int op = 0; op < seq_params->operating_points_cnt_minus_1 + 1; ++op) { |
1395 | 0 | target_seq_level_idx[op] = (int)SEQ_LEVEL_MAX; |
1396 | 0 | if (!((level_params->keep_level_stats >> op) & 1)) continue; |
1397 | 0 | target_seq_level_idx[op] = level_params->target_seq_level_idx[op]; |
1398 | 0 | } |
1399 | | 
1400 | 0 | return AOM_CODEC_OK; |
1401 | 0 | } |