/src/libavif/ext/aom/av1/encoder/level.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2019, Alliance for Open Media. All rights reserved. |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #include "av1/encoder/encoder.h" |
13 | | #include "av1/encoder/level.h" |
14 | | |
// Initializer for a level index that the specification leaves undefined:
// the level id is SEQ_LEVEL_MAX and every constraint is zeroed.
#define UNDEFINED_LEVEL \
  { .level = SEQ_LEVEL_MAX, \
    .max_picture_size = 0, \
    .max_h_size = 0, \
    .max_v_size = 0, \
    .max_display_rate = 0, \
    .max_decode_rate = 0, \
    .max_header_rate = 0, \
    .main_mbps = 0, \
    .high_mbps = 0, \
    .main_cr = 0, \
    .high_cr = 0, \
    .max_tiles = 0, \
    .max_tile_cols = 0 }

// Per-level constraint table, indexed by sequence level. The UNDEFINED_LEVEL
// slots after 2.1, 3.1 and 4.1 correspond to the unused x.2/x.3 indices at
// those levels; levels 7.x/8.x are only populated under CONFIG_CWG_C013.
// NOTE(review): values presumably mirror the level limits in Annex A of the
// AV1 specification — verify against the spec when modifying.
static const AV1LevelSpec av1_level_defs[SEQ_LEVELS] = {
  { .level = SEQ_LEVEL_2_0,
    .max_picture_size = 147456,
    .max_h_size = 2048,
    .max_v_size = 1152,
    .max_display_rate = 4423680L,
    .max_decode_rate = 5529600L,
    .max_header_rate = 150,
    .main_mbps = 1.5,
    .high_mbps = 0,
    .main_cr = 2.0,
    .high_cr = 0,
    .max_tiles = 8,
    .max_tile_cols = 4 },
  { .level = SEQ_LEVEL_2_1,
    .max_picture_size = 278784,
    .max_h_size = 2816,
    .max_v_size = 1584,
    .max_display_rate = 8363520L,
    .max_decode_rate = 10454400L,
    .max_header_rate = 150,
    .main_mbps = 3.0,
    .high_mbps = 0,
    .main_cr = 2.0,
    .high_cr = 0,
    .max_tiles = 8,
    .max_tile_cols = 4 },
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  { .level = SEQ_LEVEL_3_0,
    .max_picture_size = 665856,
    .max_h_size = 4352,
    .max_v_size = 2448,
    .max_display_rate = 19975680L,
    .max_decode_rate = 24969600L,
    .max_header_rate = 150,
    .main_mbps = 6.0,
    .high_mbps = 0,
    .main_cr = 2.0,
    .high_cr = 0,
    .max_tiles = 16,
    .max_tile_cols = 6 },
  { .level = SEQ_LEVEL_3_1,
    .max_picture_size = 1065024,
    .max_h_size = 5504,
    .max_v_size = 3096,
    .max_display_rate = 31950720L,
    .max_decode_rate = 39938400L,
    .max_header_rate = 150,
    .main_mbps = 10.0,
    .high_mbps = 0,
    .main_cr = 2.0,
    .high_cr = 0,
    .max_tiles = 16,
    .max_tile_cols = 6 },
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  { .level = SEQ_LEVEL_4_0,
    .max_picture_size = 2359296,
    .max_h_size = 6144,
    .max_v_size = 3456,
    .max_display_rate = 70778880L,
    .max_decode_rate = 77856768L,
    .max_header_rate = 300,
    .main_mbps = 12.0,
    .high_mbps = 30.0,
    .main_cr = 4.0,
    .high_cr = 4.0,
    .max_tiles = 32,
    .max_tile_cols = 8 },
  { .level = SEQ_LEVEL_4_1,
    .max_picture_size = 2359296,
    .max_h_size = 6144,
    .max_v_size = 3456,
    .max_display_rate = 141557760L,
    .max_decode_rate = 155713536L,
    .max_header_rate = 300,
    .main_mbps = 20.0,
    .high_mbps = 50.0,
    .main_cr = 4.0,
    .high_cr = 4.0,
    .max_tiles = 32,
    .max_tile_cols = 8 },
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  { .level = SEQ_LEVEL_5_0,
    .max_picture_size = 8912896,
    .max_h_size = 8192,
    .max_v_size = 4352,
    .max_display_rate = 267386880L,
    .max_decode_rate = 273715200L,
    .max_header_rate = 300,
    .main_mbps = 30.0,
    .high_mbps = 100.0,
    .main_cr = 6.0,
    .high_cr = 4.0,
    .max_tiles = 64,
    .max_tile_cols = 8 },
  { .level = SEQ_LEVEL_5_1,
    .max_picture_size = 8912896,
    .max_h_size = 8192,
    .max_v_size = 4352,
    .max_display_rate = 534773760L,
    .max_decode_rate = 547430400L,
    .max_header_rate = 300,
    .main_mbps = 40.0,
    .high_mbps = 160.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 64,
    .max_tile_cols = 8 },
  { .level = SEQ_LEVEL_5_2,
    .max_picture_size = 8912896,
    .max_h_size = 8192,
    .max_v_size = 4352,
    .max_display_rate = 1069547520L,
    .max_decode_rate = 1094860800L,
    .max_header_rate = 300,
    .main_mbps = 60.0,
    .high_mbps = 240.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 64,
    .max_tile_cols = 8 },
  { .level = SEQ_LEVEL_5_3,
    .max_picture_size = 8912896,
    .max_h_size = 8192,
    .max_v_size = 4352,
    .max_display_rate = 1069547520L,
    .max_decode_rate = 1176502272L,
    .max_header_rate = 300,
    .main_mbps = 60.0,
    .high_mbps = 240.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 64,
    .max_tile_cols = 8 },
  { .level = SEQ_LEVEL_6_0,
    .max_picture_size = 35651584,
    .max_h_size = 16384,
    .max_v_size = 8704,
    .max_display_rate = 1069547520L,
    .max_decode_rate = 1176502272L,
    .max_header_rate = 300,
    .main_mbps = 60.0,
    .high_mbps = 240.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 128,
    .max_tile_cols = 16 },
  { .level = SEQ_LEVEL_6_1,
    .max_picture_size = 35651584,
    .max_h_size = 16384,
    .max_v_size = 8704,
    .max_display_rate = 2139095040L,
    .max_decode_rate = 2189721600L,
    .max_header_rate = 300,
    .main_mbps = 100.0,
    .high_mbps = 480.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 128,
    .max_tile_cols = 16 },
  { .level = SEQ_LEVEL_6_2,
    .max_picture_size = 35651584,
    .max_h_size = 16384,
    .max_v_size = 8704,
    .max_display_rate = 4278190080L,
    .max_decode_rate = 4379443200L,
    .max_header_rate = 300,
    .main_mbps = 160.0,
    .high_mbps = 800.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 128,
    .max_tile_cols = 16 },
  { .level = SEQ_LEVEL_6_3,
    .max_picture_size = 35651584,
    .max_h_size = 16384,
    .max_v_size = 8704,
    .max_display_rate = 4278190080L,
    .max_decode_rate = 4706009088L,
    .max_header_rate = 300,
    .main_mbps = 160.0,
    .high_mbps = 800.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 128,
    .max_tile_cols = 16 },
#if CONFIG_CWG_C013
  { .level = SEQ_LEVEL_7_0,
    .max_picture_size = 142606336,
    .max_h_size = 32768,
    .max_v_size = 17408,
    .max_display_rate = 4278190080L,
    .max_decode_rate = 4706009088L,
    .max_header_rate = 300,
    .main_mbps = 160.0,
    .high_mbps = 800.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 256,
    .max_tile_cols = 32 },
  { .level = SEQ_LEVEL_7_1,
    .max_picture_size = 142606336,
    .max_h_size = 32768,
    .max_v_size = 17408,
    .max_display_rate = 8556380160L,
    .max_decode_rate = 8758886400L,
    .max_header_rate = 300,
    .main_mbps = 200.0,
    .high_mbps = 960.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 256,
    .max_tile_cols = 32 },
  { .level = SEQ_LEVEL_7_2,
    .max_picture_size = 142606336,
    .max_h_size = 32768,
    .max_v_size = 17408,
    .max_display_rate = 17112760320L,
    .max_decode_rate = 17517772800L,
    .max_header_rate = 300,
    .main_mbps = 320.0,
    .high_mbps = 1600.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 256,
    .max_tile_cols = 32 },
  { .level = SEQ_LEVEL_7_3,
    .max_picture_size = 142606336,
    .max_h_size = 32768,
    .max_v_size = 17408,
    .max_display_rate = 17112760320L,
    .max_decode_rate = 18824036352L,
    .max_header_rate = 300,
    .main_mbps = 320.0,
    .high_mbps = 1600.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 256,
    .max_tile_cols = 32 },
  { .level = SEQ_LEVEL_8_0,
    .max_picture_size = 530841600,
    .max_h_size = 65536,
    .max_v_size = 34816,
    .max_display_rate = 17112760320L,
    .max_decode_rate = 18824036352L,
    .max_header_rate = 300,
    .main_mbps = 320.0,
    .high_mbps = 1600.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 512,
    .max_tile_cols = 64 },
  { .level = SEQ_LEVEL_8_1,
    .max_picture_size = 530841600,
    .max_h_size = 65536,
    .max_v_size = 34816,
    .max_display_rate = 34225520640L,
    .max_decode_rate = 34910031052L,
    .max_header_rate = 300,
    .main_mbps = 400.0,
    .high_mbps = 1920.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 512,
    .max_tile_cols = 64 },
  { .level = SEQ_LEVEL_8_2,
    .max_picture_size = 530841600,
    .max_h_size = 65536,
    .max_v_size = 34816,
    .max_display_rate = 68451041280L,
    .max_decode_rate = 69820062105L,
    .max_header_rate = 300,
    .main_mbps = 640.0,
    .high_mbps = 3200.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 512,
    .max_tile_cols = 64 },
  { .level = SEQ_LEVEL_8_3,
    .max_picture_size = 530841600,
    .max_h_size = 65536,
    .max_v_size = 34816,
    .max_display_rate = 68451041280L,
    .max_decode_rate = 75296145408L,
    .max_header_rate = 300,
    .main_mbps = 640.0,
    .high_mbps = 3200.0,
    .main_cr = 8.0,
    .high_cr = 4.0,
    .max_tiles = 512,
    .max_tile_cols = 64 },
#else   // !CONFIG_CWG_C013
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
  UNDEFINED_LEVEL,
#endif  // CONFIG_CWG_C013
};
335 | | |
// Reasons a coded stream can fail its target level, plus two sentinels:
// TARGET_LEVEL_FAIL_IDS (the number of real failure ids, used to size the
// message table below) and TARGET_LEVEL_OK (no failure).
typedef enum {
  LUMA_PIC_SIZE_TOO_LARGE,
  LUMA_PIC_H_SIZE_TOO_LARGE,
  LUMA_PIC_V_SIZE_TOO_LARGE,
  LUMA_PIC_H_SIZE_TOO_SMALL,
  LUMA_PIC_V_SIZE_TOO_SMALL,
  TOO_MANY_TILE_COLUMNS,
  TOO_MANY_TILES,
  TILE_RATE_TOO_HIGH,
  TILE_TOO_LARGE,
  SUPERRES_TILE_WIDTH_TOO_LARGE,
  CROPPED_TILE_WIDTH_TOO_SMALL,
  CROPPED_TILE_HEIGHT_TOO_SMALL,
  TILE_WIDTH_INVALID,
  FRAME_HEADER_RATE_TOO_HIGH,
  DISPLAY_RATE_TOO_HIGH,
  DECODE_RATE_TOO_HIGH,
  CR_TOO_SMALL,
  TILE_SIZE_HEADER_RATE_TOO_HIGH,
  BITRATE_TOO_HIGH,
  DECODER_MODEL_FAIL,

  TARGET_LEVEL_FAIL_IDS,
  TARGET_LEVEL_OK,
} TARGET_LEVEL_FAIL_ID;
361 | | |
// Human-readable diagnostics, indexed by TARGET_LEVEL_FAIL_ID. The entry
// order must stay in lockstep with the enum above.
static const char *level_fail_messages[TARGET_LEVEL_FAIL_IDS] = {
  "The picture size is too large.",
  "The picture width is too large.",
  "The picture height is too large.",
  "The picture width is too small.",
  "The picture height is too small.",
  "Too many tile columns are used.",
  "Too many tiles are used.",
  "The tile rate is too high.",
  "The tile size is too large.",
  "The superres tile width is too large.",
  "The cropped tile width is less than 8.",
  "The cropped tile height is less than 8.",
  "The tile width is invalid.",
  "The frame header rate is too high.",
  "The display luma sample rate is too high.",
  "The decoded luma sample rate is too high.",
  "The compression ratio is too small.",
  "The product of max tile size and header rate is too high.",
  "The bitrate is too high.",
  "The decoder model fails.",
};
384 | | |
385 | | static double get_max_bitrate(const AV1LevelSpec *const level_spec, int tier, |
386 | 0 | BITSTREAM_PROFILE profile) { |
387 | 0 | if (level_spec->level < SEQ_LEVEL_4_0) tier = 0; |
388 | 0 | const double bitrate_basis = |
389 | 0 | (tier ? level_spec->high_mbps : level_spec->main_mbps) * 1e6; |
390 | 0 | const double bitrate_profile_factor = |
391 | 0 | profile == PROFILE_0 ? 1.0 : (profile == PROFILE_1 ? 2.0 : 3.0); |
392 | 0 | return bitrate_basis * bitrate_profile_factor; |
393 | 0 | } |
394 | | |
395 | | double av1_get_max_bitrate_for_level(AV1_LEVEL level_index, int tier, |
396 | 0 | BITSTREAM_PROFILE profile) { |
397 | 0 | assert(is_valid_seq_level_idx(level_index)); |
398 | 0 | return get_max_bitrate(&av1_level_defs[level_index], tier, profile); |
399 | 0 | } |
400 | | |
401 | | void av1_get_max_tiles_for_level(AV1_LEVEL level_index, int *const max_tiles, |
402 | 0 | int *const max_tile_cols) { |
403 | 0 | assert(is_valid_seq_level_idx(level_index)); |
404 | 0 | const AV1LevelSpec *const level_spec = &av1_level_defs[level_index]; |
405 | 0 | *max_tiles = level_spec->max_tiles; |
406 | 0 | *max_tile_cols = level_spec->max_tile_cols; |
407 | 0 | } |
408 | | |
409 | | // We assume time t to be valid if and only if t >= 0.0. |
410 | | // So INVALID_TIME can be defined as anything less than 0. |
411 | 0 | #define INVALID_TIME (-1.0) |
412 | | |
413 | | // This corresponds to "free_buffer" in the spec. |
414 | 0 | static void release_buffer(DECODER_MODEL *const decoder_model, int idx) { |
415 | 0 | assert(idx >= 0 && idx < BUFFER_POOL_MAX_SIZE); |
416 | 0 | FRAME_BUFFER *const this_buffer = &decoder_model->frame_buffer_pool[idx]; |
417 | 0 | this_buffer->decoder_ref_count = 0; |
418 | 0 | this_buffer->player_ref_count = 0; |
419 | 0 | this_buffer->display_index = -1; |
420 | 0 | this_buffer->presentation_time = INVALID_TIME; |
421 | 0 | } |
422 | | |
423 | 0 | static void initialize_buffer_pool(DECODER_MODEL *const decoder_model) { |
424 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
425 | 0 | release_buffer(decoder_model, i); |
426 | 0 | } |
427 | 0 | for (int i = 0; i < REF_FRAMES; ++i) { |
428 | 0 | decoder_model->vbi[i] = -1; |
429 | 0 | } |
430 | 0 | } |
431 | | |
432 | 0 | static int get_free_buffer(DECODER_MODEL *const decoder_model) { |
433 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
434 | 0 | const FRAME_BUFFER *const this_buffer = |
435 | 0 | &decoder_model->frame_buffer_pool[i]; |
436 | 0 | if (this_buffer->decoder_ref_count == 0 && |
437 | 0 | this_buffer->player_ref_count == 0) |
438 | 0 | return i; |
439 | 0 | } |
440 | 0 | return -1; |
441 | 0 | } |
442 | | |
443 | | static void update_ref_buffers(DECODER_MODEL *const decoder_model, int idx, |
444 | 0 | int refresh_frame_flags) { |
445 | 0 | FRAME_BUFFER *const this_buffer = &decoder_model->frame_buffer_pool[idx]; |
446 | 0 | for (int i = 0; i < REF_FRAMES; ++i) { |
447 | 0 | if (refresh_frame_flags & (1 << i)) { |
448 | 0 | const int pre_idx = decoder_model->vbi[i]; |
449 | 0 | if (pre_idx != -1) { |
450 | 0 | --decoder_model->frame_buffer_pool[pre_idx].decoder_ref_count; |
451 | 0 | } |
452 | 0 | decoder_model->vbi[i] = idx; |
453 | 0 | ++this_buffer->decoder_ref_count; |
454 | 0 | } |
455 | 0 | } |
456 | 0 | } |
457 | | |
458 | | // The time (in seconds) required to decode a frame. |
459 | | static double time_to_decode_frame(const AV1_COMMON *const cm, |
460 | 0 | int64_t max_decode_rate) { |
461 | 0 | if (cm->show_existing_frame) return 0.0; |
462 | | |
463 | 0 | const FRAME_TYPE frame_type = cm->current_frame.frame_type; |
464 | 0 | int luma_samples = 0; |
465 | 0 | if (frame_type == KEY_FRAME || frame_type == INTRA_ONLY_FRAME) { |
466 | 0 | luma_samples = cm->superres_upscaled_width * cm->height; |
467 | 0 | } else { |
468 | 0 | const int spatial_layer_dimensions_present_flag = 0; |
469 | 0 | if (spatial_layer_dimensions_present_flag) { |
470 | 0 | assert(0 && "Spatial layer dimensions not supported yet."); |
471 | 0 | } else { |
472 | 0 | const SequenceHeader *const seq_params = cm->seq_params; |
473 | 0 | const int max_frame_width = seq_params->max_frame_width; |
474 | 0 | const int max_frame_height = seq_params->max_frame_height; |
475 | 0 | luma_samples = max_frame_width * max_frame_height; |
476 | 0 | } |
477 | 0 | } |
478 | |
|
479 | 0 | return luma_samples / (double)max_decode_rate; |
480 | 0 | } |
481 | | |
482 | | // Release frame buffers that are no longer needed for decode or display. |
483 | | // It corresponds to "start_decode_at_removal_time" in the spec. |
484 | | static void release_processed_frames(DECODER_MODEL *const decoder_model, |
485 | 0 | double removal_time) { |
486 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
487 | 0 | FRAME_BUFFER *const this_buffer = &decoder_model->frame_buffer_pool[i]; |
488 | 0 | if (this_buffer->player_ref_count > 0) { |
489 | 0 | if (this_buffer->presentation_time >= 0.0 && |
490 | 0 | this_buffer->presentation_time <= removal_time) { |
491 | 0 | this_buffer->player_ref_count = 0; |
492 | 0 | if (this_buffer->decoder_ref_count == 0) { |
493 | 0 | release_buffer(decoder_model, i); |
494 | 0 | } |
495 | 0 | } |
496 | 0 | } |
497 | 0 | } |
498 | 0 | } |
499 | | |
500 | 0 | static int frames_in_buffer_pool(const DECODER_MODEL *const decoder_model) { |
501 | 0 | int frames_in_pool = 0; |
502 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
503 | 0 | const FRAME_BUFFER *const this_buffer = |
504 | 0 | &decoder_model->frame_buffer_pool[i]; |
505 | 0 | if (this_buffer->decoder_ref_count > 0 || |
506 | 0 | this_buffer->player_ref_count > 0) { |
507 | 0 | ++frames_in_pool; |
508 | 0 | } |
509 | 0 | } |
510 | 0 | return frames_in_pool; |
511 | 0 | } |
512 | | |
513 | | static double get_presentation_time(const DECODER_MODEL *const decoder_model, |
514 | 0 | int display_index) { |
515 | 0 | if (decoder_model->mode == SCHEDULE_MODE) { |
516 | 0 | assert(0 && "SCHEDULE_MODE NOT SUPPORTED"); |
517 | 0 | return INVALID_TIME; |
518 | 0 | } else { |
519 | 0 | const double initial_presentation_delay = |
520 | 0 | decoder_model->initial_presentation_delay; |
521 | | // Can't decide presentation time until the initial presentation delay is |
522 | | // known. |
523 | 0 | if (initial_presentation_delay < 0.0) return INVALID_TIME; |
524 | | |
525 | 0 | return initial_presentation_delay + |
526 | 0 | display_index * decoder_model->num_ticks_per_picture * |
527 | 0 | decoder_model->display_clock_tick; |
528 | 0 | } |
529 | 0 | } |
530 | | |
531 | 0 | #define MAX_TIME 1e16 |
532 | | static double time_next_buffer_is_free(int num_decoded_frame, |
533 | | int decoder_buffer_delay, |
534 | | const FRAME_BUFFER *frame_buffer_pool, |
535 | 0 | double current_time) { |
536 | 0 | if (num_decoded_frame == 0) { |
537 | 0 | return (double)decoder_buffer_delay / 90000.0; |
538 | 0 | } |
539 | | |
540 | 0 | double buf_free_time = MAX_TIME; |
541 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
542 | 0 | const FRAME_BUFFER *const this_buffer = &frame_buffer_pool[i]; |
543 | 0 | if (this_buffer->decoder_ref_count == 0) { |
544 | 0 | if (this_buffer->player_ref_count == 0) { |
545 | 0 | return current_time; |
546 | 0 | } |
547 | 0 | const double presentation_time = this_buffer->presentation_time; |
548 | 0 | if (presentation_time >= 0.0 && presentation_time < buf_free_time) { |
549 | 0 | buf_free_time = presentation_time; |
550 | 0 | } |
551 | 0 | } |
552 | 0 | } |
553 | 0 | return buf_free_time < MAX_TIME ? buf_free_time : INVALID_TIME; |
554 | 0 | } |
555 | | #undef MAX_TIME |
556 | | |
557 | | static double get_removal_time(int mode, int num_decoded_frame, |
558 | | int decoder_buffer_delay, |
559 | | const FRAME_BUFFER *frame_buffer_pool, |
560 | 0 | double current_time) { |
561 | 0 | if (mode == SCHEDULE_MODE) { |
562 | 0 | assert(0 && "SCHEDULE_MODE IS NOT SUPPORTED YET"); |
563 | 0 | return INVALID_TIME; |
564 | 0 | } else { |
565 | 0 | return time_next_buffer_is_free(num_decoded_frame, decoder_buffer_delay, |
566 | 0 | frame_buffer_pool, current_time); |
567 | 0 | } |
568 | 0 | } |
569 | | |
#if 0
// Print the status of the decoder model (for debugging).
// Compiled out by default; flip the #if to 1 to enable during development.
void av1_decoder_model_print_status(const DECODER_MODEL *const decoder_model) {
  printf(
      "\n status %d, num_frame %3d, num_decoded_frame %3d, "
      "num_shown_frame %3d, current time %6.2f, frames in buffer %2d, "
      "presentation delay %6.2f, total interval %6.2f\n",
      decoder_model->status, decoder_model->num_frame,
      decoder_model->num_decoded_frame, decoder_model->num_shown_frame,
      decoder_model->current_time, frames_in_buffer_pool(decoder_model),
      decoder_model->initial_presentation_delay,
      decoder_model->dfg_interval_queue.total_interval);
  // Only the first 10 pool entries are dumped.
  for (int i = 0; i < 10; ++i) {
    const FRAME_BUFFER *const this_buffer =
        &decoder_model->frame_buffer_pool[i];
    printf("buffer %d, decode count %d, display count %d, present time %6.4f\n",
           i, this_buffer->decoder_ref_count, this_buffer->player_ref_count,
           this_buffer->presentation_time);
  }
}
#endif
591 | | |
// Initializes the decoder model for one operating point at the given level.
// op_index is the operating point index.
static void decoder_model_init(const AV1_COMP *const cpi, AV1_LEVEL level,
                               int op_index,
                               DECODER_MODEL *const decoder_model) {
  decoder_model->status = DECODER_MODEL_OK;
  decoder_model->level = level;

  const AV1_COMMON *const cm = &cpi->common;
  const SequenceHeader *const seq_params = cm->seq_params;
  // The smoothing buffer is modeled at the level's maximum bitrate for this
  // operating point's tier and the stream's profile.
  decoder_model->bit_rate = get_max_bitrate(
      av1_level_defs + level, seq_params->tier[op_index], seq_params->profile);

  // TODO(huisu or anyone): implement SCHEDULE_MODE.
  decoder_model->mode = RESOURCE_MODE;
  // Buffer delays are in 90 kHz clock ticks (divided by 90000 when used).
  decoder_model->encoder_buffer_delay = 20000;
  decoder_model->decoder_buffer_delay = 70000;
  decoder_model->is_low_delay_mode = false;

  decoder_model->first_bit_arrival_time = 0.0;
  decoder_model->last_bit_arrival_time = 0.0;
  decoder_model->coded_bits = 0;

  decoder_model->removal_time = INVALID_TIME;
  decoder_model->presentation_time = INVALID_TIME;
  decoder_model->decode_samples = 0;
  decoder_model->display_samples = 0;
  decoder_model->max_decode_rate = 0.0;
  decoder_model->max_display_rate = 0.0;

  // Counters start at -1 so the first processed frame is numbered 0 after
  // the pre-increment in decoder_model_process_frame().
  decoder_model->num_frame = -1;
  decoder_model->num_decoded_frame = -1;
  decoder_model->num_shown_frame = -1;
  decoder_model->current_time = 0.0;

  initialize_buffer_pool(decoder_model);

  DFG_INTERVAL_QUEUE *const dfg_interval_queue =
      &decoder_model->dfg_interval_queue;
  dfg_interval_queue->total_interval = 0.0;
  dfg_interval_queue->head = 0;
  dfg_interval_queue->size = 0;

  // Derive the display clock from the bitstream timing info when present,
  // otherwise from the encoder's configured frame rate.
  if (seq_params->timing_info_present) {
    decoder_model->num_ticks_per_picture =
        seq_params->timing_info.num_ticks_per_picture;
    decoder_model->display_clock_tick =
        seq_params->timing_info.num_units_in_display_tick /
        seq_params->timing_info.time_scale;
  } else {
    decoder_model->num_ticks_per_picture = 1;
    decoder_model->display_clock_tick = 1.0 / cpi->framerate;
  }

  decoder_model->initial_display_delay =
      seq_params->op_params[op_index].initial_display_delay;
  decoder_model->initial_presentation_delay = INVALID_TIME;
  decoder_model->decode_rate = av1_level_defs[level].max_decode_rate;
}
650 | | |
651 | | DECODER_MODEL_STATUS av1_decoder_model_try_smooth_buf( |
652 | | const AV1_COMP *const cpi, size_t coded_bits, |
653 | 0 | const DECODER_MODEL *const decoder_model) { |
654 | 0 | DECODER_MODEL_STATUS status = DECODER_MODEL_OK; |
655 | |
|
656 | 0 | if (!decoder_model || decoder_model->status != DECODER_MODEL_OK) { |
657 | 0 | return status; |
658 | 0 | } |
659 | | |
660 | 0 | const AV1_COMMON *const cm = &cpi->common; |
661 | 0 | const int show_existing_frame = cm->show_existing_frame; |
662 | |
|
663 | 0 | size_t cur_coded_bits = decoder_model->coded_bits + coded_bits; |
664 | 0 | int num_decoded_frame = decoder_model->num_decoded_frame; |
665 | 0 | if (!show_existing_frame) ++num_decoded_frame; |
666 | |
|
667 | 0 | if (show_existing_frame) { |
668 | 0 | return status; |
669 | 0 | } else { |
670 | 0 | const double removal_time = get_removal_time( |
671 | 0 | decoder_model->mode, num_decoded_frame, |
672 | 0 | decoder_model->decoder_buffer_delay, decoder_model->frame_buffer_pool, |
673 | 0 | decoder_model->current_time); |
674 | 0 | if (removal_time < 0.0) { |
675 | 0 | status = DECODE_FRAME_BUF_UNAVAILABLE; |
676 | 0 | return status; |
677 | 0 | } |
678 | | |
679 | | // A frame with show_existing_frame being false indicates the end of a DFG. |
680 | | // Update the bits arrival time of this DFG. |
681 | 0 | const double buffer_delay = (decoder_model->encoder_buffer_delay + |
682 | 0 | decoder_model->decoder_buffer_delay) / |
683 | 0 | 90000.0; |
684 | 0 | const double latest_arrival_time = removal_time - buffer_delay; |
685 | 0 | const double first_bit_arrival_time = |
686 | 0 | AOMMAX(decoder_model->last_bit_arrival_time, latest_arrival_time); |
687 | 0 | const double last_bit_arrival_time = |
688 | 0 | first_bit_arrival_time + |
689 | 0 | (double)cur_coded_bits / decoder_model->bit_rate; |
690 | | // Smoothing buffer underflows if the last bit arrives after the removal |
691 | | // time. |
692 | 0 | if (last_bit_arrival_time > removal_time && |
693 | 0 | !decoder_model->is_low_delay_mode) { |
694 | 0 | status = SMOOTHING_BUFFER_UNDERFLOW; |
695 | 0 | return status; |
696 | 0 | } |
697 | | |
698 | | // Check if the smoothing buffer overflows. |
699 | 0 | const DFG_INTERVAL_QUEUE *const queue = &decoder_model->dfg_interval_queue; |
700 | 0 | if (queue->size >= DFG_INTERVAL_QUEUE_SIZE) { |
701 | 0 | assert(0); |
702 | 0 | } |
703 | |
|
704 | 0 | double total_interval = queue->total_interval; |
705 | 0 | int qhead = queue->head; |
706 | 0 | int qsize = queue->size; |
707 | | // Remove the DFGs with removal time earlier than last_bit_arrival_time. |
708 | 0 | while (queue->buf[qhead].removal_time <= last_bit_arrival_time && |
709 | 0 | qsize > 0) { |
710 | 0 | if (queue->buf[qhead].removal_time - first_bit_arrival_time + |
711 | 0 | total_interval > |
712 | 0 | 1.0) { |
713 | 0 | status = SMOOTHING_BUFFER_OVERFLOW; |
714 | 0 | return status; |
715 | 0 | } |
716 | 0 | total_interval -= queue->buf[qhead].last_bit_arrival_time - |
717 | 0 | queue->buf[qhead].first_bit_arrival_time; |
718 | 0 | qhead = (qhead + 1) % DFG_INTERVAL_QUEUE_SIZE; |
719 | 0 | --qsize; |
720 | 0 | } |
721 | 0 | total_interval += last_bit_arrival_time - first_bit_arrival_time; |
722 | | // The smoothing buffer can hold at most "bit_rate" bits, which is |
723 | | // equivalent to 1 second of total interval. |
724 | 0 | if (total_interval > 1.0) { |
725 | 0 | status = SMOOTHING_BUFFER_OVERFLOW; |
726 | 0 | return status; |
727 | 0 | } |
728 | | |
729 | 0 | return status; |
730 | 0 | } |
731 | 0 | } |
732 | | |
733 | | static void decoder_model_process_frame(const AV1_COMP *const cpi, |
734 | | size_t coded_bits, |
735 | 0 | DECODER_MODEL *const decoder_model) { |
736 | 0 | if (!decoder_model || decoder_model->status != DECODER_MODEL_OK) return; |
737 | | |
738 | 0 | const AV1_COMMON *const cm = &cpi->common; |
739 | 0 | const int luma_pic_size = cm->superres_upscaled_width * cm->height; |
740 | 0 | const int show_existing_frame = cm->show_existing_frame; |
741 | 0 | const int show_frame = cm->show_frame || show_existing_frame; |
742 | 0 | ++decoder_model->num_frame; |
743 | 0 | if (!show_existing_frame) ++decoder_model->num_decoded_frame; |
744 | 0 | if (show_frame) ++decoder_model->num_shown_frame; |
745 | 0 | decoder_model->coded_bits += coded_bits; |
746 | |
|
747 | 0 | int display_idx = -1; |
748 | 0 | if (show_existing_frame) { |
749 | 0 | display_idx = decoder_model->vbi[cpi->existing_fb_idx_to_show]; |
750 | 0 | if (display_idx < 0) { |
751 | 0 | decoder_model->status = DECODE_EXISTING_FRAME_BUF_EMPTY; |
752 | 0 | return; |
753 | 0 | } |
754 | 0 | if (decoder_model->frame_buffer_pool[display_idx].frame_type == KEY_FRAME) { |
755 | 0 | update_ref_buffers(decoder_model, display_idx, 0xFF); |
756 | 0 | } |
757 | 0 | } else { |
758 | 0 | const double removal_time = get_removal_time( |
759 | 0 | decoder_model->mode, decoder_model->num_decoded_frame, |
760 | 0 | decoder_model->decoder_buffer_delay, decoder_model->frame_buffer_pool, |
761 | 0 | decoder_model->current_time); |
762 | 0 | if (removal_time < 0.0) { |
763 | 0 | decoder_model->status = DECODE_FRAME_BUF_UNAVAILABLE; |
764 | 0 | return; |
765 | 0 | } |
766 | | |
767 | 0 | const int previous_decode_samples = decoder_model->decode_samples; |
768 | 0 | const double previous_removal_time = decoder_model->removal_time; |
769 | 0 | assert(previous_removal_time < removal_time); |
770 | 0 | decoder_model->removal_time = removal_time; |
771 | 0 | decoder_model->decode_samples = luma_pic_size; |
772 | 0 | const double this_decode_rate = |
773 | 0 | previous_decode_samples / (removal_time - previous_removal_time); |
774 | 0 | decoder_model->max_decode_rate = |
775 | 0 | AOMMAX(decoder_model->max_decode_rate, this_decode_rate); |
776 | | |
777 | | // A frame with show_existing_frame being false indicates the end of a DFG. |
778 | | // Update the bits arrival time of this DFG. |
779 | 0 | const double buffer_delay = (decoder_model->encoder_buffer_delay + |
780 | 0 | decoder_model->decoder_buffer_delay) / |
781 | 0 | 90000.0; |
782 | 0 | const double latest_arrival_time = removal_time - buffer_delay; |
783 | 0 | decoder_model->first_bit_arrival_time = |
784 | 0 | AOMMAX(decoder_model->last_bit_arrival_time, latest_arrival_time); |
785 | 0 | decoder_model->last_bit_arrival_time = |
786 | 0 | decoder_model->first_bit_arrival_time + |
787 | 0 | (double)decoder_model->coded_bits / decoder_model->bit_rate; |
788 | | // Smoothing buffer underflows if the last bit arrives after the removal |
789 | | // time. |
790 | 0 | if (decoder_model->last_bit_arrival_time > removal_time && |
791 | 0 | !decoder_model->is_low_delay_mode) { |
792 | 0 | decoder_model->status = SMOOTHING_BUFFER_UNDERFLOW; |
793 | 0 | return; |
794 | 0 | } |
795 | | // Reset the coded bits for the next DFG. |
796 | 0 | decoder_model->coded_bits = 0; |
797 | | |
798 | | // Check if the smoothing buffer overflows. |
799 | 0 | DFG_INTERVAL_QUEUE *const queue = &decoder_model->dfg_interval_queue; |
800 | 0 | if (queue->size >= DFG_INTERVAL_QUEUE_SIZE) { |
801 | 0 | assert(0); |
802 | 0 | } |
803 | 0 | const double first_bit_arrival_time = decoder_model->first_bit_arrival_time; |
804 | 0 | const double last_bit_arrival_time = decoder_model->last_bit_arrival_time; |
805 | | // Remove the DFGs with removal time earlier than last_bit_arrival_time. |
806 | 0 | while (queue->buf[queue->head].removal_time <= last_bit_arrival_time && |
807 | 0 | queue->size > 0) { |
808 | 0 | if (queue->buf[queue->head].removal_time - first_bit_arrival_time + |
809 | 0 | queue->total_interval > |
810 | 0 | 1.0) { |
811 | 0 | decoder_model->status = SMOOTHING_BUFFER_OVERFLOW; |
812 | 0 | return; |
813 | 0 | } |
814 | 0 | queue->total_interval -= queue->buf[queue->head].last_bit_arrival_time - |
815 | 0 | queue->buf[queue->head].first_bit_arrival_time; |
816 | 0 | queue->head = (queue->head + 1) % DFG_INTERVAL_QUEUE_SIZE; |
817 | 0 | --queue->size; |
818 | 0 | } |
819 | | // Push current DFG into the queue. |
820 | 0 | const int queue_index = |
821 | 0 | (queue->head + queue->size++) % DFG_INTERVAL_QUEUE_SIZE; |
822 | 0 | queue->buf[queue_index].first_bit_arrival_time = first_bit_arrival_time; |
823 | 0 | queue->buf[queue_index].last_bit_arrival_time = last_bit_arrival_time; |
824 | 0 | queue->buf[queue_index].removal_time = removal_time; |
825 | 0 | queue->total_interval += last_bit_arrival_time - first_bit_arrival_time; |
826 | | // The smoothing buffer can hold at most "bit_rate" bits, which is |
827 | | // equivalent to 1 second of total interval. |
828 | 0 | if (queue->total_interval > 1.0) { |
829 | 0 | decoder_model->status = SMOOTHING_BUFFER_OVERFLOW; |
830 | 0 | return; |
831 | 0 | } |
832 | | |
833 | 0 | release_processed_frames(decoder_model, removal_time); |
834 | 0 | decoder_model->current_time = |
835 | 0 | removal_time + time_to_decode_frame(cm, decoder_model->decode_rate); |
836 | |
|
837 | 0 | const int cfbi = get_free_buffer(decoder_model); |
838 | 0 | if (cfbi < 0) { |
839 | 0 | decoder_model->status = DECODE_FRAME_BUF_UNAVAILABLE; |
840 | 0 | return; |
841 | 0 | } |
842 | 0 | const CurrentFrame *const current_frame = &cm->current_frame; |
843 | 0 | decoder_model->frame_buffer_pool[cfbi].frame_type = |
844 | 0 | cm->current_frame.frame_type; |
845 | 0 | display_idx = cfbi; |
846 | 0 | update_ref_buffers(decoder_model, cfbi, current_frame->refresh_frame_flags); |
847 | |
|
848 | 0 | if (decoder_model->initial_presentation_delay < 0.0) { |
849 | | // Display can begin after required number of frames have been buffered. |
850 | 0 | if (frames_in_buffer_pool(decoder_model) >= |
851 | 0 | decoder_model->initial_display_delay - 1) { |
852 | 0 | decoder_model->initial_presentation_delay = decoder_model->current_time; |
853 | | // Update presentation time for each shown frame in the frame buffer. |
854 | 0 | for (int i = 0; i < BUFFER_POOL_MAX_SIZE; ++i) { |
855 | 0 | FRAME_BUFFER *const this_buffer = |
856 | 0 | &decoder_model->frame_buffer_pool[i]; |
857 | 0 | if (this_buffer->player_ref_count == 0) continue; |
858 | 0 | assert(this_buffer->display_index >= 0); |
859 | 0 | this_buffer->presentation_time = |
860 | 0 | get_presentation_time(decoder_model, this_buffer->display_index); |
861 | 0 | } |
862 | 0 | } |
863 | 0 | } |
864 | 0 | } |
865 | | |
866 | | // Display. |
867 | 0 | if (show_frame) { |
868 | 0 | assert(display_idx >= 0 && display_idx < BUFFER_POOL_MAX_SIZE); |
869 | 0 | FRAME_BUFFER *const this_buffer = |
870 | 0 | &decoder_model->frame_buffer_pool[display_idx]; |
871 | 0 | ++this_buffer->player_ref_count; |
872 | 0 | this_buffer->display_index = decoder_model->num_shown_frame; |
873 | 0 | const double presentation_time = |
874 | 0 | get_presentation_time(decoder_model, this_buffer->display_index); |
875 | 0 | this_buffer->presentation_time = presentation_time; |
876 | 0 | if (presentation_time >= 0.0 && |
877 | 0 | decoder_model->current_time > presentation_time) { |
878 | 0 | decoder_model->status = DISPLAY_FRAME_LATE; |
879 | 0 | return; |
880 | 0 | } |
881 | | |
882 | 0 | const int previous_display_samples = decoder_model->display_samples; |
883 | 0 | const double previous_presentation_time = decoder_model->presentation_time; |
884 | 0 | decoder_model->display_samples = luma_pic_size; |
885 | 0 | decoder_model->presentation_time = presentation_time; |
886 | 0 | if (presentation_time >= 0.0 && previous_presentation_time >= 0.0) { |
887 | 0 | assert(previous_presentation_time < presentation_time); |
888 | 0 | const double this_display_rate = |
889 | 0 | previous_display_samples / |
890 | 0 | (presentation_time - previous_presentation_time); |
891 | 0 | decoder_model->max_display_rate = |
892 | 0 | AOMMAX(decoder_model->max_display_rate, this_display_rate); |
893 | 0 | } |
894 | 0 | } |
895 | 0 | } |
896 | | |
897 | 0 | void av1_init_level_info(AV1_COMP *cpi) { |
898 | 0 | for (int op_index = 0; op_index < MAX_NUM_OPERATING_POINTS; ++op_index) { |
899 | 0 | AV1LevelInfo *const this_level_info = |
900 | 0 | cpi->ppi->level_params.level_info[op_index]; |
901 | 0 | if (!this_level_info) continue; |
902 | 0 | memset(this_level_info, 0, sizeof(*this_level_info)); |
903 | 0 | AV1LevelSpec *const level_spec = &this_level_info->level_spec; |
904 | 0 | level_spec->level = SEQ_LEVEL_MAX; |
905 | 0 | AV1LevelStats *const level_stats = &this_level_info->level_stats; |
906 | 0 | level_stats->min_cropped_tile_width = INT_MAX; |
907 | 0 | level_stats->min_cropped_tile_height = INT_MAX; |
908 | 0 | level_stats->min_frame_width = INT_MAX; |
909 | 0 | level_stats->min_frame_height = INT_MAX; |
910 | 0 | level_stats->tile_width_is_valid = 1; |
911 | 0 | level_stats->min_cr = 1e8; |
912 | |
|
913 | 0 | FrameWindowBuffer *const frame_window_buffer = |
914 | 0 | &this_level_info->frame_window_buffer; |
915 | 0 | frame_window_buffer->num = 0; |
916 | 0 | frame_window_buffer->start = 0; |
917 | |
|
918 | 0 | const AV1_COMMON *const cm = &cpi->common; |
919 | 0 | const int upscaled_width = cm->superres_upscaled_width; |
920 | 0 | const int height = cm->height; |
921 | 0 | const int pic_size = upscaled_width * height; |
922 | 0 | for (AV1_LEVEL level = SEQ_LEVEL_2_0; level < SEQ_LEVELS; ++level) { |
923 | 0 | DECODER_MODEL *const this_model = &this_level_info->decoder_models[level]; |
924 | 0 | const AV1LevelSpec *const spec = &av1_level_defs[level]; |
925 | 0 | if (upscaled_width > spec->max_h_size || height > spec->max_v_size || |
926 | 0 | pic_size > spec->max_picture_size) { |
927 | | // Turn off decoder model for this level as the frame size already |
928 | | // exceeds level constraints. |
929 | 0 | this_model->status = DECODER_MODEL_DISABLED; |
930 | 0 | } else { |
931 | 0 | decoder_model_init(cpi, level, op_index, this_model); |
932 | 0 | } |
933 | 0 | } |
934 | 0 | } |
935 | 0 | } |
936 | | |
937 | | static double get_min_cr(const AV1LevelSpec *const level_spec, int tier, |
938 | 0 | int is_still_picture, int64_t decoded_sample_rate) { |
939 | 0 | if (is_still_picture) return 0.8; |
940 | 0 | if (level_spec->level < SEQ_LEVEL_4_0) tier = 0; |
941 | 0 | const double min_cr_basis = tier ? level_spec->high_cr : level_spec->main_cr; |
942 | 0 | const double speed_adj = |
943 | 0 | (double)decoded_sample_rate / level_spec->max_display_rate; |
944 | 0 | return AOMMAX(min_cr_basis * speed_adj, 0.8); |
945 | 0 | } |
946 | | |
// Public wrapper around get_min_cr(): returns the minimum compression ratio
// required by |level_index| and |tier|, using the level's maximum decode rate
// (MaxDecodeRate from av1_level_defs) as the decoded sample rate.
double av1_get_min_cr_for_level(AV1_LEVEL level_index, int tier,
                                int is_still_picture) {
  assert(is_valid_seq_level_idx(level_index));
  const AV1LevelSpec *const level_spec = &av1_level_defs[level_index];
  return get_min_cr(level_spec, tier, is_still_picture,
                    level_spec->max_decode_rate);
}
954 | | |
// Returns the temporal parallelism ratio (num/denom) used when checking the
// tile-size/header-rate product limit. A negative |scalability_mode_idc|
// means scalability is not in use.
//
// Fix: the previous implementation only wrote the two output parameters when
// scalability_mode_idc < 0; for any non-negative mode both outputs were left
// uninitialized, so the caller would compute with indeterminate values
// (undefined behavior). Both outputs are now always assigned. The only
// current caller passes -1, so its behavior is unchanged.
static void get_temporal_parallel_params(int scalability_mode_idc,
                                         int *temporal_parallel_num,
                                         int *temporal_parallel_denom) {
  // Default: no temporal parallelism (ratio 1/1).
  *temporal_parallel_num = 1;
  *temporal_parallel_denom = 1;
  if (scalability_mode_idc < 0) return;

  // TODO(huisu@): handle scalability cases. Until then, every scalability
  // mode falls back to the default 1/1 parameters above.
  (void)scalability_mode_idc;
}
971 | | |
// Spec-mandated minimum dimensions in luma samples for cropped tiles and
// frames (see the AV1 level definitions in Annex A).
#define MIN_CROPPED_TILE_WIDTH 8
#define MIN_CROPPED_TILE_HEIGHT 8
#define MIN_FRAME_WIDTH 16
#define MIN_FRAME_HEIGHT 16
// Upper bound on MaxTileSize * MaxHeaderRate, enforced for levels above 5.1
// (scaled by the temporal parallelism ratio in check_level_constraints()).
#define MAX_TILE_SIZE_HEADER_RATE_PRODUCT 588251136
977 | | |
// Checks the statistics accumulated in |level_info| against the constraints
// of the candidate |level|. Returns TARGET_LEVEL_OK when every constraint is
// satisfied, otherwise the identifier of the first failing check. When
// |check_bitrate| is nonzero, the stream's average bitrate is additionally
// validated against the level's maximum bitrate for |tier| and |profile|.
static TARGET_LEVEL_FAIL_ID check_level_constraints(
    const AV1LevelInfo *const level_info, AV1_LEVEL level, int tier,
    int is_still_picture, BITSTREAM_PROFILE profile, int check_bitrate) {
  // Any decoder-model state other than OK or DISABLED indicates that the
  // decoder model simulation already detected a conformance failure.
  const DECODER_MODEL *const decoder_model = &level_info->decoder_models[level];
  const DECODER_MODEL_STATUS decoder_model_status = decoder_model->status;
  if (decoder_model_status != DECODER_MODEL_OK &&
      decoder_model_status != DECODER_MODEL_DISABLED) {
    return DECODER_MODEL_FAIL;
  }

  const AV1LevelSpec *const level_spec = &level_info->level_spec;
  const AV1LevelSpec *const target_level_spec = &av1_level_defs[level];
  const AV1LevelStats *const level_stats = &level_info->level_stats;
  TARGET_LEVEL_FAIL_ID fail_id = TARGET_LEVEL_OK;
  // do { ... } while (0) lets each failing check break straight to the
  // single return point below.
  do {
    // Frame-size constraints.
    if (level_spec->max_picture_size > target_level_spec->max_picture_size) {
      fail_id = LUMA_PIC_SIZE_TOO_LARGE;
      break;
    }

    if (level_spec->max_h_size > target_level_spec->max_h_size) {
      fail_id = LUMA_PIC_H_SIZE_TOO_LARGE;
      break;
    }

    if (level_spec->max_v_size > target_level_spec->max_v_size) {
      fail_id = LUMA_PIC_V_SIZE_TOO_LARGE;
      break;
    }

    // Tile-count constraints.
    if (level_spec->max_tile_cols > target_level_spec->max_tile_cols) {
      fail_id = TOO_MANY_TILE_COLUMNS;
      break;
    }

    if (level_spec->max_tiles > target_level_spec->max_tiles) {
      fail_id = TOO_MANY_TILES;
      break;
    }

    // Rate constraints (header, display, decode, tile).
    if (level_spec->max_header_rate > target_level_spec->max_header_rate) {
      fail_id = FRAME_HEADER_RATE_TOO_HIGH;
      break;
    }

    if (decoder_model->max_display_rate >
        (double)target_level_spec->max_display_rate) {
      fail_id = DISPLAY_RATE_TOO_HIGH;
      break;
    }

    // TODO(huisu): we are not using max decode rate calculated by the decoder
    // model because the model in resource availability mode always returns
    // MaxDecodeRate(as in the level definitions) as the max decode rate.
    if (level_spec->max_decode_rate > target_level_spec->max_decode_rate) {
      fail_id = DECODE_RATE_TOO_HIGH;
      break;
    }

    // MaxTileRate = MaxTiles * 120 per the level definitions.
    if (level_spec->max_tile_rate > target_level_spec->max_tiles * 120) {
      fail_id = TILE_RATE_TOO_HIGH;
      break;
    }

    // Tile-geometry constraints.
#if CONFIG_CWG_C013
    const int max_tile_size = (level >= SEQ_LEVEL_7_0 && level <= SEQ_LEVEL_8_3)
                                  ? MAX_TILE_AREA_LEVEL_7_AND_ABOVE
                                  : MAX_TILE_AREA;
#else
    const int max_tile_size = MAX_TILE_AREA;
#endif
    if (level_stats->max_tile_size > max_tile_size) {
      fail_id = TILE_TOO_LARGE;
      break;
    }

    if (level_stats->max_superres_tile_width > MAX_TILE_WIDTH) {
      fail_id = SUPERRES_TILE_WIDTH_TOO_LARGE;
      break;
    }

    if (level_stats->min_cropped_tile_width < MIN_CROPPED_TILE_WIDTH) {
      fail_id = CROPPED_TILE_WIDTH_TOO_SMALL;
      break;
    }

    if (level_stats->min_cropped_tile_height < MIN_CROPPED_TILE_HEIGHT) {
      fail_id = CROPPED_TILE_HEIGHT_TOO_SMALL;
      break;
    }

    if (level_stats->min_frame_width < MIN_FRAME_WIDTH) {
      fail_id = LUMA_PIC_H_SIZE_TOO_SMALL;
      break;
    }

    if (level_stats->min_frame_height < MIN_FRAME_HEIGHT) {
      fail_id = LUMA_PIC_V_SIZE_TOO_SMALL;
      break;
    }

    if (!level_stats->tile_width_is_valid) {
      fail_id = TILE_WIDTH_INVALID;
      break;
    }

    // Minimum compression ratio, derived from the stream's own peak decode
    // rate rather than the level's MaxDecodeRate.
    const double min_cr = get_min_cr(target_level_spec, tier, is_still_picture,
                                     level_spec->max_decode_rate);
    if (level_stats->min_cr < min_cr) {
      fail_id = CR_TOO_SMALL;
      break;
    }

    if (check_bitrate) {
      // Check average bitrate instead of max_bitrate.
      const double bitrate_limit =
          get_max_bitrate(target_level_spec, tier, profile);
      const double avg_bitrate = level_stats->total_compressed_size * 8.0 /
                                 level_stats->total_time_encoded;
      if (avg_bitrate > bitrate_limit) {
        fail_id = BITRATE_TOO_HIGH;
        break;
      }
    }

    // For levels above 5.1, bound the tile-size/header-rate product, scaled
    // by the temporal parallelism ratio (currently always 1/1).
    if (target_level_spec->level > SEQ_LEVEL_5_1) {
      int temporal_parallel_num;
      int temporal_parallel_denom;
      const int scalability_mode_idc = -1;
      get_temporal_parallel_params(scalability_mode_idc, &temporal_parallel_num,
                                   &temporal_parallel_denom);
      const int val = level_stats->max_tile_size * level_spec->max_header_rate *
                      temporal_parallel_denom / temporal_parallel_num;
      if (val > MAX_TILE_SIZE_HEADER_RATE_PRODUCT) {
        fail_id = TILE_SIZE_HEADER_RATE_TOO_HIGH;
        break;
      }
    }
  } while (0);

  return fail_id;
}
1120 | | |
1121 | | static void get_tile_stats(const AV1_COMMON *const cm, |
1122 | | const TileDataEnc *const tile_data, |
1123 | | int *max_tile_size, int *max_superres_tile_width, |
1124 | | int *min_cropped_tile_width, |
1125 | | int *min_cropped_tile_height, |
1126 | 0 | int *tile_width_valid) { |
1127 | 0 | const int tile_cols = cm->tiles.cols; |
1128 | 0 | const int tile_rows = cm->tiles.rows; |
1129 | 0 | const int superres_scale_denominator = cm->superres_scale_denominator; |
1130 | |
|
1131 | 0 | *max_tile_size = 0; |
1132 | 0 | *max_superres_tile_width = 0; |
1133 | 0 | *min_cropped_tile_width = INT_MAX; |
1134 | 0 | *min_cropped_tile_height = INT_MAX; |
1135 | 0 | *tile_width_valid = 1; |
1136 | |
|
1137 | 0 | for (int tile_row = 0; tile_row < tile_rows; ++tile_row) { |
1138 | 0 | for (int tile_col = 0; tile_col < tile_cols; ++tile_col) { |
1139 | 0 | const TileInfo *const tile_info = |
1140 | 0 | &tile_data[tile_row * cm->tiles.cols + tile_col].tile_info; |
1141 | 0 | const int tile_width = |
1142 | 0 | (tile_info->mi_col_end - tile_info->mi_col_start) * MI_SIZE; |
1143 | 0 | const int tile_height = |
1144 | 0 | (tile_info->mi_row_end - tile_info->mi_row_start) * MI_SIZE; |
1145 | 0 | const int tile_size = tile_width * tile_height; |
1146 | 0 | *max_tile_size = AOMMAX(*max_tile_size, tile_size); |
1147 | |
|
1148 | 0 | const int supperres_tile_width = |
1149 | 0 | tile_width * superres_scale_denominator / SCALE_NUMERATOR; |
1150 | 0 | *max_superres_tile_width = |
1151 | 0 | AOMMAX(*max_superres_tile_width, supperres_tile_width); |
1152 | |
|
1153 | 0 | const int cropped_tile_width = |
1154 | 0 | cm->width - tile_info->mi_col_start * MI_SIZE; |
1155 | 0 | const int cropped_tile_height = |
1156 | 0 | cm->height - tile_info->mi_row_start * MI_SIZE; |
1157 | 0 | *min_cropped_tile_width = |
1158 | 0 | AOMMIN(*min_cropped_tile_width, cropped_tile_width); |
1159 | 0 | *min_cropped_tile_height = |
1160 | 0 | AOMMIN(*min_cropped_tile_height, cropped_tile_height); |
1161 | |
|
1162 | 0 | const int is_right_most_tile = |
1163 | 0 | tile_info->mi_col_end == cm->mi_params.mi_cols; |
1164 | 0 | if (!is_right_most_tile) { |
1165 | 0 | if (av1_superres_scaled(cm)) |
1166 | 0 | *tile_width_valid &= tile_width >= 128; |
1167 | 0 | else |
1168 | 0 | *tile_width_valid &= tile_width >= 64; |
1169 | 0 | } |
1170 | 0 | } |
1171 | 0 | } |
1172 | 0 | } |
1173 | | |
1174 | | static int store_frame_record(int64_t ts_start, int64_t ts_end, |
1175 | | size_t encoded_size, int pic_size, |
1176 | | int frame_header_count, int tiles, int show_frame, |
1177 | | int show_existing_frame, |
1178 | 0 | FrameWindowBuffer *const buffer) { |
1179 | 0 | if (buffer->num < FRAME_WINDOW_SIZE) { |
1180 | 0 | ++buffer->num; |
1181 | 0 | } else { |
1182 | 0 | buffer->start = (buffer->start + 1) % FRAME_WINDOW_SIZE; |
1183 | 0 | } |
1184 | 0 | const int new_idx = (buffer->start + buffer->num - 1) % FRAME_WINDOW_SIZE; |
1185 | 0 | FrameRecord *const record = &buffer->buf[new_idx]; |
1186 | 0 | record->ts_start = ts_start; |
1187 | 0 | record->ts_end = ts_end; |
1188 | 0 | record->encoded_size_in_bytes = encoded_size; |
1189 | 0 | record->pic_size = pic_size; |
1190 | 0 | record->frame_header_count = frame_header_count; |
1191 | 0 | record->tiles = tiles; |
1192 | 0 | record->show_frame = show_frame; |
1193 | 0 | record->show_existing_frame = show_existing_frame; |
1194 | |
|
1195 | 0 | return new_idx; |
1196 | 0 | } |
1197 | | |
// Count the number of frames encoded in the last "duration" ticks, in display
// time. Walks the frame window buffer backwards from the newest record until
// a shown frame older than the window is found.
static int count_frames(const FrameWindowBuffer *const buffer,
                        int64_t duration) {
  const int current_idx = (buffer->start + buffer->num - 1) % FRAME_WINDOW_SIZE;
  // Assume current frame is shown frame.
  assert(buffer->buf[current_idx].show_frame);

  const int64_t current_time = buffer->buf[current_idx].ts_end;
  const int64_t time_limit = AOMMAX(current_time - duration, 0);
  int num_frames = 1;  // The current (newest) frame is always counted.
  int index = current_idx - 1;
  // NOTE: the `continue` below still executes the for-update clause, so
  // non-shown frames inside the window are counted in num_frames; only the
  // time-window check is skipped for them.
  for (int i = buffer->num - 2; i >= 0; --i, --index, ++num_frames) {
    if (index < 0) index = FRAME_WINDOW_SIZE - 1;  // Wrap the ring buffer.
    const FrameRecord *const record = &buffer->buf[index];
    if (!record->show_frame) continue;
    const int64_t ts_start = record->ts_start;
    // Stop once a shown frame starts before the window's lower bound.
    if (ts_start < time_limit) break;
  }

  return num_frames;
}
1220 | | |
1221 | | // Scan previously encoded frames and update level metrics accordingly. |
1222 | | static void scan_past_frames(const FrameWindowBuffer *const buffer, |
1223 | | int num_frames_to_scan, |
1224 | | AV1LevelSpec *const level_spec, |
1225 | 0 | AV1LevelStats *const level_stats) { |
1226 | 0 | const int num_frames_in_buffer = buffer->num; |
1227 | 0 | int index = (buffer->start + num_frames_in_buffer - 1) % FRAME_WINDOW_SIZE; |
1228 | 0 | int frame_headers = 0; |
1229 | 0 | int tiles = 0; |
1230 | 0 | int64_t display_samples = 0; |
1231 | 0 | int64_t decoded_samples = 0; |
1232 | 0 | size_t encoded_size_in_bytes = 0; |
1233 | 0 | for (int i = 0; i < AOMMIN(num_frames_in_buffer, num_frames_to_scan); ++i) { |
1234 | 0 | const FrameRecord *const record = &buffer->buf[index]; |
1235 | 0 | if (!record->show_existing_frame) { |
1236 | 0 | frame_headers += record->frame_header_count; |
1237 | 0 | decoded_samples += record->pic_size; |
1238 | 0 | } |
1239 | 0 | if (record->show_frame) { |
1240 | 0 | display_samples += record->pic_size; |
1241 | 0 | } |
1242 | 0 | tiles += record->tiles; |
1243 | 0 | encoded_size_in_bytes += record->encoded_size_in_bytes; |
1244 | 0 | --index; |
1245 | 0 | if (index < 0) index = FRAME_WINDOW_SIZE - 1; |
1246 | 0 | } |
1247 | 0 | level_spec->max_header_rate = |
1248 | 0 | AOMMAX(level_spec->max_header_rate, frame_headers); |
1249 | | // TODO(huisu): we can now compute max display rate with the decoder model, so |
1250 | | // these couple of lines can be removed. Keep them here for a while for |
1251 | | // debugging purpose. |
1252 | 0 | level_spec->max_display_rate = |
1253 | 0 | AOMMAX(level_spec->max_display_rate, display_samples); |
1254 | 0 | level_spec->max_decode_rate = |
1255 | 0 | AOMMAX(level_spec->max_decode_rate, decoded_samples); |
1256 | 0 | level_spec->max_tile_rate = AOMMAX(level_spec->max_tile_rate, tiles); |
1257 | 0 | level_stats->max_bitrate = |
1258 | 0 | AOMMAX(level_stats->max_bitrate, |
1259 | 0 | (int)AOMMIN(encoded_size_in_bytes * 8, (size_t)INT_MAX)); |
1260 | 0 | } |
1261 | | |
1262 | | void av1_update_level_info(AV1_COMP *cpi, size_t size, int64_t ts_start, |
1263 | 0 | int64_t ts_end) { |
1264 | 0 | AV1_COMMON *const cm = &cpi->common; |
1265 | 0 | const AV1LevelParams *const level_params = &cpi->ppi->level_params; |
1266 | |
|
1267 | 0 | const int upscaled_width = cm->superres_upscaled_width; |
1268 | 0 | const int width = cm->width; |
1269 | 0 | const int height = cm->height; |
1270 | 0 | const int tile_cols = cm->tiles.cols; |
1271 | 0 | const int tile_rows = cm->tiles.rows; |
1272 | 0 | const int tiles = tile_cols * tile_rows; |
1273 | 0 | const int luma_pic_size = upscaled_width * height; |
1274 | 0 | const int frame_header_count = cpi->frame_header_count; |
1275 | 0 | const int show_frame = cm->show_frame; |
1276 | 0 | const int show_existing_frame = cm->show_existing_frame; |
1277 | |
|
1278 | 0 | int max_tile_size; |
1279 | 0 | int min_cropped_tile_width; |
1280 | 0 | int min_cropped_tile_height; |
1281 | 0 | int max_superres_tile_width; |
1282 | 0 | int tile_width_is_valid; |
1283 | 0 | get_tile_stats(cm, cpi->tile_data, &max_tile_size, &max_superres_tile_width, |
1284 | 0 | &min_cropped_tile_width, &min_cropped_tile_height, |
1285 | 0 | &tile_width_is_valid); |
1286 | |
|
1287 | 0 | const double compression_ratio = av1_get_compression_ratio(cm, size); |
1288 | |
|
1289 | 0 | const int temporal_layer_id = cm->temporal_layer_id; |
1290 | 0 | const int spatial_layer_id = cm->spatial_layer_id; |
1291 | 0 | const SequenceHeader *const seq_params = cm->seq_params; |
1292 | 0 | const BITSTREAM_PROFILE profile = seq_params->profile; |
1293 | 0 | const int is_still_picture = seq_params->still_picture; |
1294 | | // update level_stats |
1295 | | // TODO(kyslov@) fix the implementation according to buffer model |
1296 | 0 | for (int i = 0; i < seq_params->operating_points_cnt_minus_1 + 1; ++i) { |
1297 | 0 | if (!is_in_operating_point(seq_params->operating_point_idc[i], |
1298 | 0 | temporal_layer_id, spatial_layer_id) || |
1299 | 0 | !((level_params->keep_level_stats >> i) & 1)) { |
1300 | 0 | continue; |
1301 | 0 | } |
1302 | | |
1303 | 0 | AV1LevelInfo *const level_info = level_params->level_info[i]; |
1304 | 0 | assert(level_info != NULL); |
1305 | 0 | AV1LevelStats *const level_stats = &level_info->level_stats; |
1306 | |
|
1307 | 0 | level_stats->max_tile_size = |
1308 | 0 | AOMMAX(level_stats->max_tile_size, max_tile_size); |
1309 | 0 | level_stats->max_superres_tile_width = |
1310 | 0 | AOMMAX(level_stats->max_superres_tile_width, max_superres_tile_width); |
1311 | 0 | level_stats->min_cropped_tile_width = |
1312 | 0 | AOMMIN(level_stats->min_cropped_tile_width, min_cropped_tile_width); |
1313 | 0 | level_stats->min_cropped_tile_height = |
1314 | 0 | AOMMIN(level_stats->min_cropped_tile_height, min_cropped_tile_height); |
1315 | 0 | level_stats->tile_width_is_valid &= tile_width_is_valid; |
1316 | 0 | level_stats->min_frame_width = AOMMIN(level_stats->min_frame_width, width); |
1317 | 0 | level_stats->min_frame_height = |
1318 | 0 | AOMMIN(level_stats->min_frame_height, height); |
1319 | 0 | level_stats->min_cr = AOMMIN(level_stats->min_cr, compression_ratio); |
1320 | 0 | level_stats->total_compressed_size += (double)size; |
1321 | | |
1322 | | // update level_spec |
1323 | | // TODO(kyslov@) update all spec fields |
1324 | 0 | AV1LevelSpec *const level_spec = &level_info->level_spec; |
1325 | 0 | level_spec->max_picture_size = |
1326 | 0 | AOMMAX(level_spec->max_picture_size, luma_pic_size); |
1327 | 0 | level_spec->max_h_size = |
1328 | 0 | AOMMAX(level_spec->max_h_size, cm->superres_upscaled_width); |
1329 | 0 | level_spec->max_v_size = AOMMAX(level_spec->max_v_size, height); |
1330 | 0 | level_spec->max_tile_cols = AOMMAX(level_spec->max_tile_cols, tile_cols); |
1331 | 0 | level_spec->max_tiles = AOMMAX(level_spec->max_tiles, tiles); |
1332 | | |
1333 | | // Store info. of current frame into FrameWindowBuffer. |
1334 | 0 | FrameWindowBuffer *const buffer = &level_info->frame_window_buffer; |
1335 | 0 | store_frame_record(ts_start, ts_end, size, luma_pic_size, |
1336 | 0 | frame_header_count, tiles, show_frame, |
1337 | 0 | show_existing_frame, buffer); |
1338 | 0 | if (show_frame) { |
1339 | | // Count the number of frames encoded in the past 1 second. |
1340 | 0 | const int encoded_frames_in_last_second = |
1341 | 0 | show_frame ? count_frames(buffer, TICKS_PER_SEC) : 0; |
1342 | 0 | scan_past_frames(buffer, encoded_frames_in_last_second, level_spec, |
1343 | 0 | level_stats); |
1344 | 0 | level_stats->total_time_encoded += |
1345 | 0 | (cpi->time_stamps.prev_ts_end - cpi->time_stamps.prev_ts_start) / |
1346 | 0 | (double)TICKS_PER_SEC; |
1347 | 0 | } |
1348 | |
|
1349 | 0 | DECODER_MODEL *const decoder_models = level_info->decoder_models; |
1350 | 0 | for (AV1_LEVEL level = SEQ_LEVEL_2_0; level < SEQ_LEVELS; ++level) { |
1351 | 0 | decoder_model_process_frame(cpi, size << 3, &decoder_models[level]); |
1352 | 0 | } |
1353 | | |
1354 | | // Check whether target level is met. |
1355 | 0 | const AV1_LEVEL target_level = level_params->target_seq_level_idx[i]; |
1356 | 0 | if (target_level < SEQ_LEVELS && cpi->oxcf.strict_level_conformance) { |
1357 | 0 | assert(is_valid_seq_level_idx(target_level)); |
1358 | 0 | const int tier = seq_params->tier[i]; |
1359 | 0 | const TARGET_LEVEL_FAIL_ID fail_id = check_level_constraints( |
1360 | 0 | level_info, target_level, tier, is_still_picture, profile, 0); |
1361 | 0 | if (fail_id != TARGET_LEVEL_OK) { |
1362 | 0 | const int target_level_major = 2 + (target_level >> 2); |
1363 | 0 | const int target_level_minor = target_level & 3; |
1364 | 0 | aom_internal_error(cm->error, AOM_CODEC_ERROR, |
1365 | 0 | "Failed to encode to the target level %d_%d. %s", |
1366 | 0 | target_level_major, target_level_minor, |
1367 | 0 | level_fail_messages[fail_id]); |
1368 | 0 | } |
1369 | 0 | } |
1370 | 0 | } |
1371 | 0 | } |
1372 | | |
1373 | | aom_codec_err_t av1_get_seq_level_idx(const SequenceHeader *seq_params, |
1374 | | const AV1LevelParams *level_params, |
1375 | 0 | int *seq_level_idx) { |
1376 | 0 | const int is_still_picture = seq_params->still_picture; |
1377 | 0 | const BITSTREAM_PROFILE profile = seq_params->profile; |
1378 | 0 | for (int op = 0; op < seq_params->operating_points_cnt_minus_1 + 1; ++op) { |
1379 | 0 | seq_level_idx[op] = (int)SEQ_LEVEL_MAX; |
1380 | 0 | if (!((level_params->keep_level_stats >> op) & 1)) continue; |
1381 | 0 | const int tier = seq_params->tier[op]; |
1382 | 0 | const AV1LevelInfo *const level_info = level_params->level_info[op]; |
1383 | 0 | assert(level_info != NULL); |
1384 | 0 | for (int level = 0; level < SEQ_LEVELS; ++level) { |
1385 | 0 | if (!is_valid_seq_level_idx(level)) continue; |
1386 | 0 | const TARGET_LEVEL_FAIL_ID fail_id = check_level_constraints( |
1387 | 0 | level_info, level, tier, is_still_picture, profile, 1); |
1388 | 0 | if (fail_id == TARGET_LEVEL_OK) { |
1389 | 0 | seq_level_idx[op] = level; |
1390 | 0 | break; |
1391 | 0 | } |
1392 | 0 | } |
1393 | 0 | } |
1394 | |
|
1395 | 0 | return AOM_CODEC_OK; |
1396 | 0 | } |
1397 | | |
1398 | | aom_codec_err_t av1_get_target_seq_level_idx(const SequenceHeader *seq_params, |
1399 | | const AV1LevelParams *level_params, |
1400 | 0 | int *target_seq_level_idx) { |
1401 | 0 | for (int op = 0; op < seq_params->operating_points_cnt_minus_1 + 1; ++op) { |
1402 | 0 | target_seq_level_idx[op] = (int)SEQ_LEVEL_MAX; |
1403 | 0 | if (!((level_params->keep_level_stats >> op) & 1)) continue; |
1404 | 0 | target_seq_level_idx[op] = level_params->target_seq_level_idx[op]; |
1405 | 0 | } |
1406 | |
|
1407 | 0 | return AOM_CODEC_OK; |
1408 | 0 | } |