/src/libvpx/vp9/decoder/vp9_decoder.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license |
5 | | * that can be found in the LICENSE file in the root of the source |
6 | | * tree. An additional intellectual property rights grant can be found |
7 | | * in the file PATENTS. All contributing project authors may |
8 | | * be found in the AUTHORS file in the root of the source tree. |
9 | | */ |
10 | | |
11 | | #include <assert.h> |
12 | | #include <limits.h> |
13 | | #include <stdio.h> |
14 | | |
15 | | #include "./vp9_rtcd.h" |
16 | | #include "./vpx_dsp_rtcd.h" |
17 | | #include "./vpx_scale_rtcd.h" |
18 | | |
19 | | #include "vpx_mem/vpx_mem.h" |
20 | | #include "vpx_ports/system_state.h" |
21 | | #include "vpx_ports/vpx_once.h" |
22 | | #include "vpx_ports/vpx_timer.h" |
23 | | #include "vpx_scale/vpx_scale.h" |
24 | | #include "vpx_util/vpx_pthread.h" |
25 | | #include "vpx_util/vpx_thread.h" |
26 | | |
27 | | #include "vp9/common/vp9_alloccommon.h" |
28 | | #include "vp9/common/vp9_loopfilter.h" |
29 | | #include "vp9/common/vp9_onyxc_int.h" |
30 | | #if CONFIG_VP9_POSTPROC |
31 | | #include "vp9/common/vp9_postproc.h" |
32 | | #endif |
33 | | #include "vp9/common/vp9_quant_common.h" |
34 | | #include "vp9/common/vp9_reconintra.h" |
35 | | |
36 | | #include "vp9/decoder/vp9_decodeframe.h" |
37 | | #include "vp9/decoder/vp9_decoder.h" |
38 | | #include "vp9/decoder/vp9_detokenize.h" |
39 | | |
// One-time initialization of decoder-wide function tables (RTCD dispatch)
// and the intra-predictor tables. Guarded so repeated calls are no-ops;
// callers serialize this through once().
static void initialize_dec(void) {
  static volatile int init_done = 0;

  if (init_done) return;

  vp9_rtcd();
  vpx_dsp_rtcd();
  vpx_scale_rtcd();
  vp9_init_intra_predictors();
  init_done = 1;
}
51 | | |
52 | 84.9k | static void vp9_dec_setup_mi(VP9_COMMON *cm) { |
53 | 84.9k | cm->mi = cm->mip + cm->mi_stride + 1; |
54 | 84.9k | cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1; |
55 | 84.9k | memset(cm->mi_grid_base, 0, |
56 | 84.9k | cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base)); |
57 | 84.9k | } |
58 | | |
// Allocates the per-superblock scratch buffers used by row-based
// multi-threaded decoding: per-plane dqcoeff/eob arrays, partition and
// recon maps, per-job sync primitives, and (once) the per-thread data.
// On allocation failure CHECK_MEM_ERROR longjmps via cm->error, so any
// code after a failing allocation does not run; partially built state is
// released later by vp9_dec_free_row_mt_mem().
void vp9_dec_alloc_row_mt_mem(RowMTWorkerData *row_mt_worker_data,
                              VP9_COMMON *cm, int num_sbs, int max_threads,
                              int num_jobs) {
  int plane;
  const size_t dqcoeff_size = (num_sbs << DQCOEFFS_PER_SB_LOG2) *
                              sizeof(*row_mt_worker_data->dqcoeff[0]);
  row_mt_worker_data->num_jobs = num_jobs;
#if CONFIG_MULTITHREAD
  {
    int i;
    // One mutex/cond pair per job row for recon synchronization.
    CHECK_MEM_ERROR(
        &cm->error, row_mt_worker_data->recon_sync_mutex,
        vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_mutex) * num_jobs));
    if (row_mt_worker_data->recon_sync_mutex) {
      for (i = 0; i < num_jobs; ++i) {
        pthread_mutex_init(&row_mt_worker_data->recon_sync_mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(
        &cm->error, row_mt_worker_data->recon_sync_cond,
        vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_cond) * num_jobs));
    if (row_mt_worker_data->recon_sync_cond) {
      for (i = 0; i < num_jobs; ++i) {
        pthread_cond_init(&row_mt_worker_data->recon_sync_cond[i], NULL);
      }
    }
  }
#endif
  row_mt_worker_data->num_sbs = num_sbs;
  for (plane = 0; plane < 3; ++plane) {
    // dqcoeff buffers are 32-byte aligned for SIMD access and zeroed
    // explicitly; eob buffers come zeroed from vpx_calloc.
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->dqcoeff[plane],
                    vpx_memalign(32, dqcoeff_size));
    memset(row_mt_worker_data->dqcoeff[plane], 0, dqcoeff_size);
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->eob[plane],
                    vpx_calloc(num_sbs << EOBS_PER_SB_LOG2,
                               sizeof(*row_mt_worker_data->eob[plane])));
  }
  CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->partition,
                  vpx_calloc(num_sbs * PARTITIONS_PER_SB,
                             sizeof(*row_mt_worker_data->partition)));
  CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->recon_map,
                  vpx_calloc(num_sbs, sizeof(*row_mt_worker_data->recon_map)));

  // allocate memory for thread_data
  // (only on first call — thread_data survives reallocation of the
  // size-dependent buffers above)
  if (row_mt_worker_data->thread_data == NULL) {
    const size_t thread_size =
        max_threads * sizeof(*row_mt_worker_data->thread_data);
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->thread_data,
                    vpx_memalign(32, thread_size));
  }
}
111 | | |
// Releases everything vp9_dec_alloc_row_mt_mem() allocated. Safe to call
// with a NULL pointer or with partially-initialized state (every pointer
// is checked/NULLed), which matters because allocation can longjmp midway.
void vp9_dec_free_row_mt_mem(RowMTWorkerData *row_mt_worker_data) {
  if (row_mt_worker_data != NULL) {
    int plane;
#if CONFIG_MULTITHREAD
    int i;
    // Destroy each sync primitive before freeing the arrays that hold them.
    if (row_mt_worker_data->recon_sync_mutex != NULL) {
      for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
        pthread_mutex_destroy(&row_mt_worker_data->recon_sync_mutex[i]);
      }
      vpx_free(row_mt_worker_data->recon_sync_mutex);
      row_mt_worker_data->recon_sync_mutex = NULL;
    }
    if (row_mt_worker_data->recon_sync_cond != NULL) {
      for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
        pthread_cond_destroy(&row_mt_worker_data->recon_sync_cond[i]);
      }
      vpx_free(row_mt_worker_data->recon_sync_cond);
      row_mt_worker_data->recon_sync_cond = NULL;
    }
#endif
    for (plane = 0; plane < 3; ++plane) {
      vpx_free(row_mt_worker_data->eob[plane]);
      row_mt_worker_data->eob[plane] = NULL;
      vpx_free(row_mt_worker_data->dqcoeff[plane]);
      row_mt_worker_data->dqcoeff[plane] = NULL;
    }
    vpx_free(row_mt_worker_data->partition);
    row_mt_worker_data->partition = NULL;
    vpx_free(row_mt_worker_data->recon_map);
    row_mt_worker_data->recon_map = NULL;
    vpx_free(row_mt_worker_data->thread_data);
    row_mt_worker_data->thread_data = NULL;
  }
}
146 | | |
147 | 19.6k | static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) { |
148 | 19.6k | cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip)); |
149 | 19.6k | if (!cm->mip) return 1; |
150 | 19.6k | cm->mi_alloc_size = mi_size; |
151 | 19.6k | cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *)); |
152 | 19.6k | if (!cm->mi_grid_base) return 1; |
153 | 19.6k | return 0; |
154 | 19.6k | } |
155 | | |
156 | 35.8k | static void vp9_dec_free_mi(VP9_COMMON *cm) { |
157 | | #if CONFIG_VP9_POSTPROC |
158 | | // MFQE allocates an additional mip and swaps it with cm->mip. |
159 | | vpx_free(cm->postproc_state.prev_mip); |
160 | | cm->postproc_state.prev_mip = NULL; |
161 | | #endif |
162 | 35.8k | vpx_free(cm->mip); |
163 | 35.8k | cm->mip = NULL; |
164 | 35.8k | vpx_free(cm->mi_grid_base); |
165 | 35.8k | cm->mi_grid_base = NULL; |
166 | 35.8k | cm->mi_alloc_size = 0; |
167 | 35.8k | } |
168 | | |
// Creates and initializes a VP9 decoder instance bound to the given frame
// buffer pool. Returns NULL on allocation failure. Uses setjmp so that any
// CHECK_MEM_ERROR longjmp during setup unwinds here and tears down the
// partially-built decoder; pbi/cm are volatile so they stay valid across
// the longjmp.
VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
  VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
  VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;

  if (!cm) return NULL;

  vp9_zero(*pbi);

  // Error recovery target: any vpx_internal_error() below lands here.
  if (setjmp(cm->error.jmp)) {
    cm->error.setjmp = 0;
    vp9_decoder_remove(pbi);
    return NULL;
  }

  cm->error.setjmp = 1;

  CHECK_MEM_ERROR(&cm->error, cm->fc,
                  (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
  CHECK_MEM_ERROR(
      &cm->error, cm->frame_contexts,
      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));

  pbi->need_resync = 1;
  once(initialize_dec);

  // Initialize the references to not point to any frame buffers.
  memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
  memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));

  init_frame_indexes(cm);
  pbi->ready_for_new_data = 1;
  pbi->common.buffer_pool = pool;

  cm->bit_depth = VPX_BITS_8;
  cm->dequant_bit_depth = VPX_BITS_8;

  // Install the decoder-specific mode-info allocation hooks.
  cm->alloc_mi = vp9_dec_alloc_mi;
  cm->free_mi = vp9_dec_free_mi;
  cm->setup_mi = vp9_dec_setup_mi;

  vp9_loop_filter_init(cm);

  cm->error.setjmp = 0;

  vpx_get_worker_interface()->init(&pbi->lf_worker);
  pbi->lf_worker.thread_name = "vpx lf worker";

  return pbi;
}
218 | | |
// Destroys a decoder created by vp9_decoder_create(). Workers are joined
// before their data is freed; tolerates a partially-constructed pbi (as
// left by the create-path longjmp). Safe to call with NULL.
void vp9_decoder_remove(VP9Decoder *pbi) {
  int i;

  if (!pbi) return;

  // Stop the loopfilter worker first, then each tile worker.
  vpx_get_worker_interface()->end(&pbi->lf_worker);
  vpx_free(pbi->lf_worker.data1);

  for (i = 0; i < pbi->num_tile_workers; ++i) {
    VPxWorker *const worker = &pbi->tile_workers[i];
    vpx_get_worker_interface()->end(worker);
  }

  vpx_free(pbi->tile_worker_data);
  vpx_free(pbi->tile_workers);

  // lf_row_sync is only allocated when tile workers were created.
  if (pbi->num_tile_workers > 0) {
    vp9_loop_filter_dealloc(&pbi->lf_row_sync);
  }

  if (pbi->row_mt == 1) {
    vp9_dec_free_row_mt_mem(pbi->row_mt_worker_data);
    if (pbi->row_mt_worker_data != NULL) {
      vp9_jobq_deinit(&pbi->row_mt_worker_data->jobq);
      vpx_free(pbi->row_mt_worker_data->jobq_buf);
#if CONFIG_MULTITHREAD
      pthread_mutex_destroy(&pbi->row_mt_worker_data->recon_done_mutex);
#endif
    }
    vpx_free(pbi->row_mt_worker_data);
  }

  vp9_remove_common(&pbi->common);
  vpx_free(pbi);
}
254 | | |
255 | | static int equal_dimensions(const YV12_BUFFER_CONFIG *a, |
256 | 0 | const YV12_BUFFER_CONFIG *b) { |
257 | 0 | return a->y_height == b->y_height && a->y_width == b->y_width && |
258 | 0 | a->uv_height == b->uv_height && a->uv_width == b->uv_width; |
259 | 0 | } |
260 | | |
// Copies the decoder's 'last' reference frame into the caller-supplied
// buffer sd. Only VP9_LAST_FLAG is supported; any other flag, a missing
// reference, or mismatched dimensions records an error in cm->error and
// returns its code (VPX_CODEC_OK on success).
vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
                                       VP9_REFFRAME ref_frame_flag,
                                       YV12_BUFFER_CONFIG *sd) {
  VP9_COMMON *cm = &pbi->common;

  /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
   * encoder is using the frame buffers for. This is just a stub to keep the
   * vpxenc --test-decode functionality working, and will be replaced in a
   * later commit that adds VP9-specific controls for this functionality.
   */
  if (ref_frame_flag == VP9_LAST_FLAG) {
    const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
    if (cfg == NULL) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "No 'last' reference frame");
      return VPX_CODEC_ERROR;
    }
    if (!equal_dimensions(cfg, sd))
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Incorrect buffer dimensions");
    else
      vpx_yv12_copy_frame(cfg, sd);
  } else {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
  }

  return cm->error.error_code;
}
289 | | |
290 | | vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm, |
291 | | VP9_REFFRAME ref_frame_flag, |
292 | 0 | YV12_BUFFER_CONFIG *sd) { |
293 | 0 | int idx; |
294 | 0 | YV12_BUFFER_CONFIG *ref_buf = NULL; |
295 | | |
296 | | // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the |
297 | | // encoder is using the frame buffers for. This is just a stub to keep the |
298 | | // vpxenc --test-decode functionality working, and will be replaced in a |
299 | | // later commit that adds VP9-specific controls for this functionality. |
300 | | // (Yunqing) The set_reference control depends on the following setting in |
301 | | // encoder. |
302 | | // cpi->lst_fb_idx = 0; |
303 | | // cpi->gld_fb_idx = 1; |
304 | | // cpi->alt_fb_idx = 2; |
305 | 0 | if (ref_frame_flag == VP9_LAST_FLAG) { |
306 | 0 | idx = cm->ref_frame_map[0]; |
307 | 0 | } else if (ref_frame_flag == VP9_GOLD_FLAG) { |
308 | 0 | idx = cm->ref_frame_map[1]; |
309 | 0 | } else if (ref_frame_flag == VP9_ALT_FLAG) { |
310 | 0 | idx = cm->ref_frame_map[2]; |
311 | 0 | } else { |
312 | 0 | vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame"); |
313 | 0 | return cm->error.error_code; |
314 | 0 | } |
315 | | |
316 | 0 | if (idx < 0 || idx >= FRAME_BUFFERS) { |
317 | 0 | vpx_internal_error(&cm->error, VPX_CODEC_ERROR, |
318 | 0 | "Invalid reference frame map"); |
319 | 0 | return cm->error.error_code; |
320 | 0 | } |
321 | | |
322 | | // Get the destination reference buffer. |
323 | 0 | ref_buf = &cm->buffer_pool->frame_bufs[idx].buf; |
324 | |
|
325 | 0 | if (!equal_dimensions(ref_buf, sd)) { |
326 | 0 | vpx_internal_error(&cm->error, VPX_CODEC_ERROR, |
327 | 0 | "Incorrect buffer dimensions"); |
328 | 0 | } else { |
329 | | // Overwrite the reference frame buffer. |
330 | 0 | vpx_yv12_copy_frame(sd, ref_buf); |
331 | 0 | } |
332 | |
|
333 | 0 | return cm->error.error_code; |
334 | 0 | } |
335 | | |
336 | | /* If any buffer updating is signaled it should be done here. */ |
/* If any buffer updating is signaled it should be done here. */
// After a successful frame decode: drops this thread's temporary holds on
// the old reference buffers, commits next_ref_frame_map into
// ref_frame_map, publishes the newly decoded frame as frame_to_show, and
// invalidates the per-frame reference slots. Reference counts are
// decremented once for the thread's hold and a second time when the slot
// is actually being replaced (mask bit set).
static void swap_frame_buffers(VP9Decoder *pbi) {
  int ref_index = 0, mask;
  VP9_COMMON *const cm = &pbi->common;
  BufferPool *const pool = cm->buffer_pool;
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;

  for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
    const int old_idx = cm->ref_frame_map[ref_index];
    // Current thread releases the holding of reference frame.
    decrease_ref_count(old_idx, frame_bufs, pool);

    // Release the reference frame in reference map.
    if (mask & 1) {
      decrease_ref_count(old_idx, frame_bufs, pool);
    }
    cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
    ++ref_index;
  }

  // Current thread releases the holding of reference frame.
  // (skipped when showing an existing frame — those slots were not held)
  for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
    const int old_idx = cm->ref_frame_map[ref_index];
    decrease_ref_count(old_idx, frame_bufs, pool);
    cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
  }
  pbi->hold_ref_buf = 0;
  cm->frame_to_show = get_frame_new_buffer(cm);

  // Drop the decode-time hold on the new frame buffer; remaining counts
  // come from the reference map / show path.
  --frame_bufs[cm->new_fb_idx].ref_count;

  // Invalidate these references until the next frame starts.
  for (ref_index = 0; ref_index < 3; ref_index++)
    cm->frame_refs[ref_index].idx = -1;
}
371 | | |
// Error-path cleanup invoked when a decode aborts (longjmp) or no free
// frame buffer exists: joins all workers, then releases any reference
// buffer holds the decode had taken (mirrors swap_frame_buffers' counting
// without committing next_ref_frame_map).
static void release_fb_on_decoder_exit(VP9Decoder *pbi) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  VP9_COMMON *volatile const cm = &pbi->common;
  BufferPool *volatile const pool = cm->buffer_pool;
  RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
  int i;

  // Synchronize all threads immediately as a subsequent decode call may
  // cause a resize invalidating some allocations.
  winterface->sync(&pbi->lf_worker);
  for (i = 0; i < pbi->num_tile_workers; ++i) {
    winterface->sync(&pbi->tile_workers[i]);
  }

  // Release all the reference buffers if worker thread is holding them.
  if (pbi->hold_ref_buf == 1) {
    int ref_index = 0, mask;
    for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
      const int old_idx = cm->ref_frame_map[ref_index];
      // Current thread releases the holding of reference frame.
      decrease_ref_count(old_idx, frame_bufs, pool);

      // Release the reference frame in reference map.
      if (mask & 1) {
        decrease_ref_count(old_idx, frame_bufs, pool);
      }
      ++ref_index;
    }

    // Current thread releases the holding of reference frame.
    for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
      const int old_idx = cm->ref_frame_map[ref_index];
      decrease_ref_count(old_idx, frame_bufs, pool);
    }
    pbi->hold_ref_buf = 0;
  }
}
409 | | |
// Decodes one compressed frame. On success returns 0 and advances
// *psource past the consumed data; on decode error (longjmp from
// vp9_decode_frame) returns -1 after releasing all buffer holds; on
// "no free frame buffer" returns the error code from cm->error.
// size == 0 signals a missing frame and marks the last reference corrupt.
int vp9_receive_compressed_data(VP9Decoder *pbi, size_t size,
                                const uint8_t **psource) {
  VP9_COMMON *volatile const cm = &pbi->common;
  BufferPool *volatile const pool = cm->buffer_pool;
  RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
  const uint8_t *source = *psource;
  int retcode = 0;
  cm->error.error_code = VPX_CODEC_OK;

  if (size == 0) {
    // This is used to signal that we are missing frames.
    // We do not know if the missing frame(s) was supposed to update
    // any of the reference buffers, but we act conservative and
    // mark only the last buffer as corrupted.
    //
    // TODO(jkoleszar): Error concealment is undefined and non-normative
    // at this point, but if it becomes so, [0] may not always be the correct
    // thing to do here.
    if (cm->frame_refs[0].idx > 0) {
      assert(cm->frame_refs[0].buf != NULL);
      cm->frame_refs[0].buf->corrupted = 1;
    }
  }

  pbi->ready_for_new_data = 0;

  // Check if the previous frame was a frame without any references to it.
  if (cm->new_fb_idx >= 0 && frame_bufs[cm->new_fb_idx].ref_count == 0 &&
      !frame_bufs[cm->new_fb_idx].released) {
    pool->release_fb_cb(pool->cb_priv,
                        &frame_bufs[cm->new_fb_idx].raw_frame_buffer);
    frame_bufs[cm->new_fb_idx].released = 1;
  }

  // Find a free frame buffer. Return error if can not find any.
  cm->new_fb_idx = get_free_fb(cm);
  if (cm->new_fb_idx == INVALID_IDX) {
    pbi->ready_for_new_data = 1;
    release_fb_on_decoder_exit(pbi);
    vpx_clear_system_state();
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Unable to find free frame buffer");
    return cm->error.error_code;
  }

  // Assign a MV array to the frame buffer.
  cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];

  pbi->hold_ref_buf = 0;
  pbi->cur_buf = &frame_bufs[cm->new_fb_idx];

  // Any vpx_internal_error() inside vp9_decode_frame() lands here; release
  // every hold taken so far, including the new frame buffer itself.
  if (setjmp(cm->error.jmp)) {
    cm->error.setjmp = 0;
    pbi->ready_for_new_data = 1;
    release_fb_on_decoder_exit(pbi);
    // Release current frame.
    decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
    vpx_clear_system_state();
    return -1;
  }

  cm->error.setjmp = 1;
  vp9_decode_frame(pbi, source, source + size, psource);

  swap_frame_buffers(pbi);

  vpx_clear_system_state();

  if (!cm->show_existing_frame) {
    cm->last_show_frame = cm->show_frame;
    cm->prev_frame = cm->cur_frame;
    if (cm->seg.enabled) vp9_swap_current_and_last_seg_map(cm);
  }

  if (cm->show_frame) cm->cur_show_frame_fb_idx = cm->new_fb_idx;

  // Update progress in frame parallel decode.
  cm->last_width = cm->width;
  cm->last_height = cm->height;
  if (cm->show_frame) {
    cm->current_video_frame++;
  }

  cm->error.setjmp = 0;
  return retcode;
}
496 | | |
497 | | int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd, |
498 | 169k | vp9_ppflags_t *flags) { |
499 | 169k | VP9_COMMON *const cm = &pbi->common; |
500 | 169k | int ret = -1; |
501 | 169k | #if !CONFIG_VP9_POSTPROC |
502 | 169k | (void)*flags; |
503 | 169k | #endif |
504 | | |
505 | 169k | if (pbi->ready_for_new_data == 1) return ret; |
506 | | |
507 | 21.4k | pbi->ready_for_new_data = 1; |
508 | | |
509 | | /* no raw frame to show!!! */ |
510 | 21.4k | if (!cm->show_frame) return ret; |
511 | | |
512 | 18.7k | pbi->ready_for_new_data = 1; |
513 | | |
514 | | #if CONFIG_VP9_POSTPROC |
515 | | if (!cm->show_existing_frame) { |
516 | | ret = vp9_post_proc_frame(cm, sd, flags, cm->width); |
517 | | } else { |
518 | | *sd = *cm->frame_to_show; |
519 | | ret = 0; |
520 | | } |
521 | | #else |
522 | 18.7k | *sd = *cm->frame_to_show; |
523 | 18.7k | ret = 0; |
524 | 18.7k | #endif /*!CONFIG_POSTPROC*/ |
525 | 18.7k | vpx_clear_system_state(); |
526 | 18.7k | return ret; |
527 | 21.4k | } |
528 | | |
// Parses a VP9 superframe index from the tail of a chunk. On success
// writes each sub-frame size into sizes[] and the sub-frame count into
// *count; a chunk without an index yields *count == 0 and VPX_CODEC_OK.
// decrypt_cb (optional) is used to read the marker/index bytes from
// encrypted input. Returns VPX_CODEC_CORRUPT_FRAME for a truncated or
// mismatched index.
vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
                                           uint32_t sizes[8], int *count,
                                           vpx_decrypt_cb decrypt_cb,
                                           void *decrypt_state) {
  // A chunk ending with a byte matching 0xc0 is an invalid chunk unless
  // it is a super frame index. If the last byte of real video compression
  // data is 0xc0 the encoder must add a 0 byte. If we have the marker but
  // not the associated matching marker byte at the front of the index we have
  // an invalid bitstream and need to return an error.

  uint8_t marker;

  assert(data_sz);
  marker = read_marker(decrypt_cb, decrypt_state, data + data_sz - 1);
  *count = 0;

  // Marker byte layout: 110 (3 bits) | mag-1 (2 bits) | frames-1 (3 bits).
  if ((marker & 0xe0) == 0xc0) {
    const uint32_t frames = (marker & 0x7) + 1;
    const uint32_t mag = ((marker >> 3) & 0x3) + 1;
    const size_t index_sz = 2 + mag * frames;

    // This chunk is marked as having a superframe index but doesn't have
    // enough data for it, thus it's an invalid superframe index.
    if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;

    {
      const uint8_t marker2 =
          read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);

      // This chunk is marked as having a superframe index but doesn't have
      // the matching marker byte at the front of the index therefore it's an
      // invalid chunk.
      if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
    }

    {
      // Found a valid superframe index.
      uint32_t i, j;
      const uint8_t *x = &data[data_sz - index_sz + 1];

      // Frames has a maximum of 8 and mag has a maximum of 4.
      uint8_t clear_buffer[32];
      assert(sizeof(clear_buffer) >= frames * mag);
      if (decrypt_cb) {
        decrypt_cb(decrypt_state, x, clear_buffer, frames * mag);
        x = clear_buffer;
      }

      // Each size is mag bytes, little-endian.
      for (i = 0; i < frames; ++i) {
        uint32_t this_sz = 0;

        for (j = 0; j < mag; ++j) this_sz |= ((uint32_t)(*x++)) << (j * 8);
        sizes[i] = this_sz;
      }
      *count = frames;
    }
  }
  return VPX_CODEC_OK;
}
583 | 8.85k | *count = frames; |
584 | 8.85k | } |
585 | 8.85k | } |
586 | 145k | return VPX_CODEC_OK; |
587 | 146k | } |