/src/libvpx/vp9/decoder/vp9_decoder.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license |
5 | | * that can be found in the LICENSE file in the root of the source |
6 | | * tree. An additional intellectual property rights grant can be found |
7 | | * in the file PATENTS. All contributing project authors may |
8 | | * be found in the AUTHORS file in the root of the source tree. |
9 | | */ |
10 | | |
11 | | #include <assert.h> |
12 | | #include <limits.h> |
13 | | #include <stdio.h> |
14 | | |
15 | | #include "./vp9_rtcd.h" |
16 | | #include "./vpx_dsp_rtcd.h" |
17 | | #include "./vpx_scale_rtcd.h" |
18 | | |
19 | | #include "vpx_mem/vpx_mem.h" |
20 | | #include "vpx_ports/system_state.h" |
21 | | #include "vpx_ports/vpx_once.h" |
22 | | #include "vpx_ports/vpx_timer.h" |
23 | | #include "vpx_scale/vpx_scale.h" |
24 | | #include "vpx_util/vpx_pthread.h" |
25 | | #include "vpx_util/vpx_thread.h" |
26 | | |
27 | | #include "vp9/common/vp9_alloccommon.h" |
28 | | #include "vp9/common/vp9_loopfilter.h" |
29 | | #include "vp9/common/vp9_onyxc_int.h" |
30 | | #if CONFIG_VP9_POSTPROC |
31 | | #include "vp9/common/vp9_postproc.h" |
32 | | #endif |
33 | | #include "vp9/common/vp9_quant_common.h" |
34 | | #include "vp9/common/vp9_reconintra.h" |
35 | | |
36 | | #include "vp9/decoder/vp9_decodeframe.h" |
37 | | #include "vp9/decoder/vp9_decoder.h" |
38 | | #include "vp9/decoder/vp9_detokenize.h" |
39 | | |
// One-time decoder initialization: populates the run-time CPU dispatch
// tables and the intra predictor function pointers. Invoked through
// once(), so the flag only defends against a second direct call.
static void initialize_dec(void) {
  static volatile int init_done = 0;

  if (init_done) return;
  vp9_rtcd();
  vpx_dsp_rtcd();
  vpx_scale_rtcd();
  vp9_init_intra_predictors();
  init_done = 1;
}
51 | | |
52 | 76.8k | static void vp9_dec_setup_mi(VP9_COMMON *cm) { |
53 | 76.8k | cm->mi = cm->mip + cm->mi_stride + 1; |
54 | 76.8k | cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1; |
55 | 76.8k | memset(cm->mi_grid_base, 0, |
56 | 76.8k | cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base)); |
57 | 76.8k | } |
58 | | |
// Allocates the per-superblock scratch state used by the row-based
// multi-threaded decoder: dqcoeff and eob arrays for each of the three
// planes, partition info, a reconstruction progress map, per-job sync
// primitives, and (once) per-thread data. Allocation failures are
// reported through cm->error via CHECK_MEM_ERROR / vpx_internal_error
// (which longjmp when cm->error.setjmp is armed — see the callers).
void vp9_dec_alloc_row_mt_mem(RowMTWorkerData *row_mt_worker_data,
                              VP9_COMMON *cm, int num_sbs, int max_threads,
                              int num_jobs) {
  // Reject num_sbs values that would overflow the size computation below.
  if ((size_t)num_sbs > SIZE_MAX / (sizeof(*row_mt_worker_data->dqcoeff[0])
                                    << DQCOEFFS_PER_SB_LOG2)) {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "num_sbs too big");
  }
  const size_t dqcoeff_size = ((size_t)num_sbs << DQCOEFFS_PER_SB_LOG2) *
                              sizeof(*row_mt_worker_data->dqcoeff[0]);
  row_mt_worker_data->num_jobs = num_jobs;
#if CONFIG_MULTITHREAD
  {
    int i;
    CHECK_MEM_ERROR(
        &cm->error, row_mt_worker_data->recon_sync_mutex,
        vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_mutex) * num_jobs));
    if (row_mt_worker_data->recon_sync_mutex) {
      for (i = 0; i < num_jobs; ++i) {
        pthread_mutex_init(&row_mt_worker_data->recon_sync_mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(
        &cm->error, row_mt_worker_data->recon_sync_cond,
        vpx_malloc(sizeof(*row_mt_worker_data->recon_sync_cond) * num_jobs));
    if (row_mt_worker_data->recon_sync_cond) {
      for (i = 0; i < num_jobs; ++i) {
        pthread_cond_init(&row_mt_worker_data->recon_sync_cond[i], NULL);
      }
    }
  }
#endif
  row_mt_worker_data->num_sbs = num_sbs;
  for (int plane = 0; plane < 3; ++plane) {
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->dqcoeff[plane],
                    vpx_memalign(32, dqcoeff_size));
    memset(row_mt_worker_data->dqcoeff[plane], 0, dqcoeff_size);
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->eob[plane],
                    vpx_calloc((size_t)num_sbs << EOBS_PER_SB_LOG2,
                               sizeof(*row_mt_worker_data->eob[plane])));
  }
  CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->partition,
                  vpx_calloc((size_t)num_sbs * PARTITIONS_PER_SB,
                             sizeof(*row_mt_worker_data->partition)));
  CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->recon_map,
                  vpx_calloc(num_sbs, sizeof(*row_mt_worker_data->recon_map)));

  // allocate memory for thread_data (only on the first call; it is sized
  // by max_threads rather than num_sbs, so it survives resizes)
  if (row_mt_worker_data->thread_data == NULL) {
    const size_t thread_size =
        max_threads * sizeof(*row_mt_worker_data->thread_data);
    CHECK_MEM_ERROR(&cm->error, row_mt_worker_data->thread_data,
                    vpx_memalign(32, thread_size));
  }
}
114 | | |
// Frees everything allocated by vp9_dec_alloc_row_mt_mem(). Safe to call
// with NULL or on a partially-allocated structure; every freed pointer is
// reset to NULL so a repeated call is harmless.
void vp9_dec_free_row_mt_mem(RowMTWorkerData *row_mt_worker_data) {
  if (row_mt_worker_data != NULL) {
    int plane;
#if CONFIG_MULTITHREAD
    int i;
    // Destroy each sync primitive before releasing its backing array.
    if (row_mt_worker_data->recon_sync_mutex != NULL) {
      for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
        pthread_mutex_destroy(&row_mt_worker_data->recon_sync_mutex[i]);
      }
      vpx_free(row_mt_worker_data->recon_sync_mutex);
      row_mt_worker_data->recon_sync_mutex = NULL;
    }
    if (row_mt_worker_data->recon_sync_cond != NULL) {
      for (i = 0; i < row_mt_worker_data->num_jobs; ++i) {
        pthread_cond_destroy(&row_mt_worker_data->recon_sync_cond[i]);
      }
      vpx_free(row_mt_worker_data->recon_sync_cond);
      row_mt_worker_data->recon_sync_cond = NULL;
    }
#endif
    for (plane = 0; plane < 3; ++plane) {
      vpx_free(row_mt_worker_data->eob[plane]);
      row_mt_worker_data->eob[plane] = NULL;
      vpx_free(row_mt_worker_data->dqcoeff[plane]);
      row_mt_worker_data->dqcoeff[plane] = NULL;
    }
    vpx_free(row_mt_worker_data->partition);
    row_mt_worker_data->partition = NULL;
    vpx_free(row_mt_worker_data->recon_map);
    row_mt_worker_data->recon_map = NULL;
    vpx_free(row_mt_worker_data->thread_data);
    row_mt_worker_data->thread_data = NULL;
  }
}
149 | | |
150 | 16.9k | static int vp9_dec_alloc_mi(VP9_COMMON *cm, int mi_size) { |
151 | 16.9k | cm->mip = vpx_calloc(mi_size, sizeof(*cm->mip)); |
152 | 16.9k | if (!cm->mip) return 1; |
153 | 16.9k | cm->mi_alloc_size = mi_size; |
154 | 16.9k | cm->mi_grid_base = (MODE_INFO **)vpx_calloc(mi_size, sizeof(MODE_INFO *)); |
155 | 16.9k | if (!cm->mi_grid_base) return 1; |
156 | 16.9k | return 0; |
157 | 16.9k | } |
158 | | |
159 | 31.4k | static void vp9_dec_free_mi(VP9_COMMON *cm) { |
160 | | #if CONFIG_VP9_POSTPROC |
161 | | // MFQE allocates an additional mip and swaps it with cm->mip. |
162 | | vpx_free(cm->postproc_state.prev_mip); |
163 | | cm->postproc_state.prev_mip = NULL; |
164 | | #endif |
165 | 31.4k | vpx_free(cm->mip); |
166 | 31.4k | cm->mip = NULL; |
167 | 31.4k | vpx_free(cm->mi_grid_base); |
168 | 31.4k | cm->mi_grid_base = NULL; |
169 | 31.4k | cm->mi_alloc_size = 0; |
170 | 31.4k | } |
171 | | |
// Allocates and initializes a VP9Decoder bound to the given frame-buffer
// pool. Returns NULL on failure; any error raised through cm->error while
// error.setjmp is armed longjmps back here and the partially-constructed
// decoder is torn down via vp9_decoder_remove().
VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
  // 'volatile' keeps these locals valid across the longjmp below.
  VP9Decoder *volatile const pbi = vpx_memalign(32, sizeof(*pbi));
  VP9_COMMON *volatile const cm = pbi ? &pbi->common : NULL;

  if (!cm) return NULL;

  vp9_zero(*pbi);

  // Recovery point for CHECK_MEM_ERROR failures while setjmp is armed.
  if (setjmp(cm->error.jmp)) {
    cm->error.setjmp = 0;
    vp9_decoder_remove(pbi);
    return NULL;
  }

  cm->error.setjmp = 1;

  CHECK_MEM_ERROR(&cm->error, cm->fc,
                  (FRAME_CONTEXT *)vpx_calloc(1, sizeof(*cm->fc)));
  CHECK_MEM_ERROR(
      &cm->error, cm->frame_contexts,
      (FRAME_CONTEXT *)vpx_calloc(FRAME_CONTEXTS, sizeof(*cm->frame_contexts)));

  pbi->need_resync = 1;
  // Process-wide, one-time init of RTCD tables and intra predictors.
  once(initialize_dec);

  // Initialize the references to not point to any frame buffers.
  memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
  memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));

  init_frame_indexes(cm);
  pbi->ready_for_new_data = 1;
  pbi->common.buffer_pool = pool;

  cm->bit_depth = VPX_BITS_8;
  cm->dequant_bit_depth = VPX_BITS_8;

  // Install the decoder-specific mode-info allocation hooks.
  cm->alloc_mi = vp9_dec_alloc_mi;
  cm->free_mi = vp9_dec_free_mi;
  cm->setup_mi = vp9_dec_setup_mi;

  vp9_loop_filter_init(cm);

  cm->error.setjmp = 0;

  vpx_get_worker_interface()->init(&pbi->lf_worker);
  pbi->lf_worker.thread_name = "vpx lf worker";

  return pbi;
}
221 | | |
// Tears down a decoder created by vp9_decoder_create(): ends the loop
// filter and tile workers, frees tile and row-MT state, then releases the
// common decoder state and the decoder struct itself. Accepts NULL.
void vp9_decoder_remove(VP9Decoder *pbi) {
  int i;

  if (!pbi) return;

  vpx_get_worker_interface()->end(&pbi->lf_worker);
  vpx_free(pbi->lf_worker.data1);

  for (i = 0; i < pbi->num_tile_workers; ++i) {
    VPxWorker *const worker = &pbi->tile_workers[i];
    vpx_get_worker_interface()->end(worker);
  }

  vpx_free(pbi->tile_worker_data);
  vpx_free(pbi->tile_workers);

  // lf_row_sync is only ever set up when tile workers were created.
  if (pbi->num_tile_workers > 0) {
    vp9_loop_filter_dealloc(&pbi->lf_row_sync);
  }

  if (pbi->row_mt == 1) {
    vp9_dec_free_row_mt_mem(pbi->row_mt_worker_data);
    if (pbi->row_mt_worker_data != NULL) {
      vp9_jobq_deinit(&pbi->row_mt_worker_data->jobq);
      vpx_free(pbi->row_mt_worker_data->jobq_buf);
#if CONFIG_MULTITHREAD
      pthread_mutex_destroy(&pbi->row_mt_worker_data->recon_done_mutex);
#endif
    }
    vpx_free(pbi->row_mt_worker_data);
  }

  vp9_remove_common(&pbi->common);
  vpx_free(pbi);
}
257 | | |
258 | | static int equal_dimensions(const YV12_BUFFER_CONFIG *a, |
259 | 0 | const YV12_BUFFER_CONFIG *b) { |
260 | 0 | return a->y_height == b->y_height && a->y_width == b->y_width && |
261 | 0 | a->uv_height == b->uv_height && a->uv_width == b->uv_width; |
262 | 0 | } |
263 | | |
// Copies the 'last' reference frame into *sd. Only VP9_LAST_FLAG is
// supported; any other flag, a missing reference, or mismatched buffer
// dimensions records an error in cm->error, whose code is returned.
vpx_codec_err_t vp9_copy_reference_dec(VP9Decoder *pbi,
                                       VP9_REFFRAME ref_frame_flag,
                                       YV12_BUFFER_CONFIG *sd) {
  VP9_COMMON *cm = &pbi->common;

  /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
   * encoder is using the frame buffers for. This is just a stub to keep the
   * vpxenc --test-decode functionality working, and will be replaced in a
   * later commit that adds VP9-specific controls for this functionality.
   */
  if (ref_frame_flag == VP9_LAST_FLAG) {
    const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
    if (cfg == NULL) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "No 'last' reference frame");
      return VPX_CODEC_ERROR;
    }
    if (!equal_dimensions(cfg, sd))
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Incorrect buffer dimensions");
    else
      vpx_yv12_copy_frame(cfg, sd);
  } else {
    vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame");
  }

  return cm->error.error_code;
}
292 | | |
293 | | vpx_codec_err_t vp9_set_reference_dec(VP9_COMMON *cm, |
294 | | VP9_REFFRAME ref_frame_flag, |
295 | 0 | YV12_BUFFER_CONFIG *sd) { |
296 | 0 | int idx; |
297 | 0 | YV12_BUFFER_CONFIG *ref_buf = NULL; |
298 | | |
299 | | // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the |
300 | | // encoder is using the frame buffers for. This is just a stub to keep the |
301 | | // vpxenc --test-decode functionality working, and will be replaced in a |
302 | | // later commit that adds VP9-specific controls for this functionality. |
303 | | // (Yunqing) The set_reference control depends on the following setting in |
304 | | // encoder. |
305 | | // cpi->lst_fb_idx = 0; |
306 | | // cpi->gld_fb_idx = 1; |
307 | | // cpi->alt_fb_idx = 2; |
308 | 0 | if (ref_frame_flag == VP9_LAST_FLAG) { |
309 | 0 | idx = cm->ref_frame_map[0]; |
310 | 0 | } else if (ref_frame_flag == VP9_GOLD_FLAG) { |
311 | 0 | idx = cm->ref_frame_map[1]; |
312 | 0 | } else if (ref_frame_flag == VP9_ALT_FLAG) { |
313 | 0 | idx = cm->ref_frame_map[2]; |
314 | 0 | } else { |
315 | 0 | vpx_internal_error(&cm->error, VPX_CODEC_ERROR, "Invalid reference frame"); |
316 | 0 | return cm->error.error_code; |
317 | 0 | } |
318 | | |
319 | 0 | if (idx < 0 || idx >= FRAME_BUFFERS) { |
320 | 0 | vpx_internal_error(&cm->error, VPX_CODEC_ERROR, |
321 | 0 | "Invalid reference frame map"); |
322 | 0 | return cm->error.error_code; |
323 | 0 | } |
324 | | |
325 | | // Get the destination reference buffer. |
326 | 0 | ref_buf = &cm->buffer_pool->frame_bufs[idx].buf; |
327 | |
|
328 | 0 | if (!equal_dimensions(ref_buf, sd)) { |
329 | 0 | vpx_internal_error(&cm->error, VPX_CODEC_ERROR, |
330 | 0 | "Incorrect buffer dimensions"); |
331 | 0 | } else { |
332 | | // Overwrite the reference frame buffer. |
333 | 0 | vpx_yv12_copy_frame(sd, ref_buf); |
334 | 0 | } |
335 | |
|
336 | 0 | return cm->error.error_code; |
337 | 0 | } |
338 | | |
/* If any buffer updating is signaled it should be done here.
 * Rotates cm->next_ref_frame_map into cm->ref_frame_map according to
 * refresh_frame_flags, dropping the decode-time holds (and, for refreshed
 * slots, the old reference) via decrease_ref_count, then exposes the newly
 * decoded frame through cm->frame_to_show. */
static void swap_frame_buffers(VP9Decoder *pbi) {
  int ref_index = 0, mask;
  VP9_COMMON *const cm = &pbi->common;
  BufferPool *const pool = cm->buffer_pool;
  RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;

  for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
    const int old_idx = cm->ref_frame_map[ref_index];
    // Current thread releases the holding of reference frame.
    decrease_ref_count(old_idx, frame_bufs, pool);

    // Release the reference frame in reference map.
    if (mask & 1) {
      decrease_ref_count(old_idx, frame_bufs, pool);
    }
    cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
    ++ref_index;
  }

  // Current thread releases the holding of reference frame.
  for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
    const int old_idx = cm->ref_frame_map[ref_index];
    decrease_ref_count(old_idx, frame_bufs, pool);
    cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
  }
  pbi->hold_ref_buf = 0;
  cm->frame_to_show = get_frame_new_buffer(cm);

  // Drop the decode-time hold on the just-decoded buffer.
  --frame_bufs[cm->new_fb_idx].ref_count;

  // Invalidate these references until the next frame starts.
  for (ref_index = 0; ref_index < 3; ref_index++)
    cm->frame_refs[ref_index].idx = -1;
}
374 | | |
// Error/abort path cleanup: synchronizes all worker threads, then — if the
// decoder still holds reference buffers (hold_ref_buf == 1) — drops those
// holds with the same refresh_frame_flags-driven logic as
// swap_frame_buffers().
static void release_fb_on_decoder_exit(VP9Decoder *pbi) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  VP9_COMMON *volatile const cm = &pbi->common;
  BufferPool *volatile const pool = cm->buffer_pool;
  RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
  int i;

  // Synchronize all threads immediately as a subsequent decode call may
  // cause a resize invalidating some allocations.
  winterface->sync(&pbi->lf_worker);
  for (i = 0; i < pbi->num_tile_workers; ++i) {
    winterface->sync(&pbi->tile_workers[i]);
  }

  // Release all the reference buffers if worker thread is holding them.
  if (pbi->hold_ref_buf == 1) {
    int ref_index = 0, mask;
    for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
      const int old_idx = cm->ref_frame_map[ref_index];
      // Current thread releases the holding of reference frame.
      decrease_ref_count(old_idx, frame_bufs, pool);

      // Release the reference frame in reference map.
      if (mask & 1) {
        decrease_ref_count(old_idx, frame_bufs, pool);
      }
      ++ref_index;
    }

    // Current thread releases the holding of reference frame.
    for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
      const int old_idx = cm->ref_frame_map[ref_index];
      decrease_ref_count(old_idx, frame_bufs, pool);
    }
    pbi->hold_ref_buf = 0;
  }
}
412 | | |
// Decodes one compressed frame of 'size' bytes starting at *psource.
// On success *psource is advanced past the consumed data (by
// vp9_decode_frame) and 0 is returned. Returns cm->error.error_code when
// no free frame buffer can be found, or -1 when decoding aborts via a
// longjmp into the setjmp armed below. size == 0 signals missing data.
int vp9_receive_compressed_data(VP9Decoder *pbi, size_t size,
                                const uint8_t **psource) {
  // 'volatile' keeps these locals valid across the longjmp below.
  VP9_COMMON *volatile const cm = &pbi->common;
  BufferPool *volatile const pool = cm->buffer_pool;
  RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
  const uint8_t *source = *psource;
  int retcode = 0;
  cm->error.error_code = VPX_CODEC_OK;

  if (size == 0) {
    // This is used to signal that we are missing frames.
    // We do not know if the missing frame(s) was supposed to update
    // any of the reference buffers, but we act conservative and
    // mark only the last buffer as corrupted.
    //
    // TODO(jkoleszar): Error concealment is undefined and non-normative
    // at this point, but if it becomes so, [0] may not always be the correct
    // thing to do here.
    if (cm->frame_refs[0].idx > 0) {
      assert(cm->frame_refs[0].buf != NULL);
      cm->frame_refs[0].buf->corrupted = 1;
    }
  }

  pbi->ready_for_new_data = 0;

  // Check if the previous frame was a frame without any references to it.
  if (cm->new_fb_idx >= 0 && frame_bufs[cm->new_fb_idx].ref_count == 0 &&
      !frame_bufs[cm->new_fb_idx].released) {
    pool->release_fb_cb(pool->cb_priv,
                        &frame_bufs[cm->new_fb_idx].raw_frame_buffer);
    frame_bufs[cm->new_fb_idx].released = 1;
  }

  // Find a free frame buffer. Return error if can not find any.
  cm->new_fb_idx = get_free_fb(cm);
  if (cm->new_fb_idx == INVALID_IDX) {
    pbi->ready_for_new_data = 1;
    release_fb_on_decoder_exit(pbi);
    vpx_clear_system_state();
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Unable to find free frame buffer");
    return cm->error.error_code;
  }

  // Assign a MV array to the frame buffer.
  cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];

  pbi->hold_ref_buf = 0;
  pbi->cur_buf = &frame_bufs[cm->new_fb_idx];

  // Recovery point for any error raised inside vp9_decode_frame().
  if (setjmp(cm->error.jmp)) {
    cm->error.setjmp = 0;
    pbi->ready_for_new_data = 1;
    release_fb_on_decoder_exit(pbi);
    // Release current frame.
    decrease_ref_count(cm->new_fb_idx, frame_bufs, pool);
    vpx_clear_system_state();
    return -1;
  }

  cm->error.setjmp = 1;
  vp9_decode_frame(pbi, source, source + size, psource);

  swap_frame_buffers(pbi);

  vpx_clear_system_state();

  if (!cm->show_existing_frame) {
    cm->last_show_frame = cm->show_frame;
    cm->prev_frame = cm->cur_frame;
    if (cm->seg.enabled) vp9_swap_current_and_last_seg_map(cm);
  }

  if (cm->show_frame) cm->cur_show_frame_fb_idx = cm->new_fb_idx;

  // Update progress in frame parallel decode.
  cm->last_width = cm->width;
  cm->last_height = cm->height;
  if (cm->show_frame) {
    cm->current_video_frame++;
  }

  cm->error.setjmp = 0;
  return retcode;
}
499 | | |
500 | | int vp9_get_raw_frame(VP9Decoder *pbi, YV12_BUFFER_CONFIG *sd, |
501 | 11.3k | vp9_ppflags_t *flags) { |
502 | 11.3k | VP9_COMMON *const cm = &pbi->common; |
503 | 11.3k | int ret = -1; |
504 | 11.3k | #if !CONFIG_VP9_POSTPROC |
505 | 11.3k | (void)*flags; |
506 | 11.3k | #endif |
507 | | |
508 | 11.3k | if (pbi->ready_for_new_data == 1) return ret; |
509 | | |
510 | 11.3k | pbi->ready_for_new_data = 1; |
511 | | |
512 | | /* no raw frame to show!!! */ |
513 | 11.3k | if (!cm->show_frame) return ret; |
514 | | |
515 | 8.82k | pbi->ready_for_new_data = 1; |
516 | | |
517 | | #if CONFIG_VP9_POSTPROC |
518 | | if (!cm->show_existing_frame) { |
519 | | ret = vp9_post_proc_frame(cm, sd, flags, cm->width); |
520 | | } else { |
521 | | *sd = *cm->frame_to_show; |
522 | | ret = 0; |
523 | | } |
524 | | #else |
525 | 8.82k | *sd = *cm->frame_to_show; |
526 | 8.82k | ret = 0; |
527 | 8.82k | #endif /*!CONFIG_POSTPROC*/ |
528 | 8.82k | vpx_clear_system_state(); |
529 | 8.82k | return ret; |
530 | 11.3k | } |
531 | | |
// Scans the tail of a compressed chunk for a VP9 superframe index.
// On return *count is 0 when no index marker is present, otherwise the
// number of sub-frames, with each sub-frame size written to sizes[].
// Returns VPX_CODEC_CORRUPT_FRAME when a marker is present but the index
// is truncated or its leading/trailing marker bytes disagree; otherwise
// VPX_CODEC_OK. decrypt_cb, when non-NULL, is used to read the (possibly
// encrypted) marker and index bytes.
vpx_codec_err_t vp9_parse_superframe_index(const uint8_t *data, size_t data_sz,
                                           uint32_t sizes[8], int *count,
                                           vpx_decrypt_cb decrypt_cb,
                                           void *decrypt_state) {
  // A chunk ending with a byte matching 0xc0 is an invalid chunk unless
  // it is a super frame index. If the last byte of real video compression
  // data is 0xc0 the encoder must add a 0 byte. If we have the marker but
  // not the associated matching marker byte at the front of the index we have
  // an invalid bitstream and need to return an error.

  uint8_t marker;

  assert(data_sz);
  marker = read_marker(decrypt_cb, decrypt_state, data + data_sz - 1);
  *count = 0;

  if ((marker & 0xe0) == 0xc0) {
    // Marker byte layout: 110 mmfff — mm encodes bytes-per-size (mag),
    // fff encodes frame count, both biased by 1.
    const uint32_t frames = (marker & 0x7) + 1;
    const uint32_t mag = ((marker >> 3) & 0x3) + 1;
    const size_t index_sz = 2 + mag * frames;

    // This chunk is marked as having a superframe index but doesn't have
    // enough data for it, thus it's an invalid superframe index.
    if (data_sz < index_sz) return VPX_CODEC_CORRUPT_FRAME;

    {
      const uint8_t marker2 =
          read_marker(decrypt_cb, decrypt_state, data + data_sz - index_sz);

      // This chunk is marked as having a superframe index but doesn't have
      // the matching marker byte at the front of the index therefore it's an
      // invalid chunk.
      if (marker != marker2) return VPX_CODEC_CORRUPT_FRAME;
    }

    {
      // Found a valid superframe index.
      uint32_t i, j;
      const uint8_t *x = &data[data_sz - index_sz + 1];

      // Frames has a maximum of 8 and mag has a maximum of 4.
      uint8_t clear_buffer[32];
      assert(sizeof(clear_buffer) >= frames * mag);
      if (decrypt_cb) {
        decrypt_cb(decrypt_state, x, clear_buffer, frames * mag);
        x = clear_buffer;
      }

      // Each size is stored little-endian in 'mag' bytes.
      for (i = 0; i < frames; ++i) {
        uint32_t this_sz = 0;

        for (j = 0; j < mag; ++j) this_sz |= ((uint32_t)(*x++)) << (j * 8);
        sizes[i] = this_sz;
      }
      *count = frames;
    }
  }
  return VPX_CODEC_OK;
}