/src/ffmpeg/libavcodec/decode.c
Line | Count | Source |
1 | | /* |
2 | | * generic decoding-related code |
3 | | * |
4 | | * This file is part of FFmpeg. |
5 | | * |
6 | | * FFmpeg is free software; you can redistribute it and/or |
7 | | * modify it under the terms of the GNU Lesser General Public |
8 | | * License as published by the Free Software Foundation; either |
9 | | * version 2.1 of the License, or (at your option) any later version. |
10 | | * |
11 | | * FFmpeg is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | | * Lesser General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU Lesser General Public |
17 | | * License along with FFmpeg; if not, write to the Free Software |
18 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | | */ |
20 | | |
21 | | #include <stdint.h> |
22 | | #include <stdbool.h> |
23 | | #include <string.h> |
24 | | |
25 | | #include "config.h" |
26 | | |
27 | | #if CONFIG_ICONV |
28 | | # include <iconv.h> |
29 | | #endif |
30 | | |
31 | | #include "libavutil/avassert.h" |
32 | | #include "libavutil/channel_layout.h" |
33 | | #include "libavutil/common.h" |
34 | | #include "libavutil/emms.h" |
35 | | #include "libavutil/frame.h" |
36 | | #include "libavutil/hwcontext.h" |
37 | | #include "libavutil/imgutils.h" |
38 | | #include "libavutil/internal.h" |
39 | | #include "libavutil/mastering_display_metadata.h" |
40 | | #include "libavutil/mem.h" |
41 | | #include "libavutil/stereo3d.h" |
42 | | |
43 | | #include "avcodec.h" |
44 | | #include "avcodec_internal.h" |
45 | | #include "bytestream.h" |
46 | | #include "bsf.h" |
47 | | #include "codec_desc.h" |
48 | | #include "codec_internal.h" |
49 | | #include "decode.h" |
50 | | #include "exif.h" |
51 | | #include "hwaccel_internal.h" |
52 | | #include "hwconfig.h" |
53 | | #include "internal.h" |
54 | | #include "lcevcdec.h" |
55 | | #include "packet_internal.h" |
56 | | #include "progressframe.h" |
57 | | #include "libavutil/refstruct.h" |
58 | | #include "thread.h" |
59 | | #include "threadprogress.h" |
60 | | |
61 | | typedef struct DecodeContext { |
62 | | AVCodecInternal avci; |
63 | | |
64 | | /** |
65 | | * This is set to AV_FRAME_FLAG_KEY for decoders of intra-only formats |
66 | | * (those whose codec descriptor has AV_CODEC_PROP_INTRA_ONLY set) |
67 | | * to set the flag generically. |
68 | | */ |
69 | | int intra_only_flag; |
70 | | |
71 | | /** |
72 | | * This is set to AV_PICTURE_TYPE_I for intra only video decoders |
73 | | * and to AV_PICTURE_TYPE_NONE for other decoders. It is used to set |
74 | | * the AVFrame's pict_type before the decoder receives it. |
75 | | */ |
76 | | enum AVPictureType initial_pict_type; |
77 | | |
78 | | /* to prevent infinite loop on errors when draining */ |
79 | | int nb_draining_errors; |
80 | | |
81 | | /** |
82 | | * The caller has submitted a NULL packet on input. |
83 | | */ |
84 | | int draining_started; |
85 | | |
86 | | int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far |
87 | | int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far |
88 | | int64_t pts_correction_last_pts; /// PTS of the last frame |
89 | | int64_t pts_correction_last_dts; /// DTS of the last frame |
90 | | |
91 | | /** |
92 | | * Bitmask indicating for which side data types we prefer user-supplied |
93 | | * (global or attached to packets) side data over bytestream. |
94 | | */ |
95 | | uint64_t side_data_pref_mask; |
96 | | |
97 | | #if CONFIG_LIBLCEVC_DEC |
98 | | struct { |
99 | | FFLCEVCContext *ctx; |
100 | | int frame; |
101 | | int width; |
102 | | int height; |
103 | | } lcevc; |
104 | | #endif |
105 | | } DecodeContext; |
106 | | |
107 | | static DecodeContext *decode_ctx(AVCodecInternal *avci) |
108 | 1.33G | { |
109 | 1.33G | return (DecodeContext *)avci; |
110 | 1.33G | } |
111 | | |
112 | | static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt) |
113 | 190M | { |
114 | 190M | int ret; |
115 | 190M | size_t size; |
116 | 190M | const uint8_t *data; |
117 | 190M | uint32_t flags; |
118 | 190M | int64_t val; |
119 | | |
120 | 190M | data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size); |
121 | 190M | if (!data) |
122 | 190M | return 0; |
123 | | |
124 | 0 | if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) { |
125 | 0 | av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter " |
126 | 0 | "changes, but PARAM_CHANGE side data was sent to it.\n"); |
127 | 0 | ret = AVERROR(EINVAL); |
128 | 0 | goto fail2; |
129 | 0 | } |
130 | | |
131 | 0 | if (size < 4) |
132 | 0 | goto fail; |
133 | | |
134 | 0 | flags = bytestream_get_le32(&data); |
135 | 0 | size -= 4; |
136 | |
137 | 0 | if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) { |
138 | 0 | if (size < 4) |
139 | 0 | goto fail; |
140 | 0 | val = bytestream_get_le32(&data); |
141 | 0 | if (val <= 0 || val > INT_MAX) { |
142 | 0 | av_log(avctx, AV_LOG_ERROR, "Invalid sample rate"); |
143 | 0 | ret = AVERROR_INVALIDDATA; |
144 | 0 | goto fail2; |
145 | 0 | } |
146 | 0 | avctx->sample_rate = val; |
147 | 0 | size -= 4; |
148 | 0 | } |
149 | 0 | if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) { |
150 | 0 | if (size < 8) |
151 | 0 | goto fail; |
152 | 0 | avctx->width = bytestream_get_le32(&data); |
153 | 0 | avctx->height = bytestream_get_le32(&data); |
154 | 0 | size -= 8; |
155 | 0 | ret = ff_set_dimensions(avctx, avctx->width, avctx->height); |
156 | 0 | if (ret < 0) |
157 | 0 | goto fail2; |
158 | 0 | } |
159 | | |
160 | 0 | return 0; |
161 | 0 | fail: |
162 | 0 | av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n"); |
163 | 0 | ret = AVERROR_INVALIDDATA; |
164 | 0 | fail2: |
165 | 0 | if (ret < 0) { |
166 | 0 | av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); |
167 | 0 | if (avctx->err_recognition & AV_EF_EXPLODE) |
168 | 0 | return ret; |
169 | 0 | } |
170 | 0 | return 0; |
171 | 0 | } |
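Illustrative sketch (not part of decode.c): apply_param_change() above expects AV_PKT_DATA_PARAM_CHANGE side data laid out as a little-endian flags word followed by the optional fields it announces. A caller could attach such side data roughly as below; the helper name attach_param_change and the new_sample_rate parameter are placeholders, not FFmpeg API.

    #include <libavcodec/avcodec.h>
    #include <libavutil/intreadwrite.h>

    static int attach_param_change(AVPacket *pkt, int new_sample_rate)
    {
        /* 4 bytes of flags + 4 bytes for the announced sample rate */
        uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, 8);
        if (!sd)
            return AVERROR(ENOMEM);
        AV_WL32(sd,     AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE);
        AV_WL32(sd + 4, new_sample_rate);
        return 0;
    }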
172 | | |
173 | | static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt) |
174 | 190M | { |
175 | 190M | int ret = 0; |
176 | | |
177 | 190M | av_packet_unref(avci->last_pkt_props); |
178 | 190M | if (pkt) { |
179 | 190M | ret = av_packet_copy_props(avci->last_pkt_props, pkt); |
180 | 190M | } |
181 | 190M | return ret; |
182 | 190M | } |
183 | | |
184 | | static int decode_bsfs_init(AVCodecContext *avctx) |
185 | 1.07M | { |
186 | 1.07M | AVCodecInternal *avci = avctx->internal; |
187 | 1.07M | const FFCodec *const codec = ffcodec(avctx->codec); |
188 | 1.07M | int ret; |
189 | | |
190 | 1.07M | if (avci->bsf) |
191 | 0 | return 0; |
192 | | |
193 | 1.07M | ret = av_bsf_list_parse_str(codec->bsfs, &avci->bsf); |
194 | 1.07M | if (ret < 0) { |
195 | 0 | av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", codec->bsfs, av_err2str(ret)); |
196 | 0 | if (ret != AVERROR(ENOMEM)) |
197 | 0 | ret = AVERROR_BUG; |
198 | 0 | goto fail; |
199 | 0 | } |
200 | | |
201 | | /* We do not currently have an API for passing the input timebase into decoders, |
202 | | * but no filters used here should actually need it. |
203 | | * So we make up some plausible-looking number (the MPEG 90kHz timebase) */ |
204 | 1.07M | avci->bsf->time_base_in = (AVRational){ 1, 90000 }; |
205 | 1.07M | ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx); |
206 | 1.07M | if (ret < 0) |
207 | 0 | goto fail; |
208 | | |
209 | 1.07M | ret = av_bsf_init(avci->bsf); |
210 | 1.07M | if (ret < 0) |
211 | 373 | goto fail; |
212 | | |
213 | 1.07M | return 0; |
214 | 373 | fail: |
215 | 373 | av_bsf_free(&avci->bsf); |
216 | 373 | return ret; |
217 | 1.07M | } |
218 | | |
219 | | #if !HAVE_THREADS |
220 | | #define ff_thread_get_packet(avctx, pkt) (AVERROR_BUG) |
221 | | #define ff_thread_receive_frame(avctx, frame, flags) (AVERROR_BUG) |
222 | | #endif |
223 | | |
224 | | static int decode_get_packet(AVCodecContext *avctx, AVPacket *pkt) |
225 | 468M | { |
226 | 468M | AVCodecInternal *avci = avctx->internal; |
227 | 468M | int ret; |
228 | | |
229 | 468M | ret = av_bsf_receive_packet(avci->bsf, pkt); |
230 | 468M | if (ret < 0) |
231 | 278M | return ret; |
232 | | |
233 | 190M | if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) { |
234 | 190M | ret = extract_packet_props(avctx->internal, pkt); |
235 | 190M | if (ret < 0) |
236 | 0 | goto finish; |
237 | 190M | } |
238 | | |
239 | 190M | ret = apply_param_change(avctx, pkt); |
240 | 190M | if (ret < 0) |
241 | 0 | goto finish; |
242 | | |
243 | 190M | return 0; |
244 | 0 | finish: |
245 | 0 | av_packet_unref(pkt); |
246 | 0 | return ret; |
247 | 190M | } |
248 | | |
249 | | int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt) |
250 | 277M | { |
251 | 277M | AVCodecInternal *avci = avctx->internal; |
252 | 277M | DecodeContext *dc = decode_ctx(avci); |
253 | | |
254 | 277M | if (avci->draining) |
255 | 10.9k | return AVERROR_EOF; |
256 | | |
257 | | /* If we are a worker thread, get the next packet from the threading |
258 | | * context. Otherwise we are the main (user-facing) context, so we get the |
259 | | * next packet from the input filterchain. |
260 | | */ |
261 | 277M | if (avctx->internal->is_frame_mt) |
262 | 0 | return ff_thread_get_packet(avctx, pkt); |
263 | | |
264 | 468M | while (1) { |
265 | 468M | int ret = decode_get_packet(avctx, pkt); |
266 | 468M | if (ret == AVERROR(EAGAIN) && |
267 | 277M | (!AVPACKET_IS_EMPTY(avci->buffer_pkt) || dc->draining_started)) { |
268 | 190M | ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt); |
269 | 190M | if (ret >= 0) |
270 | 190M | continue; |
271 | | |
272 | 0 | av_packet_unref(avci->buffer_pkt); |
273 | 0 | } |
274 | | |
275 | 277M | if (ret == AVERROR_EOF) |
276 | 926k | avci->draining = 1; |
277 | 277M | return ret; |
278 | 468M | } |
279 | 277M | } |
280 | | |
281 | | /** |
282 | | * Attempt to guess proper monotonic timestamps for decoded video frames |
283 | | * which might have incorrect times. Input timestamps may wrap around, in |
284 | | * which case the output will as well. |
285 | | * |
286 | | * @param reordered_pts the pts field of the decoded AVPacket, as passed through |
287 | | * AVFrame.pts |
288 | | * @param dts the dts field of the decoded AVPacket |
289 | | * @return one of the input values, may be AV_NOPTS_VALUE |
290 | | */ |
291 | | static int64_t guess_correct_pts(DecodeContext *dc, |
292 | | int64_t reordered_pts, int64_t dts) |
293 | 45.9M | { |
294 | 45.9M | int64_t pts = AV_NOPTS_VALUE; |
295 | | |
296 | 45.9M | if (dts != AV_NOPTS_VALUE) { |
297 | 910 | dc->pts_correction_num_faulty_dts += dts <= dc->pts_correction_last_dts; |
298 | 910 | dc->pts_correction_last_dts = dts; |
299 | 45.9M | } else if (reordered_pts != AV_NOPTS_VALUE) |
300 | 13.6k | dc->pts_correction_last_dts = reordered_pts; |
301 | | |
302 | 45.9M | if (reordered_pts != AV_NOPTS_VALUE) { |
303 | 14.5k | dc->pts_correction_num_faulty_pts += reordered_pts <= dc->pts_correction_last_pts; |
304 | 14.5k | dc->pts_correction_last_pts = reordered_pts; |
305 | 45.8M | } else if(dts != AV_NOPTS_VALUE) |
306 | 0 | dc->pts_correction_last_pts = dts; |
307 | | |
308 | 45.9M | if ((dc->pts_correction_num_faulty_pts<=dc->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE) |
309 | 45.9M | && reordered_pts != AV_NOPTS_VALUE) |
310 | 14.0k | pts = reordered_pts; |
311 | 45.8M | else |
312 | 45.8M | pts = dts; |
313 | | |
314 | 45.9M | return pts; |
315 | 45.9M | } |
316 | | |
317 | | static int discard_samples(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples) |
318 | 70.2M | { |
319 | 70.2M | AVCodecInternal *avci = avctx->internal; |
320 | 70.2M | AVFrameSideData *side; |
321 | 70.2M | uint32_t discard_padding = 0; |
322 | 70.2M | uint8_t skip_reason = 0; |
323 | 70.2M | uint8_t discard_reason = 0; |
324 | | |
325 | 70.2M | side = av_frame_get_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES); |
326 | 70.2M | if (side && side->size >= 10) { |
327 | 0 | avci->skip_samples = AV_RL32(side->data); |
328 | 0 | avci->skip_samples = FFMAX(0, avci->skip_samples); |
329 | 0 | discard_padding = AV_RL32(side->data + 4); |
330 | 0 | av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n", |
331 | 0 | avci->skip_samples, (int)discard_padding); |
332 | 0 | skip_reason = AV_RL8(side->data + 8); |
333 | 0 | discard_reason = AV_RL8(side->data + 9); |
334 | 0 | } |
335 | | |
336 | 70.2M | if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) { |
337 | 0 | if (!side && (avci->skip_samples || discard_padding)) |
338 | 0 | side = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10); |
339 | 0 | if (side && (avci->skip_samples || discard_padding)) { |
340 | 0 | AV_WL32(side->data, avci->skip_samples); |
341 | 0 | AV_WL32(side->data + 4, discard_padding); |
342 | 0 | AV_WL8(side->data + 8, skip_reason); |
343 | 0 | AV_WL8(side->data + 9, discard_reason); |
344 | 0 | avci->skip_samples = 0; |
345 | 0 | } |
346 | 0 | return 0; |
347 | 0 | } |
348 | 70.2M | av_frame_remove_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES); |
349 | | |
350 | 70.2M | if ((frame->flags & AV_FRAME_FLAG_DISCARD)) { |
351 | 39.6M | avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples); |
352 | 39.6M | *discarded_samples += frame->nb_samples; |
353 | 39.6M | return AVERROR(EAGAIN); |
354 | 39.6M | } |
355 | | |
356 | 30.6M | if (avci->skip_samples > 0) { |
357 | 61.9k | if (frame->nb_samples <= avci->skip_samples){ |
358 | 56.4k | *discarded_samples += frame->nb_samples; |
359 | 56.4k | avci->skip_samples -= frame->nb_samples; |
360 | 56.4k | av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n", |
361 | 56.4k | avci->skip_samples); |
362 | 56.4k | return AVERROR(EAGAIN); |
363 | 56.4k | } else { |
364 | 5.51k | av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples, |
365 | 5.51k | frame->nb_samples - avci->skip_samples, avctx->ch_layout.nb_channels, frame->format); |
366 | 5.51k | if (avctx->pkt_timebase.num && avctx->sample_rate) { |
367 | 0 | int64_t diff_ts = av_rescale_q(avci->skip_samples, |
368 | 0 | (AVRational){1, avctx->sample_rate}, |
369 | 0 | avctx->pkt_timebase); |
370 | 0 | if (frame->pts != AV_NOPTS_VALUE) |
371 | 0 | frame->pts += diff_ts; |
372 | 0 | if (frame->pkt_dts != AV_NOPTS_VALUE) |
373 | 0 | frame->pkt_dts += diff_ts; |
374 | 0 | if (frame->duration >= diff_ts) |
375 | 0 | frame->duration -= diff_ts; |
376 | 0 | } else |
377 | 5.51k | av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n"); |
378 | | |
379 | 5.51k | av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n", |
380 | 5.51k | avci->skip_samples, frame->nb_samples); |
381 | 5.51k | *discarded_samples += avci->skip_samples; |
382 | 5.51k | frame->nb_samples -= avci->skip_samples; |
383 | 5.51k | avci->skip_samples = 0; |
384 | 5.51k | } |
385 | 61.9k | } |
386 | | |
387 | 30.5M | if (discard_padding > 0 && discard_padding <= frame->nb_samples) { |
388 | 0 | if (discard_padding == frame->nb_samples) { |
389 | 0 | *discarded_samples += frame->nb_samples; |
390 | 0 | return AVERROR(EAGAIN); |
391 | 0 | } else { |
392 | 0 | if (avctx->pkt_timebase.num && avctx->sample_rate) { |
393 | 0 | int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding, |
394 | 0 | (AVRational){1, avctx->sample_rate}, |
395 | 0 | avctx->pkt_timebase); |
396 | 0 | frame->duration = diff_ts; |
397 | 0 | } else |
398 | 0 | av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n"); |
399 | |
400 | 0 | av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n", |
401 | 0 | (int)discard_padding, frame->nb_samples); |
402 | 0 | frame->nb_samples -= discard_padding; |
403 | 0 | } |
404 | 0 | } |
405 | | |
406 | 30.5M | return 0; |
407 | 30.5M | } |
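Illustrative sketch (not part of decode.c): the 10-byte AV_PKT_DATA_SKIP_SAMPLES payload consumed above is a le32 count of samples to skip from the start, a le32 count to discard from the end, and one reason byte each. A demuxer or caller could produce it like this; attach_skip_samples, encoder_delay and trailing_padding are placeholder names.

    #include <libavcodec/avcodec.h>
    #include <libavutil/intreadwrite.h>

    static int attach_skip_samples(AVPacket *pkt,
                                   uint32_t encoder_delay, uint32_t trailing_padding)
    {
        uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
        if (!sd)
            return AVERROR(ENOMEM);
        AV_WL32(sd,     encoder_delay);     /* samples skipped from the start */
        AV_WL32(sd + 4, trailing_padding);  /* samples discarded from the end */
        sd[8] = 0;                          /* skip reason */
        sd[9] = 0;                          /* discard reason */
        return 0;
    }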
408 | | |
409 | | /* |
410 | | * The core of the receive_frame_wrapper for the decoders implementing |
411 | | * the simple API. Certain decoders might consume partial packets without |
412 | | * returning any output, so this function needs to be called in a loop until it |
413 | | * returns EAGAIN. |
414 | | **/ |
415 | | static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples) |
416 | 304M | { |
417 | 304M | AVCodecInternal *avci = avctx->internal; |
418 | 304M | DecodeContext *dc = decode_ctx(avci); |
419 | 304M | AVPacket *const pkt = avci->in_pkt; |
420 | 304M | const FFCodec *const codec = ffcodec(avctx->codec); |
421 | 304M | int got_frame, consumed; |
422 | 304M | int ret; |
423 | | |
424 | 304M | if (!pkt->data && !avci->draining) { |
425 | 274M | av_packet_unref(pkt); |
426 | 274M | ret = ff_decode_get_packet(avctx, pkt); |
427 | 274M | if (ret < 0 && ret != AVERROR_EOF) |
428 | 85.3M | return ret; |
429 | 274M | } |
430 | | |
431 | | // Some codecs (at least wma lossless) will crash when feeding drain packets |
432 | | // after EOF was signaled. |
433 | 218M | if (avci->draining_done) |
434 | 154k | return AVERROR_EOF; |
435 | | |
436 | 218M | if (!pkt->data && |
437 | 1.17M | !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) |
438 | 722k | return AVERROR_EOF; |
439 | | |
440 | 217M | got_frame = 0; |
441 | | |
442 | 217M | frame->pict_type = dc->initial_pict_type; |
443 | 217M | frame->flags |= dc->intra_only_flag; |
444 | 217M | consumed = codec->cb.decode(avctx, frame, &got_frame, pkt); |
445 | | |
446 | 217M | if (!(codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS)) |
447 | 217M | frame->pkt_dts = pkt->dts; |
448 | 217M | emms_c(); |
449 | | |
450 | 217M | if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) { |
451 | 94.9M | ret = (!got_frame || frame->flags & AV_FRAME_FLAG_DISCARD) |
452 | 94.9M | ? AVERROR(EAGAIN) |
453 | 94.9M | : 0; |
454 | 122M | } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) { |
455 | 122M | ret = !got_frame ? AVERROR(EAGAIN) |
456 | 122M | : discard_samples(avctx, frame, discarded_samples); |
457 | 122M | } else |
458 | 0 | av_assert0(0); |
459 | | |
460 | 217M | if (ret == AVERROR(EAGAIN)) |
461 | 172M | av_frame_unref(frame); |
462 | | |
463 | | // FF_CODEC_CB_TYPE_DECODE decoders must not return AVERROR(EAGAIN) |
464 | | // code later will add AVERROR(EAGAIN) to a pointer |
465 | 217M | av_assert0(consumed != AVERROR(EAGAIN)); |
466 | 217M | if (consumed < 0) |
467 | 102M | ret = consumed; |
468 | 217M | if (consumed >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO) |
469 | 34.2M | consumed = pkt->size; |
470 | | |
471 | 217M | if (!ret) |
472 | 44.9M | av_assert0(frame->buf[0]); |
473 | 217M | if (ret == AVERROR(EAGAIN)) |
474 | 70.2M | ret = 0; |
475 | | |
476 | | /* do not stop draining when got_frame != 0 or ret < 0 */ |
477 | 217M | if (avci->draining && !got_frame) { |
478 | 171k | if (ret < 0) { |
479 | | /* prevent infinite loop if a decoder wrongly always returns an error on draining */ |
480 | | /* reasonable nb_errors_max = maximum b frames + thread count */ |
481 | 20.3k | int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ? |
482 | 20.3k | avctx->thread_count : 1); |
483 | | |
484 | 20.3k | if (decode_ctx(avci)->nb_draining_errors++ >= nb_errors_max) { |
485 | 0 | av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. " |
486 | 0 | "Stop draining and force EOF.\n"); |
487 | 0 | avci->draining_done = 1; |
488 | 0 | ret = AVERROR_BUG; |
489 | 0 | } |
490 | 151k | } else { |
491 | 151k | avci->draining_done = 1; |
492 | 151k | } |
493 | 171k | } |
494 | | |
495 | 217M | if (consumed >= pkt->size || ret < 0) { |
496 | 185M | av_packet_unref(pkt); |
497 | 185M | } else { |
498 | 31.9M | pkt->data += consumed; |
499 | 31.9M | pkt->size -= consumed; |
500 | 31.9M | pkt->pts = AV_NOPTS_VALUE; |
501 | 31.9M | pkt->dts = AV_NOPTS_VALUE; |
502 | 31.9M | if (!(codec->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) { |
503 | 31.9M | avci->last_pkt_props->pts = AV_NOPTS_VALUE; |
504 | 31.9M | avci->last_pkt_props->dts = AV_NOPTS_VALUE; |
505 | 31.9M | } |
506 | 31.9M | } |
507 | | |
508 | 217M | return ret; |
509 | 217M | } |
510 | | |
511 | | #if CONFIG_LCMS2 |
512 | | static int detect_colorspace(AVCodecContext *avctx, AVFrame *frame) |
513 | | { |
514 | | AVCodecInternal *avci = avctx->internal; |
515 | | enum AVColorTransferCharacteristic trc; |
516 | | AVColorPrimariesDesc coeffs; |
517 | | enum AVColorPrimaries prim; |
518 | | cmsHPROFILE profile; |
519 | | AVFrameSideData *sd; |
520 | | int ret; |
521 | | if (!(avctx->flags2 & AV_CODEC_FLAG2_ICC_PROFILES)) |
522 | | return 0; |
523 | | |
524 | | sd = av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE); |
525 | | if (!sd || !sd->size) |
526 | | return 0; |
527 | | |
528 | | if (!avci->icc.avctx) { |
529 | | ret = ff_icc_context_init(&avci->icc, avctx); |
530 | | if (ret < 0) |
531 | | return ret; |
532 | | } |
533 | | |
534 | | profile = cmsOpenProfileFromMemTHR(avci->icc.ctx, sd->data, sd->size); |
535 | | if (!profile) |
536 | | return AVERROR_INVALIDDATA; |
537 | | |
538 | | ret = ff_icc_profile_sanitize(&avci->icc, profile); |
539 | | if (!ret) |
540 | | ret = ff_icc_profile_read_primaries(&avci->icc, profile, &coeffs); |
541 | | if (!ret) |
542 | | ret = ff_icc_profile_detect_transfer(&avci->icc, profile, &trc); |
543 | | cmsCloseProfile(profile); |
544 | | if (ret < 0) |
545 | | return ret; |
546 | | |
547 | | prim = av_csp_primaries_id_from_desc(&coeffs); |
548 | | if (prim != AVCOL_PRI_UNSPECIFIED) |
549 | | frame->color_primaries = prim; |
550 | | if (trc != AVCOL_TRC_UNSPECIFIED) |
551 | | frame->color_trc = trc; |
552 | | return 0; |
553 | | } |
554 | | #else /* !CONFIG_LCMS2 */ |
555 | | static int detect_colorspace(av_unused AVCodecContext *c, av_unused AVFrame *f) |
556 | 237M | { |
557 | 237M | return 0; |
558 | 237M | } |
559 | | #endif |
560 | | |
561 | | static int fill_frame_props(const AVCodecContext *avctx, AVFrame *frame) |
562 | 173M | { |
563 | 173M | int ret; |
564 | | |
565 | 173M | if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED) |
566 | 143M | frame->color_primaries = avctx->color_primaries; |
567 | 173M | if (frame->color_trc == AVCOL_TRC_UNSPECIFIED) |
568 | 142M | frame->color_trc = avctx->color_trc; |
569 | 173M | if (frame->colorspace == AVCOL_SPC_UNSPECIFIED) |
570 | 142M | frame->colorspace = avctx->colorspace; |
571 | 173M | if (frame->color_range == AVCOL_RANGE_UNSPECIFIED) |
572 | 172M | frame->color_range = avctx->color_range; |
573 | 173M | if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED) |
574 | 172M | frame->chroma_location = avctx->chroma_sample_location; |
575 | 173M | if (frame->alpha_mode == AVALPHA_MODE_UNSPECIFIED) |
576 | 173M | frame->alpha_mode = avctx->alpha_mode; |
577 | | |
578 | 173M | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { |
579 | 58.8M | if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio; |
580 | 58.8M | if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt; |
581 | 114M | } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) { |
582 | 114M | if (frame->format == AV_SAMPLE_FMT_NONE) |
583 | 83.6M | frame->format = avctx->sample_fmt; |
584 | 114M | if (!frame->ch_layout.nb_channels) { |
585 | 83.6M | ret = av_channel_layout_copy(&frame->ch_layout, &avctx->ch_layout); |
586 | 83.6M | if (ret < 0) |
587 | 0 | return ret; |
588 | 83.6M | } |
589 | 114M | if (!frame->sample_rate) |
590 | 84.2M | frame->sample_rate = avctx->sample_rate; |
591 | 114M | } |
592 | | |
593 | 173M | return 0; |
594 | 173M | } |
595 | | |
596 | | static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame) |
597 | 233M | { |
598 | 233M | int ret; |
599 | 233M | int64_t discarded_samples = 0; |
600 | | |
601 | 349M | while (!frame->buf[0]) { |
602 | 304M | if (discarded_samples > avctx->max_samples) |
603 | 1.49k | return AVERROR(EAGAIN); |
604 | 304M | ret = decode_simple_internal(avctx, frame, &discarded_samples); |
605 | 304M | if (ret < 0) |
606 | 188M | return ret; |
607 | 304M | } |
608 | | |
609 | 44.9M | return 0; |
610 | 233M | } |
611 | | |
612 | | int ff_decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame) |
613 | 237M | { |
614 | 237M | AVCodecInternal *avci = avctx->internal; |
615 | 237M | DecodeContext *dc = decode_ctx(avci); |
616 | 237M | const FFCodec *const codec = ffcodec(avctx->codec); |
617 | 237M | int ret; |
618 | | |
619 | 237M | av_assert0(!frame->buf[0]); |
620 | | |
621 | 237M | if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) { |
622 | 3.64M | while (1) { |
623 | 3.64M | frame->pict_type = dc->initial_pict_type; |
624 | 3.64M | frame->flags |= dc->intra_only_flag; |
625 | 3.64M | ret = codec->cb.receive_frame(avctx, frame); |
626 | 3.64M | emms_c(); |
627 | 3.64M | if (!ret) { |
628 | 1.17M | if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) { |
629 | 878k | int64_t discarded_samples = 0; |
630 | 878k | ret = discard_samples(avctx, frame, &discarded_samples); |
631 | 878k | } |
632 | 1.17M | if (ret == AVERROR(EAGAIN) || (frame->flags & AV_FRAME_FLAG_DISCARD)) { |
633 | 195k | av_frame_unref(frame); |
634 | 195k | continue; |
635 | 195k | } |
636 | 1.17M | } |
637 | 3.44M | break; |
638 | 3.64M | } |
639 | 3.44M | } else |
640 | 233M | ret = decode_simple_receive_frame(avctx, frame); |
641 | | |
642 | 237M | if (ret == AVERROR_EOF) |
643 | 922k | avci->draining_done = 1; |
644 | | |
645 | 237M | return ret; |
646 | 237M | } |
647 | | |
648 | | static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame, |
649 | | unsigned flags) |
650 | 237M | { |
651 | 237M | AVCodecInternal *avci = avctx->internal; |
652 | 237M | DecodeContext *dc = decode_ctx(avci); |
653 | 237M | int ret, ok; |
654 | | |
655 | 237M | if (avctx->active_thread_type & FF_THREAD_FRAME) |
656 | 0 | ret = ff_thread_receive_frame(avctx, frame, flags); |
657 | 237M | else |
658 | 237M | ret = ff_decode_receive_frame_internal(avctx, frame); |
659 | | |
660 | | /* preserve ret */ |
661 | 237M | ok = detect_colorspace(avctx, frame); |
662 | 237M | if (ok < 0) { |
663 | 0 | av_frame_unref(frame); |
664 | 0 | return ok; |
665 | 0 | } |
666 | | |
667 | 237M | if (!ret) { |
668 | 45.9M | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { |
669 | 15.3M | if (!frame->width) |
670 | 79.1k | frame->width = avctx->width; |
671 | 15.3M | if (!frame->height) |
672 | 79.1k | frame->height = avctx->height; |
673 | 15.3M | } |
674 | | |
675 | 45.9M | ret = fill_frame_props(avctx, frame); |
676 | 45.9M | if (ret < 0) { |
677 | 0 | av_frame_unref(frame); |
678 | 0 | return ret; |
679 | 0 | } |
680 | | |
681 | 45.9M | frame->best_effort_timestamp = guess_correct_pts(dc, |
682 | 45.9M | frame->pts, |
683 | 45.9M | frame->pkt_dts); |
684 | | |
685 | | /* the only case where decode data is not set should be decoders |
686 | | * that do not call ff_get_buffer() */ |
687 | 45.9M | av_assert0(frame->private_ref || |
688 | 45.9M | !(avctx->codec->capabilities & AV_CODEC_CAP_DR1)); |
689 | | |
690 | 45.9M | if (frame->private_ref) { |
691 | 45.8M | FrameDecodeData *fdd = frame->private_ref; |
692 | | |
693 | 45.8M | if (fdd->post_process) { |
694 | 0 | ret = fdd->post_process(avctx, frame); |
695 | 0 | if (ret < 0) { |
696 | 0 | av_frame_unref(frame); |
697 | 0 | return ret; |
698 | 0 | } |
699 | 0 | } |
700 | 45.8M | } |
701 | 45.9M | } |
702 | | |
703 | | /* free the per-frame decode data */ |
704 | 237M | av_refstruct_unref(&frame->private_ref); |
705 | | |
706 | 237M | return ret; |
707 | 237M | } |
708 | | |
709 | | int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt) |
710 | 206M | { |
711 | 206M | AVCodecInternal *avci = avctx->internal; |
712 | 206M | DecodeContext *dc = decode_ctx(avci); |
713 | 206M | int ret; |
714 | | |
715 | 206M | if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec)) |
716 | 0 | return AVERROR(EINVAL); |
717 | | |
718 | 206M | if (dc->draining_started) |
719 | 0 | return AVERROR_EOF; |
720 | | |
721 | 206M | if (avpkt && !avpkt->size && avpkt->data) |
722 | 9.64M | return AVERROR(EINVAL); |
723 | | |
724 | 196M | if (avpkt && (avpkt->data || avpkt->side_data_elems)) { |
725 | 195M | if (!AVPACKET_IS_EMPTY(avci->buffer_pkt)) |
726 | 3.91M | return AVERROR(EAGAIN); |
727 | 191M | ret = av_packet_ref(avci->buffer_pkt, avpkt); |
728 | 191M | if (ret < 0) |
729 | 0 | return ret; |
730 | 191M | } else |
731 | 953k | dc->draining_started = 1; |
732 | | |
733 | 192M | if (!avci->buffer_frame->buf[0] && !dc->draining_started) { |
734 | 190M | ret = decode_receive_frame_internal(avctx, avci->buffer_frame, 0); |
735 | 190M | if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) |
736 | 103M | return ret; |
737 | 190M | } |
738 | | |
739 | 89.0M | return 0; |
740 | 192M | } |
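avcodec_send_packet() above is one half of the public decoding loop; a minimal caller-side sketch, assuming an opened decoder context dec and a preallocated frame (both names are placeholders), looks like this:

    static int decode_one(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
    {
        /* pkt == NULL enters draining mode */
        int ret = avcodec_send_packet(dec, pkt);
        if (ret < 0)
            return ret; /* AVERROR(EAGAIN) here means: read frames first, then resend */

        while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
            /* ... use the decoded frame ... */
            av_frame_unref(frame);
        }
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }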
741 | | |
742 | | static int apply_cropping(AVCodecContext *avctx, AVFrame *frame) |
743 | 15.2M | { |
744 | | /* make sure we are noisy about decoders returning invalid cropping data */ |
745 | 15.2M | if (frame->crop_left >= INT_MAX - frame->crop_right || |
746 | 15.2M | frame->crop_top >= INT_MAX - frame->crop_bottom || |
747 | 15.2M | (frame->crop_left + frame->crop_right) >= frame->width || |
748 | 15.2M | (frame->crop_top + frame->crop_bottom) >= frame->height) { |
749 | 40 | av_log(avctx, AV_LOG_WARNING, |
750 | 40 | "Invalid cropping information set by a decoder: " |
751 | 40 | "%zu/%zu/%zu/%zu (frame size %dx%d). " |
752 | 40 | "This is a bug, please report it\n", |
753 | 40 | frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom, |
754 | 40 | frame->width, frame->height); |
755 | 40 | frame->crop_left = 0; |
756 | 40 | frame->crop_right = 0; |
757 | 40 | frame->crop_top = 0; |
758 | 40 | frame->crop_bottom = 0; |
759 | 40 | return 0; |
760 | 40 | } |
761 | | |
762 | 15.2M | if (!avctx->apply_cropping) |
763 | 0 | return 0; |
764 | | |
765 | 15.2M | return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ? |
766 | 15.2M | AV_FRAME_CROP_UNALIGNED : 0); |
767 | 15.2M | } |
768 | | |
769 | | // make sure frames returned to the caller are valid |
770 | | static int frame_validate(AVCodecContext *avctx, AVFrame *frame) |
771 | 42.8M | { |
772 | 42.8M | if (!frame->buf[0] || frame->format < 0) |
773 | 0 | goto fail; |
774 | | |
775 | 42.8M | switch (avctx->codec_type) { |
776 | 15.2M | case AVMEDIA_TYPE_VIDEO: |
777 | 15.2M | if (frame->width <= 0 || frame->height <= 0) |
778 | 0 | goto fail; |
779 | 15.2M | break; |
780 | 27.6M | case AVMEDIA_TYPE_AUDIO: |
781 | 27.6M | if (!av_channel_layout_check(&frame->ch_layout) || |
782 | 27.6M | frame->sample_rate <= 0) |
783 | 0 | goto fail; |
784 | | |
785 | 27.6M | break; |
786 | 27.6M | default: av_assert0(0); |
787 | 42.8M | } |
788 | | |
789 | 42.8M | return 0; |
790 | 0 | fail: |
791 | 0 | av_log(avctx, AV_LOG_ERROR, "An invalid frame was output by a decoder. " |
792 | 0 | "This is a bug, please report it.\n"); |
793 | 0 | return AVERROR_BUG; |
794 | 42.8M | } |
795 | | |
796 | | int ff_decode_receive_frame(AVCodecContext *avctx, AVFrame *frame, unsigned flags) |
797 | 84.3M | { |
798 | 84.3M | AVCodecInternal *avci = avctx->internal; |
799 | 84.3M | int ret; |
800 | | |
801 | 84.3M | if (avci->buffer_frame->buf[0]) { |
802 | 37.2M | av_frame_move_ref(frame, avci->buffer_frame); |
803 | 47.0M | } else { |
804 | 47.0M | ret = decode_receive_frame_internal(avctx, frame, flags); |
805 | 47.0M | if (ret < 0) |
806 | 41.4M | return ret; |
807 | 47.0M | } |
808 | | |
809 | 42.8M | ret = frame_validate(avctx, frame); |
810 | 42.8M | if (ret < 0) |
811 | 0 | goto fail; |
812 | | |
813 | 42.8M | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { |
814 | 15.2M | ret = apply_cropping(avctx, frame); |
815 | 15.2M | if (ret < 0) |
816 | 0 | goto fail; |
817 | 15.2M | } |
818 | | |
819 | 42.8M | avctx->frame_num++; |
820 | | |
821 | 42.8M | return 0; |
822 | 0 | fail: |
823 | 0 | av_frame_unref(frame); |
824 | 0 | return ret; |
825 | 42.8M | } |
826 | | |
827 | | static void get_subtitle_defaults(AVSubtitle *sub) |
828 | 3.93M | { |
829 | 3.93M | memset(sub, 0, sizeof(*sub)); |
830 | 3.93M | sub->pts = AV_NOPTS_VALUE; |
831 | 3.93M | } |
832 | | |
833 | 0 | #define UTF8_MAX_BYTES 4 /* 5 and 6 byte sequences should not be used */ |
834 | | static int recode_subtitle(AVCodecContext *avctx, const AVPacket **outpkt, |
835 | | const AVPacket *inpkt, AVPacket *buf_pkt) |
836 | 3.82M | { |
837 | 3.82M | #if CONFIG_ICONV |
838 | 3.82M | iconv_t cd = (iconv_t)-1; |
839 | 3.82M | int ret = 0; |
840 | 3.82M | char *inb, *outb; |
841 | 3.82M | size_t inl, outl; |
842 | 3.82M | #endif |
843 | | |
844 | 3.82M | if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0) { |
845 | 3.82M | *outpkt = inpkt; |
846 | 3.82M | return 0; |
847 | 3.82M | } |
848 | | |
849 | 0 | #if CONFIG_ICONV |
850 | 0 | inb = inpkt->data; |
851 | 0 | inl = inpkt->size; |
852 | |
853 | 0 | if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) { |
854 | 0 | av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n"); |
855 | 0 | return AVERROR(ERANGE); |
856 | 0 | } |
857 | | |
858 | 0 | cd = iconv_open("UTF-8", avctx->sub_charenc); |
859 | 0 | av_assert0(cd != (iconv_t)-1); |
860 | | |
861 | 0 | ret = av_new_packet(buf_pkt, inl * UTF8_MAX_BYTES); |
862 | 0 | if (ret < 0) |
863 | 0 | goto end; |
864 | 0 | ret = av_packet_copy_props(buf_pkt, inpkt); |
865 | 0 | if (ret < 0) |
866 | 0 | goto end; |
867 | 0 | outb = buf_pkt->data; |
868 | 0 | outl = buf_pkt->size; |
869 | |
870 | 0 | if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 || |
871 | 0 | iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 || |
872 | 0 | outl >= buf_pkt->size || inl != 0) { |
873 | 0 | ret = FFMIN(AVERROR(errno), -1); |
874 | 0 | av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" " |
875 | 0 | "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc); |
876 | 0 | goto end; |
877 | 0 | } |
878 | 0 | buf_pkt->size -= outl; |
879 | 0 | memset(buf_pkt->data + buf_pkt->size, 0, outl); |
880 | 0 | *outpkt = buf_pkt; |
881 | |
882 | 0 | ret = 0; |
883 | 0 | end: |
884 | 0 | if (ret < 0) |
885 | 0 | av_packet_unref(buf_pkt); |
886 | 0 | if (cd != (iconv_t)-1) |
887 | 0 | iconv_close(cd); |
888 | 0 | return ret; |
889 | | #else |
890 | | av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv"); |
891 | | return AVERROR(EINVAL); |
892 | | #endif |
893 | 0 | } |
894 | | |
895 | | static int utf8_check(const uint8_t *str) |
896 | 5.48M | { |
897 | 5.48M | const uint8_t *byte; |
898 | 5.48M | uint32_t codepoint, min; |
899 | | |
900 | 472M | while (*str) { |
901 | 467M | byte = str; |
902 | 933M | GET_UTF8(codepoint, *(byte++), return 0;); |
903 | 933M | min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 : |
904 | 6.85M | 1 << (5 * (byte - str) - 4); |
905 | 933M | if (codepoint < min || codepoint >= 0x110000 || |
906 | 466M | codepoint == 0xFFFE /* BOM */ || |
907 | 466M | codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */) |
908 | 10.8k | return 0; |
909 | 466M | str = byte; |
910 | 466M | } |
911 | 4.78M | return 1; |
912 | 5.48M | } |
913 | | |
914 | | int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, |
915 | | int *got_sub_ptr, const AVPacket *avpkt) |
916 | 3.93M | { |
917 | 3.93M | int ret = 0; |
918 | | |
919 | 3.93M | if (!avpkt->data && avpkt->size) { |
920 | 0 | av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n"); |
921 | 0 | return AVERROR(EINVAL); |
922 | 0 | } |
923 | 3.93M | if (!avctx->codec) |
924 | 0 | return AVERROR(EINVAL); |
925 | 3.93M | if (ffcodec(avctx->codec)->cb_type != FF_CODEC_CB_TYPE_DECODE_SUB) { |
926 | 0 | av_log(avctx, AV_LOG_ERROR, "Codec not subtitle decoder\n"); |
927 | 0 | return AVERROR(EINVAL); |
928 | 0 | } |
929 | | |
930 | 3.93M | *got_sub_ptr = 0; |
931 | 3.93M | get_subtitle_defaults(sub); |
932 | | |
933 | 3.93M | if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) { |
934 | 3.82M | AVCodecInternal *avci = avctx->internal; |
935 | 3.82M | const AVPacket *pkt; |
936 | | |
937 | 3.82M | ret = recode_subtitle(avctx, &pkt, avpkt, avci->buffer_pkt); |
938 | 3.82M | if (ret < 0) |
939 | 0 | return ret; |
940 | | |
941 | 3.82M | if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE) |
942 | 0 | sub->pts = av_rescale_q(avpkt->pts, |
943 | 0 | avctx->pkt_timebase, AV_TIME_BASE_Q); |
944 | 3.82M | ret = ffcodec(avctx->codec)->cb.decode_sub(avctx, sub, got_sub_ptr, pkt); |
945 | 3.82M | if (pkt == avci->buffer_pkt) // did we recode? |
946 | 0 | av_packet_unref(avci->buffer_pkt); |
947 | 3.82M | if (ret < 0) { |
948 | 722k | *got_sub_ptr = 0; |
949 | 722k | avsubtitle_free(sub); |
950 | 722k | return ret; |
951 | 722k | } |
952 | 3.10M | av_assert1(!sub->num_rects || *got_sub_ptr); |
953 | | |
954 | 3.10M | if (sub->num_rects && !sub->end_display_time && avpkt->duration && |
955 | 0 | avctx->pkt_timebase.num) { |
956 | 0 | AVRational ms = { 1, 1000 }; |
957 | 0 | sub->end_display_time = av_rescale_q(avpkt->duration, |
958 | 0 | avctx->pkt_timebase, ms); |
959 | 0 | } |
960 | | |
961 | 3.10M | if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) |
962 | 185k | sub->format = 0; |
963 | 2.92M | else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB) |
964 | 2.92M | sub->format = 1; |
965 | | |
966 | 7.90M | for (unsigned i = 0; i < sub->num_rects; i++) { |
967 | 5.49M | if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE && |
968 | 5.49M | sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) { |
969 | 702k | av_log(avctx, AV_LOG_ERROR, |
970 | 702k | "Invalid UTF-8 in decoded subtitles text; " |
971 | 702k | "maybe missing -sub_charenc option\n"); |
972 | 702k | avsubtitle_free(sub); |
973 | 702k | *got_sub_ptr = 0; |
974 | 702k | return AVERROR_INVALIDDATA; |
975 | 702k | } |
976 | 5.49M | } |
977 | | |
978 | 2.40M | if (*got_sub_ptr) |
979 | 1.79M | avctx->frame_num++; |
980 | 2.40M | } |
981 | | |
982 | 2.51M | return ret; |
983 | 3.93M | } |
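A hedged usage sketch for the subtitle entry point above, assuming dec is an opened subtitle decoder and pkt an input packet (both placeholder names):

    AVSubtitle sub;
    int got_sub = 0;
    int ret = avcodec_decode_subtitle2(dec, &sub, &got_sub, pkt);
    if (ret < 0)
        return ret;
    if (got_sub) {
        for (unsigned i = 0; i < sub.num_rects; i++) {
            /* sub.rects[i]->type is SUBTITLE_BITMAP, SUBTITLE_TEXT or SUBTITLE_ASS */
        }
        avsubtitle_free(&sub); /* the caller owns the rects once got_sub is set */
    }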
984 | | |
985 | | enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, |
986 | | const enum AVPixelFormat *fmt) |
987 | 754k | { |
988 | 754k | const AVPixFmtDescriptor *desc; |
989 | 754k | const AVCodecHWConfig *config; |
990 | 754k | int i, n; |
991 | | |
992 | | // If a device was supplied when the codec was opened, assume that the |
993 | | // user wants to use it. |
994 | 754k | if (avctx->hw_device_ctx && ffcodec(avctx->codec)->hw_configs) { |
995 | 0 | AVHWDeviceContext *device_ctx = |
996 | 0 | (AVHWDeviceContext*)avctx->hw_device_ctx->data; |
997 | 0 | for (i = 0;; i++) { |
998 | 0 | config = &ffcodec(avctx->codec)->hw_configs[i]->public; |
999 | 0 | if (!config) |
1000 | 0 | break; |
1001 | 0 | if (!(config->methods & |
1002 | 0 | AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)) |
1003 | 0 | continue; |
1004 | 0 | if (device_ctx->type != config->device_type) |
1005 | 0 | continue; |
1006 | 0 | for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) { |
1007 | 0 | if (config->pix_fmt == fmt[n]) |
1008 | 0 | return fmt[n]; |
1009 | 0 | } |
1010 | 0 | } |
1011 | 0 | } |
1012 | | // No device or other setup, so we have to choose from things which |
1013 | | // don't need any other external information. |
1014 | | |
1015 | | // If the last element of the list is a software format, choose it |
1016 | | // (this should be best software format if any exist). |
1017 | 1.50M | for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++); |
1018 | 754k | desc = av_pix_fmt_desc_get(fmt[n - 1]); |
1019 | 754k | if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) |
1020 | 754k | return fmt[n - 1]; |
1021 | | |
1022 | | // Finally, traverse the list in order and choose the first entry |
1023 | | // with no external dependencies (if there is no hardware configuration |
1024 | | // information available then this just picks the first entry). |
1025 | 0 | for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) { |
1026 | 0 | for (i = 0;; i++) { |
1027 | 0 | config = avcodec_get_hw_config(avctx->codec, i); |
1028 | 0 | if (!config) |
1029 | 0 | break; |
1030 | 0 | if (config->pix_fmt == fmt[n]) |
1031 | 0 | break; |
1032 | 0 | } |
1033 | 0 | if (!config) { |
1034 | | // No specific config available, so the decoder must be able |
1035 | | // to handle this format without any additional setup. |
1036 | 0 | return fmt[n]; |
1037 | 0 | } |
1038 | 0 | if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) { |
1039 | | // Usable with only internal setup. |
1040 | 0 | return fmt[n]; |
1041 | 0 | } |
1042 | 0 | } |
1043 | | |
1044 | | // Nothing is usable, give up. |
1045 | 0 | return AV_PIX_FMT_NONE; |
1046 | 0 | } |
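Callers can override this selection with their own get_format callback; a sketch that prefers VAAPI when it is offered and otherwise falls back to the default logic above (dec_ctx and prefer_vaapi are placeholder names, and a device context is assumed to have been created):

    #include <libavcodec/avcodec.h>
    #include <libavutil/hwcontext.h>

    static enum AVPixelFormat prefer_vaapi(AVCodecContext *avctx,
                                           const enum AVPixelFormat *fmt)
    {
        for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++)
            if (fmt[i] == AV_PIX_FMT_VAAPI)
                return fmt[i];
        return avcodec_default_get_format(avctx, fmt);
    }

    /* before avcodec_open2(): */
    int ret = av_hwdevice_ctx_create(&dec_ctx->hw_device_ctx,
                                     AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0);
    if (ret < 0)
        return ret;
    dec_ctx->get_format = prefer_vaapi;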
1047 | | |
1048 | | int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, |
1049 | | enum AVHWDeviceType dev_type) |
1050 | 0 | { |
1051 | 0 | AVHWDeviceContext *device_ctx; |
1052 | 0 | AVHWFramesContext *frames_ctx; |
1053 | 0 | int ret; |
1054 | 0 |
1055 | 0 | if (!avctx->hwaccel) |
1056 | 0 | return AVERROR(ENOSYS); |
1057 | | |
1058 | 0 | if (avctx->hw_frames_ctx) |
1059 | 0 | return 0; |
1060 | 0 | if (!avctx->hw_device_ctx) { |
1061 | 0 | av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is " |
1062 | 0 | "required for hardware accelerated decoding.\n"); |
1063 | 0 | return AVERROR(EINVAL); |
1064 | 0 | } |
1065 | | |
1066 | 0 | device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data; |
1067 | 0 | if (device_ctx->type != dev_type) { |
1068 | 0 | av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware " |
1069 | 0 | "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type), |
1070 | 0 | av_hwdevice_get_type_name(device_ctx->type)); |
1071 | 0 | return AVERROR(EINVAL); |
1072 | 0 | } |
1073 | | |
1074 | 0 | ret = avcodec_get_hw_frames_parameters(avctx, |
1075 | 0 | avctx->hw_device_ctx, |
1076 | 0 | avctx->hwaccel->pix_fmt, |
1077 | 0 | &avctx->hw_frames_ctx); |
1078 | 0 | if (ret < 0) |
1079 | 0 | return ret; |
1080 | | |
1081 | 0 | frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
1082 | | |
1083 | |
1084 | 0 | if (frames_ctx->initial_pool_size) { |
1085 | | // We guarantee 4 base work surfaces. The function above guarantees 1 |
1086 | | // (the absolute minimum), so add the missing count. |
1087 | 0 | frames_ctx->initial_pool_size += 3; |
1088 | 0 | } |
1089 | |
1090 | 0 | ret = av_hwframe_ctx_init(avctx->hw_frames_ctx); |
1091 | 0 | if (ret < 0) { |
1092 | 0 | av_buffer_unref(&avctx->hw_frames_ctx); |
1093 | 0 | return ret; |
1094 | 0 | } |
1095 | | |
1096 | 0 | return 0; |
1097 | 0 | } |
1098 | | |
1099 | | int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, |
1100 | | AVBufferRef *device_ref, |
1101 | | enum AVPixelFormat hw_pix_fmt, |
1102 | | AVBufferRef **out_frames_ref) |
1103 | 0 | { |
1104 | 0 | AVBufferRef *frames_ref = NULL; |
1105 | 0 | const AVCodecHWConfigInternal *hw_config; |
1106 | 0 | const FFHWAccel *hwa; |
1107 | 0 | int i, ret; |
1108 | 0 | bool clean_priv_data = false; |
1109 | |
1110 | 0 | for (i = 0;; i++) { |
1111 | 0 | hw_config = ffcodec(avctx->codec)->hw_configs[i]; |
1112 | 0 | if (!hw_config) |
1113 | 0 | return AVERROR(ENOENT); |
1114 | 0 | if (hw_config->public.pix_fmt == hw_pix_fmt) |
1115 | 0 | break; |
1116 | 0 | } |
1117 | | |
1118 | 0 | hwa = hw_config->hwaccel; |
1119 | 0 | if (!hwa || !hwa->frame_params) |
1120 | 0 | return AVERROR(ENOENT); |
1121 | | |
1122 | 0 | frames_ref = av_hwframe_ctx_alloc(device_ref); |
1123 | 0 | if (!frames_ref) |
1124 | 0 | return AVERROR(ENOMEM); |
1125 | | |
1126 | 0 | if (!avctx->internal->hwaccel_priv_data) { |
1127 | 0 | avctx->internal->hwaccel_priv_data = |
1128 | 0 | av_mallocz(hwa->priv_data_size); |
1129 | 0 | if (!avctx->internal->hwaccel_priv_data) { |
1130 | 0 | av_buffer_unref(&frames_ref); |
1131 | 0 | return AVERROR(ENOMEM); |
1132 | 0 | } |
1133 | 0 | clean_priv_data = true; |
1134 | 0 | } |
1135 | | |
1136 | 0 | ret = hwa->frame_params(avctx, frames_ref); |
1137 | 0 | if (ret >= 0) { |
1138 | 0 | AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data; |
1139 | |
1140 | 0 | if (frames_ctx->initial_pool_size) { |
1141 | | // If the user has requested that extra output surfaces be |
1142 | | // available then add them here. |
1143 | 0 | if (avctx->extra_hw_frames > 0) |
1144 | 0 | frames_ctx->initial_pool_size += avctx->extra_hw_frames; |
1145 | | |
1146 | | // If frame threading is enabled then an extra surface per thread |
1147 | | // is also required. |
1148 | 0 | if (avctx->active_thread_type & FF_THREAD_FRAME) |
1149 | 0 | frames_ctx->initial_pool_size += avctx->thread_count; |
1150 | 0 | } |
1151 | |
1152 | 0 | *out_frames_ref = frames_ref; |
1153 | 0 | } else { |
1154 | 0 | if (clean_priv_data) |
1155 | 0 | av_freep(&avctx->internal->hwaccel_priv_data); |
1156 | 0 | av_buffer_unref(&frames_ref); |
1157 | 0 | } |
1158 | 0 | return ret; |
1159 | 0 | } |
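When the hw-frames-context method is used, the function above is intended to be called from inside a get_format() callback; a rough sketch under those assumptions (VAAPI and the +8 surface margin are arbitrary choices for illustration):

    AVBufferRef *frames_ref = NULL;
    int ret = avcodec_get_hw_frames_parameters(avctx, avctx->hw_device_ctx,
                                               AV_PIX_FMT_VAAPI, &frames_ref);
    if (ret < 0)
        return AV_PIX_FMT_NONE;

    /* only fixed-size pools report a nonzero initial size; leave dynamic pools alone */
    if (((AVHWFramesContext *)frames_ref->data)->initial_pool_size)
        ((AVHWFramesContext *)frames_ref->data)->initial_pool_size += 8;

    ret = av_hwframe_ctx_init(frames_ref);
    if (ret < 0) {
        av_buffer_unref(&frames_ref);
        return AV_PIX_FMT_NONE;
    }
    avctx->hw_frames_ctx = frames_ref;
    return AV_PIX_FMT_VAAPI;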
1160 | | |
1161 | | static int hwaccel_init(AVCodecContext *avctx, |
1162 | | const FFHWAccel *hwaccel) |
1163 | 0 | { |
1164 | 0 | int err; |
1165 | |
1166 | 0 | if (hwaccel->p.capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL && |
1167 | 0 | avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { |
1168 | 0 | av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n", |
1169 | 0 | hwaccel->p.name); |
1170 | 0 | return AVERROR_PATCHWELCOME; |
1171 | 0 | } |
1172 | | |
1173 | 0 | if (!avctx->internal->hwaccel_priv_data && hwaccel->priv_data_size) { |
1174 | 0 | avctx->internal->hwaccel_priv_data = |
1175 | 0 | av_mallocz(hwaccel->priv_data_size); |
1176 | 0 | if (!avctx->internal->hwaccel_priv_data) |
1177 | 0 | return AVERROR(ENOMEM); |
1178 | 0 | } |
1179 | | |
1180 | 0 | avctx->hwaccel = &hwaccel->p; |
1181 | 0 | if (hwaccel->init) { |
1182 | 0 | err = hwaccel->init(avctx); |
1183 | 0 | if (err < 0) { |
1184 | 0 | av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: " |
1185 | 0 | "hwaccel initialisation returned error.\n", |
1186 | 0 | av_get_pix_fmt_name(hwaccel->p.pix_fmt)); |
1187 | 0 | av_freep(&avctx->internal->hwaccel_priv_data); |
1188 | 0 | avctx->hwaccel = NULL; |
1189 | 0 | return err; |
1190 | 0 | } |
1191 | 0 | } |
1192 | | |
1193 | 0 | return 0; |
1194 | 0 | } |
1195 | | |
1196 | | void ff_hwaccel_uninit(AVCodecContext *avctx) |
1197 | 1.93M | { |
1198 | 1.93M | if (FF_HW_HAS_CB(avctx, uninit)) |
1199 | 0 | FF_HW_SIMPLE_CALL(avctx, uninit); |
1200 | | |
1201 | 1.93M | av_freep(&avctx->internal->hwaccel_priv_data); |
1202 | | |
1203 | 1.93M | avctx->hwaccel = NULL; |
1204 | | |
1205 | 1.93M | av_buffer_unref(&avctx->hw_frames_ctx); |
1206 | 1.93M | } |
1207 | | |
1208 | | int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) |
1209 | 754k | { |
1210 | 754k | const AVPixFmtDescriptor *desc; |
1211 | 754k | enum AVPixelFormat *choices; |
1212 | 754k | enum AVPixelFormat ret, user_choice; |
1213 | 754k | const AVCodecHWConfigInternal *hw_config; |
1214 | 754k | const AVCodecHWConfig *config; |
1215 | 754k | int i, n, err; |
1216 | | |
1217 | | // Find end of list. |
1218 | 1.50M | for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++); |
1219 | | // Must contain at least one entry. |
1220 | 754k | av_assert0(n >= 1); |
1221 | | // If a software format is available, it must be the last entry. |
1222 | 754k | desc = av_pix_fmt_desc_get(fmt[n - 1]); |
1223 | 754k | if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) { |
1224 | | // No software format is available. |
1225 | 754k | } else { |
1226 | 754k | avctx->sw_pix_fmt = fmt[n - 1]; |
1227 | 754k | } |
1228 | | |
1229 | 754k | choices = av_memdup(fmt, (n + 1) * sizeof(*choices)); |
1230 | 754k | if (!choices) |
1231 | 0 | return AV_PIX_FMT_NONE; |
1232 | | |
1233 | 754k | for (;;) { |
1234 | | // Remove the previous hwaccel, if there was one. |
1235 | 754k | ff_hwaccel_uninit(avctx); |
1236 | | |
1237 | 754k | user_choice = avctx->get_format(avctx, choices); |
1238 | 754k | if (user_choice == AV_PIX_FMT_NONE) { |
1239 | | // Explicitly chose nothing, give up. |
1240 | 0 | ret = AV_PIX_FMT_NONE; |
1241 | 0 | break; |
1242 | 0 | } |
1243 | | |
1244 | 754k | desc = av_pix_fmt_desc_get(user_choice); |
1245 | 754k | if (!desc) { |
1246 | 0 | av_log(avctx, AV_LOG_ERROR, "Invalid format returned by " |
1247 | 0 | "get_format() callback.\n"); |
1248 | 0 | ret = AV_PIX_FMT_NONE; |
1249 | 0 | break; |
1250 | 0 | } |
1251 | 754k | av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n", |
1252 | 754k | desc->name); |
1253 | | |
1254 | 754k | for (i = 0; i < n; i++) { |
1255 | 754k | if (choices[i] == user_choice) |
1256 | 754k | break; |
1257 | 754k | } |
1258 | 754k | if (i == n) { |
1259 | 0 | av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): " |
1260 | 0 | "%s not in possible list.\n", desc->name); |
1261 | 0 | ret = AV_PIX_FMT_NONE; |
1262 | 0 | break; |
1263 | 0 | } |
1264 | | |
1265 | 754k | if (ffcodec(avctx->codec)->hw_configs) { |
1266 | 506k | for (i = 0;; i++) { |
1267 | 506k | hw_config = ffcodec(avctx->codec)->hw_configs[i]; |
1268 | 506k | if (!hw_config) |
1269 | 506k | break; |
1270 | 0 | if (hw_config->public.pix_fmt == user_choice) |
1271 | 0 | break; |
1272 | 0 | } |
1273 | 506k | } else { |
1274 | 247k | hw_config = NULL; |
1275 | 247k | } |
1276 | | |
1277 | 754k | if (!hw_config) { |
1278 | | // No config available, so no extra setup required. |
1279 | 754k | ret = user_choice; |
1280 | 754k | break; |
1281 | 754k | } |
1282 | 0 | config = &hw_config->public; |
1283 | |
1284 | 0 | if (config->methods & |
1285 | 0 | AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX && |
1286 | 0 | avctx->hw_frames_ctx) { |
1287 | 0 | const AVHWFramesContext *frames_ctx = |
1288 | 0 | (AVHWFramesContext*)avctx->hw_frames_ctx->data; |
1289 | 0 | if (frames_ctx->format != user_choice) { |
1290 | 0 | av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: " |
1291 | 0 | "does not match the format of the provided frames " |
1292 | 0 | "context.\n", desc->name); |
1293 | 0 | goto try_again; |
1294 | 0 | } |
1295 | 0 | } else if (config->methods & |
1296 | 0 | AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX && |
1297 | 0 | avctx->hw_device_ctx) { |
1298 | 0 | const AVHWDeviceContext *device_ctx = |
1299 | 0 | (AVHWDeviceContext*)avctx->hw_device_ctx->data; |
1300 | 0 | if (device_ctx->type != config->device_type) { |
1301 | 0 | av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: " |
1302 | 0 | "does not match the type of the provided device " |
1303 | 0 | "context.\n", desc->name); |
1304 | 0 | goto try_again; |
1305 | 0 | } |
1306 | 0 | } else if (config->methods & |
1307 | 0 | AV_CODEC_HW_CONFIG_METHOD_INTERNAL) { |
1308 | | // Internal-only setup, no additional configuration. |
1309 | 0 | } else if (config->methods & |
1310 | 0 | AV_CODEC_HW_CONFIG_METHOD_AD_HOC) { |
1311 | | // Some ad-hoc configuration we can't see and can't check. |
1312 | 0 | } else { |
1313 | 0 | av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: " |
1314 | 0 | "missing configuration.\n", desc->name); |
1315 | 0 | goto try_again; |
1316 | 0 | } |
1317 | 0 | if (hw_config->hwaccel) { |
1318 | 0 | av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel %s " |
1319 | 0 | "initialisation.\n", desc->name, hw_config->hwaccel->p.name); |
1320 | 0 | err = hwaccel_init(avctx, hw_config->hwaccel); |
1321 | 0 | if (err < 0) |
1322 | 0 | goto try_again; |
1323 | 0 | } |
1324 | 0 | ret = user_choice; |
1325 | 0 | break; |
1326 | | |
1327 | 0 | try_again: |
1328 | 0 | av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying " |
1329 | 0 | "get_format() without it.\n", desc->name); |
1330 | 0 | for (i = 0; i < n; i++) { |
1331 | 0 | if (choices[i] == user_choice) |
1332 | 0 | break; |
1333 | 0 | } |
1334 | 0 | for (; i + 1 < n; i++) |
1335 | 0 | choices[i] = choices[i + 1]; |
1336 | 0 | --n; |
1337 | 0 | } |
1338 | | |
1339 | 754k | if (ret < 0) |
1340 | 0 | ff_hwaccel_uninit(avctx); |
1341 | | |
1342 | 754k | av_freep(&choices); |
1343 | 754k | return ret; |
1344 | 754k | } |
1345 | | |
1346 | | static const AVPacketSideData* |
1347 | | packet_side_data_get(const AVPacketSideData *sd, int nb_sd, |
1348 | | enum AVPacketSideDataType type) |
1349 | 3.58G | { |
1350 | 3.58G | for (int i = 0; i < nb_sd; i++) |
1351 | 0 | if (sd[i].type == type) |
1352 | 0 | return &sd[i]; |
1353 | | |
1354 | 3.58G | return NULL; |
1355 | 3.58G | } |
1356 | | |
1357 | | const AVPacketSideData *ff_get_coded_side_data(const AVCodecContext *avctx, |
1358 | | enum AVPacketSideDataType type) |
1359 | 31.7k | { |
1360 | 31.7k | return packet_side_data_get(avctx->coded_side_data, avctx->nb_coded_side_data, type); |
1361 | 31.7k | } |
1362 | | |
1363 | | static int side_data_stereo3d_merge(AVFrameSideData *sd_frame, |
1364 | | const AVPacketSideData *sd_pkt) |
1365 | 0 | { |
1366 | 0 | const AVStereo3D *src; |
1367 | 0 | AVStereo3D *dst; |
1368 | 0 | int ret; |
1369 | |
1370 | 0 | ret = av_buffer_make_writable(&sd_frame->buf); |
1371 | 0 | if (ret < 0) |
1372 | 0 | return ret; |
1373 | 0 | sd_frame->data = sd_frame->buf->data; |
1374 | |
1375 | 0 | dst = ( AVStereo3D*)sd_frame->data; |
1376 | 0 | src = (const AVStereo3D*)sd_pkt->data; |
1377 | |
1378 | 0 | if (dst->type == AV_STEREO3D_UNSPEC) |
1379 | 0 | dst->type = src->type; |
1380 | |
1381 | 0 | if (dst->view == AV_STEREO3D_VIEW_UNSPEC) |
1382 | 0 | dst->view = src->view; |
1383 | |
1384 | 0 | if (dst->primary_eye == AV_PRIMARY_EYE_NONE) |
1385 | 0 | dst->primary_eye = src->primary_eye; |
1386 | |
1387 | 0 | if (!dst->baseline) |
1388 | 0 | dst->baseline = src->baseline; |
1389 | |
1390 | 0 | if (!dst->horizontal_disparity_adjustment.num) |
1391 | 0 | dst->horizontal_disparity_adjustment = src->horizontal_disparity_adjustment; |
1392 | |
1393 | 0 | if (!dst->horizontal_field_of_view.num) |
1394 | 0 | dst->horizontal_field_of_view = src->horizontal_field_of_view; |
1395 | |
1396 | 0 | return 0; |
1397 | 0 | } |
1398 | | |
1399 | | static int side_data_exif_parse(AVFrame *dst, const AVPacketSideData *sd_pkt) |
1400 | 0 | { |
1401 | 0 | AVExifMetadata ifd = { 0 }; |
1402 | 0 | AVExifEntry *entry = NULL; |
1403 | 0 | AVBufferRef *buf = NULL; |
1404 | 0 | AVFrameSideData *sd_frame; |
1405 | 0 | int ret; |
1406 | |
|
1407 | 0 | ret = av_exif_parse_buffer(NULL, sd_pkt->data, sd_pkt->size, &ifd, |
1408 | 0 | AV_EXIF_TIFF_HEADER); |
1409 | 0 | if (ret < 0) |
1410 | 0 | return ret; |
1411 | | |
1412 | 0 | ret = av_exif_get_entry(NULL, &ifd, av_exif_get_tag_id("Orientation"), 0, &entry); |
1413 | 0 | if (ret < 0) |
1414 | 0 | goto end; |
1415 | | |
1416 | 0 | if (!entry) { |
1417 | 0 | ret = av_exif_ifd_to_dict(NULL, &ifd, &dst->metadata); |
1418 | 0 | if (ret < 0) |
1419 | 0 | goto end; |
1420 | | |
1421 | 0 | sd_frame = av_frame_side_data_new(&dst->side_data, &dst->nb_side_data, AV_FRAME_DATA_EXIF, |
1422 | 0 | sd_pkt->size, 0); |
1423 | 0 | if (sd_frame) |
1424 | 0 | memcpy(sd_frame->data, sd_pkt->data, sd_pkt->size); |
1425 | 0 | ret = sd_frame ? 0 : AVERROR(ENOMEM); |
1426 | |
1427 | 0 | goto end; |
1428 | 0 | } else if (entry->count <= 0 || entry->type != AV_TIFF_SHORT) { |
1429 | 0 | ret = AVERROR_INVALIDDATA; |
1430 | 0 | goto end; |
1431 | 0 | } |
1432 | | |
1433 | | // If a display matrix already exists in the frame, give it priority |
1434 | 0 | if (av_frame_side_data_get(dst->side_data, dst->nb_side_data, AV_FRAME_DATA_DISPLAYMATRIX)) |
1435 | 0 | goto finish; |
1436 | | |
1437 | 0 | sd_frame = av_frame_side_data_new(&dst->side_data, &dst->nb_side_data, AV_FRAME_DATA_DISPLAYMATRIX, |
1438 | 0 | sizeof(int32_t) * 9, 0); |
1439 | 0 | if (!sd_frame) { |
1440 | 0 | ret = AVERROR(ENOMEM); |
1441 | 0 | goto end; |
1442 | 0 | } |
1443 | | |
1444 | 0 | ret = av_exif_orientation_to_matrix((int32_t *)sd_frame->data, entry->value.uint[0]); |
1445 | 0 | if (ret < 0) |
1446 | 0 | goto end; |
1447 | | |
1448 | 0 | finish: |
1449 | 0 | av_exif_remove_entry(NULL, &ifd, entry->id, 0); |
1450 | |
1451 | 0 | ret = av_exif_ifd_to_dict(NULL, &ifd, &dst->metadata); |
1452 | 0 | if (ret < 0) |
1453 | 0 | goto end; |
1454 | | |
1455 | 0 | ret = av_exif_write(NULL, &ifd, &buf, AV_EXIF_TIFF_HEADER); |
1456 | 0 | if (ret < 0) |
1457 | 0 | goto end; |
1458 | | |
1459 | 0 | if (!av_frame_side_data_add(&dst->side_data, &dst->nb_side_data, AV_FRAME_DATA_EXIF, &buf, 0)) { |
1460 | 0 | ret = AVERROR(ENOMEM); |
1461 | 0 | goto end; |
1462 | 0 | } |
1463 | | |
1464 | 0 | ret = 0; |
1465 | 0 | end: |
1466 | 0 | av_buffer_unref(&buf); |
1467 | 0 | av_exif_free(&ifd); |
1468 | 0 | return ret; |
1469 | 0 | } |
1470 | | |
1471 | | static int side_data_map(AVFrame *dst, |
1472 | | const AVPacketSideData *sd_src, int nb_sd_src, |
1473 | | const SideDataMap *map) |
1474 | | |
1475 | 383M | { |
1476 | 3.96G | for (int i = 0; map[i].packet < AV_PKT_DATA_NB; i++) { |
1477 | 3.58G | const enum AVPacketSideDataType type_pkt = map[i].packet; |
1478 | 3.58G | const enum AVFrameSideDataType type_frame = map[i].frame; |
1479 | 3.58G | const AVPacketSideData *sd_pkt; |
1480 | 3.58G | AVFrameSideData *sd_frame; |
1481 | | |
1482 | 3.58G | sd_pkt = packet_side_data_get(sd_src, nb_sd_src, type_pkt); |
1483 | 3.58G | if (!sd_pkt) |
1484 | 3.58G | continue; |
1485 | | |
1486 | 0 | sd_frame = av_frame_get_side_data(dst, type_frame); |
1487 | 0 | if (sd_frame) { |
1488 | 0 | if (type_frame == AV_FRAME_DATA_STEREO3D) { |
1489 | 0 | int ret = side_data_stereo3d_merge(sd_frame, sd_pkt); |
1490 | 0 | if (ret < 0) |
1491 | 0 | return ret; |
1492 | 0 | } |
1493 | | |
1494 | 0 | continue; |
1495 | 0 | } |
1496 | | |
1497 | 0 | switch (type_pkt) { |
1498 | 0 | case AV_PKT_DATA_EXIF: { |
1499 | 0 | int ret = side_data_exif_parse(dst, sd_pkt); |
1500 | 0 | if (ret < 0) |
1501 | 0 | return ret; |
1502 | 0 | break; |
1503 | 0 | } |
1504 | 0 | default: |
1505 | 0 | sd_frame = av_frame_new_side_data(dst, type_frame, sd_pkt->size); |
1506 | 0 | if (!sd_frame) |
1507 | 0 | return AVERROR(ENOMEM); |
1508 | | |
1509 | 0 | memcpy(sd_frame->data, sd_pkt->data, sd_pkt->size); |
1510 | 0 | break; |
1511 | 0 | } |
1512 | 0 | } |
1513 | | |
1514 | 383M | return 0; |
1515 | 383M | } |
1516 | | |
1517 | | static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame) |
1518 | 127M | { |
1519 | 127M | size_t size; |
1520 | 127M | const uint8_t *side_metadata; |
1521 | | |
1522 | 127M | AVDictionary **frame_md = &frame->metadata; |
1523 | | |
1524 | 127M | side_metadata = av_packet_get_side_data(avpkt, |
1525 | 127M | AV_PKT_DATA_STRINGS_METADATA, &size); |
1526 | 127M | return av_packet_unpack_dictionary(side_metadata, size, frame_md); |
1527 | 127M | } |
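
For reference, a hedged sketch of the producing end: attach_strings_metadata() is a hypothetical helper showing how AV_PKT_DATA_STRINGS_METADATA can be packed onto a packet with public API, so that the function above re-exports it as frame metadata.

#include <libavcodec/packet.h>
#include <libavutil/dict.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

/* Attach one key/value pair as per-packet string metadata. */
static int attach_strings_metadata(AVPacket *pkt, const char *key, const char *value)
{
    AVDictionary *dict = NULL;
    uint8_t *data;
    size_t size;
    int ret = av_dict_set(&dict, key, value, 0);
    if (ret < 0)
        return ret;
    data = av_packet_pack_dictionary(dict, &size);
    av_dict_free(&dict);
    if (!data)
        return AVERROR(ENOMEM);
    ret = av_packet_add_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, data, size);
    if (ret < 0)
        av_free(data); /* ownership passes to the packet only on success */
    return ret;
}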
1528 | | |
1529 | | int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx, |
1530 | | AVFrame *frame, const AVPacket *pkt) |
1531 | 127M | { |
1532 | 127M | static const SideDataMap sd[] = { |
1533 | 127M | { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC }, |
1534 | 127M | { AV_PKT_DATA_AFD, AV_FRAME_DATA_AFD }, |
1535 | 127M | { AV_PKT_DATA_DYNAMIC_HDR10_PLUS, AV_FRAME_DATA_DYNAMIC_HDR_PLUS }, |
1536 | 127M | { AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE }, |
1537 | 127M | { AV_PKT_DATA_SKIP_SAMPLES, AV_FRAME_DATA_SKIP_SAMPLES }, |
1538 | 127M | { AV_PKT_DATA_LCEVC, AV_FRAME_DATA_LCEVC }, |
1539 | 127M | { AV_PKT_DATA_NB } |
1540 | 127M | }; |
1541 | | |
1542 | 127M | int ret = 0; |
1543 | | |
1544 | 127M | frame->pts = pkt->pts; |
1545 | 127M | frame->duration = pkt->duration; |
1546 | | |
1547 | 127M | ret = side_data_map(frame, pkt->side_data, pkt->side_data_elems, ff_sd_global_map); |
1548 | 127M | if (ret < 0) |
1549 | 0 | return ret; |
1550 | | |
1551 | 127M | ret = side_data_map(frame, pkt->side_data, pkt->side_data_elems, sd); |
1552 | 127M | if (ret < 0) |
1553 | 0 | return ret; |
1554 | | |
1555 | 127M | add_metadata_from_side_data(pkt, frame); |
1556 | | |
1557 | 127M | if (pkt->flags & AV_PKT_FLAG_DISCARD) { |
1558 | 60.2M | frame->flags |= AV_FRAME_FLAG_DISCARD; |
1559 | 60.2M | } |
1560 | | |
1561 | 127M | if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) { |
1562 | 0 | int ret = av_buffer_replace(&frame->opaque_ref, pkt->opaque_ref); |
1563 | 0 | if (ret < 0) |
1564 | 0 | return ret; |
1565 | 0 | frame->opaque = pkt->opaque; |
1566 | 0 | } |
1567 | | |
1568 | 127M | return 0; |
1569 | 127M | } |
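
An illustrative sketch of the caller side of the opaque branch above; tag_packet() is a hypothetical helper and assumes the context and packet are otherwise set up by the caller.

#include <libavcodec/avcodec.h>

/* Tag a packet so the frame decoded from it can be matched again via
 * frame->opaque; requires AV_CODEC_FLAG_COPY_OPAQUE, which the branch
 * above honors per packet. */
static void tag_packet(AVCodecContext *avctx, AVPacket *pkt, void *tag)
{
    avctx->flags |= AV_CODEC_FLAG_COPY_OPAQUE;
    pkt->opaque   = tag;
}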
1570 | | |
1571 | | int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame) |
1572 | 127M | { |
1573 | 127M | int ret; |
1574 | | |
1575 | 127M | ret = side_data_map(frame, avctx->coded_side_data, avctx->nb_coded_side_data, |
1576 | 127M | ff_sd_global_map); |
1577 | 127M | if (ret < 0) |
1578 | 0 | return ret; |
1579 | | |
1580 | 127M | for (int i = 0; i < avctx->nb_decoded_side_data; i++) { |
1581 | 1.01k | const AVFrameSideData *src = avctx->decoded_side_data[i]; |
1582 | 1.01k | if (av_frame_get_side_data(frame, src->type)) |
1583 | 106 | continue; |
1584 | 910 | ret = av_frame_side_data_clone(&frame->side_data, &frame->nb_side_data, src, 0); |
1585 | 910 | if (ret < 0) |
1586 | 0 | return ret; |
1587 | 910 | } |
1588 | | |
1589 | 127M | if (!(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) { |
1590 | 127M | const AVPacket *pkt = avctx->internal->last_pkt_props; |
1591 | | |
1592 | 127M | ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt); |
1593 | 127M | if (ret < 0) |
1594 | 0 | return ret; |
1595 | 127M | } |
1596 | | |
1597 | 127M | ret = fill_frame_props(avctx, frame); |
1598 | 127M | if (ret < 0) |
1599 | 0 | return ret; |
1600 | | |
1601 | 127M | switch (avctx->codec->type) { |
1602 | 43.4M | case AVMEDIA_TYPE_VIDEO: |
1603 | 43.4M | if (frame->width && frame->height && |
1604 | 43.3M | av_image_check_sar(frame->width, frame->height, |
1605 | 43.3M | frame->sample_aspect_ratio) < 0) { |
1606 | 344k | av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", |
1607 | 344k | frame->sample_aspect_ratio.num, |
1608 | 344k | frame->sample_aspect_ratio.den); |
1609 | 344k | frame->sample_aspect_ratio = (AVRational){ 0, 1 }; |
1610 | 344k | } |
1611 | 43.4M | break; |
1612 | 127M | } |
1613 | 127M | return 0; |
1614 | 127M | } |
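
The sample-aspect-ratio check in the video branch above is usable on its own; a minimal sketch with public API (sanitize_sar() is an illustrative name):

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>

/* Reject a nonsensical SAR the same way as above, falling back to
 * "unknown" (0/1). */
static void sanitize_sar(AVFrame *frame)
{
    if (frame->width && frame->height &&
        av_image_check_sar(frame->width, frame->height,
                           frame->sample_aspect_ratio) < 0)
        frame->sample_aspect_ratio = (AVRational){ 0, 1 };
}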
1615 | | |
1616 | | static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame) |
1617 | 116M | { |
1618 | 116M | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { |
1619 | 35.3M | int i; |
1620 | 35.3M | int num_planes = av_pix_fmt_count_planes(frame->format); |
1621 | 35.3M | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); |
1622 | 35.3M | int flags = desc ? desc->flags : 0; |
1623 | 35.3M | if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL)) |
1624 | 7.43M | num_planes = 2; |
1625 | 113M | for (i = 0; i < num_planes; i++) { |
1626 | 77.7M | av_assert0(frame->data[i]); |
1627 | 77.7M | } |
1628 | | // For formats without data like hwaccel allow unused pointers to be non-NULL. |
1629 | 240M | for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) { |
1630 | 204M | if (frame->data[i]) |
1631 | 0 | av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n"); |
1632 | 204M | frame->data[i] = NULL; |
1633 | 204M | } |
1634 | 35.3M | } |
1635 | 116M | } |
1636 | | |
1637 | | static void decode_data_free(AVRefStructOpaque unused, void *obj) |
1638 | 116M | { |
1639 | 116M | FrameDecodeData *fdd = obj; |
1640 | | |
1641 | 116M | if (fdd->post_process_opaque_free) |
1642 | 0 | fdd->post_process_opaque_free(fdd->post_process_opaque); |
1643 | | |
1644 | 116M | if (fdd->hwaccel_priv_free) |
1645 | 0 | fdd->hwaccel_priv_free(fdd->hwaccel_priv); |
1646 | 116M | } |
1647 | | |
1648 | | int ff_attach_decode_data(AVFrame *frame) |
1649 | 116M | { |
1650 | 116M | FrameDecodeData *fdd; |
1651 | | |
1652 | 116M | av_assert1(!frame->private_ref); |
1653 | 116M | av_refstruct_unref(&frame->private_ref); |
1654 | | |
1655 | 116M | fdd = av_refstruct_alloc_ext(sizeof(*fdd), 0, NULL, decode_data_free); |
1656 | 116M | if (!fdd) |
1657 | 0 | return AVERROR(ENOMEM); |
1658 | | |
1659 | 116M | frame->private_ref = fdd; |
1660 | | |
1661 | 116M | return 0; |
1662 | 116M | } |
1663 | | |
1664 | | static void update_frame_props(AVCodecContext *avctx, AVFrame *frame) |
1665 | 119M | { |
1666 | | #if CONFIG_LIBLCEVC_DEC |
1667 | | AVCodecInternal *avci = avctx->internal; |
1668 | | DecodeContext *dc = decode_ctx(avci); |
1669 | | |
1670 | | dc->lcevc.frame = dc->lcevc.ctx && avctx->codec_type == AVMEDIA_TYPE_VIDEO && |
1671 | | av_frame_get_side_data(frame, AV_FRAME_DATA_LCEVC); |
1672 | | |
1673 | | if (dc->lcevc.frame) { |
1674 | | dc->lcevc.width = frame->width; |
1675 | | dc->lcevc.height = frame->height; |
1676 | | frame->width = frame->width * 2 / FFMAX(frame->sample_aspect_ratio.den, 1); |
1677 | | frame->height = frame->height * 2 / FFMAX(frame->sample_aspect_ratio.num, 1); |
1678 | | } |
1679 | | #endif |
1680 | 119M | } |
1681 | | |
1682 | | static int attach_post_process_data(AVCodecContext *avctx, AVFrame *frame) |
1683 | 116M | { |
1684 | | #if CONFIG_LIBLCEVC_DEC |
1685 | | AVCodecInternal *avci = avctx->internal; |
1686 | | DecodeContext *dc = decode_ctx(avci); |
1687 | | |
1688 | | if (dc->lcevc.frame) { |
1689 | | FrameDecodeData *fdd = frame->private_ref; |
1690 | | FFLCEVCFrame *frame_ctx; |
1691 | | int ret; |
1692 | | |
1693 | | frame_ctx = av_mallocz(sizeof(*frame_ctx)); |
1694 | | if (!frame_ctx) |
1695 | | return AVERROR(ENOMEM); |
1696 | | |
1697 | | frame_ctx->frame = av_frame_alloc(); |
1698 | | if (!frame_ctx->frame) { |
1699 | | av_free(frame_ctx); |
1700 | | return AVERROR(ENOMEM); |
1701 | | } |
1702 | | |
1703 | | frame_ctx->lcevc = av_refstruct_ref(dc->lcevc.ctx); |
1704 | | frame_ctx->frame->width = frame->width; |
1705 | | frame_ctx->frame->height = frame->height; |
1706 | | frame_ctx->frame->format = frame->format; |
1707 | | |
1708 | | frame->width = dc->lcevc.width; |
1709 | | frame->height = dc->lcevc.height; |
1710 | | |
1711 | | ret = avctx->get_buffer2(avctx, frame_ctx->frame, 0); |
1712 | | if (ret < 0) { |
1713 | | ff_lcevc_unref(frame_ctx); |
1714 | | return ret; |
1715 | | } |
1716 | | |
1717 | | validate_avframe_allocation(avctx, frame_ctx->frame); |
1718 | | |
1719 | | fdd->post_process_opaque = frame_ctx; |
1720 | | fdd->post_process_opaque_free = ff_lcevc_unref; |
1721 | | fdd->post_process = ff_lcevc_process; |
1722 | | } |
1723 | | dc->lcevc.frame = 0; |
1724 | | #endif |
1725 | | |
1726 | 116M | return 0; |
1727 | 116M | } |
1728 | | |
1729 | | int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags) |
1730 | 122M | { |
1731 | 122M | const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel); |
1732 | 122M | int override_dimensions = 1; |
1733 | 122M | int ret; |
1734 | | |
1735 | 122M | av_assert0(ff_codec_is_decoder(avctx->codec)); |
1736 | | |
1737 | 122M | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { |
1738 | 38.0M | if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN || |
1739 | 38.0M | (ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) { |
1740 | 2.72M | av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n"); |
1741 | 2.72M | ret = AVERROR(EINVAL); |
1742 | 2.72M | goto fail; |
1743 | 2.72M | } |
1744 | | |
1745 | 35.3M | if (frame->width <= 0 || frame->height <= 0) { |
1746 | 35.1M | frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres)); |
1747 | 35.1M | frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres)); |
1748 | 35.1M | override_dimensions = 0; |
1749 | 35.1M | } |
1750 | | |
1751 | 35.3M | if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) { |
1752 | 10.6k | av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n"); |
1753 | 10.6k | ret = AVERROR(EINVAL); |
1754 | 10.6k | goto fail; |
1755 | 10.6k | } |
1756 | 84.5M | } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { |
1757 | 84.5M | if (frame->nb_samples * (int64_t)avctx->ch_layout.nb_channels > avctx->max_samples) { |
1758 | 159k | av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples); |
1759 | 159k | ret = AVERROR(EINVAL); |
1760 | 159k | goto fail; |
1761 | 159k | } |
1762 | 84.5M | } |
1763 | 119M | ret = ff_decode_frame_props(avctx, frame); |
1764 | 119M | if (ret < 0) |
1765 | 0 | goto fail; |
1766 | | |
1767 | 119M | if (hwaccel) { |
1768 | 0 | if (hwaccel->alloc_frame) { |
1769 | 0 | ret = hwaccel->alloc_frame(avctx, frame); |
1770 | 0 | goto end; |
1771 | 0 | } |
1772 | 119M | } else { |
1773 | 119M | avctx->sw_pix_fmt = avctx->pix_fmt; |
1774 | 119M | update_frame_props(avctx, frame); |
1775 | 119M | } |
1776 | | |
1777 | 119M | ret = avctx->get_buffer2(avctx, frame, flags); |
1778 | 119M | if (ret < 0) |
1779 | 2.71M | goto fail; |
1780 | | |
1781 | 116M | validate_avframe_allocation(avctx, frame); |
1782 | | |
1783 | 116M | ret = ff_attach_decode_data(frame); |
1784 | 116M | if (ret < 0) |
1785 | 0 | goto fail; |
1786 | | |
1787 | 116M | ret = attach_post_process_data(avctx, frame); |
1788 | 116M | if (ret < 0) |
1789 | 0 | goto fail; |
1790 | | |
1791 | 116M | end: |
1792 | 116M | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions && |
1793 | 35.1M | !(ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) { |
1794 | 29.3M | frame->width = avctx->width; |
1795 | 29.3M | frame->height = avctx->height; |
1796 | 29.3M | } |
1797 | | |
1798 | 122M | fail: |
1799 | 122M | if (ret < 0) { |
1800 | 5.60M | av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); |
1801 | 5.60M | av_frame_unref(frame); |
1802 | 5.60M | } |
1803 | | |
1804 | 122M | return ret; |
1805 | 116M | } |
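
A hedged sketch of a user-supplied get_buffer2() callback, which is what the avctx->get_buffer2 call above ultimately invokes; my_get_buffer2() is a hypothetical name and simply defers to the default allocator.

#include <libavcodec/avcodec.h>

/* A pass-through get_buffer2() callback: a real allocator would serve
 * frames from its own pool here; deferring to the default keeps the
 * sketch runnable. */
static int my_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    return avcodec_default_get_buffer2(avctx, frame, flags);
}

It would typically be installed with avctx->get_buffer2 = my_get_buffer2; before the decoder is opened.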
1806 | | |
1807 | | static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) |
1808 | 13.0M | { |
1809 | 13.0M | AVFrame *tmp; |
1810 | 13.0M | int ret; |
1811 | | |
1812 | 13.0M | av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO); |
1813 | | |
1814 | | // make sure the discard flag does not persist |
1815 | 13.0M | frame->flags &= ~AV_FRAME_FLAG_DISCARD; |
1816 | | |
1817 | 13.0M | if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) { |
1818 | 71.8k | av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n", |
1819 | 71.8k | frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt)); |
1820 | 71.8k | av_frame_unref(frame); |
1821 | 71.8k | } |
1822 | | |
1823 | 13.0M | if (!frame->data[0]) |
1824 | 2.32M | return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF); |
1825 | | |
1826 | 10.7M | av_frame_side_data_free(&frame->side_data, &frame->nb_side_data); |
1827 | | |
1828 | 10.7M | if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame)) |
1829 | 7.89M | return ff_decode_frame_props(avctx, frame); |
1830 | | |
1831 | 2.85M | tmp = av_frame_alloc(); |
1832 | 2.85M | if (!tmp) |
1833 | 0 | return AVERROR(ENOMEM); |
1834 | | |
1835 | 2.85M | av_frame_move_ref(tmp, frame); |
1836 | | |
1837 | 2.85M | ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF); |
1838 | 2.85M | if (ret < 0) { |
1839 | 0 | av_frame_free(&tmp); |
1840 | 0 | return ret; |
1841 | 0 | } |
1842 | | |
1843 | 2.85M | av_frame_copy(frame, tmp); |
1844 | 2.85M | av_frame_free(&tmp); |
1845 | | |
1846 | 2.85M | return 0; |
1847 | 2.85M | } |
1848 | | |
1849 | | int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags) |
1850 | 13.0M | { |
1851 | 13.0M | int ret = reget_buffer_internal(avctx, frame, flags); |
1852 | 13.0M | if (ret < 0) |
1853 | 1.64M | av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); |
1854 | 13.0M | return ret; |
1855 | 13.0M | } |
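
An illustrative decoder-side sketch of the flag handled above (reuse_frame() is a hypothetical helper):

#include "decode.h"

/* Reuse the decoder's persistent frame; FF_REGET_BUFFER_FLAG_READONLY skips
 * the copy-on-write path in reget_buffer_internal() when the previously
 * decoded pixels will not be modified in place. */
static int reuse_frame(AVCodecContext *avctx, AVFrame *frame, int will_modify)
{
    return ff_reget_buffer(avctx, frame,
                           will_modify ? 0 : FF_REGET_BUFFER_FLAG_READONLY);
}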
1856 | | |
1857 | | typedef struct ProgressInternal { |
1858 | | ThreadProgress progress; |
1859 | | struct AVFrame *f; |
1860 | | } ProgressInternal; |
1861 | | |
1862 | | static void check_progress_consistency(const ProgressFrame *f) |
1863 | 28.6M | { |
1864 | 28.6M | av_assert1(!!f->f == !!f->progress); |
1865 | 28.6M | av_assert1(!f->progress || f->progress->f == f->f); |
1866 | 28.6M | } |
1867 | | |
1868 | | int ff_progress_frame_alloc(AVCodecContext *avctx, ProgressFrame *f) |
1869 | 1.06M | { |
1870 | 1.06M | AVRefStructPool *pool = avctx->internal->progress_frame_pool; |
1871 | | |
1872 | 1.06M | av_assert1(!f->f && !f->progress); |
1873 | | |
1874 | 1.06M | f->progress = av_refstruct_pool_get(pool); |
1875 | 1.06M | if (!f->progress) |
1876 | 0 | return AVERROR(ENOMEM); |
1877 | | |
1878 | 1.06M | f->f = f->progress->f; |
1879 | 1.06M | return 0; |
1880 | 1.06M | } |
1881 | | |
1882 | | int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags) |
1883 | 962k | { |
1884 | 962k | int ret = ff_progress_frame_alloc(avctx, f); |
1885 | 962k | if (ret < 0) |
1886 | 0 | return ret; |
1887 | | |
1888 | 962k | ret = ff_thread_get_buffer(avctx, f->progress->f, flags); |
1889 | 962k | if (ret < 0) { |
1890 | 2.65k | f->f = NULL; |
1891 | 2.65k | av_refstruct_unref(&f->progress); |
1892 | 2.65k | return ret; |
1893 | 2.65k | } |
1894 | 960k | return 0; |
1895 | 962k | } |
1896 | | |
1897 | | void ff_progress_frame_ref(ProgressFrame *dst, const ProgressFrame *src) |
1898 | 4.66M | { |
1899 | 4.66M | av_assert1(src->progress && src->f && src->f == src->progress->f); |
1900 | 4.66M | av_assert1(!dst->f && !dst->progress); |
1901 | 4.66M | dst->f = src->f; |
1902 | 4.66M | dst->progress = av_refstruct_ref(src->progress); |
1903 | 4.66M | } |
1904 | | |
1905 | | void ff_progress_frame_unref(ProgressFrame *f) |
1906 | 23.7M | { |
1907 | 23.7M | check_progress_consistency(f); |
1908 | 23.7M | f->f = NULL; |
1909 | 23.7M | av_refstruct_unref(&f->progress); |
1910 | 23.7M | } |
1911 | | |
1912 | | void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src) |
1913 | 4.83M | { |
1914 | 4.83M | if (dst == src) |
1915 | 0 | return; |
1916 | 4.83M | ff_progress_frame_unref(dst); |
1917 | 4.83M | check_progress_consistency(src); |
1918 | 4.83M | if (src->f) |
1919 | 4.66M | ff_progress_frame_ref(dst, src); |
1920 | 4.83M | } |
1921 | | |
1922 | | void ff_progress_frame_report(ProgressFrame *f, int n) |
1923 | 3.53M | { |
1924 | 3.53M | ff_thread_progress_report(&f->progress->progress, n); |
1925 | 3.53M | } |
1926 | | |
1927 | | void ff_progress_frame_await(const ProgressFrame *f, int n) |
1928 | 93.6M | { |
1929 | 93.6M | ff_thread_progress_await(&f->progress->progress, n); |
1930 | 93.6M | } |
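
A hedged sketch of how a frame-threaded decoder might drive these helpers; decode_one() is a hypothetical function and the actual progress values (rows, slices, fields) are decoder-specific.

#include <limits.h>

#include "avcodec.h"
#include "progressframe.h"

/* Allocate a ProgressFrame, then publish decoding progress so threads
 * blocked in ff_progress_frame_await() can proceed. */
static int decode_one(AVCodecContext *avctx, ProgressFrame *pic)
{
    int ret = ff_progress_frame_get_buffer(avctx, pic, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;
    /* ... decode the first part of the picture ... */
    ff_progress_frame_report(pic, 1);
    /* ... decode the remainder ... */
    ff_progress_frame_report(pic, INT_MAX); /* fully decoded */
    return 0;
}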
1931 | | |
1932 | | #if !HAVE_THREADS |
1933 | | enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset) |
1934 | | { |
1935 | | return FF_THREAD_NO_FRAME_THREADING; |
1936 | | } |
1937 | | #endif /* !HAVE_THREADS */ |
1938 | | |
1939 | | static av_cold int progress_frame_pool_init_cb(AVRefStructOpaque opaque, void *obj) |
1940 | 100k | { |
1941 | 100k | const AVCodecContext *avctx = opaque.nc; |
1942 | 100k | ProgressInternal *progress = obj; |
1943 | 100k | int ret; |
1944 | | |
1945 | 100k | ret = ff_thread_progress_init(&progress->progress, avctx->active_thread_type & FF_THREAD_FRAME); |
1946 | 100k | if (ret < 0) |
1947 | 0 | return ret; |
1948 | | |
1949 | 100k | progress->f = av_frame_alloc(); |
1950 | 100k | if (!progress->f) |
1951 | 0 | return AVERROR(ENOMEM); |
1952 | | |
1953 | 100k | return 0; |
1954 | 100k | } |
1955 | | |
1956 | | static void progress_frame_pool_reset_cb(AVRefStructOpaque unused, void *obj) |
1957 | 1.06M | { |
1958 | 1.06M | ProgressInternal *progress = obj; |
1959 | | |
1960 | 1.06M | ff_thread_progress_reset(&progress->progress); |
1961 | 1.06M | av_frame_unref(progress->f); |
1962 | 1.06M | } |
1963 | | |
1964 | | static av_cold void progress_frame_pool_free_entry_cb(AVRefStructOpaque opaque, void *obj) |
1965 | 100k | { |
1966 | 100k | ProgressInternal *progress = obj; |
1967 | | |
1968 | 100k | ff_thread_progress_destroy(&progress->progress); |
1969 | 100k | av_frame_free(&progress->f); |
1970 | 100k | } |
1971 | | |
1972 | | av_cold int ff_decode_preinit(AVCodecContext *avctx) |
1973 | 1.07M | { |
1974 | 1.07M | AVCodecInternal *avci = avctx->internal; |
1975 | 1.07M | DecodeContext *dc = decode_ctx(avci); |
1976 | 1.07M | int ret = 0; |
1977 | | |
1978 | 1.07M | dc->initial_pict_type = AV_PICTURE_TYPE_NONE; |
1979 | 1.07M | if (avctx->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY) { |
1980 | 516k | dc->intra_only_flag = AV_FRAME_FLAG_KEY; |
1981 | 516k | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) |
1982 | 258k | dc->initial_pict_type = AV_PICTURE_TYPE_I; |
1983 | 516k | } |
1984 | | |
1985 | | /* if the decoder init function was already called previously, |
1986 | | * free the already allocated subtitle_header before overwriting it */ |
1987 | 1.07M | av_freep(&avctx->subtitle_header); |
1988 | | |
1989 | 1.07M | if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) { |
1990 | 0 | av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n", |
1991 | 0 | avctx->codec->max_lowres); |
1992 | 0 | avctx->lowres = avctx->codec->max_lowres; |
1993 | 0 | } |
1994 | 1.07M | if (avctx->sub_charenc) { |
1995 | 0 | if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) { |
1996 | 0 | av_log(avctx, AV_LOG_ERROR, "Character encoding is only " |
1997 | 0 | "supported with subtitles codecs\n"); |
1998 | 0 | return AVERROR(EINVAL); |
1999 | 0 | } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) { |
2000 | 0 | av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, " |
2001 | 0 | "subtitles character encoding will be ignored\n", |
2002 | 0 | avctx->codec_descriptor->name); |
2003 | 0 | avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING; |
2004 | 0 | } else { |
2005 | | /* input character encoding is set for a text based subtitle |
2006 | | * codec at this point */ |
2007 | 0 | if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC) |
2008 | 0 | avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER; |
2009 | |
2010 | 0 | if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) { |
2011 | 0 | #if CONFIG_ICONV |
2012 | 0 | iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc); |
2013 | 0 | if (cd == (iconv_t)-1) { |
2014 | 0 | ret = AVERROR(errno); |
2015 | 0 | av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context " |
2016 | 0 | "with input character encoding \"%s\"\n", avctx->sub_charenc); |
2017 | 0 | return ret; |
2018 | 0 | } |
2019 | 0 | iconv_close(cd); |
2020 | | #else |
2021 | | av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles " |
2022 | | "conversion needs a libavcodec built with iconv support " |
2023 | | "for this codec\n"); |
2024 | | return AVERROR(ENOSYS); |
2025 | | #endif |
2026 | 0 | } |
2027 | 0 | } |
2028 | 0 | } |
2029 | | |
2030 | 1.07M | dc->pts_correction_num_faulty_pts = |
2031 | 1.07M | dc->pts_correction_num_faulty_dts = 0; |
2032 | 1.07M | dc->pts_correction_last_pts = |
2033 | 1.07M | dc->pts_correction_last_dts = INT64_MIN; |
2034 | | |
2035 | 1.07M | if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY |
2036 | 0 | && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO) |
2037 | 0 | av_log(avctx, AV_LOG_WARNING, |
2038 | 0 | "gray decoding requested but not enabled at configuration time\n"); |
2039 | 1.07M | if (avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) { |
2040 | 0 | avctx->export_side_data |= AV_CODEC_EXPORT_DATA_MVS; |
2041 | 0 | } |
2042 | | |
2043 | 1.07M | if (avctx->nb_side_data_prefer_packet == 1 && |
2044 | 0 | avctx->side_data_prefer_packet[0] == -1) |
2045 | 0 | dc->side_data_pref_mask = ~0ULL; |
2046 | 1.07M | else { |
2047 | 1.07M | for (unsigned i = 0; i < avctx->nb_side_data_prefer_packet; i++) { |
2048 | 0 | int val = avctx->side_data_prefer_packet[i]; |
2049 | |
2050 | 0 | if (val < 0 || val >= AV_PKT_DATA_NB) { |
2051 | 0 | av_log(avctx, AV_LOG_ERROR, "Invalid side data type: %d\n", val); |
2052 | 0 | return AVERROR(EINVAL); |
2053 | 0 | } |
2054 | | |
2055 | 0 | for (unsigned j = 0; ff_sd_global_map[j].packet < AV_PKT_DATA_NB; j++) { |
2056 | 0 | if (ff_sd_global_map[j].packet == val) { |
2057 | 0 | val = ff_sd_global_map[j].frame; |
2058 | | |
2059 | | // this code will need to be changed when we have more than |
2060 | | // 64 frame side data types |
2061 | 0 | if (val >= 64) { |
2062 | 0 | av_log(avctx, AV_LOG_ERROR, "Side data type too big\n"); |
2063 | 0 | return AVERROR_BUG; |
2064 | 0 | } |
2065 | | |
2066 | 0 | dc->side_data_pref_mask |= 1ULL << val; |
2067 | 0 | } |
2068 | 0 | } |
2069 | 0 | } |
2070 | 1.07M | } |
2071 | | |
2072 | 1.07M | avci->in_pkt = av_packet_alloc(); |
2073 | 1.07M | avci->last_pkt_props = av_packet_alloc(); |
2074 | 1.07M | if (!avci->in_pkt || !avci->last_pkt_props) |
2075 | 0 | return AVERROR(ENOMEM); |
2076 | | |
2077 | 1.07M | if (ffcodec(avctx->codec)->caps_internal & FF_CODEC_CAP_USES_PROGRESSFRAMES) { |
2078 | 87.8k | avci->progress_frame_pool = |
2079 | 87.8k | av_refstruct_pool_alloc_ext(sizeof(ProgressInternal), |
2080 | 87.8k | AV_REFSTRUCT_POOL_FLAG_FREE_ON_INIT_ERROR, |
2081 | 87.8k | avctx, progress_frame_pool_init_cb, |
2082 | 87.8k | progress_frame_pool_reset_cb, |
2083 | 87.8k | progress_frame_pool_free_entry_cb, NULL); |
2084 | 87.8k | if (!avci->progress_frame_pool) |
2085 | 0 | return AVERROR(ENOMEM); |
2086 | 87.8k | } |
2087 | 1.07M | ret = decode_bsfs_init(avctx); |
2088 | 1.07M | if (ret < 0) |
2089 | 373 | return ret; |
2090 | | |
2091 | 1.07M | if (!(avctx->export_side_data & AV_CODEC_EXPORT_DATA_ENHANCEMENTS)) { |
2092 | 1.07M | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { |
2093 | | #if CONFIG_LIBLCEVC_DEC |
2094 | | ret = ff_lcevc_alloc(&dc->lcevc.ctx); |
2095 | | if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) |
2096 | | return ret; |
2097 | | #endif |
2098 | 743k | } |
2099 | 1.07M | } |
2100 | | |
2101 | 1.07M | return 0; |
2102 | 1.07M | } |
2103 | | |
2104 | | /** |
2105 | | * Check side data preference and clear existing side data from frame |
2106 | | * if needed. |
2107 | | * |
2108 | | * @retval 0 side data of this type can be added to frame |
2109 | | * @retval 1 side data of this type should not be added to frame |
2110 | | */ |
2111 | | static int side_data_pref(const AVCodecContext *avctx, AVFrameSideData ***sd, |
2112 | | int *nb_sd, enum AVFrameSideDataType type) |
2113 | 367k | { |
2114 | 367k | DecodeContext *dc = decode_ctx(avctx->internal); |
2115 | | |
2116 | | // Note: could be skipped for `type` without corresponding packet sd |
2117 | 367k | if (av_frame_side_data_get(*sd, *nb_sd, type)) { |
2118 | 2.99k | if (dc->side_data_pref_mask & (1ULL << type)) |
2119 | 0 | return 1; |
2120 | 2.99k | av_frame_side_data_remove(sd, nb_sd, type); |
2121 | 2.99k | } |
2122 | | |
2123 | 367k | return 0; |
2124 | 367k | } |
2125 | | |
2126 | | |
2127 | | int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, |
2128 | | enum AVFrameSideDataType type, size_t size, |
2129 | | AVFrameSideData **psd) |
2130 | 226k | { |
2131 | 226k | AVFrameSideData *sd; |
2132 | | |
2133 | 226k | if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data, type)) { |
2134 | 0 | if (psd) |
2135 | 0 | *psd = NULL; |
2136 | 0 | return 0; |
2137 | 0 | } |
2138 | | |
2139 | 226k | sd = av_frame_new_side_data(frame, type, size); |
2140 | 226k | if (psd) |
2141 | 226k | *psd = sd; |
2142 | | |
2143 | 226k | return sd ? 0 : AVERROR(ENOMEM); |
2144 | 226k | } |
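
An illustrative decoder-side use of ff_frame_new_side_data(), mirroring the contract above that the returned side data may legitimately be NULL when the user prefers packet-level data (export_afd() is a hypothetical helper):

#include "decode.h"

/* Export a single Active Format Description byte on the frame. */
static int export_afd(AVCodecContext *avctx, AVFrame *frame, uint8_t afd)
{
    AVFrameSideData *sd;
    int ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_AFD, 1, &sd);
    if (ret < 0)
        return ret;
    if (sd)
        sd->data[0] = afd;
    return 0;
}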
2145 | | |
2146 | | int ff_frame_new_side_data_from_buf_ext(const AVCodecContext *avctx, |
2147 | | AVFrameSideData ***sd, int *nb_sd, |
2148 | | enum AVFrameSideDataType type, |
2149 | | AVBufferRef **buf) |
2150 | 118k | { |
2151 | 118k | int ret = 0; |
2152 | | |
2153 | 118k | if (side_data_pref(avctx, sd, nb_sd, type)) |
2154 | 0 | goto finish; |
2155 | | |
2156 | 118k | if (!av_frame_side_data_add(sd, nb_sd, type, buf, 0)) |
2157 | 0 | ret = AVERROR(ENOMEM); |
2158 | | |
2159 | 118k | finish: |
2160 | 118k | av_buffer_unref(buf); |
2161 | | |
2162 | 118k | return ret; |
2163 | 118k | } |
2164 | | |
2165 | | int ff_frame_new_side_data_from_buf(const AVCodecContext *avctx, |
2166 | | AVFrame *frame, enum AVFrameSideDataType type, |
2167 | | AVBufferRef **buf) |
2168 | 117k | { |
2169 | 117k | return ff_frame_new_side_data_from_buf_ext(avctx, |
2170 | 117k | &frame->side_data, &frame->nb_side_data, |
2171 | 117k | type, buf); |
2172 | 117k | } |
2173 | | |
2174 | | int ff_decode_mastering_display_new_ext(const AVCodecContext *avctx, |
2175 | | AVFrameSideData ***sd, int *nb_sd, |
2176 | | struct AVMasteringDisplayMetadata **mdm) |
2177 | 10.4k | { |
2178 | 10.4k | AVBufferRef *buf; |
2179 | 10.4k | size_t size; |
2180 | | |
2181 | 10.4k | if (side_data_pref(avctx, sd, nb_sd, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA)) { |
2182 | 0 | *mdm = NULL; |
2183 | 0 | return 0; |
2184 | 0 | } |
2185 | | |
2186 | 10.4k | *mdm = av_mastering_display_metadata_alloc_size(&size); |
2187 | 10.4k | if (!*mdm) |
2188 | 0 | return AVERROR(ENOMEM); |
2189 | | |
2190 | 10.4k | buf = av_buffer_create((uint8_t *)*mdm, size, NULL, NULL, 0); |
2191 | 10.4k | if (!buf) { |
2192 | 0 | av_freep(mdm); |
2193 | 0 | return AVERROR(ENOMEM); |
2194 | 0 | } |
2195 | | |
2196 | 10.4k | if (!av_frame_side_data_add(sd, nb_sd, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA, |
2197 | 10.4k | &buf, 0)) { |
2198 | 0 | *mdm = NULL; |
2199 | 0 | av_buffer_unref(&buf); |
2200 | 0 | return AVERROR(ENOMEM); |
2201 | 0 | } |
2202 | | |
2203 | 10.4k | return 0; |
2204 | 10.4k | } |
2205 | | |
2206 | | int ff_decode_mastering_display_new(const AVCodecContext *avctx, AVFrame *frame, |
2207 | | AVMasteringDisplayMetadata **mdm) |
2208 | 4.45k | { |
2209 | 4.45k | if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data, |
2210 | 4.45k | AV_FRAME_DATA_MASTERING_DISPLAY_METADATA)) { |
2211 | 0 | *mdm = NULL; |
2212 | 0 | return 0; |
2213 | 0 | } |
2214 | | |
2215 | 4.45k | *mdm = av_mastering_display_metadata_create_side_data(frame); |
2216 | 4.45k | return *mdm ? 0 : AVERROR(ENOMEM); |
2217 | 4.45k | } |
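
A hedged sketch of a decoder filling the metadata allocated by the helper above; export_mdm() and the luminance values are illustrative only.

#include "decode.h"
#include "libavutil/mastering_display_metadata.h"

/* Fill mastering-display luminance; the helper returns *mdm == NULL (and 0)
 * when user-supplied side data takes precedence, so that case is a no-op. */
static int export_mdm(AVCodecContext *avctx, AVFrame *frame)
{
    AVMasteringDisplayMetadata *mdm;
    int ret = ff_decode_mastering_display_new(avctx, frame, &mdm);
    if (ret < 0 || !mdm)
        return ret;
    mdm->max_luminance = av_make_q(1000, 1);  /* 1000 cd/m^2, illustrative */
    mdm->min_luminance = av_make_q(5, 10000); /* 0.0005 cd/m^2, illustrative */
    mdm->has_luminance = 1;
    return 0;
}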
2218 | | |
2219 | | int ff_decode_content_light_new_ext(const AVCodecContext *avctx, |
2220 | | AVFrameSideData ***sd, int *nb_sd, |
2221 | | AVContentLightMetadata **clm) |
2222 | 1.21k | { |
2223 | 1.21k | AVBufferRef *buf; |
2224 | 1.21k | size_t size; |
2225 | | |
2226 | 1.21k | if (side_data_pref(avctx, sd, nb_sd, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL)) { |
2227 | 0 | *clm = NULL; |
2228 | 0 | return 0; |
2229 | 0 | } |
2230 | | |
2231 | 1.21k | *clm = av_content_light_metadata_alloc(&size); |
2232 | 1.21k | if (!*clm) |
2233 | 0 | return AVERROR(ENOMEM); |
2234 | | |
2235 | 1.21k | buf = av_buffer_create((uint8_t *)*clm, size, NULL, NULL, 0); |
2236 | 1.21k | if (!buf) { |
2237 | 0 | av_freep(clm); |
2238 | 0 | return AVERROR(ENOMEM); |
2239 | 0 | } |
2240 | | |
2241 | 1.21k | if (!av_frame_side_data_add(sd, nb_sd, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL, |
2242 | 1.21k | &buf, 0)) { |
2243 | 0 | *clm = NULL; |
2244 | 0 | av_buffer_unref(&buf); |
2245 | 0 | return AVERROR(ENOMEM); |
2246 | 0 | } |
2247 | | |
2248 | 1.21k | return 0; |
2249 | 1.21k | } |
2250 | | |
2251 | | int ff_decode_content_light_new(const AVCodecContext *avctx, AVFrame *frame, |
2252 | | AVContentLightMetadata **clm) |
2253 | 6.94k | { |
2254 | 6.94k | if (side_data_pref(avctx, &frame->side_data, &frame->nb_side_data, |
2255 | 6.94k | AV_FRAME_DATA_CONTENT_LIGHT_LEVEL)) { |
2256 | 0 | *clm = NULL; |
2257 | 0 | return 0; |
2258 | 0 | } |
2259 | | |
2260 | 6.94k | *clm = av_content_light_metadata_create_side_data(frame); |
2261 | 6.94k | return *clm ? 0 : AVERROR(ENOMEM); |
2262 | 6.94k | } |
2263 | | |
2264 | | int ff_copy_palette(void *dst, const AVPacket *src, void *logctx) |
2265 | 1.77M | { |
2266 | 1.77M | size_t size; |
2267 | 1.77M | const void *pal = av_packet_get_side_data(src, AV_PKT_DATA_PALETTE, &size); |
2268 | | |
2269 | 1.77M | if (pal && size == AVPALETTE_SIZE) { |
2270 | 0 | memcpy(dst, pal, AVPALETTE_SIZE); |
2271 | 0 | return 1; |
2272 | 1.77M | } else if (pal) { |
2273 | 0 | av_log(logctx, AV_LOG_ERROR, |
2274 | 0 | "Palette size %zu is wrong\n", size); |
2275 | 0 | } |
2276 | 1.77M | return 0; |
2277 | 1.77M | } |
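
For context, a hedged sketch of the producing side: send_palette() is a hypothetical helper that attaches the palette which ff_copy_palette() above later copies into the decoder's palette buffer.

#include <string.h>

#include <libavcodec/packet.h>
#include <libavutil/error.h>
#include <libavutil/pixfmt.h>

/* Ship a 256-entry ARGB palette as packet side data. */
static int send_palette(AVPacket *pkt, const uint32_t pal[256])
{
    uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
    if (!sd)
        return AVERROR(ENOMEM);
    memcpy(sd, pal, AVPALETTE_SIZE);
    return 0;
}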
2278 | | |
2279 | | int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private) |
2280 | 9.48M | { |
2281 | 9.48M | const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel); |
2282 | | |
2283 | 9.48M | if (!hwaccel || !hwaccel->frame_priv_data_size) |
2284 | 9.48M | return 0; |
2285 | | |
2286 | 0 | av_assert0(!*hwaccel_picture_private); |
2287 | | |
2288 | 0 | if (hwaccel->free_frame_priv) { |
2289 | 0 | AVHWFramesContext *frames_ctx; |
2290 | |
2291 | 0 | if (!avctx->hw_frames_ctx) |
2292 | 0 | return AVERROR(EINVAL); |
2293 | | |
2294 | 0 | frames_ctx = (AVHWFramesContext *) avctx->hw_frames_ctx->data; |
2295 | 0 | *hwaccel_picture_private = av_refstruct_alloc_ext(hwaccel->frame_priv_data_size, 0, |
2296 | 0 | frames_ctx->device_ctx, |
2297 | 0 | hwaccel->free_frame_priv); |
2298 | 0 | } else { |
2299 | 0 | *hwaccel_picture_private = av_refstruct_allocz(hwaccel->frame_priv_data_size); |
2300 | 0 | } |
2301 | | |
2302 | 0 | if (!*hwaccel_picture_private) |
2303 | 0 | return AVERROR(ENOMEM); |
2304 | | |
2305 | 0 | return 0; |
2306 | 0 | } |
2307 | | |
2308 | | av_cold void ff_decode_flush_buffers(AVCodecContext *avctx) |
2309 | 72.1M | { |
2310 | 72.1M | AVCodecInternal *avci = avctx->internal; |
2311 | 72.1M | DecodeContext *dc = decode_ctx(avci); |
2312 | | |
2313 | 72.1M | av_packet_unref(avci->last_pkt_props); |
2314 | 72.1M | av_packet_unref(avci->in_pkt); |
2315 | | |
2316 | 72.1M | dc->pts_correction_last_pts = |
2317 | 72.1M | dc->pts_correction_last_dts = INT64_MIN; |
2318 | | |
2319 | 72.1M | if (avci->bsf) |
2320 | 72.1M | av_bsf_flush(avci->bsf); |
2321 | | |
2322 | 72.1M | dc->nb_draining_errors = 0; |
2323 | 72.1M | dc->draining_started = 0; |
2324 | 72.1M | } |
2325 | | |
2326 | | av_cold AVCodecInternal *ff_decode_internal_alloc(void) |
2327 | 1.10M | { |
2328 | 1.10M | return av_mallocz(sizeof(DecodeContext)); |
2329 | 1.10M | } |
2330 | | |
2331 | | av_cold void ff_decode_internal_sync(AVCodecContext *dst, const AVCodecContext *src) |
2332 | 0 | { |
2333 | 0 | const DecodeContext *src_dc = decode_ctx(src->internal); |
2334 | 0 | DecodeContext *dst_dc = decode_ctx(dst->internal); |
2335 | |
2336 | 0 | dst_dc->initial_pict_type = src_dc->initial_pict_type; |
2337 | 0 | dst_dc->intra_only_flag = src_dc->intra_only_flag; |
2338 | 0 | dst_dc->side_data_pref_mask = src_dc->side_data_pref_mask; |
2339 | | #if CONFIG_LIBLCEVC_DEC |
2340 | | av_refstruct_replace(&dst_dc->lcevc.ctx, src_dc->lcevc.ctx); |
2341 | | #endif |
2342 | 0 | } |
2343 | | |
2344 | | av_cold void ff_decode_internal_uninit(AVCodecContext *avctx) |
2345 | 1.10M | { |
2346 | | #if CONFIG_LIBLCEVC_DEC |
2347 | | AVCodecInternal *avci = avctx->internal; |
2348 | | DecodeContext *dc = decode_ctx(avci); |
2349 | | |
2350 | | av_refstruct_unref(&dc->lcevc.ctx); |
2351 | | #endif |
2352 | 1.10M | } |
2353 | | |
2354 | | static int attach_displaymatrix(AVCodecContext *avctx, AVFrame *frame, int orientation) |
2355 | 3.99k | { |
2356 | 3.99k | AVFrameSideData *sd = NULL; |
2357 | 3.99k | int32_t *matrix; |
2358 | 3.99k | int ret; |
2359 | | /* invalid orientation */ |
2360 | 3.99k | if (orientation < 1 || orientation > 8) |
2361 | 489 | return AVERROR_INVALIDDATA; |
2362 | 3.50k | ret = ff_frame_new_side_data(avctx, frame, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9, &sd); |
2363 | 3.50k | if (ret < 0) { |
2364 | 0 | av_log(avctx, AV_LOG_ERROR, "Could not allocate frame side data: %s\n", av_err2str(ret)); |
2365 | 0 | return ret; |
2366 | 0 | } |
2367 | 3.50k | if (sd) { |
2368 | 3.50k | matrix = (int32_t *) sd->data; |
2369 | 3.50k | ret = av_exif_orientation_to_matrix(matrix, orientation); |
2370 | 3.50k | } |
2371 | | |
2372 | 3.50k | return ret; |
2373 | 3.50k | } |
2374 | | |
2375 | | static int exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd, AVBufferRef **pbuf) |
2376 | 110k | { |
2377 | 110k | const AVExifEntry *orient = NULL; |
2378 | 110k | AVExifMetadata *cloned = NULL; |
2379 | 110k | int ret; |
2380 | | |
2381 | 169k | for (size_t i = 0; i < ifd->count; i++) { |
2382 | 63.2k | const AVExifEntry *entry = &ifd->entries[i]; |
2383 | 63.2k | if (entry->id == av_exif_get_tag_id("Orientation") && |
2384 | 6.03k | entry->count > 0 && entry->type == AV_TIFF_SHORT) { |
2385 | 3.99k | orient = entry; |
2386 | 3.99k | break; |
2387 | 3.99k | } |
2388 | 63.2k | } |
2389 | | |
2390 | 110k | if (orient) { |
2391 | 3.99k | av_log(avctx, AV_LOG_DEBUG, "found EXIF orientation: %" PRIu64 "\n", orient->value.uint[0]); |
2392 | 3.99k | ret = attach_displaymatrix(avctx, frame, orient->value.uint[0]); |
2393 | 3.99k | if (ret < 0) { |
2394 | 489 | av_log(avctx, AV_LOG_WARNING, "unable to attach displaymatrix from EXIF\n"); |
2395 | 3.50k | } else { |
2396 | 3.50k | cloned = av_exif_clone_ifd(ifd); |
2397 | 3.50k | if (!cloned) { |
2398 | 0 | ret = AVERROR(ENOMEM); |
2399 | 0 | goto end; |
2400 | 0 | } |
2401 | 3.50k | av_exif_remove_entry(avctx, cloned, orient->id, 0); |
2402 | 3.50k | ifd = cloned; |
2403 | 3.50k | } |
2404 | 3.99k | } |
2405 | | |
2406 | 110k | ret = av_exif_ifd_to_dict(avctx, ifd, &frame->metadata); |
2407 | 110k | if (ret < 0) |
2408 | 0 | goto end; |
2409 | | |
2410 | 110k | if (cloned || !*pbuf) { |
2411 | 103k | av_buffer_unref(pbuf); |
2412 | 103k | ret = av_exif_write(avctx, ifd, pbuf, AV_EXIF_TIFF_HEADER); |
2413 | 103k | if (ret < 0) |
2414 | 0 | goto end; |
2415 | 103k | } |
2416 | | |
2417 | 110k | ret = ff_frame_new_side_data_from_buf(avctx, frame, AV_FRAME_DATA_EXIF, pbuf); |
2418 | 110k | if (ret < 0) |
2419 | 0 | goto end; |
2420 | | |
2421 | 110k | ret = 0; |
2422 | | |
2423 | 110k | end: |
2424 | 110k | av_buffer_unref(pbuf); |
2425 | 110k | av_exif_free(cloned); |
2426 | 110k | av_free(cloned); |
2427 | 110k | return ret; |
2428 | 110k | } |
2429 | | |
2430 | | int ff_decode_exif_attach_ifd(AVCodecContext *avctx, AVFrame *frame, const AVExifMetadata *ifd) |
2431 | 100k | { |
2432 | 100k | AVBufferRef *dummy = NULL; |
2433 | 100k | return exif_attach_ifd(avctx, frame, ifd, &dummy); |
2434 | 100k | } |
2435 | | |
2436 | | int ff_decode_exif_attach_buffer(AVCodecContext *avctx, AVFrame *frame, AVBufferRef **pbuf, |
2437 | | enum AVExifHeaderMode header_mode) |
2438 | 18.8k | { |
2439 | 18.8k | int ret; |
2440 | 18.8k | AVBufferRef *data = *pbuf; |
2441 | 18.8k | AVExifMetadata ifd = { 0 }; |
2442 | | |
2443 | 18.8k | ret = av_exif_parse_buffer(avctx, data->data, data->size, &ifd, header_mode); |
2444 | 18.8k | if (ret < 0) |
2445 | 9.32k | goto end; |
2446 | | |
2447 | 9.56k | ret = exif_attach_ifd(avctx, frame, &ifd, pbuf); |
2448 | | |
2449 | 18.8k | end: |
2450 | 18.8k | av_buffer_unref(pbuf); |
2451 | 18.8k | av_exif_free(&ifd); |
2452 | 18.8k | return ret; |
2453 | 9.56k | } |