/src/ffmpeg/libavformat/demux.c
Line | Count | Source |
1 | | /* |
2 | | * Core demuxing component |
3 | | * Copyright (c) 2000, 2001, 2002 Fabrice Bellard |
4 | | * |
5 | | * This file is part of FFmpeg. |
6 | | * |
7 | | * FFmpeg is free software; you can redistribute it and/or |
8 | | * modify it under the terms of the GNU Lesser General Public |
9 | | * License as published by the Free Software Foundation; either |
10 | | * version 2.1 of the License, or (at your option) any later version. |
11 | | * |
12 | | * FFmpeg is distributed in the hope that it will be useful, |
13 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | | * Lesser General Public License for more details. |
16 | | * |
17 | | * You should have received a copy of the GNU Lesser General Public |
18 | | * License along with FFmpeg; if not, write to the Free Software |
19 | | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 | | */ |
21 | | |
22 | | #include <stdint.h> |
23 | | |
24 | | #include "config_components.h" |
25 | | |
26 | | #include "libavutil/avassert.h" |
27 | | #include "libavutil/avstring.h" |
28 | | #include "libavutil/dict.h" |
29 | | #include "libavutil/internal.h" |
30 | | #include "libavutil/intreadwrite.h" |
31 | | #include "libavutil/mathematics.h" |
32 | | #include "libavutil/mem.h" |
33 | | #include "libavutil/opt.h" |
34 | | #include "libavutil/pixfmt.h" |
35 | | #include "libavutil/time.h" |
36 | | #include "libavutil/timestamp.h" |
37 | | |
38 | | #include "libavcodec/avcodec.h" |
39 | | #include "libavcodec/bsf.h" |
40 | | #include "libavcodec/codec_desc.h" |
41 | | #include "libavcodec/internal.h" |
42 | | #include "libavcodec/packet_internal.h" |
43 | | #include "libavcodec/raw.h" |
44 | | |
45 | | #include "avformat.h" |
46 | | #include "avformat_internal.h" |
47 | | #include "avio_internal.h" |
48 | | #include "demux.h" |
49 | | #include "id3v2.h" |
50 | | #include "internal.h" |
51 | | #include "url.h" |
52 | | |
53 | | static int64_t wrap_timestamp(const AVStream *st, int64_t timestamp) |
54 | 0 | { |
55 | 0 | const FFStream *const sti = cffstream(st); |
56 | 0 | if (sti->pts_wrap_behavior != AV_PTS_WRAP_IGNORE && st->pts_wrap_bits < 64 && |
57 | 0 | sti->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) { |
58 | 0 | if (sti->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET && |
59 | 0 | timestamp < sti->pts_wrap_reference) |
60 | 0 | return timestamp + (1ULL << st->pts_wrap_bits); |
61 | 0 | else if (sti->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET && |
62 | 0 | timestamp >= sti->pts_wrap_reference) |
63 | 0 | return timestamp - (1ULL << st->pts_wrap_bits); |
64 | 0 | } |
65 | 0 | return timestamp; |
66 | 0 | } |
67 | | |
/**
 * Internal-API wrapper around wrap_timestamp(): apply the stream's PTS
 * wrap correction (as configured in FFStream) to the given timestamp.
 */
int64_t ff_wrap_timestamp(const AVStream *st, int64_t timestamp)
{
    return wrap_timestamp(st, timestamp);
}
72 | | |
73 | | static const AVCodec *find_probe_decoder(AVFormatContext *s, const AVStream *st, enum AVCodecID codec_id) |
74 | 0 | { |
75 | 0 | const AVCodec *codec; |
76 | |
|
77 | | #if CONFIG_H264_DECODER |
78 | | /* Other parts of the code assume this decoder to be used for h264, |
79 | | * so force it if possible. */ |
80 | | if (codec_id == AV_CODEC_ID_H264) |
81 | | return avcodec_find_decoder_by_name("h264"); |
82 | | #endif |
83 | |
|
84 | 0 | codec = ff_find_decoder(s, st, codec_id); |
85 | 0 | if (!codec) |
86 | 0 | return NULL; |
87 | | |
88 | 0 | if (codec->capabilities & AV_CODEC_CAP_AVOID_PROBING) { |
89 | 0 | const AVCodec *probe_codec = NULL; |
90 | 0 | void *iter = NULL; |
91 | 0 | while ((probe_codec = av_codec_iterate(&iter))) { |
92 | 0 | if (probe_codec->id == codec->id && |
93 | 0 | av_codec_is_decoder(probe_codec) && |
94 | 0 | !(probe_codec->capabilities & (AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_EXPERIMENTAL))) { |
95 | 0 | return probe_codec; |
96 | 0 | } |
97 | 0 | } |
98 | 0 | } |
99 | | |
100 | 0 | return codec; |
101 | 0 | } |
102 | | |
/**
 * Run the format probe over the data accumulated for a stream whose codec
 * is still unknown and, if a raw format is detected, map its name to a
 * codec id/type via a static table.
 *
 * On a match, st->codecpar's codec_id/codec_type are updated and the
 * stream is flagged for a codec-context update.
 *
 * @param pd probe data accumulated so far for this stream
 * @return the probe score of the detected format, 0 if nothing matched
 */
static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
                                     AVProbeData *pd)
{
    /* Map from raw-demuxer name to the codec it carries. */
    static const struct {
        const char *name;
        enum AVCodecID id;
        enum AVMediaType type;
    } fmt_id_type[] = {
        { "aac",        AV_CODEC_ID_AAC,          AVMEDIA_TYPE_AUDIO    },
        { "ac3",        AV_CODEC_ID_AC3,          AVMEDIA_TYPE_AUDIO    },
        { "aptx",       AV_CODEC_ID_APTX,         AVMEDIA_TYPE_AUDIO    },
        { "av1",        AV_CODEC_ID_AV1,          AVMEDIA_TYPE_VIDEO    },
        { "dts",        AV_CODEC_ID_DTS,          AVMEDIA_TYPE_AUDIO    },
        { "dvbsub",     AV_CODEC_ID_DVB_SUBTITLE, AVMEDIA_TYPE_SUBTITLE },
        { "dvbtxt",     AV_CODEC_ID_DVB_TELETEXT, AVMEDIA_TYPE_SUBTITLE },
        { "eac3",       AV_CODEC_ID_EAC3,         AVMEDIA_TYPE_AUDIO    },
        { "h264",       AV_CODEC_ID_H264,         AVMEDIA_TYPE_VIDEO    },
        { "hevc",       AV_CODEC_ID_HEVC,         AVMEDIA_TYPE_VIDEO    },
        { "loas",       AV_CODEC_ID_AAC_LATM,     AVMEDIA_TYPE_AUDIO    },
        { "m4v",        AV_CODEC_ID_MPEG4,        AVMEDIA_TYPE_VIDEO    },
        { "mjpeg_2000", AV_CODEC_ID_JPEG2000,     AVMEDIA_TYPE_VIDEO    },
        { "mp3",        AV_CODEC_ID_MP3,          AVMEDIA_TYPE_AUDIO    },
        { "mpegvideo",  AV_CODEC_ID_MPEG2VIDEO,   AVMEDIA_TYPE_VIDEO    },
        { "truehd",     AV_CODEC_ID_TRUEHD,       AVMEDIA_TYPE_AUDIO    },
        { "evc",        AV_CODEC_ID_EVC,          AVMEDIA_TYPE_VIDEO    },
        { "vvc",        AV_CODEC_ID_VVC,          AVMEDIA_TYPE_VIDEO    },
        { 0 }
    };
    int score;
    const AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
    FFStream *const sti = ffstream(st);

    if (fmt) {
        av_log(s, AV_LOG_DEBUG,
               "Probe with size=%d, packets=%d detected %s with score=%d\n",
               pd->buf_size, s->max_probe_packets - sti->probe_packets,
               fmt->name, score);
        for (int i = 0; fmt_id_type[i].name; i++) {
            if (!strcmp(fmt->name, fmt_id_type[i].name)) {
                /* A stream that already has a sample rate looks like audio;
                 * do not override it with a non-audio guess. */
                if (fmt_id_type[i].type != AVMEDIA_TYPE_AUDIO &&
                    st->codecpar->sample_rate)
                    continue;
                /* Not confident enough (score below the requested probe
                 * threshold) to override a differing codec id. */
                if (sti->request_probe > score &&
                    st->codecpar->codec_id != fmt_id_type[i].id)
                    continue;
                st->codecpar->codec_id   = fmt_id_type[i].id;
                st->codecpar->codec_type = fmt_id_type[i].type;
                sti->need_context_update = 1;
                return score;
            }
        }
    }
    return 0;
}
157 | | |
/**
 * Open the input (unless the caller supplied a custom AVIOContext or the
 * format needs no file) and probe the input format if it is not already
 * known.
 *
 * @return a non-negative probe score on success, a negative AVERROR code
 *         on failure
 */
static int init_input(AVFormatContext *s, const char *filename,
                      AVDictionary **options)
{
    int ret;
    AVProbeData pd = { filename, NULL, 0 };
    int score = AVPROBE_SCORE_RETRY;

    /* Caller-provided AVIOContext: never open anything ourselves. */
    if (s->pb) {
        s->flags |= AVFMT_FLAG_CUSTOM_IO;
        if (!s->iformat)
            return av_probe_input_buffer2(s->pb, &s->iformat, filename,
                                          s, 0, s->format_probesize);
        else if (s->iformat->flags & AVFMT_NOFILE)
            av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
                                      "will be ignored with AVFMT_NOFILE format.\n");
        return 0;
    }

    /* NOFILE formats and formats identifiable from the filename alone do
     * not require opening the input at all. */
    if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
        (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
        return score;

    if ((ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ | s->avio_flags, options)) < 0)
        return ret;

    if (s->iformat)
        return 0;
    /* Format still unknown: probe the opened byte stream. */
    return av_probe_input_buffer2(s->pb, &s->iformat, filename,
                                  s, 0, s->format_probesize);
}
188 | | |
189 | | static int codec_close(FFStream *sti); |
190 | | |
/**
 * Propagate updated codec parameters into each stream's internal codec
 * context for streams flagged with need_context_update.
 *
 * Closes an already-open internal decoder (and a parser bound to the old
 * codec id) before re-applying the parameters.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int update_stream_avctx(AVFormatContext *s)
{
    int ret;
    for (unsigned i = 0; i < s->nb_streams; i++) {
        AVStream *const st = s->streams[i];
        FFStream *const sti = ffstream(st);

        if (!sti->need_context_update)
            continue;

        /* A context update invalidates an open decoder; close it so it can
         * be reopened with the new parameters. */
        if (avcodec_is_open(sti->avctx)) {
            av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
            ret = codec_close(sti);
            sti->info->found_decoder = 0;
            if (ret < 0)
                return ret;
        }

        /* close parser, because it depends on the codec */
        if (sti->parser && sti->avctx->codec_id != st->codecpar->codec_id) {
            av_parser_close(sti->parser);
            sti->parser = NULL;
        }

        /* update internal codec context, for the parser */
        ret = avcodec_parameters_to_context(sti->avctx, st->codecpar);
        if (ret < 0)
            return ret;

        sti->codec_desc = avcodec_descriptor_get(sti->avctx->codec_id);

        sti->need_context_update = 0;
    }
    return 0;
}
226 | | |
227 | 0 | static av_always_inline int is_id3v2_format(const AVInputFormat *fmt) { |
228 | 0 | return ffifmt(fmt)->flags_internal & FF_INFMT_FLAG_ID3V2_AUTO; |
229 | 0 | } |
230 | | |
/* Open an input stream and read its header; public API documented in
 * avformat.h. On any failure the context is freed and *ps set to NULL. */
int avformat_open_input(AVFormatContext **ps, const char *filename,
                        const AVInputFormat *fmt, AVDictionary **options)
{
    FormatContextInternal *fci;
    AVFormatContext *s = *ps;
    FFFormatContext *si;
    AVDictionary *tmp = NULL;
    ID3v2ExtraMeta *id3v2_extra_meta = NULL;
    int ret = 0;

    if (!s && !(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);
    fci = ff_fc_internal(s);
    si  = &fci->fc;
    if (!s->av_class) {
        av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
        return AVERROR(EINVAL);
    }
    if (fmt)
        s->iformat = fmt;

    if (options)
        av_dict_copy(&tmp, *options, 0);

    if (s->pb) // must be before any goto fail
        s->flags |= AVFMT_FLAG_CUSTOM_IO;

    /* Apply caller options to the AVFormatContext itself. */
    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

    if (!(s->url = av_strdup(filename ? filename : ""))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* Open the input and detect the format; the return value is the
     * probe score. */
    if ((ret = init_input(s, filename, &tmp)) < 0)
        goto fail;
    s->probe_score = ret;

    /* Inherit protocol white/blacklists from the IO context if the caller
     * did not set them explicitly. */
    if (!s->protocol_whitelist && s->pb && s->pb->protocol_whitelist) {
        s->protocol_whitelist = av_strdup(s->pb->protocol_whitelist);
        if (!s->protocol_whitelist) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!s->protocol_blacklist && s->pb && s->pb->protocol_blacklist) {
        s->protocol_blacklist = av_strdup(s->pb->protocol_blacklist);
        if (!s->protocol_blacklist) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (s->format_whitelist && av_match_list(s->iformat->name, s->format_whitelist, ',') <= 0) {
        av_log(s, AV_LOG_ERROR, "Format not on whitelist \'%s\'\n", s->format_whitelist);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    avio_skip(s->pb, s->skip_initial_bytes);

    /* Check filename in case an image number is expected. */
    if (s->iformat->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
    }

    s->duration = s->start_time = AV_NOPTS_VALUE;

    /* Allocate private data. */
    if (ffifmt(s->iformat)->priv_data_size > 0) {
        if (!(s->priv_data = av_mallocz(ffifmt(s->iformat)->priv_data_size))) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (s->iformat->priv_class) {
            *(const AVClass **) s->priv_data = s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
            /* Remaining caller options are applied to the demuxer's
             * private context. */
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
    if (s->pb && is_id3v2_format(s->iformat))
        ff_id3v2_read_dict(s->pb, &si->id3v2_meta, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);

    if (ffifmt(s->iformat)->read_header)
        if ((ret = ffifmt(s->iformat)->read_header(s)) < 0) {
            /* Only demuxers opting into INIT_CLEANUP get their read_close
             * called after a failed read_header. */
            if (ffifmt(s->iformat)->flags_internal & FF_INFMT_FLAG_INIT_CLEANUP)
                goto close;
            goto fail;
        }

    /* ID3v2 tags are only used when the demuxer produced no metadata of
     * its own. */
    if (!s->metadata) {
        s->metadata    = si->id3v2_meta;
        si->id3v2_meta = NULL;
    } else if (si->id3v2_meta) {
        av_log(s, AV_LOG_WARNING, "Discarding ID3 tags because more suitable tags were found.\n");
        av_dict_free(&si->id3v2_meta);
    }

    if (id3v2_extra_meta) {
        if ((ret = ff_id3v2_parse_apic(s, id3v2_extra_meta)) < 0)
            goto close;
        if ((ret = ff_id3v2_parse_chapters(s, id3v2_extra_meta)) < 0)
            goto close;
        if ((ret = ff_id3v2_parse_priv(s, id3v2_extra_meta)) < 0)
            goto close;
        ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    }

    if ((ret = avformat_queue_attached_pictures(s)) < 0)
        goto close;

    /* Remember where the actual packet data starts, unless the demuxer
     * already recorded it. */
    if (s->pb && !si->data_offset)
        si->data_offset = avio_tell(s->pb);

    fci->raw_packet_buffer_size = 0;

    update_stream_avctx(s);

    /* Return unconsumed options to the caller. */
    if (options) {
        av_dict_free(options);
        *options = tmp;
    }
    *ps = s;
    return 0;

close:
    if (ffifmt(s->iformat)->read_close)
        ffifmt(s->iformat)->read_close(s);
fail:
    ff_id3v2_free_extra_meta(&id3v2_extra_meta);
    av_dict_free(&tmp);
    /* Only close the IO context if we opened it ourselves. */
    if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
        avio_closep(&s->pb);
    avformat_free_context(s);
    *ps = NULL;
    return ret;
}
376 | | |
/* Close an opened input context and free everything associated with it;
 * public API documented in avformat.h. Safe to call with NULL or *NULL. */
void avformat_close_input(AVFormatContext **ps)
{
    AVFormatContext *s;
    AVIOContext *pb;

    if (!ps || !*ps)
        return;

    s  = *ps;
    pb = s->pb;

    /* Do not close IO we do not own: custom IO belongs to the caller, and
     * NOFILE formats manage their own IO (image2 being the exception that
     * still uses s->pb — NOTE(review): presumably why it is special-cased;
     * verify against the image2 demuxer). */
    if ((s->iformat && strcmp(s->iformat->name, "image2") && s->iformat->flags & AVFMT_NOFILE) ||
        (s->flags & AVFMT_FLAG_CUSTOM_IO))
        pb = NULL;

    /* Let the demuxer release its private state before the context goes
     * away. */
    if (s->iformat)
        if (ffifmt(s->iformat)->read_close)
            ffifmt(s->iformat)->read_close(s);

    ff_format_io_close(s, &pb);
    avformat_free_context(s);

    *ps = NULL;
}
401 | | |
402 | | static void force_codec_ids(AVFormatContext *s, AVStream *st) |
403 | 0 | { |
404 | 0 | switch (st->codecpar->codec_type) { |
405 | 0 | case AVMEDIA_TYPE_VIDEO: |
406 | 0 | if (s->video_codec_id) |
407 | 0 | st->codecpar->codec_id = s->video_codec_id; |
408 | 0 | break; |
409 | 0 | case AVMEDIA_TYPE_AUDIO: |
410 | 0 | if (s->audio_codec_id) |
411 | 0 | st->codecpar->codec_id = s->audio_codec_id; |
412 | 0 | break; |
413 | 0 | case AVMEDIA_TYPE_SUBTITLE: |
414 | 0 | if (s->subtitle_codec_id) |
415 | 0 | st->codecpar->codec_id = s->subtitle_codec_id; |
416 | 0 | break; |
417 | 0 | case AVMEDIA_TYPE_DATA: |
418 | 0 | if (s->data_codec_id) |
419 | 0 | st->codecpar->codec_id = s->data_codec_id; |
420 | 0 | break; |
421 | 0 | } |
422 | 0 | } |
423 | | |
/**
 * Accumulate packet data into a stream's probe buffer and attempt codec
 * detection once enough data is available.
 *
 * @param pkt next packet of the stream, or NULL to signal that no more
 *            probe data will arrive (forces a final decision)
 * @return 0 (errors during reallocation degrade to "stop probing")
 */
static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    FFStream *const sti = ffstream(st);

    if (sti->request_probe > 0) {
        AVProbeData *const pd = &sti->probe_data;
        int end;
        av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, sti->probe_packets);
        --sti->probe_packets;

        if (pkt) {
            uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            if (!new_buf) {
                av_log(s, AV_LOG_WARNING,
                       "Failed to reallocate probe buffer for stream %d\n",
                       st->index);
                goto no_packet;
            }
            pd->buf = new_buf;
            memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            /* Probe functions may read up to AVPROBE_PADDING_SIZE zeroed
             * bytes past the end. */
            memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
        } else {
no_packet:
            sti->probe_packets = 0;
            if (!pd->buf_size) {
                av_log(s, AV_LOG_WARNING,
                       "nothing to probe for stream %d\n", st->index);
            }
        }

        end = fci->raw_packet_buffer_size >= s->probesize
              || sti->probe_packets <= 0;

        /* Re-probe only when forced to decide or when the buffer crossed a
         * power-of-two size boundary. Note: when pkt is NULL, probe_packets
         * was set to 0 above, so "end" is true and pkt->size is never
         * dereferenced here. */
        if (end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
            int score = set_codec_from_probe_data(s, st, pd);
            if (    (st->codecpar->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_STREAM_RETRY)
                || end) {
                /* Decision made (or forced): release the probe buffer and
                 * mark probing finished for this stream. */
                pd->buf_size = 0;
                av_freep(&pd->buf);
                sti->request_probe = -1;
                if (st->codecpar->codec_id != AV_CODEC_ID_NONE) {
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
                } else
                    av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
            }
            force_codec_ids(s, st);
        }
    }
    return 0;
}
476 | | |
/**
 * Establish the PTS wrap reference and behavior for a stream (and all
 * streams sharing its program) based on the first timestamped packet.
 *
 * @return 1 if a wrap reference was (newly) set, 0 if none was needed or
 *         one already exists
 */
static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index, AVPacket *pkt)
{
    FFStream *const sti = ffstream(st);
    int64_t ref = pkt->dts;
    int pts_wrap_behavior;
    int64_t pts_wrap_reference;
    AVProgram *first_program;

    if (ref == AV_NOPTS_VALUE)
        ref = pkt->pts;
    /* Nothing to do once a reference exists, when wrap cannot occur, or
     * when overflow correction is disabled. */
    if (sti->pts_wrap_reference != AV_NOPTS_VALUE || st->pts_wrap_bits >= 63 || ref == AV_NOPTS_VALUE || !s->correct_ts_overflow)
        return 0;
    ref &= (1LL << st->pts_wrap_bits)-1;

    // reference time stamp should be 60 s before first time stamp
    pts_wrap_reference = ref - av_rescale(60, st->time_base.den, st->time_base.num);
    // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
    pts_wrap_behavior = (ref < (1LL << st->pts_wrap_bits) - (1LL << st->pts_wrap_bits-3)) ||
        (ref < (1LL << st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
        AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;

    first_program = av_find_program_from_stream(s, NULL, stream_index);

    if (!first_program) {
        int default_stream_index = av_find_default_stream_index(s);
        FFStream *const default_sti = ffstream(s->streams[default_stream_index]);
        if (default_sti->pts_wrap_reference == AV_NOPTS_VALUE) {
            /* No program info at all: apply the reference to every stream
             * that belongs to no program. */
            for (unsigned i = 0; i < s->nb_streams; i++) {
                FFStream *const sti = ffstream(s->streams[i]);
                if (av_find_program_from_stream(s, NULL, i))
                    continue;
                sti->pts_wrap_reference = pts_wrap_reference;
                sti->pts_wrap_behavior = pts_wrap_behavior;
            }
        } else {
            /* Follow the default stream's already-established reference. */
            sti->pts_wrap_reference = default_sti->pts_wrap_reference;
            sti->pts_wrap_behavior = default_sti->pts_wrap_behavior;
        }
    } else {
        /* Prefer a reference already recorded on one of the stream's
         * programs over the newly computed one. */
        AVProgram *program = first_program;
        while (program) {
            if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
                pts_wrap_reference = program->pts_wrap_reference;
                pts_wrap_behavior = program->pts_wrap_behavior;
                break;
            }
            program = av_find_program_from_stream(s, program, stream_index);
        }

        // update every program with differing pts_wrap_reference
        program = first_program;
        while (program) {
            if (program->pts_wrap_reference != pts_wrap_reference) {
                for (unsigned i = 0; i < program->nb_stream_indexes; i++) {
                    FFStream *const sti = ffstream(s->streams[program->stream_index[i]]);
                    sti->pts_wrap_reference = pts_wrap_reference;
                    sti->pts_wrap_behavior = pts_wrap_behavior;
                }

                program->pts_wrap_reference = pts_wrap_reference;
                program->pts_wrap_behavior = pts_wrap_behavior;
            }
            program = av_find_program_from_stream(s, program, stream_index);
        }
    }
    return 1;
}
544 | | |
/**
 * Apply per-packet timestamp fixups on a freshly demuxed packet: establish
 * or apply the PTS wrap correction, force user-requested codec ids, and
 * optionally replace timestamps with wallclock time.
 */
static void update_timestamps(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    FFStream *const sti = ffstream(st);

    if (update_wrap_reference(s, st, pkt->stream_index, pkt) && sti->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
        // correct first time stamps to negative values
        if (!is_relative(sti->first_dts))
            sti->first_dts = wrap_timestamp(st, sti->first_dts);
        if (!is_relative(st->start_time))
            st->start_time = wrap_timestamp(st, st->start_time);
        if (!is_relative(sti->cur_dts))
            sti->cur_dts = wrap_timestamp(st, sti->cur_dts);
    }

    pkt->dts = wrap_timestamp(st, pkt->dts);
    pkt->pts = wrap_timestamp(st, pkt->pts);

    force_codec_ids(s, st);

    /* TODO: audio: time filter; video: frame reordering (pts != dts) */
    if (s->use_wallclock_as_timestamps)
        pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
}
568 | | |
/**
 * Handle a new packet and either return it directly if possible and
 * allow_passthrough is true or queue the packet (or drop the packet
 * if corrupt).
 *
 * @return < 0 on error, 0 if the packet was passed through,
 *         1 if it was queued or dropped
 */
static int handle_new_packet(AVFormatContext *s, AVPacket *pkt, int allow_passthrough)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    AVStream *st;
    FFStream *sti;
    int err;

    av_assert0(pkt->stream_index < (unsigned)s->nb_streams &&
               "Invalid stream index.\n");

    if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
        av_log(s, AV_LOG_WARNING,
               "Packet corrupt (stream = %d, dts = %s)%s.\n",
               pkt->stream_index, av_ts2str(pkt->dts),
               s->flags & AVFMT_FLAG_DISCARD_CORRUPT ? ", dropping it" : "");
        if (s->flags & AVFMT_FLAG_DISCARD_CORRUPT) {
            av_packet_unref(pkt);
            return 1;
        }
    }

    st  = s->streams[pkt->stream_index];
    sti = ffstream(st);

    update_timestamps(s, st, pkt);

    /* Passthrough is only allowed while no probing is pending and no
     * earlier packets are still queued (ordering must be preserved). */
    if (sti->request_probe <= 0 && allow_passthrough && !fci->raw_packet_buffer.head)
        return 0;

    err = avpriv_packet_list_put(&fci->raw_packet_buffer, pkt, NULL, 0);
    if (err < 0) {
        av_packet_unref(pkt);
        return err;
    }

    /* The packet now lives in the queue; feed the queued copy to the
     * codec prober. */
    pkt = &fci->raw_packet_buffer.tail->pkt;
    fci->raw_packet_buffer_size += pkt->size;

    err = probe_codec(s, st, pkt);
    if (err < 0)
        return err;

    return 1;
}
621 | | |
622 | | int ff_buffer_packet(AVFormatContext *s, AVPacket *pkt) |
623 | 0 | { |
624 | 0 | int err = handle_new_packet(s, pkt, 0); |
625 | |
|
626 | 0 | return err < 0 ? err : 0; |
627 | 0 | } |
628 | | |
/**
 * Read the next raw packet: drain the raw packet buffer first, otherwise
 * pull from the demuxer, handling FFERROR_REDO, probing and queueing.
 *
 * @return 0 on success, a negative AVERROR code (incl. EOF/EAGAIN) on
 *         failure
 */
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    int err;

#if FF_API_INIT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
    pkt->data = NULL;
    pkt->size = 0;
    av_init_packet(pkt);
FF_ENABLE_DEPRECATION_WARNINGS
#else
    av_packet_unref(pkt);
#endif

    for (;;) {
        PacketListEntry *pktl = fci->raw_packet_buffer.head;

        if (pktl) {
            AVStream *const st = s->streams[pktl->pkt.stream_index];
            /* The probe budget is exhausted: force a codec decision so the
             * queued packets can be released. */
            if (fci->raw_packet_buffer_size >= s->probesize)
                if ((err = probe_codec(s, st, NULL)) < 0)
                    return err;
            if (ffstream(st)->request_probe <= 0) {
                avpriv_packet_list_get(&fci->raw_packet_buffer, pkt);
                fci->raw_packet_buffer_size -= pkt->size;
                return 0;
            }
        }

        err = ffifmt(s->iformat)->read_packet(s, pkt);
        if (err < 0) {
            av_packet_unref(pkt);

            /* Some demuxers return FFERROR_REDO when they consume
               data and discard it (ignored streams, junk, extradata).
               We must re-call the demuxer to get the real packet. */
            if (err == FFERROR_REDO)
                continue;
            if (!pktl || err == AVERROR(EAGAIN))
                return err;
            /* Input ended while packets are still queued for probing:
             * finalize probing on every stream, then retry the queue. */
            for (unsigned i = 0; i < s->nb_streams; i++) {
                AVStream *const st = s->streams[i];
                FFStream *const sti = ffstream(st);
                if (sti->probe_packets || sti->request_probe > 0)
                    if ((err = probe_codec(s, st, NULL)) < 0)
                        return err;
                av_assert0(sti->request_probe <= 0);
            }
            continue;
        }

        err = av_packet_make_refcounted(pkt);
        if (err < 0) {
            av_packet_unref(pkt);
            return err;
        }

        err = handle_new_packet(s, pkt, 1);
        if (err <= 0) /* Error or passthrough */
            return err;
    }
}
692 | | |
/**
 * Compute the duration of one frame/packet as the fraction
 * (*pnum / *pden) seconds. Both outputs are set to 0 if the duration
 * cannot be determined.
 */
static void compute_frame_duration(AVFormatContext *s, int *pnum, int *pden,
                                   AVStream *st, AVCodecParserContext *pc,
                                   AVPacket *pkt)
{
    FFStream *const sti = ffstream(st);
    AVRational codec_framerate = sti->avctx->framerate;
    int frame_size, sample_rate;

    *pnum = 0;
    *pden = 0;
    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        /* Preference order: container frame rate, average frame rate (for
         * timestamp-less formats), the time base itself, then the codec
         * frame rate adjusted for fields/repeated pictures. */
        if (st->r_frame_rate.num && (!pc || !codec_framerate.num)) {
            *pnum = st->r_frame_rate.den;
            *pden = st->r_frame_rate.num;
        } else if ((s->iformat->flags & AVFMT_NOTIMESTAMPS) &&
                   !codec_framerate.num &&
                   st->avg_frame_rate.num && st->avg_frame_rate.den) {
            *pnum = st->avg_frame_rate.den;
            *pden = st->avg_frame_rate.num;
        } else if (st->time_base.num * 1000LL > st->time_base.den) {
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
            /* Field-based codecs tick twice per frame. */
            int ticks_per_frame = (sti->codec_desc &&
                                   (sti->codec_desc->props & AV_CODEC_PROP_FIELDS)) ? 2 : 1;
            av_reduce(pnum, pden,
                      codec_framerate.den,
                      codec_framerate.num * (int64_t)ticks_per_frame,
                      INT_MAX);

            if (pc && pc->repeat_pict) {
                av_reduce(pnum, pden,
                          (*pnum) * (1LL + pc->repeat_pict),
                          (*pden),
                          INT_MAX);
            }
            /* If this codec can be interlaced or progressive then we need
             * a parser to compute duration of a packet. Thus if we have
             * no parser in such case leave duration undefined. */
            if (sti->codec_desc &&
                (sti->codec_desc->props & AV_CODEC_PROP_FIELDS) && !pc)
                *pnum = *pden = 0;
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        /* Prefer the opened codec context when available; fall back to the
         * stream parameters otherwise. */
        if (sti->avctx_inited) {
            frame_size = av_get_audio_frame_duration(sti->avctx, pkt->size);
            sample_rate = sti->avctx->sample_rate;
        } else {
            frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
            sample_rate = st->codecpar->sample_rate;
        }
        if (frame_size <= 0 || sample_rate <= 0)
            break;
        *pnum = frame_size;
        *pden = sample_rate;
        break;
    default:
        break;
    }
}
758 | | |
/**
 * Whether enough frames have been decoded to trust the guessed H.264
 * reorder delay (has_b_frames). Always true for other codecs and for
 * streams that left find_stream_info.
 */
static int has_decode_delay_been_guessed(AVStream *st)
{
    FFStream *const sti = ffstream(st);
    if (st->codecpar->codec_id != AV_CODEC_ID_H264) return 1;
    if (!sti->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
        return 1;
    av_assert0(sti->avctx->codec_id == AV_CODEC_ID_H264);
#if CONFIG_H264_DECODER
    /* The native decoder can report the exact reorder depth; trust it when
     * it agrees with has_b_frames. */
    if (sti->avctx->has_b_frames &&
       avpriv_h264_has_num_reorder_frames(sti->avctx) == sti->avctx->has_b_frames)
        return 1;
#endif
    /* Heuristic frame-count thresholds scaled by the reported B-frame
     * depth; larger delays need more decoded frames to be trustworthy. */
    if (sti->avctx->has_b_frames < 3)
        return sti->nb_decoded_frames >= 7;
    else if (sti->avctx->has_b_frames < 4)
        return sti->nb_decoded_frames >= 18;
    else
        return sti->nb_decoded_frames >= 20;
}
778 | | |
779 | | static PacketListEntry *get_next_pkt(AVFormatContext *s, AVStream *st, |
780 | | PacketListEntry *pktl) |
781 | 0 | { |
782 | 0 | FormatContextInternal *const fci = ff_fc_internal(s); |
783 | 0 | FFFormatContext *const si = &fci->fc; |
784 | 0 | if (pktl->next) |
785 | 0 | return pktl->next; |
786 | 0 | if (pktl == si->packet_buffer.tail) |
787 | 0 | return fci->parse_queue.head; |
788 | 0 | return NULL; |
789 | 0 | } |
790 | | |
/**
 * Pick a DTS for a packet from a window of sorted PTS values, maintaining
 * per-slot reorder-error statistics for codecs with unbounded reordering
 * (H.264/HEVC/VVC).
 *
 * @param pts_buffer sorted window of recent PTS values
 * @param dts        current DTS guess, or AV_NOPTS_VALUE
 * @return the selected DTS (may still be AV_NOPTS_VALUE)
 */
static int64_t select_from_pts_buffer(AVStream *st, int64_t *pts_buffer, int64_t dts)
{
    FFStream *const sti = ffstream(st);
    int onein_oneout = st->codecpar->codec_id != AV_CODEC_ID_H264 &&
                       st->codecpar->codec_id != AV_CODEC_ID_HEVC &&
                       st->codecpar->codec_id != AV_CODEC_ID_VVC;

    if (!onein_oneout) {
        int delay = sti->avctx->has_b_frames;

        if (dts == AV_NOPTS_VALUE) {
            /* No DTS: pick the buffer slot with the smallest average
             * reorder error observed so far. */
            int64_t best_score = INT64_MAX;
            for (int i = 0; i < delay; i++) {
                if (sti->pts_reorder_error_count[i]) {
                    int64_t score = sti->pts_reorder_error[i] / sti->pts_reorder_error_count[i];
                    if (score < best_score) {
                        best_score = score;
                        dts = pts_buffer[i];
                    }
                }
            }
        } else {
            /* DTS known: update each slot's accumulated error against it,
             * periodically halving to age out old statistics. */
            for (int i = 0; i < delay; i++) {
                if (pts_buffer[i] != AV_NOPTS_VALUE) {
                    int64_t diff = FFABS(pts_buffer[i] - dts)
                                   + (uint64_t)sti->pts_reorder_error[i];
                    diff = FFMAX(diff, sti->pts_reorder_error[i]);
                    sti->pts_reorder_error[i] = diff;
                    sti->pts_reorder_error_count[i]++;
                    if (sti->pts_reorder_error_count[i] > 250) {
                        sti->pts_reorder_error[i] >>= 1;
                        sti->pts_reorder_error_count[i] >>= 1;
                    }
                }
            }
        }
    }

    /* Fall back to the smallest PTS in the window. */
    if (dts == AV_NOPTS_VALUE)
        dts = pts_buffer[0];

    return dts;
}
834 | | |
/**
 * Updates the dts of packets of a stream in pkt_buffer, by re-ordering the pts
 * of the packets in a window.
 *
 * Walks every buffered packet of stream_index (packet buffer, then parse
 * queue via get_next_pkt()), maintaining a sliding sorted window of the last
 * delay+1 pts values, and lets select_from_pts_buffer() refine each packet's
 * dts from that window.
 */
static void update_dts_from_pts(AVFormatContext *s, int stream_index,
                                PacketListEntry *pkt_buffer)
{
    AVStream *const st = s->streams[stream_index];
    /* codec reorder delay; window updates are skipped if it exceeds the cap */
    int delay = ffstream(st)->avctx->has_b_frames;

    int64_t pts_buffer[MAX_REORDER_DELAY+1];

    for (int i = 0; i < MAX_REORDER_DELAY + 1; i++)
        pts_buffer[i] = AV_NOPTS_VALUE;

    for (; pkt_buffer; pkt_buffer = get_next_pkt(s, st, pkt_buffer)) {
        if (pkt_buffer->pkt.stream_index != stream_index)
            continue;

        if (pkt_buffer->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
            /* one insertion-sort step: slot the new pts into the ascending window */
            pts_buffer[0] = pkt_buffer->pkt.pts;
            for (int i = 0; i < delay && pts_buffer[i] > pts_buffer[i + 1]; i++)
                FFSWAP(int64_t, pts_buffer[i], pts_buffer[i + 1]);

            pkt_buffer->pkt.dts = select_from_pts_buffer(st, pts_buffer, pkt_buffer->pkt.dts);
        }
    }
}
863 | | |
/**
 * Establish the stream's first_dts once a packet with a usable dts arrives,
 * and rewrite all previously buffered relative timestamps accordingly.
 *
 * Packets queued before the first real dts carry "relative" timestamps
 * (offset by RELATIVE_TS_BASE); once first_dts is known, every buffered
 * packet of this stream is shifted into absolute time. Also initializes
 * st->start_time when possible.
 *
 * @param dts the dts of the current packet (absolute, not relative)
 * @param pts the pts of the current packet
 * @param pkt the current packet (only its flags are inspected here)
 */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts, AVPacket *pkt)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    FFFormatContext *const si = &fci->fc;
    AVStream *const st = s->streams[stream_index];
    FFStream *const sti = ffstream(st);
    PacketListEntry *pktl = si->packet_buffer.head ? si->packet_buffer.head : fci->parse_queue.head;

    uint64_t shift;

    /* bail out if first_dts is already set, dts is unusable, or the
     * subtraction below could overflow */
    if (sti->first_dts != AV_NOPTS_VALUE ||
        dts == AV_NOPTS_VALUE ||
        sti->cur_dts == AV_NOPTS_VALUE ||
        sti->cur_dts < INT_MIN + RELATIVE_TS_BASE ||
        dts < INT_MIN + (sti->cur_dts - RELATIVE_TS_BASE) ||
        is_relative(dts))
        return;

    sti->first_dts = dts - (sti->cur_dts - RELATIVE_TS_BASE);
    sti->cur_dts   = dts;
    /* amount to add to every relative timestamp to make it absolute */
    shift          = (uint64_t)sti->first_dts - RELATIVE_TS_BASE;

    if (is_relative(pts))
        pts += shift;

    /* shift all buffered packets of this stream into absolute time */
    for (PacketListEntry *pktl_it = pktl; pktl_it; pktl_it = get_next_pkt(s, st, pktl_it)) {
        if (pktl_it->pkt.stream_index != stream_index)
            continue;
        if (is_relative(pktl_it->pkt.pts))
            pktl_it->pkt.pts += shift;

        if (is_relative(pktl_it->pkt.dts))
            pktl_it->pkt.dts += shift;

        if (st->start_time == AV_NOPTS_VALUE && pktl_it->pkt.pts != AV_NOPTS_VALUE) {
            st->start_time = pktl_it->pkt.pts;
            /* audio: account for samples that will be skipped at the start */
            if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && st->codecpar->sample_rate)
                st->start_time = av_sat_add64(st->start_time, av_rescale_q(sti->skip_samples, (AVRational){1, st->codecpar->sample_rate}, st->time_base));
        }
    }

    if (has_decode_delay_been_guessed(st))
        update_dts_from_pts(s, stream_index, pktl);

    if (st->start_time == AV_NOPTS_VALUE) {
        /* for video, a discarded leading packet must not define start_time */
        if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || !(pkt->flags & AV_PKT_FLAG_DISCARD)) {
            st->start_time = pts;
        }
        if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && st->codecpar->sample_rate)
            st->start_time = av_sat_add64(st->start_time, av_rescale_q(sti->skip_samples, (AVRational){1, st->codecpar->sample_rate}, st->time_base));
    }
}
917 | | |
/**
 * Fill in dts/pts/duration for an initial run of buffered packets that have
 * none, once a packet duration becomes known.
 *
 * Only applies to the leading packets of the stream (those queued before any
 * timing information was available); the fill-in loop stops at the first
 * packet that already carries timing data. Runs at most once per stream
 * after first_dts is known (guarded by update_initial_durations_done).
 *
 * @param duration duration (in st->time_base units) to assign per packet
 */
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int64_t duration)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    FFFormatContext *const si = &fci->fc;
    FFStream *const sti = ffstream(st);
    PacketListEntry *pktl = si->packet_buffer.head ? si->packet_buffer.head : fci->parse_queue.head;
    int64_t cur_dts = RELATIVE_TS_BASE;

    if (sti->first_dts != AV_NOPTS_VALUE) {
        if (sti->update_initial_durations_done)
            return;
        sti->update_initial_durations_done = 1;
        cur_dts = sti->first_dts;
        /* walk back from first_dts over the leading timing-less packets to
         * find the dts the very first buffered packet should have */
        for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
            if (pktl->pkt.stream_index == stream_index) {
                if (pktl->pkt.pts != pktl->pkt.dts ||
                    pktl->pkt.dts != AV_NOPTS_VALUE ||
                    pktl->pkt.duration)
                    break;
                cur_dts -= duration;
            }
        }
        /* sanity check: the packet we stopped at must actually carry first_dts */
        if (pktl && pktl->pkt.dts != sti->first_dts) {
            av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %"PRId64") in the queue\n",
                   av_ts2str(sti->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
            return;
        }
        if (!pktl) {
            av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(sti->first_dts));
            return;
        }
        pktl = si->packet_buffer.head ? si->packet_buffer.head : fci->parse_queue.head;
        sti->first_dts = cur_dts;
    } else if (sti->cur_dts != RELATIVE_TS_BASE)
        return;

    /* forward pass: assign cur_dts/duration to each leading packet that has
     * no timing info, advancing cur_dts by duration each time */
    for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
        if (pktl->pkt.stream_index != stream_index)
            continue;
        if ((pktl->pkt.pts == pktl->pkt.dts ||
             pktl->pkt.pts == AV_NOPTS_VALUE) &&
            (pktl->pkt.dts == AV_NOPTS_VALUE ||
             pktl->pkt.dts == sti->first_dts ||
             pktl->pkt.dts == RELATIVE_TS_BASE) &&
            !pktl->pkt.duration &&
            /* skip the update if cur_dts + duration would overflow */
            av_sat_add64(cur_dts, duration) == cur_dts + (uint64_t)duration
        ) {
            pktl->pkt.dts = cur_dts;
            if (!sti->avctx->has_b_frames)
                pktl->pkt.pts = cur_dts;
            pktl->pkt.duration = duration;
        } else
            break;
        cur_dts = pktl->pkt.dts + pktl->pkt.duration;
    }
    if (!pktl)
        sti->cur_dts = cur_dts;
}
977 | | |
/**
 * Fill in missing timestamp fields (pts/dts/duration) of a packet using
 * parser info, codec delay heuristics, and the stream's running state.
 *
 * This is the central timestamp fix-up of the demuxing path: it detects
 * out-of-order dts, undoes pts/dts wraparound, estimates durations,
 * interpolates pts/dts for low-delay codecs, and keeps sti->cur_dts and
 * the pts reorder window up to date. No-op if AVFMT_FLAG_NOFILLIN is set.
 *
 * @param pc        parser context for the stream, may be NULL
 * @param next_dts  dts of the following packet (parser path), or AV_NOPTS_VALUE
 * @param next_pts  pts of the following packet (parser path), or AV_NOPTS_VALUE
 */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt,
                               int64_t next_dts, int64_t next_pts)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    FFFormatContext *const si = &fci->fc;
    FFStream *const sti = ffstream(st);
    int num, den, presentation_delayed, delay;
    int64_t offset;
    AVRational duration;
    /* codecs other than these three output one frame per packet in order */
    int onein_oneout = st->codecpar->codec_id != AV_CODEC_ID_H264 &&
                       st->codecpar->codec_id != AV_CODEC_ID_HEVC &&
                       st->codecpar->codec_id != AV_CODEC_ID_VVC;

    if (s->flags & AVFMT_FLAG_NOFILLIN)
        return;

    /* track dts ordering for video; if dts==pts values are frequently
     * misordered, stop trusting them and drop the dts */
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && pkt->dts != AV_NOPTS_VALUE) {
        if (pkt->dts == pkt->pts && sti->last_dts_for_order_check != AV_NOPTS_VALUE) {
            if (sti->last_dts_for_order_check <= pkt->dts) {
                sti->dts_ordered++;
            } else {
                av_log(s, sti->dts_misordered ? AV_LOG_DEBUG : AV_LOG_WARNING,
                       "DTS %"PRIi64" < %"PRIi64" out of order\n",
                       pkt->dts,
                       sti->last_dts_for_order_check);
                sti->dts_misordered++;
            }
            /* keep the counters bounded and adaptive */
            if (sti->dts_ordered + sti->dts_misordered > 250) {
                sti->dts_ordered    >>= 1;
                sti->dts_misordered >>= 1;
            }
        }

        sti->last_dts_for_order_check = pkt->dts;
        if (sti->dts_ordered < 8 * sti->dts_misordered && pkt->dts == pkt->pts)
            pkt->dts = AV_NOPTS_VALUE;
    }

    if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts = AV_NOPTS_VALUE;

    /* seeing a B-frame implies the stream has at least one frame of delay */
    if (pc && pc->pict_type == AV_PICTURE_TYPE_B
        && !sti->avctx->has_b_frames)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        sti->avctx->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay = sti->avctx->has_b_frames;
    presentation_delayed = 0;

    /* XXX: need has_b_frame, but cannot get it if the codec is
     * not initialized */
    if (delay &&
        pc && pc->pict_type != AV_PICTURE_TYPE_B)
        presentation_delayed = 1;

    /* undo apparent timestamp wraparound: if dts is more than half the wrap
     * range ahead of pts, one of the two has wrapped */
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
        st->pts_wrap_bits < 63 && pkt->dts > INT64_MIN + (1LL << st->pts_wrap_bits) &&
        pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
        if (is_relative(sti->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > sti->cur_dts) {
            pkt->dts -= 1LL << st->pts_wrap_bits;
        } else
            pkt->pts += 1LL << st->pts_wrap_bits;
    }

    /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
     * We take the conservative approach and discard both.
     * Note: If this is misbehaving for an H.264 file, then possibly
     * presentation_delayed is not set correctly. */
    if (delay == 1 && pkt->dts == pkt->pts &&
        pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
        if (    strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")
             && strcmp(s->iformat->name, "flv")) // otherwise we discard correct timestamps for vc1-wmapro.ism
            pkt->dts = AV_NOPTS_VALUE;
    }

    /* estimate a duration when the packet carries none */
    duration = av_mul_q((AVRational) {pkt->duration, 1}, st->time_base);
    if (pkt->duration <= 0) {
        compute_frame_duration(s, &num, &den, st, pc, pkt);
        if (den && num) {
            duration = (AVRational) {num, den};
            pkt->duration = av_rescale_rnd(1,
                                           num * (int64_t) st->time_base.den,
                                           den * (int64_t) st->time_base.num,
                                           AV_ROUND_DOWN);
        }
    }

    if (pkt->duration > 0 && (si->packet_buffer.head || fci->parse_queue.head))
        update_initial_durations(s, st, pkt->stream_index, pkt->duration);

    /* Correct timestamps with byte offset if demuxers only have timestamps
     * on packet boundaries */
    if (pc && sti->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
    }

    /* This may be redundant, but it should not hurt. */
    if (pkt->dts != AV_NOPTS_VALUE &&
        pkt->pts != AV_NOPTS_VALUE &&
        pkt->pts > pkt->dts)
        presentation_delayed = 1;

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%"PRId64" delay:%d onein_oneout:%d\n",
               presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(sti->cur_dts),
               pkt->stream_index, pc, pkt->duration, delay, onein_oneout);

    /* Interpolate PTS and DTS if they are not present. We skip H264
     * currently because delay and has_b_frames are not reliably set. */
    if ((delay == 0 || (delay == 1 && pc)) &&
        onein_oneout) {
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = sti->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = sti->cur_dts;

            /* This is tricky: the dts must be incremented by the duration
             * of the frame we are displaying, i.e. the last I- or P-frame. */
            if (sti->last_IP_duration == 0 && (uint64_t)pkt->duration <= INT32_MAX)
                sti->last_IP_duration = pkt->duration;
            if (pkt->dts != AV_NOPTS_VALUE)
                sti->cur_dts = av_sat_add64(pkt->dts, sti->last_IP_duration);
            /* infer pts from the next packet's dts when cur_dts lands on it */
            if (pkt->dts != AV_NOPTS_VALUE &&
                pkt->pts == AV_NOPTS_VALUE &&
                sti->last_IP_duration > 0 &&
                ((uint64_t)sti->cur_dts - (uint64_t)next_dts + 1) <= 2 &&
                next_dts != next_pts &&
                next_pts != AV_NOPTS_VALUE)
                pkt->pts = next_dts;

            if ((uint64_t)pkt->duration <= INT32_MAX)
                sti->last_IP_duration = pkt->duration;
            sti->last_IP_pts      = pkt->pts;
            /* Cannot compute PTS if not present (we can compute it only
             * by knowing the future. */
        } else if (pkt->pts != AV_NOPTS_VALUE ||
                   pkt->dts != AV_NOPTS_VALUE ||
                   pkt->duration > 0             ) {

            /* presentation is not delayed : PTS and DTS are the same */
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            update_initial_timestamps(s, pkt->stream_index, pkt->pts,
                                      pkt->pts, pkt);
            if (pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = sti->cur_dts;
            pkt->dts = pkt->pts;
            if (pkt->pts != AV_NOPTS_VALUE && duration.num >= 0)
                sti->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
        }
    }

    /* maintain the sorted pts window and derive dts from it for
     * reordering codecs */
    if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        sti->pts_buffer[0] = pkt->pts;
        for (int i = 0; i < delay && sti->pts_buffer[i] > sti->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, sti->pts_buffer[i], sti->pts_buffer[i + 1]);

        if (has_decode_delay_been_guessed(st))
            pkt->dts = select_from_pts_buffer(st, sti->pts_buffer, pkt->dts);
    }
    // We skipped it above so we try here.
    if (!onein_oneout)
        // This should happen on the first packet
        update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
    if (pkt->dts > sti->cur_dts)
        sti->cur_dts = pkt->dts;

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s st:%d (%d)\n",
               presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(sti->cur_dts), st->index, st->id);

    /* update flags */
    if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA || ff_is_intra_only(st->codecpar->codec_id))
        pkt->flags |= AV_PKT_FLAG_KEY;
}
1166 | | |
/**
 * Parse a packet, add all split parts to parse_queue.
 *
 * Feeds pkt through the stream's AVCodecParserContext; each complete frame
 * the parser emits is timestamped via compute_pkt_fields() and appended to
 * fci->parse_queue. The input packet is always unreffed before returning.
 * On flush, any remaining parser output is drained and the parser is closed.
 *
 * @param pkt   Packet to parse; must not be NULL.
 * @param flush Indicates whether to flush. If set, pkt must be blank.
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_packet(AVFormatContext *s, AVPacket *pkt,
                        int stream_index, int flush)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    AVStream *st = s->streams[stream_index];
    FFStream *const sti = ffstream(st);
    AVPacket *out_pkt = sti->parse_pkt;
    const AVPacketSideData *sd = NULL;
    const uint8_t *data = pkt->data;
    /* saved so the temporary new-extradata swap below can be undone */
    uint8_t *extradata = sti->avctx->extradata;
    int extradata_size = sti->avctx->extradata_size;
    int size = pkt->size;
    int ret = 0, got_output = flush, pkt_side_data_consumed = 0;

    if (!size && !flush && sti->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        // preserve 0-size sync packets
        compute_pkt_fields(s, st, sti->parser, pkt, pkt->dts, pkt->pts);

        // Theora has valid 0-sized packets that need to be output
        if (st->codecpar->codec_id == AV_CODEC_ID_THEORA) {
            ret = avpriv_packet_list_put(&fci->parse_queue,
                                         pkt, NULL, 0);
            if (ret < 0)
                goto fail;
        }
    }

    if (pkt->side_data_elems)
        sd = av_packet_side_data_get(pkt->side_data, pkt->side_data_elems,
                                     AV_PKT_DATA_NEW_EXTRADATA);
    if (sd) {
        av_assert1(size && !flush);

        /* temporarily expose the new extradata to the parser; restored in fail: */
        sti->avctx->extradata      = sd->data;
        sti->avctx->extradata_size = sd->size;
    }

    while (size > 0 || (flush && got_output)) {
        int64_t next_pts = pkt->pts;
        int64_t next_dts = pkt->dts;
        int len;

        len = av_parser_parse2(sti->parser, sti->avctx,
                               &out_pkt->data, &out_pkt->size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);

        /* timestamps belong to the first frame only; clear for later iterations */
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        pkt->pos = -1;
        /* increment read pointer */
        av_assert1(data || !len);
        data  = len ? data + len : data;
        size -= len;

        got_output = !!out_pkt->size;

        if (pkt->side_data && !out_pkt->side_data) {
            /* for the first iteration, side_data are simply moved to output.
             * in case of additional iterations, they are duplicated each time. */
            if (!pkt_side_data_consumed) {
                pkt_side_data_consumed = 1;
                out_pkt->side_data       = pkt->side_data;
                out_pkt->side_data_elems = pkt->side_data_elems;
            } else for (int i = 0; i < pkt->side_data_elems; i++) {
                const AVPacketSideData *const src_sd = &pkt->side_data[i];
                uint8_t *dst_data = av_packet_new_side_data(out_pkt, src_sd->type, src_sd->size);
                if (!dst_data) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                memcpy(dst_data, src_sd->data, src_sd->size);
            }
        }

        if (!out_pkt->size)
            continue;

        if (pkt->buf && out_pkt->data == pkt->data) {
            /* reference pkt->buf only when out_pkt->data is guaranteed to point
             * to data in it and not in the parser's internal buffer. */
            /* XXX: Ensure this is the case with all parsers when sti->parser->flags
             * is PARSER_FLAG_COMPLETE_FRAMES and check for that instead? */
            out_pkt->buf = av_buffer_ref(pkt->buf);
            if (!out_pkt->buf) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        } else {
            ret = av_packet_make_refcounted(out_pkt);
            if (ret < 0)
                goto fail;
        }

        /* set the duration */
        out_pkt->duration = (sti->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->duration : 0;
        if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (sti->avctx->sample_rate > 0) {
                /* parser reports duration in samples; convert to stream time base */
                out_pkt->duration =
                    av_rescale_q_rnd(sti->parser->duration,
                                     (AVRational) { 1, sti->avctx->sample_rate },
                                     st->time_base,
                                     AV_ROUND_DOWN);
            }
        } else if (st->codecpar->codec_id == AV_CODEC_ID_GIF) {
            if (st->time_base.num > 0 && st->time_base.den > 0 &&
                sti->parser->duration) {
                out_pkt->duration = sti->parser->duration;
            }
        }

        out_pkt->stream_index = st->index;
        out_pkt->pts          = sti->parser->pts;
        out_pkt->dts          = sti->parser->dts;
        out_pkt->pos          = sti->parser->pos;
        out_pkt->flags       |= pkt->flags & (AV_PKT_FLAG_DISCARD | AV_PKT_FLAG_CORRUPT);

        if (sti->need_parsing == AVSTREAM_PARSE_FULL_RAW)
            out_pkt->pos = sti->parser->frame_offset;

        /* keyframe from parser, or inferred from an I picture */
        if (sti->parser->key_frame == 1 ||
            (sti->parser->key_frame == -1 &&
             sti->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt->flags |= AV_PKT_FLAG_KEY;

        /* parser undecided: propagate the demuxer's keyframe flag */
        if (sti->parser->key_frame == -1 && sti->parser->pict_type ==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
            out_pkt->flags |= AV_PKT_FLAG_KEY;

        compute_pkt_fields(s, st, sti->parser, out_pkt, next_dts, next_pts);

        ret = avpriv_packet_list_put(&fci->parse_queue,
                                     out_pkt, NULL, 0);
        if (ret < 0)
            goto fail;
    }

    /* end of the stream => close and free the parser */
    if (flush) {
        av_parser_close(sti->parser);
        sti->parser = NULL;
    }

fail:
    /* restore the extradata pointers swapped out for AV_PKT_DATA_NEW_EXTRADATA */
    if (sd) {
        sti->avctx->extradata      = extradata;
        sti->avctx->extradata_size = extradata_size;
    }
    /* side data ownership was transferred to out_pkt; detach from pkt so
     * the unref below does not free it */
    if (pkt_side_data_consumed) {
        pkt->side_data       = NULL;
        pkt->side_data_elems = 0;
    }

    if (ret < 0)
        av_packet_unref(out_pkt);
    av_packet_unref(pkt);
    return ret;
}
1328 | | |
1329 | | static int64_t ts_to_samples(AVStream *st, int64_t ts) |
1330 | 0 | { |
1331 | 0 | return av_rescale(ts, st->time_base.num * st->codecpar->sample_rate, st->time_base.den); |
1332 | 0 | } |
1333 | | |
/**
 * Replace the stream's internal decoder context with a freshly allocated,
 * closed one carrying over the current codec parameters.
 *
 * Used when a mid-stream context update requires the (open) decoder to be
 * re-created: the existing avctx state is round-tripped through an
 * AVCodecParameters snapshot into a new context for the (possibly changed)
 * codec id. On failure sti->avctx is left untouched.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int codec_close(FFStream *sti)
{
    AVCodecContext *avctx_new = NULL;
    AVCodecParameters *par_tmp = NULL;
    const AVCodec *new_codec = NULL;
    int ret;

    /* look up a new decoder only if the codec id actually changed */
    new_codec =
        (sti->avctx->codec_id != sti->pub.codecpar->codec_id) ?
        avcodec_find_decoder(sti->pub.codecpar->codec_id) :
        sti->avctx->codec;

    avctx_new = avcodec_alloc_context3(new_codec);
    if (!avctx_new) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    par_tmp = avcodec_parameters_alloc();
    if (!par_tmp) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    /* copy old context -> parameters -> new context */
    ret = avcodec_parameters_from_context(par_tmp, sti->avctx);
    if (ret < 0)
        goto fail;

    ret = avcodec_parameters_to_context(avctx_new, par_tmp);
    if (ret < 0)
        goto fail;

    avctx_new->pkt_timebase = sti->avctx->pkt_timebase;

    /* commit: free the old context and install the new one */
    avcodec_free_context(&sti->avctx);
    sti->avctx = avctx_new;

    avctx_new = NULL;
    ret       = 0;

fail:
    /* no-ops on the success path (avctx_new was NULLed above) */
    avcodec_free_context(&avctx_new);
    avcodec_parameters_free(&par_tmp);

    return ret;
}
1380 | | |
1381 | | static int extract_extradata(FFFormatContext *si, AVStream *st, const AVPacket *pkt); |
1382 | | |
/**
 * Read the next packet from the demuxer, running it through the parsing and
 * timestamp fix-up machinery.
 *
 * Loops over ff_read_packet() until either a packet can be output directly
 * (no parsing needed) or the parse queue has entries; handles mid-stream
 * context updates (new extradata, codec change), lazy parser creation, and
 * injection of AV_PKT_DATA_SKIP_SAMPLES side data for start-skip and
 * end-discard regions.
 *
 * @return 0 on success, AVERROR(EAGAIN) if no packet is currently available,
 *         AVERROR_EOF at end of stream, or another negative error code
 */
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    FFFormatContext *const si = &fci->fc;
    int ret, got_packet = 0;
    AVDictionary *metadata = NULL;

    while (!got_packet && !fci->parse_queue.head) {
        AVStream *st;
        FFStream *sti;

        /* read next packet */
        ret = ff_read_packet(s, pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            for (unsigned i = 0; i < s->nb_streams; i++) {
                AVStream *const st  = s->streams[i];
                FFStream *const sti = ffstream(st);
                if (sti->parser && sti->need_parsing)
                    parse_packet(s, pkt, st->index, 1);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st  = s->streams[pkt->stream_index];
        sti = ffstream(st);

        st->event_flags |= AVSTREAM_EVENT_FLAG_NEW_PACKETS;

        /* new in-band extradata forces a context update below */
        int new_extradata = !!av_packet_side_data_get(pkt->side_data, pkt->side_data_elems,
                                                      AV_PKT_DATA_NEW_EXTRADATA);
        if (new_extradata)
            sti->need_context_update = 1;

        /* update context if required */
        if (sti->need_context_update) {
            if (avcodec_is_open(sti->avctx)) {
                av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
                ret = codec_close(sti);
                sti->info->found_decoder = 0;
                if (ret < 0)
                    return ret;
            }

            /* close parser, because it depends on the codec and extradata */
            if (sti->parser &&
                (sti->avctx->codec_id != st->codecpar->codec_id || new_extradata)) {
                av_parser_close(sti->parser);
                sti->parser = NULL;
            }

            ret = avcodec_parameters_to_context(sti->avctx, st->codecpar);
            if (ret < 0) {
                av_packet_unref(pkt);
                return ret;
            }

            /* try to recover extradata from the bitstream if none is set */
            if (!sti->avctx->extradata) {
                sti->extract_extradata.inited = 0;

                ret = extract_extradata(si, st, pkt);
                if (ret < 0) {
                    av_packet_unref(pkt);
                    return ret;
                }
            }

            sti->codec_desc = avcodec_descriptor_get(sti->avctx->codec_id);

            sti->need_context_update = 0;
        }

        if (pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   pkt->stream_index,
                   av_ts2str(pkt->pts),
                   av_ts2str(pkt->dts),
                   pkt->size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
                   pkt->stream_index,
                   av_ts2str(pkt->pts),
                   av_ts2str(pkt->dts),
                   pkt->size, pkt->duration, pkt->flags);

        /* lazily create the parser on first use, configuring its flags
         * from the stream's parsing requirements */
        if (sti->need_parsing && !sti->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            sti->parser = av_parser_init(st->codecpar->codec_id);
            if (!sti->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codecpar->codec_id));
                /* no parser available: just output the raw packets */
                sti->need_parsing = AVSTREAM_PARSE_NONE;
            } else if (sti->need_parsing == AVSTREAM_PARSE_HEADERS)
                sti->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (sti->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                sti->parser->flags |= PARSER_FLAG_ONCE;
            else if (sti->need_parsing == AVSTREAM_PARSE_FULL_RAW)
                sti->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
        }

        if (!sti->need_parsing || !sti->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, pkt, pkt->stream_index, 0)) < 0)
                return ret;
            /* propagate parameters the parser may have refined back to codecpar */
            st->codecpar->sample_rate = sti->avctx->sample_rate;
            st->codecpar->bit_rate = sti->avctx->bit_rate;
            ret = av_channel_layout_copy(&st->codecpar->ch_layout, &sti->avctx->ch_layout);
            if (ret < 0)
                return ret;
            st->codecpar->codec_id = sti->avctx->codec_id;
        } else {
            /* free packet */
            av_packet_unref(pkt);
        }
        if (pkt->flags & AV_PKT_FLAG_KEY)
            sti->skip_to_keyframe = 0;
        if (sti->skip_to_keyframe) {
            av_packet_unref(pkt);
            got_packet = 0;
        }
    }

    if (!got_packet && fci->parse_queue.head)
        ret = avpriv_packet_list_get(&fci->parse_queue, pkt);

    if (ret >= 0) {
        AVStream *const st = s->streams[pkt->stream_index];
        FFStream *const sti = ffstream(st);
        int discard_padding = 0;
        /* compute trailing samples to discard if this packet overlaps the
         * [first_discard_sample, last_discard_sample) region */
        if (sti->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
            int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
            int64_t sample = ts_to_samples(st, pts);
            int64_t duration = ts_to_samples(st, pkt->duration);
            int64_t end_sample = sample + duration;
            if (duration > 0 && end_sample >= sti->first_discard_sample &&
                sample < sti->last_discard_sample)
                discard_padding = FFMIN(end_sample - sti->first_discard_sample, duration);
        }
        if (sti->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
            sti->skip_samples = sti->start_skip_samples;
        sti->skip_samples = FFMAX(0, sti->skip_samples);
        /* attach a skip-samples side-data block: 4 bytes skip + 4 bytes discard */
        if (sti->skip_samples || discard_padding) {
            uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
            if (p) {
                AV_WL32(p, sti->skip_samples);
                AV_WL32(p + 4, discard_padding);
                av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %u / discard %u\n",
                       (unsigned)sti->skip_samples, (unsigned)discard_padding);
            }
            sti->skip_samples = 0;
        }
    }

    /* pick up metadata updates a child demuxer exposed via its options */
    if (!fci->metafree) {
        int metaret = av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
        if (metadata) {
            s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
            av_dict_copy(&s->metadata, metadata, 0);
            av_dict_free(&metadata);
            av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
        }
        fci->metafree = metaret == AVERROR_OPTION_NOT_FOUND;
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%s, dts=%s, "
               "size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index,
               av_ts2str(pkt->pts),
               av_ts2str(pkt->dts),
               pkt->size, pkt->duration, pkt->flags);

    /* A demuxer might have returned EOF because of an IO error, let's
     * propagate this back to the user. */
    if (ret == AVERROR_EOF && s->pb && s->pb->error < 0 && s->pb->error != AVERROR(EAGAIN))
        ret = s->pb->error;

    return ret;
}
1582 | | |
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    FFFormatContext *const si = ffformatcontext(s);
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    /* Without AVFMT_FLAG_GENPTS we can hand out packets immediately:
     * drain the buffered queue first, otherwise demux a fresh packet. */
    if (!genpts) {
        ret = si->packet_buffer.head
              ? avpriv_packet_list_get(&si->packet_buffer, pkt)
              : read_frame_internal(s, pkt);
        if (ret < 0)
            return ret;
        goto return_packet;
    }

    /* GENPTS mode: hold packets in the buffer until the head packet's pts
     * can be derived from the dts of a later packet of the same stream. */
    for (;;) {
        PacketListEntry *pktl = si->packet_buffer.head;

        if (pktl) {
            AVPacket *next_pkt = &pktl->pkt;

            if (next_pkt->dts != AV_NOPTS_VALUE) {
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                // last dts seen for this stream. if any of packets following
                // current one had no dts, we will set this to AV_NOPTS_VALUE.
                int64_t last_dts = next_pkt->dts;
                av_assert2(wrap_bits <= 64);
                /* Scan forward: a later same-stream packet with a larger dts
                 * (compared modulo 2^pts_wrap_bits) supplies the head
                 * packet's pts, unless that packet looks like a B-frame
                 * (pts == dts). */
                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
                        av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2ULL << (wrap_bits - 1)) < 0) {
                        if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2ULL << (wrap_bits - 1))) {
                            // not B-frame
                            next_pkt->pts = pktl->pkt.dts;
                        }
                        if (last_dts != AV_NOPTS_VALUE) {
                            // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
                            last_dts = pktl->pkt.dts;
                        }
                    }
                    pktl = pktl->next;
                }
                if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
                    // Fixing the last reference frame had none pts issue (For MXF etc).
                    // We only do this when
                    // 1. eof.
                    // 2. we are not able to resolve a pts value for current packet.
                    // 3. the packets for this stream at the end of the files had valid dts.
                    next_pkt->pts = last_dts + next_pkt->duration;
                }
                pktl = si->packet_buffer.head;
            }

            /* read packet from packet buffer, if there is data */
            st = s->streams[next_pkt->stream_index];
            if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
                ret = avpriv_packet_list_get(&si->packet_buffer, pkt);
                goto return_packet;
            }
        }

        /* The head packet (if any) still lacks a pts: demux more input. */
        ret = read_frame_internal(s, pkt);
        if (ret < 0) {
            if (pktl && ret != AVERROR(EAGAIN)) {
                /* Input exhausted; flush buffered packets on the next pass. */
                eof = 1;
                continue;
            } else
                return ret;
        }

        ret = avpriv_packet_list_put(&si->packet_buffer,
                                     pkt, NULL, 0);
        if (ret < 0) {
            av_packet_unref(pkt);
            return ret;
        }
    }

return_packet:
    st = s->streams[pkt->stream_index];
    /* Maintain a generic seeking index from keyframes for formats that
     * request it. */
    if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
        ff_reduce_index(s, st->index);
        av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
    }

    /* Strip the internal RELATIVE_TS_BASE offset before returning
     * timestamps to the caller. */
    if (is_relative(pkt->dts))
        pkt->dts -= RELATIVE_TS_BASE;
    if (is_relative(pkt->pts))
        pkt->pts -= RELATIVE_TS_BASE;

    return ret;
}
1677 | | |
/**
 * Check whether an accurate duration is known for any component.
 *
 * @return TRUE if at least one stream, or the container itself, has a
 *         known duration.
 */
1683 | | static int has_duration(AVFormatContext *ic) |
1684 | 0 | { |
1685 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
1686 | 0 | const AVStream *const st = ic->streams[i]; |
1687 | 0 | if (st->duration != AV_NOPTS_VALUE) |
1688 | 0 | return 1; |
1689 | 0 | } |
1690 | 0 | if (ic->duration != AV_NOPTS_VALUE) |
1691 | 0 | return 1; |
1692 | 0 | return 0; |
1693 | 0 | } |
1694 | | |
/**
 * Estimate the container-level timings from those of the individual
 * components.
 *
 * Also computes the global bitrate if possible.
 */
static void update_stream_timings(AVFormatContext *ic)
{
    int64_t start_time, start_time1, start_time_text, end_time, end_time1, end_time_text;
    int64_t duration, duration1, duration_text, filesize;

    /* Track "primary" (audio/video) and "text" (subtitle/data) streams
     * separately; text values are only trusted when close to the primary
     * ones (see the outlier checks below). */
    start_time = INT64_MAX;
    start_time_text = INT64_MAX;
    end_time = INT64_MIN;
    end_time_text = INT64_MIN;
    duration = INT64_MIN;
    duration_text = INT64_MIN;

    for (unsigned i = 0; i < ic->nb_streams; i++) {
        AVStream *const st = ic->streams[i];
        int is_text = st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE ||
                      st->codecpar->codec_type == AVMEDIA_TYPE_DATA;

        if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
            start_time1 = av_rescale_q(st->start_time, st->time_base,
                                       AV_TIME_BASE_Q);
            if (is_text)
                start_time_text = FFMIN(start_time_text, start_time1);
            else
                start_time = FFMIN(start_time, start_time1);
            end_time1 = av_rescale_q_rnd(st->duration, st->time_base,
                                         AV_TIME_BASE_Q,
                                         AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            /* Only add start + duration when the sum cannot overflow. */
            if (end_time1 != AV_NOPTS_VALUE && (end_time1 > 0 ? start_time1 <= INT64_MAX - end_time1 : start_time1 >= INT64_MIN - end_time1)) {
                end_time1 += start_time1;
                if (is_text)
                    end_time_text = FFMAX(end_time_text, end_time1);
                else
                    end_time = FFMAX(end_time, end_time1);
            }
            /* Propagate the stream's span to every program containing it. */
            for (AVProgram *p = NULL; (p = av_find_program_from_stream(ic, p, i)); ) {
                if (p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
                    p->start_time = start_time1;
                if (p->end_time < end_time1)
                    p->end_time = end_time1;
            }
        }
        if (st->duration != AV_NOPTS_VALUE) {
            duration1 = av_rescale_q(st->duration, st->time_base,
                                     AV_TIME_BASE_Q);
            if (is_text)
                duration_text = FFMAX(duration_text, duration1);
            else
                duration = FFMAX(duration, duration1);
        }
    }
    /* Prefer primary-stream values; accept text values only when there is
     * no primary value or they are within one second (AV_TIME_BASE) of it. */
    if (start_time == INT64_MAX || (start_time > start_time_text && start_time - (uint64_t)start_time_text < AV_TIME_BASE))
        start_time = start_time_text;
    else if (start_time > start_time_text)
        av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);

    if (end_time == INT64_MIN || (end_time < end_time_text && end_time_text - (uint64_t)end_time < AV_TIME_BASE))
        end_time = end_time_text;
    else if (end_time < end_time_text)
        av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream endtime %f\n", end_time_text / (float)AV_TIME_BASE);

    if (duration == INT64_MIN || (duration < duration_text && (uint64_t)duration_text - duration < AV_TIME_BASE))
        duration = duration_text;
    else if (duration < duration_text)
        av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream duration %f\n", duration_text / (float)AV_TIME_BASE);

    if (start_time != INT64_MAX) {
        ic->start_time = start_time;
        if (end_time != INT64_MIN) {
            if (ic->nb_programs > 1) {
                /* With multiple programs, the container duration is the
                 * longest program span rather than end - start. */
                for (unsigned i = 0; i < ic->nb_programs; i++) {
                    AVProgram *const p = ic->programs[i];

                    if (p->start_time != AV_NOPTS_VALUE &&
                        p->end_time > p->start_time &&
                        p->end_time - (uint64_t)p->start_time <= INT64_MAX)
                        duration = FFMAX(duration, p->end_time - p->start_time);
                }
            } else if (end_time >= start_time && end_time - (uint64_t)start_time <= INT64_MAX) {
                duration = FFMAX(duration, end_time - start_time);
            }
        }
    }
    /* Never overwrite a duration the demuxer already provided. */
    if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
        ic->duration = duration;
    }
    if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration > 0) {
        /* compute the bitrate */
        double bitrate = (double) filesize * 8.0 * AV_TIME_BASE /
                         (double) ic->duration;
        if (bitrate >= 0 && bitrate <= INT64_MAX)
            ic->bit_rate = bitrate;
    }
}
1793 | | |
1794 | | static void fill_all_stream_timings(AVFormatContext *ic) |
1795 | 0 | { |
1796 | 0 | update_stream_timings(ic); |
1797 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
1798 | 0 | AVStream *const st = ic->streams[i]; |
1799 | |
|
1800 | 0 | if (st->start_time == AV_NOPTS_VALUE) { |
1801 | 0 | if (ic->start_time != AV_NOPTS_VALUE) |
1802 | 0 | st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, |
1803 | 0 | st->time_base); |
1804 | 0 | if (ic->duration != AV_NOPTS_VALUE) |
1805 | 0 | st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, |
1806 | 0 | st->time_base); |
1807 | 0 | } |
1808 | 0 | } |
1809 | 0 | } |
1810 | | |
/* Estimate stream durations from the total bitrate and the payload size
 * when no other duration source is available. */
static void estimate_timings_from_bit_rate(AVFormatContext *ic)
{
    FFFormatContext *const si = ffformatcontext(ic);
    int show_warning = 0;

    /* if bit_rate is already set, we believe it */
    if (ic->bit_rate <= 0) {
        int64_t bit_rate = 0;
        for (unsigned i = 0; i < ic->nb_streams; i++) {
            const AVStream *const st = ic->streams[i];
            const FFStream *const sti = cffstream(st);
            /* Fall back to the decoder-reported bitrate if codecpar has none. */
            if (st->codecpar->bit_rate <= 0 && sti->avctx->bit_rate > 0)
                st->codecpar->bit_rate = sti->avctx->bit_rate;
            if (st->codecpar->bit_rate > 0) {
                /* Abort on addition overflow; a partial sum is meaningless. */
                if (INT64_MAX - st->codecpar->bit_rate < bit_rate) {
                    bit_rate = 0;
                    break;
                }
                bit_rate += st->codecpar->bit_rate;
            } else if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && sti->codec_info_nb_frames > 1) {
                // If we have a videostream with packets but without a bitrate
                // then consider the sum not known
                bit_rate = 0;
                break;
            }
        }
        ic->bit_rate = bit_rate;
    }

    /* if duration is already set, we believe it */
    if (ic->duration == AV_NOPTS_VALUE &&
        ic->bit_rate != 0) {
        int64_t filesize = ic->pb ? avio_size(ic->pb) : 0;
        if (filesize > si->data_offset) {
            /* Only the payload (past the header) contributes to duration. */
            filesize -= si->data_offset;
            for (unsigned i = 0; i < ic->nb_streams; i++) {
                AVStream *const st = ic->streams[i];

                /* Guard the multiplication in the rescale divisor. */
                if (   st->time_base.num <= INT64_MAX / ic->bit_rate
                    && st->duration == AV_NOPTS_VALUE) {
                    st->duration = av_rescale(filesize, 8LL * st->time_base.den,
                                          ic->bit_rate *
                                          (int64_t) st->time_base.num);
                    show_warning = 1;
                }
            }
        }
    }
    if (show_warning)
        av_log(ic, AV_LOG_WARNING,
               "Estimating duration from bitrate, this may be inaccurate\n");
}
1863 | | |
1864 | 0 | #define DURATION_DEFAULT_MAX_READ_SIZE 250000LL |
1865 | 0 | #define DURATION_DEFAULT_MAX_RETRY 6 |
1866 | 0 | #define DURATION_MAX_RETRY 1 |
1867 | | |
1868 | | /* only usable for MPEG-PS streams */ |
static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
{
    FFFormatContext *const si = ffformatcontext(ic);
    AVPacket *const pkt = si->pkt;
    int num, den, read_size, ret;
    /* With a user-supplied duration_probesize the probe window starts at
     * probesize >> DURATION_MAX_RETRY and doubles once per retry. */
    int64_t duration_max_read_size = ic->duration_probesize ? ic->duration_probesize >> DURATION_MAX_RETRY : DURATION_DEFAULT_MAX_READ_SIZE;
    int duration_max_retry = ic->duration_probesize ? DURATION_MAX_RETRY : DURATION_DEFAULT_MAX_RETRY;
    int found_duration = 0;
    int is_end;
    int64_t filesize, offset, duration;
    int retry = 0;

    /* flush packet queue */
    ff_flush_packet_queue(ic);

    for (unsigned i = 0; i < ic->nb_streams; i++) {
        AVStream *const st = ic->streams[i];
        FFStream *const sti = ffstream(st);

        if (st->start_time == AV_NOPTS_VALUE &&
            sti->first_dts == AV_NOPTS_VALUE &&
            st->codecpar->codec_type != AVMEDIA_TYPE_UNKNOWN)
            av_log(ic, AV_LOG_WARNING,
                   "start time for stream %d is not set in estimate_timings_from_pts\n", i);

        /* Drop any parser state from earlier probing; the re-read below
         * starts from an arbitrary file position. */
        if (sti->parser) {
            av_parser_close(sti->parser);
            sti->parser = NULL;
        }
    }

    if (ic->skip_estimate_duration_from_pts) {
        av_log(ic, AV_LOG_INFO, "Skipping duration calculation in estimate_timings_from_pts\n");
        goto skip_duration_calc;
    }

    av_opt_set_int(ic, "skip_changes", 1, AV_OPT_SEARCH_CHILDREN);
    /* estimate the end time (duration) */
    /* XXX: may need to support wrapping */
    filesize = ic->pb ? avio_size(ic->pb) : 0;
    /* Read a window at the end of the file; if no usable PTS is found,
     * retry with a window twice as large, up to duration_max_retry times. */
    do {
        is_end = found_duration;
        offset = filesize - (duration_max_read_size << retry);
        if (offset < 0)
            offset = 0;

        avio_seek(ic->pb, offset, SEEK_SET);
        read_size = 0;
        for (;;) {
            AVStream *st;
            FFStream *sti;
            if (read_size >= duration_max_read_size << (FFMAX(retry - 1, 0)))
                break;

            do {
                ret = ff_read_packet(ic, pkt);
            } while (ret == AVERROR(EAGAIN));
            if (ret != 0)
                break;
            read_size += pkt->size;
            st = ic->streams[pkt->stream_index];
            sti = ffstream(st);
            /* A packet PTS is only useful relative to a known stream start. */
            if (pkt->pts != AV_NOPTS_VALUE &&
                (st->start_time != AV_NOPTS_VALUE ||
                 sti->first_dts != AV_NOPTS_VALUE)) {
                if (pkt->duration == 0) {
                    /* Derive a per-frame duration so the last packet's end
                     * is included in the estimate. */
                    compute_frame_duration(ic, &num, &den, st, sti->parser, pkt);
                    if (den && num) {
                        pkt->duration = av_rescale_rnd(1,
                                           num * (int64_t) st->time_base.den,
                                           den * (int64_t) st->time_base.num,
                                           AV_ROUND_DOWN);
                    }
                }
                duration = pkt->pts + pkt->duration;
                found_duration = 1;
                if (st->start_time != AV_NOPTS_VALUE)
                    duration -= st->start_time;
                else
                    duration -= sti->first_dts;
                if (duration > 0) {
                    /* Accept a larger value only if it is within a minute of
                     * the previous estimate, to reject wild PTS outliers. */
                    if (st->duration == AV_NOPTS_VALUE || sti->info->last_duration<= 0 ||
                        (st->duration < duration && FFABS(duration - sti->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
                        st->duration = duration;
                    sti->info->last_duration = duration;
                }
            }
            av_packet_unref(pkt);
        }

        /* check if all audio/video streams have valid duration */
        if (!is_end) {
            is_end = 1;
            for (unsigned i = 0; i < ic->nb_streams; i++) {
                const AVStream *const st = ic->streams[i];
                switch (st->codecpar->codec_type) {
                case AVMEDIA_TYPE_VIDEO:
                case AVMEDIA_TYPE_AUDIO:
                    if (st->duration == AV_NOPTS_VALUE)
                        is_end = 0;
                }
            }
        }
    } while (!is_end &&
             offset &&
             ++retry <= duration_max_retry);

    av_opt_set_int(ic, "skip_changes", 0, AV_OPT_SEARCH_CHILDREN);

    /* warn about audio/video streams which duration could not be estimated */
    for (unsigned i = 0; i < ic->nb_streams; i++) {
        const AVStream *const st = ic->streams[i];
        const FFStream *const sti = cffstream(st);

        if (st->duration == AV_NOPTS_VALUE) {
            switch (st->codecpar->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_AUDIO:
                if (st->start_time != AV_NOPTS_VALUE || sti->first_dts != AV_NOPTS_VALUE) {
                    av_log(ic, AV_LOG_WARNING, "stream %d : no PTS found at end of file, duration not set\n", i);
                } else
                    av_log(ic, AV_LOG_WARNING, "stream %d : no TS found at start of file, duration not set\n", i);
            }
        }
    }
skip_duration_calc:
    fill_all_stream_timings(ic);

    /* Restore the original file position and reset per-stream decode
     * timestamp state disturbed by the probing above. */
    avio_seek(ic->pb, old_offset, SEEK_SET);
    for (unsigned i = 0; i < ic->nb_streams; i++) {
        AVStream *const st = ic->streams[i];
        FFStream *const sti = ffstream(st);

        sti->cur_dts = sti->first_dts;
        sti->last_IP_pts = AV_NOPTS_VALUE;
        sti->last_dts_for_order_check = AV_NOPTS_VALUE;
        for (int j = 0; j < MAX_REORDER_DELAY + 1; j++)
            sti->pts_buffer[j] = AV_NOPTS_VALUE;
    }
}
2009 | | |
2010 | | /* 1:1 map to AVDurationEstimationMethod */ |
2011 | | static const char *const duration_name[] = { |
2012 | | [AVFMT_DURATION_FROM_PTS] = "pts", |
2013 | | [AVFMT_DURATION_FROM_STREAM] = "stream", |
2014 | | [AVFMT_DURATION_FROM_BITRATE] = "bit rate", |
2015 | | }; |
2016 | | |
2017 | | static const char *duration_estimate_name(enum AVDurationEstimationMethod method) |
2018 | 0 | { |
2019 | 0 | return duration_name[method]; |
2020 | 0 | } |
2021 | | |
2022 | | static void estimate_timings(AVFormatContext *ic, int64_t old_offset) |
2023 | 0 | { |
2024 | 0 | int64_t file_size; |
2025 | | |
2026 | | /* get the file size, if possible */ |
2027 | 0 | if (ic->iformat->flags & AVFMT_NOFILE) { |
2028 | 0 | file_size = 0; |
2029 | 0 | } else { |
2030 | 0 | file_size = avio_size(ic->pb); |
2031 | 0 | file_size = FFMAX(0, file_size); |
2032 | 0 | } |
2033 | |
|
2034 | 0 | if ((!strcmp(ic->iformat->name, "mpeg") || |
2035 | 0 | !strcmp(ic->iformat->name, "mpegts")) && |
2036 | 0 | file_size && (ic->pb->seekable & AVIO_SEEKABLE_NORMAL)) { |
2037 | | /* get accurate estimate from the PTSes */ |
2038 | 0 | estimate_timings_from_pts(ic, old_offset); |
2039 | 0 | ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS; |
2040 | 0 | } else if (has_duration(ic)) { |
2041 | | /* at least one component has timings - we use them for all |
2042 | | * the components */ |
2043 | 0 | fill_all_stream_timings(ic); |
2044 | | /* nut demuxer estimate the duration from PTS */ |
2045 | 0 | if (!strcmp(ic->iformat->name, "nut")) |
2046 | 0 | ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS; |
2047 | 0 | else |
2048 | 0 | ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM; |
2049 | 0 | } else { |
2050 | | /* less precise: use bitrate info */ |
2051 | 0 | estimate_timings_from_bit_rate(ic); |
2052 | 0 | ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE; |
2053 | 0 | } |
2054 | 0 | update_stream_timings(ic); |
2055 | |
|
2056 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
2057 | 0 | AVStream *const st = ic->streams[i]; |
2058 | 0 | if (st->time_base.den) |
2059 | 0 | av_log(ic, AV_LOG_TRACE, "stream %u: start_time: %s duration: %s\n", i, |
2060 | 0 | av_ts2timestr(st->start_time, &st->time_base), |
2061 | 0 | av_ts2timestr(st->duration, &st->time_base)); |
2062 | 0 | } |
2063 | 0 | av_log(ic, AV_LOG_TRACE, |
2064 | 0 | "format: start_time: %s duration: %s (estimate from %s) bitrate=%"PRId64" kb/s\n", |
2065 | 0 | av_ts2timestr(ic->start_time, &AV_TIME_BASE_Q), |
2066 | 0 | av_ts2timestr(ic->duration, &AV_TIME_BASE_Q), |
2067 | 0 | duration_estimate_name(ic->duration_estimation_method), |
2068 | 0 | (int64_t)ic->bit_rate / 1000); |
2069 | 0 | } |
2070 | | |
2071 | | static int determinable_frame_size(const AVCodecContext *avctx) |
2072 | 0 | { |
2073 | 0 | switch(avctx->codec_id) { |
2074 | 0 | case AV_CODEC_ID_MP1: |
2075 | 0 | case AV_CODEC_ID_MP2: |
2076 | 0 | case AV_CODEC_ID_MP3: |
2077 | 0 | case AV_CODEC_ID_CODEC2: |
2078 | 0 | return 1; |
2079 | 0 | } |
2080 | | |
2081 | 0 | return 0; |
2082 | 0 | } |
2083 | | |
/* Check whether enough codec parameters are known for this stream.
 * Returns 1 when complete; returns 0 and (optionally) reports the first
 * missing parameter through *errmsg_ptr otherwise. */
static int has_codec_parameters(const AVStream *st, const char **errmsg_ptr)
{
    const FFStream *const sti = cffstream(st);
    const AVCodecContext *const avctx = sti->avctx;

/* Record the failure reason (if requested) and bail out of the function. */
#define FAIL(errmsg) do {                         \
        if (errmsg_ptr)                           \
            *errmsg_ptr = errmsg;                 \
        return 0;                                 \
    } while (0)

    if (   avctx->codec_id == AV_CODEC_ID_NONE
        && avctx->codec_type != AVMEDIA_TYPE_DATA)
        FAIL("unknown codec");
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (!avctx->frame_size && determinable_frame_size(avctx))
            FAIL("unspecified frame size");
        /* sample_fmt/DTS checks only apply once a decoder probe ran
         * (found_decoder >= 0). */
        if (sti->info->found_decoder >= 0 &&
            avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
            FAIL("unspecified sample format");
        if (!avctx->sample_rate)
            FAIL("unspecified sample rate");
        if (!avctx->ch_layout.nb_channels)
            FAIL("unspecified number of channels");
        if (sti->info->found_decoder >= 0 && !sti->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
            FAIL("no decodable DTS frames");
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (!avctx->width)
            FAIL("unspecified size");
        if (sti->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
            FAIL("unspecified pixel format");
        /* RV30/40 need a frame or an explicit SAR to be considered complete. */
        if (st->codecpar->codec_id == AV_CODEC_ID_RV30 || st->codecpar->codec_id == AV_CODEC_ID_RV40)
            if (!st->sample_aspect_ratio.num && !st->codecpar->sample_aspect_ratio.num && !sti->codec_info_nb_frames)
                FAIL("no frame in rv30/40 and no sar");
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
            FAIL("unspecified size");
        break;
    case AVMEDIA_TYPE_DATA:
        if (avctx->codec_id == AV_CODEC_ID_NONE) return 1;
    }

    return 1;
}
2131 | | |
2132 | | /* returns 1 or 0 if or if not decoded data was returned, or a negative error */ |
static int try_decode_frame(AVFormatContext *s, AVStream *st,
                            const AVPacket *pkt, AVDictionary **options)
{
    FFStream *const sti = ffstream(st);
    AVCodecContext *const avctx = sti->avctx;
    const AVCodec *codec;
    int got_picture = 1, ret = 0;
    AVFrame *frame = av_frame_alloc();
    AVSubtitle subtitle;
    int do_skip_frame = 0;
    enum AVDiscard skip_frame;  /* saved value, restored on exit */
    int pkt_to_send = pkt->size > 0;

    if (!frame)
        return AVERROR(ENOMEM);

    /* Open a probe decoder unless one is already open, or a previous
     * attempt for this very codec id already failed (found_decoder stores
     * the negated codec id of a failed attempt). */
    if (!avcodec_is_open(avctx) &&
        sti->info->found_decoder <= 0 &&
        (st->codecpar->codec_id != -sti->info->found_decoder || !st->codecpar->codec_id)) {
        AVDictionary *thread_opt = NULL;

        codec = find_probe_decoder(s, st, st->codecpar->codec_id);

        if (!codec) {
            sti->info->found_decoder = -st->codecpar->codec_id;
            ret = -1;
            goto fail;
        }

        /* Force thread count to 1 since the H.264 decoder will not extract
         * SPS and PPS to extradata during multi-threaded decoding. */
        av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
        /* Force lowres to 0. The decoder might reduce the video size by the
         * lowres factor, and we don't want that propagated to the stream's
         * codecpar */
        av_dict_set(options ? options : &thread_opt, "lowres", "0", 0);
        if (s->codec_whitelist)
            av_dict_set(options ? options : &thread_opt, "codec_whitelist", s->codec_whitelist, 0);
        ret = avcodec_open2(avctx, codec, options ? options : &thread_opt);
        if (!options)
            av_dict_free(&thread_opt);
        if (ret < 0) {
            sti->info->found_decoder = -avctx->codec_id;
            goto fail;
        }
        sti->info->found_decoder = 1;
    } else if (!sti->info->found_decoder)
        sti->info->found_decoder = 1;

    if (sti->info->found_decoder < 0) {
        ret = -1;
        goto fail;
    }

    /* Decoders that fill parameters even while skipping frames can run
     * with AVDISCARD_ALL, avoiding full decoding work. */
    if (avpriv_codec_get_cap_skip_frame_fill_param(avctx->codec)) {
        do_skip_frame = 1;
        skip_frame = avctx->skip_frame;
        avctx->skip_frame = AVDISCARD_ALL;
    }

    /* Keep feeding/draining (an empty pkt drains) until the parameters and
     * the decode delay are known, or the decoder errors out. */
    while ((pkt_to_send || (!pkt->data && got_picture)) &&
           ret >= 0 &&
           (!has_codec_parameters(st, NULL) || !has_decode_delay_been_guessed(st) ||
            (!sti->codec_info_nb_frames &&
             (avctx->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
        got_picture = 0;
        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
            avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            ret = avcodec_send_packet(avctx, pkt);
            if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
                break;
            if (ret >= 0)
                pkt_to_send = 0;
            ret = avcodec_receive_frame(avctx, frame);
            if (ret >= 0)
                got_picture = 1;
            /* EAGAIN/EOF are normal flow control, not errors here. */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
        } else if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            ret = avcodec_decode_subtitle2(avctx, &subtitle,
                                           &got_picture, pkt);
            if (got_picture)
                avsubtitle_free(&subtitle);
            if (ret >= 0)
                pkt_to_send = 0;
        }
        if (ret >= 0) {
            if (got_picture)
                sti->nb_decoded_frames++;
            ret = got_picture;
        }
    }

fail:
    if (do_skip_frame) {
        avctx->skip_frame = skip_frame;
    }

    av_frame_free(&frame);
    return ret;
}
2234 | | |
2235 | | static int chapter_start_cmp(const void *p1, const void *p2) |
2236 | 0 | { |
2237 | 0 | const AVChapter *const ch1 = *(AVChapter**)p1; |
2238 | 0 | const AVChapter *const ch2 = *(AVChapter**)p2; |
2239 | 0 | int delta = av_compare_ts(ch1->start, ch1->time_base, ch2->start, ch2->time_base); |
2240 | 0 | if (delta) |
2241 | 0 | return delta; |
2242 | 0 | return FFDIFFSIGN(ch1->id, ch2->id); |
2243 | 0 | } |
2244 | | |
/* Fill in missing chapter end times: each open-ended chapter ends where the
 * next one starts, or at the container's end time as a last resort. */
static int compute_chapters_end(AVFormatContext *s)
{
    int64_t max_time = 0;
    AVChapter **timetable;

    if (!s->nb_chapters)
        return 0;

    /* Absolute end of the container in AV_TIME_BASE units (0 = unknown);
     * the overflow guard skips the sum when it cannot be represented. */
    if (s->duration > 0 && s->start_time < INT64_MAX - s->duration)
        max_time = s->duration +
                   ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);

    /* Work on a sorted copy so s->chapters keeps its original order. */
    timetable = av_memdup(s->chapters, s->nb_chapters * sizeof(*timetable));
    if (!timetable)
        return AVERROR(ENOMEM);
    qsort(timetable, s->nb_chapters, sizeof(*timetable), chapter_start_cmp);

    for (unsigned i = 0; i < s->nb_chapters; i++)
        if (timetable[i]->end == AV_NOPTS_VALUE) {
            AVChapter *const ch = timetable[i];
            int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
                                                  ch->time_base)
                                   : INT64_MAX;

            if (i + 1 < s->nb_chapters) {
                const AVChapter *const ch1 = timetable[i + 1];
                int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
                                                  ch->time_base);
                if (next_start > ch->start && next_start < end)
                    end = next_start;
            }
            /* Never produce an end before the start; degenerate chapters
             * get zero length instead. */
            ch->end = (end == INT64_MAX || end < ch->start) ? ch->start : end;
        }
    av_free(timetable);
    return 0;
}
2281 | | |
/* Return entry i of the table of standard frame rates, expressed as
 * timebase ticks (den), scaled so all entries share a common unit. */
static int get_std_framerate(int i)
{
    static const int high_1001_rates[3] = { 80, 120, 240 };
    static const int exact_rates[6]     = { 24, 30, 60, 12, 15, 48 };

    /* Entries 0..359: every rate (i+1)/1.001 from ~1 fps up to ~360 fps. */
    if (i < 30 * 12)
        return (i + 1) * 1001;

    /* Entries 360..389: 31..60 fps, 1001-based, scaled by 12. */
    if (i < 30 * 12 + 30)
        return (i - 30 * 12 + 31) * 1001 * 12;

    /* Entries 390..392: high 1001-based rates (80, 120, 240). */
    if (i < 30 * 12 + 30 + 3)
        return high_1001_rates[i - (30 * 12 + 30)] * 1001 * 12;

    /* Remaining entries: exact (1000-based) rates. */
    return exact_rates[i - (30 * 12 + 30 + 3)] * 1000 * 12;
}
2299 | | |
2300 | | /* Is the time base unreliable? |
2301 | | * This is a heuristic to balance between quick acceptance of the values in |
2302 | | * the headers vs. some extra checks. |
2303 | | * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps. |
2304 | | * MPEG-2 commonly misuses field repeat flags to store different framerates. |
2305 | | * And there are "variable" fps files this needs to detect as well. */ |
static int tb_unreliable(AVFormatContext *ic, AVStream *st)
{
    FFStream *const sti = ffstream(st);
    const AVCodecDescriptor *desc = sti->codec_desc;
    AVCodecContext *c = sti->avctx;
    /* Field-based codecs tick twice per frame, so double the rate. */
    AVRational mul = (AVRational){ desc && (desc->props & AV_CODEC_PROP_FIELDS) ? 2 : 1, 1 };
    AVRational time_base = c->framerate.num ? av_inv_q(av_mul_q(c->framerate, mul))
                                            /* NOHEADER check added to not break existing behavior */
                                            : (((ic->ctx_flags & AVFMTCTX_NOHEADER) ||
                                                st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ? (AVRational){0, 1}
                                                                                                : st->time_base);

    /* Implausibly high (>=101 fps equivalent) or low (<5 fps) timebases, and
     * codec families known to misreport framerates, are treated as
     * unreliable. */
    if (time_base.den >= 101LL * time_base.num ||
        time_base.den <    5LL * time_base.num ||
        // c->codec_tag == AV_RL32("DIVX") ||
        // c->codec_tag == AV_RL32("XVID") ||
        c->codec_tag == AV_RL32("mp4v") ||
        c->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
        c->codec_id == AV_CODEC_ID_GIF ||
        c->codec_id == AV_CODEC_ID_HEVC ||
        c->codec_id == AV_CODEC_ID_H264)
        return 1;
    return 0;
}
2330 | | |
/* Feed one video timestamp into the "real frame rate" (rfps) estimator.
 * For each standard candidate framerate, accumulates the first and second
 * moments of the rounding (phase) error of the timestamp when expressed in
 * ticks of that rate, plus the GCD and sum of consecutive timestamp deltas.
 * ff_rfps_calculate() later converts these statistics into r_frame_rate.
 * Returns 0 on success or AVERROR(ENOMEM). */
int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts)
{
    FFStream *const sti = ffstream(st);
    FFStreamInfo *info = sti->info;
    int64_t last = info->last_dts;

    /* Only usable when both timestamps are valid, strictly increasing, and
     * their difference does not overflow int64. */
    if (ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last
        && ts - (uint64_t)last < INT64_MAX) {
        double dts = (is_relative(ts) ? ts - RELATIVE_TS_BASE : ts) * av_q2d(st->time_base);
        int64_t duration = ts - last;

        /* Lazily allocate the error accumulators (two phase offsets). */
        if (!info->duration_error)
            info->duration_error = av_mallocz(sizeof(info->duration_error[0])*2);
        if (!info->duration_error)
            return AVERROR(ENOMEM);

        for (int i = 0; i < MAX_STD_TIMEBASES; i++) {
            /* Candidates already marked hopeless (variance >= 1e10) are skipped. */
            if (info->duration_error[0][1][i] < 1e10) {
                int framerate = get_std_framerate(i);
                /* Timestamp expressed in ticks of the candidate framerate. */
                double sdts = dts*framerate/(1001*12);
                /* j toggles a half-tick phase offset so both frame-aligned and
                 * field-aligned streams can match a candidate. */
                for (int j = 0; j < 2; j++) {
                    int64_t ticks = llrint(sdts+j*0.5);
                    double error = sdts - ticks + j*0.5;
                    info->duration_error[j][0][i] += error;
                    info->duration_error[j][1][i] += error*error;
                }
            }
        }
        /* Saturation guard: stop accumulating rather than overflow. */
        if (info->rfps_duration_sum <= INT64_MAX - duration) {
            info->duration_count++;
            info->rfps_duration_sum += duration;
        }

        /* Every 10 samples, prune candidates whose error variance is too
         * large for both phase offsets (2e10 marks them as rejected). */
        if (info->duration_count % 10 == 0) {
            int n = info->duration_count;
            for (int i = 0; i < MAX_STD_TIMEBASES; i++) {
                if (info->duration_error[0][1][i] < 1e10) {
                    double a0     = info->duration_error[0][0][i] / n;
                    double error0 = info->duration_error[0][1][i] / n - a0*a0;
                    double a1     = info->duration_error[1][0][i] / n;
                    double error1 = info->duration_error[1][1][i] / n - a1*a1;
                    if (error0 > 0.04 && error1 > 0.04) {
                        info->duration_error[0][1][i] = 2e10;
                        info->duration_error[1][1][i] = 2e10;
                    }
                }
            }
        }

        // ignore the first 4 values, they might have some random jitter
        if (info->duration_count > 3 && is_relative(ts) == is_relative(last))
            info->duration_gcd = av_gcd(info->duration_gcd, duration);
    }
    if (ts != AV_NOPTS_VALUE)
        info->last_dts = ts;

    return 0;
}
2391 | | |
/* Finalize the rfps statistics collected by ff_rfps_add_frame(): derive
 * r_frame_rate (and possibly avg_frame_rate) for each video stream, then
 * reset the per-stream estimator state. */
void ff_rfps_calculate(AVFormatContext *ic)
{
    for (unsigned i = 0; i < ic->nb_streams; i++) {
        AVStream *const st = ic->streams[i];
        FFStream *const sti = ffstream(st);

        if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
            continue;
        // the check for tb_unreliable() is not completely correct, since this is not about handling
        // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
        // ipmovie.c produces.
        /* First try: if the GCD of the deltas is large enough (> den/500),
         * the timestamps sit on a coarser grid than the time base suggests —
         * use that grid directly as the frame rate. */
        if (tb_unreliable(ic, st) && sti->info->duration_count > 15 && sti->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num &&
            sti->info->duration_gcd < INT64_MAX / st->time_base.num)
            av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * sti->info->duration_gcd, INT_MAX);
        /* Second try: pick the standard framerate with the lowest phase-error
         * variance, if any candidate is plausible. */
        if (sti->info->duration_count > 1 && !st->r_frame_rate.num
            && tb_unreliable(ic, st)) {
            int num = 0;
            double best_error = 0.01;
            AVRational ref_rate = st->r_frame_rate.num ? st->r_frame_rate : av_inv_q(st->time_base);

            for (int j = 0; j < MAX_STD_TIMEBASES; j++) {
                /* Skip candidates faster than what the observed stream
                 * duration / packet spacing can support. */
                if (sti->info->codec_info_duration &&
                    sti->info->codec_info_duration*av_q2d(st->time_base) < (1001*11.5)/get_std_framerate(j))
                    continue;
                if (!sti->info->codec_info_duration && get_std_framerate(j) < 1001*12)
                    continue;

                if (av_q2d(st->time_base) * sti->info->rfps_duration_sum / sti->info->duration_count < (1001*12.0 * 0.8)/get_std_framerate(j))
                    continue;

                /* Evaluate both phase offsets accumulated by ff_rfps_add_frame(). */
                for (int k = 0; k < 2; k++) {
                    int n = sti->info->duration_count;
                    double a = sti->info->duration_error[k][0][j] / n;
                    double error = sti->info->duration_error[k][1][j]/n - a*a;

                    if (error < best_error && best_error> 0.000000001) {
                        best_error= error;
                        num = get_std_framerate(j);
                    }
                    if (error < 0.02)
                        av_log(ic, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
                }
            }
            // do not increase frame rate by more than 1 % in order to match a standard rate.
            if (num && (!ref_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(ref_rate)))
                av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
        }
        /* If avg_frame_rate is still unset and r_frame_rate agrees with the
         * mean observed packet spacing (within one time-base unit), adopt it. */
        if (   !st->avg_frame_rate.num
            && st->r_frame_rate.num && sti->info->rfps_duration_sum
            && sti->info->codec_info_duration <= 0
            && sti->info->duration_count > 2
            && fabs(1.0 / (av_q2d(st->r_frame_rate) * av_q2d(st->time_base)) - sti->info->rfps_duration_sum / (double)sti->info->duration_count) <= 1.0
            ) {
            av_log(ic, AV_LOG_DEBUG, "Setting avg frame rate based on r frame rate\n");
            st->avg_frame_rate = st->r_frame_rate;
        }

        /* Reset estimator state for a potential later probing pass. */
        av_freep(&sti->info->duration_error);
        sti->info->last_dts = AV_NOPTS_VALUE;
        sti->info->duration_count = 0;
        sti->info->rfps_duration_sum = 0;
    }
}
2455 | | |
2456 | | static int extract_extradata_check(AVStream *st) |
2457 | 0 | { |
2458 | 0 | const AVBitStreamFilter *const f = av_bsf_get_by_name("extract_extradata"); |
2459 | 0 | if (!f) |
2460 | 0 | return 0; |
2461 | | |
2462 | 0 | if (f->codec_ids) { |
2463 | 0 | const enum AVCodecID *ids; |
2464 | 0 | for (ids = f->codec_ids; *ids != AV_CODEC_ID_NONE; ids++) |
2465 | 0 | if (*ids == st->codecpar->codec_id) |
2466 | 0 | return 1; |
2467 | 0 | } |
2468 | | |
2469 | 0 | return 0; |
2470 | 0 | } |
2471 | | |
2472 | | static int extract_extradata_init(AVStream *st) |
2473 | 0 | { |
2474 | 0 | FFStream *const sti = ffstream(st); |
2475 | 0 | const AVBitStreamFilter *f; |
2476 | 0 | int ret; |
2477 | |
|
2478 | 0 | f = av_bsf_get_by_name("extract_extradata"); |
2479 | 0 | if (!f) |
2480 | 0 | goto finish; |
2481 | | |
2482 | | /* check that the codec id is supported */ |
2483 | 0 | ret = extract_extradata_check(st); |
2484 | 0 | if (!ret) |
2485 | 0 | goto finish; |
2486 | | |
2487 | 0 | av_bsf_free(&sti->extract_extradata.bsf); |
2488 | 0 | ret = av_bsf_alloc(f, &sti->extract_extradata.bsf); |
2489 | 0 | if (ret < 0) |
2490 | 0 | return ret; |
2491 | | |
2492 | 0 | ret = avcodec_parameters_copy(sti->extract_extradata.bsf->par_in, |
2493 | 0 | st->codecpar); |
2494 | 0 | if (ret < 0) |
2495 | 0 | goto fail; |
2496 | | |
2497 | 0 | sti->extract_extradata.bsf->time_base_in = st->time_base; |
2498 | |
|
2499 | 0 | ret = av_bsf_init(sti->extract_extradata.bsf); |
2500 | 0 | if (ret < 0) |
2501 | 0 | goto fail; |
2502 | | |
2503 | 0 | finish: |
2504 | 0 | sti->extract_extradata.inited = 1; |
2505 | |
|
2506 | 0 | return 0; |
2507 | 0 | fail: |
2508 | 0 | av_bsf_free(&sti->extract_extradata.bsf); |
2509 | 0 | return ret; |
2510 | 0 | } |
2511 | | |
/* Run the "extract_extradata" bitstream filter over one packet and move any
 * extradata side data it produces into the stream's probing codec context.
 * Lazily initializes the filter on first use. Returns 0 on success (including
 * "nothing extracted") or a negative AVERROR. */
static int extract_extradata(FFFormatContext *si, AVStream *st, const AVPacket *pkt)
{
    FFStream *const sti = ffstream(st);
    AVPacket *const pkt_ref = si->parse_pkt;
    int ret;

    if (!sti->extract_extradata.inited) {
        ret = extract_extradata_init(st);
        if (ret < 0)
            return ret;
    }

    /* Init succeeded but no bsf was created: codec unsupported by the filter. */
    if (sti->extract_extradata.inited && !sti->extract_extradata.bsf)
        return 0;

    /* Reference (not copy) the caller's packet; the bsf takes ownership of
     * pkt_ref's contents on a successful send below. */
    ret = av_packet_ref(pkt_ref, pkt);
    if (ret < 0)
        return ret;

    ret = av_bsf_send_packet(sti->extract_extradata.bsf, pkt_ref);
    if (ret < 0) {
        av_packet_unref(pkt_ref);
        return ret;
    }

    /* Drain output packets until the bsf is exhausted or extradata is found. */
    while (ret >= 0 && !sti->avctx->extradata) {
        ret = av_bsf_receive_packet(sti->extract_extradata.bsf, pkt_ref);
        if (ret < 0) {
            /* EAGAIN/EOF just end the drain loop; anything else is an error. */
            if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
                return ret;
            continue;
        }

        for (int i = 0; i < pkt_ref->side_data_elems; i++) {
            AVPacketSideData *const side_data = &pkt_ref->side_data[i];
            if (side_data->type == AV_PKT_DATA_NEW_EXTRADATA) {
                /* Steal the side-data buffer instead of copying it. */
                sti->avctx->extradata      = side_data->data;
                sti->avctx->extradata_size = side_data->size;
                side_data->data = NULL;
                side_data->size = 0;
                break;
            }
        }
        av_packet_unref(pkt_ref);
    }

    return 0;
}
2560 | | |
2561 | | static int parameters_from_context(AVFormatContext *ic, AVCodecParameters *par, |
2562 | | const AVCodecContext *avctx) |
2563 | 0 | { |
2564 | 0 | AVCodecParameters *par_tmp; |
2565 | 0 | int ret; |
2566 | |
|
2567 | 0 | par_tmp = avcodec_parameters_alloc(); |
2568 | 0 | if (!par_tmp) |
2569 | 0 | return AVERROR(ENOMEM); |
2570 | | |
2571 | 0 | ret = avcodec_parameters_copy(par_tmp, par); |
2572 | 0 | if (ret < 0) |
2573 | 0 | goto fail; |
2574 | | |
2575 | 0 | ret = avcodec_parameters_from_context(par, avctx); |
2576 | 0 | if (ret < 0) |
2577 | 0 | goto fail; |
2578 | | |
2579 | | /* Restore some values if they are signaled at the container level |
2580 | | * given they may have been replaced by codec level values as read |
2581 | | * internally by avformat_find_stream_info(). |
2582 | | */ |
2583 | 0 | if (par_tmp->color_range != AVCOL_RANGE_UNSPECIFIED) |
2584 | 0 | par->color_range = par_tmp->color_range; |
2585 | 0 | if (par_tmp->color_primaries != AVCOL_PRI_UNSPECIFIED || |
2586 | 0 | par_tmp->color_trc != AVCOL_TRC_UNSPECIFIED || |
2587 | 0 | par_tmp->color_space != AVCOL_SPC_UNSPECIFIED) { |
2588 | 0 | par->color_primaries = par_tmp->color_primaries; |
2589 | 0 | par->color_trc = par_tmp->color_trc; |
2590 | 0 | par->color_space = par_tmp->color_space; |
2591 | 0 | } |
2592 | 0 | if (par_tmp->chroma_location != AVCHROMA_LOC_UNSPECIFIED) |
2593 | 0 | par->chroma_location = par_tmp->chroma_location; |
2594 | |
|
2595 | 0 | ret = 0; |
2596 | 0 | fail: |
2597 | 0 | avcodec_parameters_free(&par_tmp); |
2598 | |
|
2599 | 0 | return ret; |
2600 | 0 | } |
2601 | | |
2602 | | int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options) |
2603 | 0 | { |
2604 | 0 | FFFormatContext *const si = ffformatcontext(ic); |
2605 | 0 | int count = 0, ret = 0, err; |
2606 | 0 | int64_t read_size; |
2607 | 0 | AVPacket *pkt1 = si->pkt; |
2608 | 0 | int64_t old_offset = avio_tell(ic->pb); |
2609 | | // new streams might appear, no options for those |
2610 | 0 | int orig_nb_streams = ic->nb_streams; |
2611 | 0 | int flush_codecs; |
2612 | 0 | int64_t max_analyze_duration = ic->max_analyze_duration; |
2613 | 0 | int64_t max_stream_analyze_duration; |
2614 | 0 | int64_t max_subtitle_analyze_duration; |
2615 | 0 | int64_t probesize = ic->probesize; |
2616 | 0 | int eof_reached = 0; |
2617 | |
|
2618 | 0 | flush_codecs = probesize > 0; |
2619 | |
|
2620 | 0 | av_opt_set_int(ic, "skip_clear", 1, AV_OPT_SEARCH_CHILDREN); |
2621 | |
|
2622 | 0 | max_stream_analyze_duration = max_analyze_duration; |
2623 | 0 | max_subtitle_analyze_duration = max_analyze_duration; |
2624 | 0 | if (!max_analyze_duration) { |
2625 | 0 | max_stream_analyze_duration = |
2626 | 0 | max_analyze_duration = 5*AV_TIME_BASE; |
2627 | 0 | max_subtitle_analyze_duration = 30*AV_TIME_BASE; |
2628 | 0 | if (!strcmp(ic->iformat->name, "flv")) |
2629 | 0 | max_stream_analyze_duration = 90*AV_TIME_BASE; |
2630 | 0 | if (!strcmp(ic->iformat->name, "mpeg") || !strcmp(ic->iformat->name, "mpegts")) |
2631 | 0 | max_stream_analyze_duration = 7*AV_TIME_BASE; |
2632 | 0 | } |
2633 | |
|
2634 | 0 | if (ic->pb) { |
2635 | 0 | FFIOContext *const ctx = ffiocontext(ic->pb); |
2636 | 0 | av_log(ic, AV_LOG_DEBUG, "Before avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d nb_streams:%d\n", |
2637 | 0 | avio_tell(ic->pb), ctx->bytes_read, ctx->seek_count, ic->nb_streams); |
2638 | 0 | } |
2639 | |
|
2640 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
2641 | 0 | const AVCodec *codec; |
2642 | 0 | AVDictionary *thread_opt = NULL; |
2643 | 0 | AVStream *const st = ic->streams[i]; |
2644 | 0 | FFStream *const sti = ffstream(st); |
2645 | 0 | AVCodecContext *const avctx = sti->avctx; |
2646 | | |
2647 | | /* check if the caller has overridden the codec id */ |
2648 | | // only for the split stuff |
2649 | 0 | if (!sti->parser && !(ic->flags & AVFMT_FLAG_NOPARSE) && sti->request_probe <= 0) { |
2650 | 0 | sti->parser = av_parser_init(st->codecpar->codec_id); |
2651 | 0 | if (sti->parser) { |
2652 | 0 | if (sti->need_parsing == AVSTREAM_PARSE_HEADERS) { |
2653 | 0 | sti->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES; |
2654 | 0 | } else if (sti->need_parsing == AVSTREAM_PARSE_FULL_RAW) { |
2655 | 0 | sti->parser->flags |= PARSER_FLAG_USE_CODEC_TS; |
2656 | 0 | } |
2657 | 0 | } else if (sti->need_parsing) { |
2658 | 0 | av_log(ic, AV_LOG_VERBOSE, "parser not found for codec " |
2659 | 0 | "%s, packets or times may be invalid.\n", |
2660 | 0 | avcodec_get_name(st->codecpar->codec_id)); |
2661 | 0 | } |
2662 | 0 | } |
2663 | |
|
2664 | 0 | ret = avcodec_parameters_to_context(avctx, st->codecpar); |
2665 | 0 | if (ret < 0) |
2666 | 0 | goto find_stream_info_err; |
2667 | 0 | if (sti->request_probe <= 0) |
2668 | 0 | sti->avctx_inited = 1; |
2669 | |
|
2670 | 0 | codec = find_probe_decoder(ic, st, st->codecpar->codec_id); |
2671 | | |
2672 | | /* Force thread count to 1 since the H.264 decoder will not extract |
2673 | | * SPS and PPS to extradata during multi-threaded decoding. */ |
2674 | 0 | av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0); |
2675 | | /* Force lowres to 0. The decoder might reduce the video size by the |
2676 | | * lowres factor, and we don't want that propagated to the stream's |
2677 | | * codecpar */ |
2678 | 0 | av_dict_set(options ? &options[i] : &thread_opt, "lowres", "0", 0); |
2679 | |
|
2680 | 0 | if (ic->codec_whitelist) |
2681 | 0 | av_dict_set(options ? &options[i] : &thread_opt, "codec_whitelist", ic->codec_whitelist, 0); |
2682 | | |
2683 | | // Try to just open decoders, in case this is enough to get parameters. |
2684 | | // Also ensure that subtitle_header is properly set. |
2685 | 0 | if (!has_codec_parameters(st, NULL) && sti->request_probe <= 0 || |
2686 | 0 | st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) { |
2687 | 0 | if (codec && !avctx->codec) |
2688 | 0 | if (avcodec_open2(avctx, codec, options ? &options[i] : &thread_opt) < 0) |
2689 | 0 | av_log(ic, AV_LOG_WARNING, |
2690 | 0 | "Failed to open codec in %s\n", __func__); |
2691 | 0 | } |
2692 | 0 | if (!options) |
2693 | 0 | av_dict_free(&thread_opt); |
2694 | 0 | } |
2695 | | |
2696 | 0 | read_size = 0; |
2697 | 0 | for (;;) { |
2698 | 0 | const AVPacket *pkt; |
2699 | 0 | AVStream *st; |
2700 | 0 | FFStream *sti; |
2701 | 0 | AVCodecContext *avctx; |
2702 | 0 | int analyzed_all_streams; |
2703 | 0 | unsigned i; |
2704 | 0 | if (ff_check_interrupt(&ic->interrupt_callback)) { |
2705 | 0 | ret = AVERROR_EXIT; |
2706 | 0 | av_log(ic, AV_LOG_DEBUG, "interrupted\n"); |
2707 | 0 | break; |
2708 | 0 | } |
2709 | | |
2710 | | /* read_frame_internal() in a previous iteration of this loop may |
2711 | | * have made changes to streams without returning a packet for them. |
2712 | | * Handle that here. */ |
2713 | 0 | ret = update_stream_avctx(ic); |
2714 | 0 | if (ret < 0) |
2715 | 0 | goto unref_then_goto_end; |
2716 | | |
2717 | | /* check if one codec still needs to be handled */ |
2718 | 0 | for (i = 0; i < ic->nb_streams; i++) { |
2719 | 0 | AVStream *const st = ic->streams[i]; |
2720 | 0 | FFStream *const sti = ffstream(st); |
2721 | 0 | int fps_analyze_framecount = 20; |
2722 | 0 | int count; |
2723 | |
|
2724 | 0 | if (!has_codec_parameters(st, NULL)) |
2725 | 0 | break; |
2726 | | /* If the timebase is coarse (like the usual millisecond precision |
2727 | | * of mkv), we need to analyze more frames to reliably arrive at |
2728 | | * the correct fps. */ |
2729 | 0 | if (av_q2d(st->time_base) > 0.0005) |
2730 | 0 | fps_analyze_framecount *= 2; |
2731 | 0 | if (!tb_unreliable(ic, st)) |
2732 | 0 | fps_analyze_framecount = 0; |
2733 | 0 | if (ic->fps_probe_size >= 0) |
2734 | 0 | fps_analyze_framecount = ic->fps_probe_size; |
2735 | 0 | if (st->disposition & AV_DISPOSITION_ATTACHED_PIC) |
2736 | 0 | fps_analyze_framecount = 0; |
2737 | | /* variable fps and no guess at the real fps */ |
2738 | 0 | count = (ic->iformat->flags & AVFMT_NOTIMESTAMPS) ? |
2739 | 0 | sti->info->codec_info_duration_fields/2 : |
2740 | 0 | sti->info->duration_count; |
2741 | 0 | if (!(st->r_frame_rate.num && st->avg_frame_rate.num) && |
2742 | 0 | st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { |
2743 | 0 | if (count < fps_analyze_framecount) |
2744 | 0 | break; |
2745 | 0 | } |
2746 | | // Look at the first 3 frames if there is evidence of frame delay |
2747 | | // but the decoder delay is not set. |
2748 | 0 | if (sti->info->frame_delay_evidence && count < 2 && sti->avctx->has_b_frames == 0) |
2749 | 0 | break; |
2750 | 0 | if (!sti->avctx->extradata && |
2751 | 0 | (!sti->extract_extradata.inited || sti->extract_extradata.bsf) && |
2752 | 0 | extract_extradata_check(st)) |
2753 | 0 | break; |
2754 | 0 | if (sti->first_dts == AV_NOPTS_VALUE && |
2755 | 0 | (!(ic->iformat->flags & AVFMT_NOTIMESTAMPS) || sti->need_parsing == AVSTREAM_PARSE_FULL_RAW) && |
2756 | 0 | sti->codec_info_nb_frames < ((st->disposition & AV_DISPOSITION_ATTACHED_PIC) ? 1 : ic->max_ts_probe) && |
2757 | 0 | (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || |
2758 | 0 | st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)) |
2759 | 0 | break; |
2760 | 0 | } |
2761 | 0 | analyzed_all_streams = 0; |
2762 | 0 | if (i == ic->nb_streams && !si->missing_streams) { |
2763 | 0 | analyzed_all_streams = 1; |
2764 | | /* NOTE: If the format has no header, then we need to read some |
2765 | | * packets to get most of the streams, so we cannot stop here. */ |
2766 | 0 | if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) { |
2767 | | /* If we found the info for all the codecs, we can stop. */ |
2768 | 0 | ret = count; |
2769 | 0 | av_log(ic, AV_LOG_DEBUG, "All info found\n"); |
2770 | 0 | flush_codecs = 0; |
2771 | 0 | break; |
2772 | 0 | } |
2773 | 0 | } |
2774 | | /* We did not get all the codec info, but we read too much data. */ |
2775 | 0 | if (read_size >= probesize) { |
2776 | 0 | ret = count; |
2777 | 0 | av_log(ic, AV_LOG_DEBUG, |
2778 | 0 | "Probe buffer size limit of %"PRId64" bytes reached\n", probesize); |
2779 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
2780 | 0 | AVStream *const st = ic->streams[i]; |
2781 | 0 | FFStream *const sti = ffstream(st); |
2782 | 0 | if (!st->r_frame_rate.num && |
2783 | 0 | sti->info->duration_count <= 1 && |
2784 | 0 | st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && |
2785 | 0 | strcmp(ic->iformat->name, "image2")) |
2786 | 0 | av_log(ic, AV_LOG_WARNING, |
2787 | 0 | "Stream #%d: not enough frames to estimate rate; " |
2788 | 0 | "consider increasing probesize\n", i); |
2789 | 0 | } |
2790 | 0 | break; |
2791 | 0 | } |
2792 | | |
2793 | | /* NOTE: A new stream can be added there if no header in file |
2794 | | * (AVFMTCTX_NOHEADER). */ |
2795 | 0 | ret = read_frame_internal(ic, pkt1); |
2796 | 0 | if (ret == AVERROR(EAGAIN)) |
2797 | 0 | continue; |
2798 | | |
2799 | 0 | if (ret < 0) { |
2800 | | /* EOF or error*/ |
2801 | 0 | eof_reached = 1; |
2802 | 0 | break; |
2803 | 0 | } |
2804 | | |
2805 | 0 | if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) { |
2806 | 0 | ret = avpriv_packet_list_put(&si->packet_buffer, |
2807 | 0 | pkt1, NULL, 0); |
2808 | 0 | if (ret < 0) |
2809 | 0 | goto unref_then_goto_end; |
2810 | | |
2811 | 0 | pkt = &si->packet_buffer.tail->pkt; |
2812 | 0 | } else { |
2813 | 0 | pkt = pkt1; |
2814 | 0 | } |
2815 | | |
2816 | 0 | st = ic->streams[pkt->stream_index]; |
2817 | 0 | sti = ffstream(st); |
2818 | 0 | if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) |
2819 | 0 | read_size += pkt->size; |
2820 | |
|
2821 | 0 | avctx = sti->avctx; |
2822 | 0 | if (!sti->avctx_inited) { |
2823 | 0 | ret = avcodec_parameters_to_context(avctx, st->codecpar); |
2824 | 0 | if (ret < 0) |
2825 | 0 | goto unref_then_goto_end; |
2826 | 0 | sti->avctx_inited = 1; |
2827 | 0 | } |
2828 | | |
2829 | 0 | if (pkt->dts != AV_NOPTS_VALUE && sti->codec_info_nb_frames > 1) { |
2830 | | /* check for non-increasing dts */ |
2831 | 0 | if (sti->info->fps_last_dts != AV_NOPTS_VALUE && |
2832 | 0 | sti->info->fps_last_dts >= pkt->dts) { |
2833 | 0 | av_log(ic, AV_LOG_DEBUG, |
2834 | 0 | "Non-increasing DTS in stream %d: packet %d with DTS " |
2835 | 0 | "%"PRId64", packet %d with DTS %"PRId64"\n", |
2836 | 0 | st->index, sti->info->fps_last_dts_idx, |
2837 | 0 | sti->info->fps_last_dts, sti->codec_info_nb_frames, |
2838 | 0 | pkt->dts); |
2839 | 0 | sti->info->fps_first_dts = |
2840 | 0 | sti->info->fps_last_dts = AV_NOPTS_VALUE; |
2841 | 0 | } |
2842 | | /* Check for a discontinuity in dts. If the difference in dts |
2843 | | * is more than 1000 times the average packet duration in the |
2844 | | * sequence, we treat it as a discontinuity. */ |
2845 | 0 | if (sti->info->fps_last_dts != AV_NOPTS_VALUE && |
2846 | 0 | sti->info->fps_last_dts_idx > sti->info->fps_first_dts_idx && |
2847 | 0 | (pkt->dts - (uint64_t)sti->info->fps_last_dts) / 1000 > |
2848 | 0 | (sti->info->fps_last_dts - (uint64_t)sti->info->fps_first_dts) / |
2849 | 0 | (sti->info->fps_last_dts_idx - sti->info->fps_first_dts_idx)) { |
2850 | 0 | av_log(ic, AV_LOG_WARNING, |
2851 | 0 | "DTS discontinuity in stream %d: packet %d with DTS " |
2852 | 0 | "%"PRId64", packet %d with DTS %"PRId64"\n", |
2853 | 0 | st->index, sti->info->fps_last_dts_idx, |
2854 | 0 | sti->info->fps_last_dts, sti->codec_info_nb_frames, |
2855 | 0 | pkt->dts); |
2856 | 0 | sti->info->fps_first_dts = |
2857 | 0 | sti->info->fps_last_dts = AV_NOPTS_VALUE; |
2858 | 0 | } |
2859 | | |
2860 | | /* update stored dts values */ |
2861 | 0 | if (sti->info->fps_first_dts == AV_NOPTS_VALUE) { |
2862 | 0 | sti->info->fps_first_dts = pkt->dts; |
2863 | 0 | sti->info->fps_first_dts_idx = sti->codec_info_nb_frames; |
2864 | 0 | } |
2865 | 0 | sti->info->fps_last_dts = pkt->dts; |
2866 | 0 | sti->info->fps_last_dts_idx = sti->codec_info_nb_frames; |
2867 | 0 | } |
2868 | 0 | if (sti->codec_info_nb_frames > 1) { |
2869 | 0 | int64_t t = 0; |
2870 | 0 | int64_t limit; |
2871 | |
|
2872 | 0 | if (st->time_base.den > 0) |
2873 | 0 | t = av_rescale_q(sti->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q); |
2874 | 0 | if (st->avg_frame_rate.num > 0) |
2875 | 0 | t = FFMAX(t, av_rescale_q(sti->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q)); |
2876 | |
|
2877 | 0 | if ( t == 0 |
2878 | 0 | && sti->codec_info_nb_frames > 30 |
2879 | 0 | && sti->info->fps_first_dts != AV_NOPTS_VALUE |
2880 | 0 | && sti->info->fps_last_dts != AV_NOPTS_VALUE) { |
2881 | 0 | int64_t dur = av_sat_sub64(sti->info->fps_last_dts, sti->info->fps_first_dts); |
2882 | 0 | t = FFMAX(t, av_rescale_q(dur, st->time_base, AV_TIME_BASE_Q)); |
2883 | 0 | } |
2884 | |
|
2885 | 0 | if (analyzed_all_streams) limit = max_analyze_duration; |
2886 | 0 | else if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) limit = max_subtitle_analyze_duration; |
2887 | 0 | else limit = max_stream_analyze_duration; |
2888 | |
|
2889 | 0 | if (t >= limit) { |
2890 | 0 | av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %"PRId64" reached at %"PRId64" microseconds st:%d\n", |
2891 | 0 | limit, |
2892 | 0 | t, pkt->stream_index); |
2893 | 0 | if (ic->flags & AVFMT_FLAG_NOBUFFER) |
2894 | 0 | av_packet_unref(pkt1); |
2895 | 0 | break; |
2896 | 0 | } |
2897 | 0 | if (pkt->duration > 0 && pkt->duration < INT64_MAX - sti->info->codec_info_duration) { |
2898 | 0 | const int fields = sti->codec_desc && (sti->codec_desc->props & AV_CODEC_PROP_FIELDS); |
2899 | 0 | if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE && pkt->pts != AV_NOPTS_VALUE && st->start_time != AV_NOPTS_VALUE && pkt->pts >= st->start_time |
2900 | 0 | && (uint64_t)pkt->pts - st->start_time < INT64_MAX |
2901 | 0 | ) { |
2902 | 0 | sti->info->codec_info_duration = FFMIN(pkt->pts - st->start_time, sti->info->codec_info_duration + pkt->duration); |
2903 | 0 | } else |
2904 | 0 | sti->info->codec_info_duration += pkt->duration; |
2905 | 0 | sti->info->codec_info_duration_fields += sti->parser && sti->need_parsing && fields |
2906 | 0 | ? sti->parser->repeat_pict + 1 : 2; |
2907 | 0 | } |
2908 | 0 | } |
2909 | 0 | if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) { |
2910 | 0 | #if FF_API_R_FRAME_RATE |
2911 | 0 | ff_rfps_add_frame(ic, st, pkt->dts); |
2912 | 0 | #endif |
2913 | 0 | if (pkt->dts != pkt->pts && pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE) |
2914 | 0 | sti->info->frame_delay_evidence = 1; |
2915 | 0 | } |
2916 | 0 | if (!sti->avctx->extradata) { |
2917 | 0 | ret = extract_extradata(si, st, pkt); |
2918 | 0 | if (ret < 0) |
2919 | 0 | goto unref_then_goto_end; |
2920 | 0 | } |
2921 | | |
2922 | | /* If still no information, we try to open the codec and to |
2923 | | * decompress the frame. We try to avoid that in most cases as |
2924 | | * it takes longer and uses more memory. For MPEG-4, we need to |
2925 | | * decompress for QuickTime. |
2926 | | * |
2927 | | * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at |
2928 | | * least one frame of codec data, this makes sure the codec initializes |
2929 | | * the channel configuration and does not only trust the values from |
2930 | | * the container. */ |
2931 | 0 | try_decode_frame(ic, st, pkt, |
2932 | 0 | (options && i < orig_nb_streams) ? &options[i] : NULL); |
2933 | |
|
2934 | 0 | if (ic->flags & AVFMT_FLAG_NOBUFFER) |
2935 | 0 | av_packet_unref(pkt1); |
2936 | |
|
2937 | 0 | sti->codec_info_nb_frames++; |
2938 | 0 | count++; |
2939 | 0 | } |
2940 | | |
2941 | 0 | if (eof_reached) { |
2942 | 0 | for (unsigned stream_index = 0; stream_index < ic->nb_streams; stream_index++) { |
2943 | 0 | AVStream *const st = ic->streams[stream_index]; |
2944 | 0 | AVCodecContext *const avctx = ffstream(st)->avctx; |
2945 | 0 | if (!has_codec_parameters(st, NULL)) { |
2946 | 0 | const AVCodec *codec = find_probe_decoder(ic, st, st->codecpar->codec_id); |
2947 | 0 | if (codec && !avctx->codec) { |
2948 | 0 | AVDictionary *opts = NULL; |
2949 | 0 | if (ic->codec_whitelist) |
2950 | 0 | av_dict_set(&opts, "codec_whitelist", ic->codec_whitelist, 0); |
2951 | 0 | if (avcodec_open2(avctx, codec, (options && stream_index < orig_nb_streams) ? &options[stream_index] : &opts) < 0) |
2952 | 0 | av_log(ic, AV_LOG_WARNING, |
2953 | 0 | "Failed to open codec in %s\n", __func__); |
2954 | 0 | av_dict_free(&opts); |
2955 | 0 | } |
2956 | 0 | } |
2957 | | |
2958 | | // EOF already reached while reading the stream above. |
2959 | | // So continue with reoordering DTS with whatever delay we have. |
2960 | 0 | if (si->packet_buffer.head && !has_decode_delay_been_guessed(st)) { |
2961 | 0 | update_dts_from_pts(ic, stream_index, si->packet_buffer.head); |
2962 | 0 | } |
2963 | 0 | } |
2964 | 0 | } |
2965 | |
|
2966 | 0 | if (flush_codecs) { |
2967 | 0 | AVPacket *empty_pkt = si->pkt; |
2968 | 0 | int err = 0; |
2969 | 0 | av_packet_unref(empty_pkt); |
2970 | |
|
2971 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
2972 | 0 | AVStream *const st = ic->streams[i]; |
2973 | 0 | FFStream *const sti = ffstream(st); |
2974 | | |
2975 | | /* flush the decoders */ |
2976 | 0 | if (sti->info->found_decoder == 1) { |
2977 | 0 | err = try_decode_frame(ic, st, empty_pkt, |
2978 | 0 | (options && i < orig_nb_streams) |
2979 | 0 | ? &options[i] : NULL); |
2980 | |
|
2981 | 0 | if (err < 0) { |
2982 | 0 | av_log(ic, AV_LOG_INFO, |
2983 | 0 | "decoding for stream %d failed\n", st->index); |
2984 | 0 | } |
2985 | 0 | } |
2986 | 0 | } |
2987 | 0 | } |
2988 | |
|
2989 | 0 | ff_rfps_calculate(ic); |
2990 | |
|
2991 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
2992 | 0 | AVStream *const st = ic->streams[i]; |
2993 | 0 | FFStream *const sti = ffstream(st); |
2994 | 0 | AVCodecContext *const avctx = sti->avctx; |
2995 | |
|
2996 | 0 | if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { |
2997 | 0 | if (avctx->codec_id == AV_CODEC_ID_RAWVIDEO && !avctx->codec_tag && !avctx->bits_per_coded_sample) { |
2998 | 0 | uint32_t tag= avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt); |
2999 | 0 | if (avpriv_pix_fmt_find(PIX_FMT_LIST_RAW, tag) == avctx->pix_fmt) |
3000 | 0 | avctx->codec_tag= tag; |
3001 | 0 | } |
3002 | | |
3003 | | /* estimate average framerate if not set by demuxer */ |
3004 | 0 | if (sti->info->codec_info_duration_fields && |
3005 | 0 | !st->avg_frame_rate.num && |
3006 | 0 | sti->info->codec_info_duration) { |
3007 | 0 | int best_fps = 0; |
3008 | 0 | double best_error = 0.01; |
3009 | 0 | AVRational codec_frame_rate = avctx->framerate; |
3010 | |
|
3011 | 0 | if (sti->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2|| |
3012 | 0 | sti->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den || |
3013 | 0 | sti->info->codec_info_duration < 0) |
3014 | 0 | continue; |
3015 | 0 | av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, |
3016 | 0 | sti->info->codec_info_duration_fields * (int64_t) st->time_base.den, |
3017 | 0 | sti->info->codec_info_duration * 2 * (int64_t) st->time_base.num, 60000); |
3018 | | |
3019 | | /* Round guessed framerate to a "standard" framerate if it's |
3020 | | * within 1% of the original estimate. */ |
3021 | 0 | for (int j = 0; j < MAX_STD_TIMEBASES; j++) { |
3022 | 0 | AVRational std_fps = { get_std_framerate(j), 12 * 1001 }; |
3023 | 0 | double error = fabs(av_q2d(st->avg_frame_rate) / |
3024 | 0 | av_q2d(std_fps) - 1); |
3025 | |
|
3026 | 0 | if (error < best_error) { |
3027 | 0 | best_error = error; |
3028 | 0 | best_fps = std_fps.num; |
3029 | 0 | } |
3030 | |
|
3031 | 0 | if ((ffifmt(ic->iformat)->flags_internal & FF_INFMT_FLAG_PREFER_CODEC_FRAMERATE) && |
3032 | 0 | codec_frame_rate.num > 0 && codec_frame_rate.den > 0) { |
3033 | 0 | error = fabs(av_q2d(codec_frame_rate) / |
3034 | 0 | av_q2d(std_fps) - 1); |
3035 | 0 | if (error < best_error) { |
3036 | 0 | best_error = error; |
3037 | 0 | best_fps = std_fps.num; |
3038 | 0 | } |
3039 | 0 | } |
3040 | 0 | } |
3041 | 0 | if (best_fps) |
3042 | 0 | av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den, |
3043 | 0 | best_fps, 12 * 1001, INT_MAX); |
3044 | 0 | } |
3045 | 0 | if (!st->r_frame_rate.num) { |
3046 | 0 | const AVCodecDescriptor *desc = sti->codec_desc; |
3047 | 0 | AVRational mul = (AVRational){ desc && (desc->props & AV_CODEC_PROP_FIELDS) ? 2 : 1, 1 }; |
3048 | 0 | AVRational fr = av_mul_q(avctx->framerate, mul); |
3049 | |
|
3050 | 0 | if (fr.num && fr.den && av_cmp_q(st->time_base, av_inv_q(fr)) <= 0) { |
3051 | 0 | st->r_frame_rate = fr; |
3052 | 0 | } else { |
3053 | 0 | st->r_frame_rate.num = st->time_base.den; |
3054 | 0 | st->r_frame_rate.den = st->time_base.num; |
3055 | 0 | } |
3056 | 0 | } |
3057 | 0 | st->codecpar->framerate = avctx->framerate; |
3058 | 0 | if (sti->display_aspect_ratio.num && sti->display_aspect_ratio.den) { |
3059 | 0 | AVRational hw_ratio = { avctx->height, avctx->width }; |
3060 | 0 | st->sample_aspect_ratio = av_mul_q(sti->display_aspect_ratio, |
3061 | 0 | hw_ratio); |
3062 | 0 | } |
3063 | 0 | } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { |
3064 | 0 | if (!avctx->bits_per_coded_sample) |
3065 | 0 | avctx->bits_per_coded_sample = |
3066 | 0 | av_get_bits_per_sample(avctx->codec_id); |
3067 | | // set stream disposition based on audio service type |
3068 | 0 | switch (avctx->audio_service_type) { |
3069 | 0 | case AV_AUDIO_SERVICE_TYPE_EFFECTS: |
3070 | 0 | st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; |
3071 | 0 | break; |
3072 | 0 | case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED: |
3073 | 0 | st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; |
3074 | 0 | break; |
3075 | 0 | case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED: |
3076 | 0 | st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; |
3077 | 0 | break; |
3078 | 0 | case AV_AUDIO_SERVICE_TYPE_COMMENTARY: |
3079 | 0 | st->disposition = AV_DISPOSITION_COMMENT; |
3080 | 0 | break; |
3081 | 0 | case AV_AUDIO_SERVICE_TYPE_KARAOKE: |
3082 | 0 | st->disposition = AV_DISPOSITION_KARAOKE; |
3083 | 0 | break; |
3084 | 0 | } |
3085 | 0 | } |
3086 | 0 | } |
3087 | | |
3088 | 0 | if (probesize) |
3089 | 0 | estimate_timings(ic, old_offset); |
3090 | |
|
3091 | 0 | av_opt_set_int(ic, "skip_clear", 0, AV_OPT_SEARCH_CHILDREN); |
3092 | |
|
3093 | 0 | if (ret >= 0 && ic->nb_streams) |
3094 | | /* We could not have all the codec parameters before EOF. */ |
3095 | 0 | ret = -1; |
3096 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
3097 | 0 | AVStream *const st = ic->streams[i]; |
3098 | 0 | FFStream *const sti = ffstream(st); |
3099 | 0 | const char *errmsg; |
3100 | | |
3101 | | /* if no packet was ever seen, update context now for has_codec_parameters */ |
3102 | 0 | if (!sti->avctx_inited) { |
3103 | 0 | if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && |
3104 | 0 | st->codecpar->format == AV_SAMPLE_FMT_NONE) |
3105 | 0 | st->codecpar->format = sti->avctx->sample_fmt; |
3106 | 0 | ret = avcodec_parameters_to_context(sti->avctx, st->codecpar); |
3107 | 0 | if (ret < 0) |
3108 | 0 | goto find_stream_info_err; |
3109 | 0 | } |
3110 | 0 | if (!has_codec_parameters(st, &errmsg)) { |
3111 | 0 | char buf[256]; |
3112 | 0 | avcodec_string(buf, sizeof(buf), sti->avctx, 0); |
3113 | 0 | av_log(ic, AV_LOG_WARNING, |
3114 | 0 | "Could not find codec parameters for stream %d (%s): %s\n" |
3115 | 0 | "Consider increasing the value for the 'analyzeduration' (%"PRId64") and 'probesize' (%"PRId64") options\n", |
3116 | 0 | i, buf, errmsg, ic->max_analyze_duration, ic->probesize); |
3117 | 0 | } else { |
3118 | 0 | ret = 0; |
3119 | 0 | } |
3120 | 0 | } |
3121 | | |
3122 | 0 | err = compute_chapters_end(ic); |
3123 | 0 | if (err < 0) { |
3124 | 0 | ret = err; |
3125 | 0 | goto find_stream_info_err; |
3126 | 0 | } |
3127 | | |
3128 | | /* update the stream parameters from the internal codec contexts */ |
3129 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
3130 | 0 | AVStream *const st = ic->streams[i]; |
3131 | 0 | FFStream *const sti = ffstream(st); |
3132 | |
|
3133 | 0 | if (sti->avctx_inited) { |
3134 | 0 | ret = parameters_from_context(ic, st->codecpar, sti->avctx); |
3135 | 0 | if (ret < 0) |
3136 | 0 | goto find_stream_info_err; |
3137 | | |
3138 | 0 | if (sti->avctx->rc_buffer_size > 0 || sti->avctx->rc_max_rate > 0 || |
3139 | 0 | sti->avctx->rc_min_rate) { |
3140 | 0 | size_t cpb_size; |
3141 | 0 | AVCPBProperties *props = av_cpb_properties_alloc(&cpb_size); |
3142 | 0 | if (props) { |
3143 | 0 | if (sti->avctx->rc_buffer_size > 0) |
3144 | 0 | props->buffer_size = sti->avctx->rc_buffer_size; |
3145 | 0 | if (sti->avctx->rc_min_rate > 0) |
3146 | 0 | props->min_bitrate = sti->avctx->rc_min_rate; |
3147 | 0 | if (sti->avctx->rc_max_rate > 0) |
3148 | 0 | props->max_bitrate = sti->avctx->rc_max_rate; |
3149 | 0 | if (!av_packet_side_data_add(&st->codecpar->coded_side_data, |
3150 | 0 | &st->codecpar->nb_coded_side_data, |
3151 | 0 | AV_PKT_DATA_CPB_PROPERTIES, |
3152 | 0 | (uint8_t *)props, cpb_size, 0)) |
3153 | 0 | av_free(props); |
3154 | 0 | } |
3155 | 0 | } |
3156 | 0 | } |
3157 | | |
3158 | 0 | sti->avctx_inited = 0; |
3159 | 0 | } |
3160 | | |
3161 | 0 | find_stream_info_err: |
3162 | 0 | for (unsigned i = 0; i < ic->nb_streams; i++) { |
3163 | 0 | AVStream *const st = ic->streams[i]; |
3164 | 0 | FFStream *const sti = ffstream(st); |
3165 | 0 | int err; |
3166 | |
|
3167 | 0 | if (sti->info) { |
3168 | 0 | av_freep(&sti->info->duration_error); |
3169 | 0 | av_freep(&sti->info); |
3170 | 0 | } |
3171 | |
|
3172 | 0 | if (avcodec_is_open(sti->avctx)) { |
3173 | 0 | err = codec_close(sti); |
3174 | 0 | if (err < 0 && ret >= 0) |
3175 | 0 | ret = err; |
3176 | 0 | } |
3177 | |
|
3178 | 0 | av_bsf_free(&sti->extract_extradata.bsf); |
3179 | 0 | } |
3180 | 0 | if (ic->pb) { |
3181 | 0 | FFIOContext *const ctx = ffiocontext(ic->pb); |
3182 | 0 | av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n", |
3183 | 0 | avio_tell(ic->pb), ctx->bytes_read, ctx->seek_count, count); |
3184 | 0 | } |
3185 | 0 | return ret; |
3186 | | |
3187 | 0 | unref_then_goto_end: |
3188 | 0 | av_packet_unref(pkt1); |
3189 | 0 | goto find_stream_info_err; |
3190 | 0 | } |