Line | Count | Source
1 | | /* |
2 | | * This file is part of mpv. |
3 | | * |
4 | | * mpv is free software; you can redistribute it and/or |
5 | | * modify it under the terms of the GNU Lesser General Public |
6 | | * License as published by the Free Software Foundation; either |
7 | | * version 2.1 of the License, or (at your option) any later version. |
8 | | * |
9 | | * mpv is distributed in the hope that it will be useful, |
10 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | | * GNU Lesser General Public License for more details. |
13 | | * |
14 | | * You should have received a copy of the GNU Lesser General Public |
15 | | * License along with mpv. If not, see <http://www.gnu.org/licenses/>. |
16 | | */ |
17 | | |
18 | | #include <stddef.h> |
19 | | #include <stdbool.h> |
20 | | #include <inttypes.h> |
21 | | #include <math.h> |
22 | | #include <assert.h> |
23 | | |
24 | | #include "mpv_talloc.h" |
25 | | |
26 | | #include "common/msg.h" |
27 | | #include "options/options.h" |
28 | | #include "options/m_config.h" |
29 | | #include "options/m_option.h" |
30 | | #include "common/common.h" |
31 | | #include "common/encode.h" |
32 | | #include "options/m_property.h" |
33 | | #include "osdep/timer.h" |
34 | | |
35 | | #include "audio/out/ao.h" |
36 | | #include "audio/format.h" |
37 | | #include "demux/demux.h" |
38 | | #include "stream/stream.h" |
39 | | #include "sub/osd.h" |
40 | | #include "video/hwdec.h" |
41 | | #include "filters/f_decoder_wrapper.h" |
42 | | #include "video/out/vo.h" |
43 | | |
44 | | #include "core.h" |
45 | | #include "command.h" |
46 | | #include "screenshot.h" |
47 | | |
48 | | enum { |
49 | | // update_video() - code also uses: <0 error, 0 eof, >0 progress |
50 | | VD_ERROR = -1, |
51 | | VD_EOF = 0, // end of file - no new output |
52 | | VD_PROGRESS = 1, // progress, but no output; repeat call with no waiting |
53 | | VD_NEW_FRAME = 2, // the call produced a new frame |
54 | | VD_WAIT = 3, // no EOF, but no output; wait until wakeup |
55 | | }; |
56 | | |
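
The VD_* codes above form a small return-code protocol between video_output_image() and its caller write_video() further down in this file: negative means error, 0 means end of file, and the positive values distinguish "made progress, call again", "produced a frame", and "wait until woken up". A minimal standalone sketch of how such a contract is typically consumed; poll_video() is a made-up stub standing in for video_output_image(), not mpv code:

    #include <stdio.h>

    enum { VD_ERROR = -1, VD_EOF = 0, VD_PROGRESS = 1, VD_NEW_FRAME = 2, VD_WAIT = 3 };

    // Canned result sequence, purely for the demo.
    static int poll_video(void)
    {
        static const int seq[] = {VD_PROGRESS, VD_NEW_FRAME, VD_WAIT, VD_NEW_FRAME, VD_EOF};
        static int i = 0;
        return i < 5 ? seq[i++] : VD_EOF;
    }

    int main(void)
    {
        for (;;) {
            int r = poll_video();
            if (r == VD_ERROR)    { printf("error\n"); break; }
            if (r == VD_EOF)      { printf("EOF: drain and stop\n"); break; }
            if (r == VD_WAIT)     { printf("no output: wait for a wakeup\n"); continue; }
            if (r == VD_PROGRESS) { printf("progress: call again immediately\n"); continue; }
            printf("new frame: hand it to the VO\n");   // VD_NEW_FRAME
        }
        return 0;
    }
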
57 | | static const char av_desync_help_text[] = |
58 | | "\n" |
59 | | "Audio/Video desynchronisation detected! Possible reasons include too slow\n" |
60 | | "hardware, temporary CPU spikes, broken drivers, and broken files. Audio\n" |
62 | | "position will not match the video (see A-V status field).\n" |
62 | | "Consider trying `--profile=fast` and/or `--hwdec=auto` as they may help.\n" |
63 | | "\n"; |
64 | | |
65 | | static bool recreate_video_filters(struct MPContext *mpctx) |
66 | 57.3k | { |
67 | 57.3k | struct MPOpts *opts = mpctx->opts; |
68 | 57.3k | struct vo_chain *vo_c = mpctx->vo_chain; |
69 | 57.3k | mp_assert(vo_c); |
70 | | |
71 | 57.3k | return mp_output_chain_update_filters(vo_c->filter, opts->vf_settings); |
72 | 57.3k | } |
73 | | |
74 | | int reinit_video_filters(struct MPContext *mpctx) |
75 | 1.86k | { |
76 | 1.86k | struct vo_chain *vo_c = mpctx->vo_chain; |
77 | | |
78 | 1.86k | if (!vo_c) |
79 | 1.86k | return 0; |
80 | | |
81 | 0 | if (!recreate_video_filters(mpctx)) |
82 | 0 | return -1; |
83 | | |
84 | 0 | mp_force_video_refresh(mpctx); |
85 | |
86 | 0 | mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL); |
87 | |
88 | 0 | return 0; |
89 | 0 | } |
90 | | |
91 | | static void vo_chain_reset_state(struct vo_chain *vo_c) |
92 | 126k | { |
93 | 126k | vo_seek_reset(vo_c->vo); |
94 | 126k | vo_c->underrun = false; |
95 | 126k | vo_c->underrun_signaled = false; |
96 | 126k | } |
97 | | |
98 | | void reset_video_state(struct MPContext *mpctx) |
99 | 754k | { |
100 | 754k | if (mpctx->vo_chain) { |
101 | 126k | vo_chain_reset_state(mpctx->vo_chain); |
102 | 126k | struct track *t = mpctx->vo_chain->track; |
103 | 126k | if (t && t->dec) |
104 | 126k | mp_decoder_wrapper_set_play_dir(t->dec, mpctx->play_dir); |
105 | 126k | } |
106 | | |
107 | 754k | for (int n = 0; n < mpctx->num_next_frames; n++) |
108 | 0 | mp_image_unrefp(&mpctx->next_frames[n]); |
109 | 754k | mpctx->num_next_frames = 0; |
110 | 754k | mp_image_unrefp(&mpctx->saved_frame); |
111 | | |
112 | 754k | mpctx->delay = 0; |
113 | 754k | mpctx->time_frame = 0; |
114 | 754k | mpctx->video_pts = MP_NOPTS_VALUE; |
115 | 754k | mpctx->last_frame_duration = 0; |
116 | 754k | mpctx->num_past_frames = 0; |
117 | 754k | mpctx->total_avsync_change = 0; |
118 | 754k | mpctx->last_av_difference = 0; |
119 | 754k | mpctx->mistimed_frames_total = 0; |
120 | 754k | mpctx->drop_message_shown = 0; |
121 | 754k | mpctx->display_sync_drift_dir = 0; |
122 | 754k | mpctx->display_sync_error = 0; |
123 | 754k | mpctx->display_sync_active = 0; |
124 | | |
125 | 754k | mpctx->video_status = mpctx->vo_chain ? STATUS_SYNCING : STATUS_EOF; |
126 | 754k | } |
127 | | |
128 | | void uninit_video_out(struct MPContext *mpctx) |
129 | 1.87M | { |
130 | 1.87M | uninit_video_chain(mpctx); |
131 | 1.87M | if (mpctx->video_out) { |
132 | 67.9k | vo_destroy(mpctx->video_out); |
133 | 67.9k | mpctx->video_out = NULL; |
134 | 67.9k | mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL); |
135 | 67.9k | } |
136 | 1.87M | } |
137 | | |
138 | | static void vo_chain_uninit(struct vo_chain *vo_c) |
139 | 69.4k | { |
140 | 69.4k | struct track *track = vo_c->track; |
141 | 69.4k | if (track) { |
142 | 69.4k | mp_assert(track->vo_c == vo_c); |
143 | 69.4k | track->vo_c = NULL; |
144 | 69.4k | if (vo_c->dec_src) |
145 | 69.4k | mp_assert(track->dec->f->pins[0] == vo_c->dec_src); |
146 | 69.4k | talloc_free(track->dec->f); |
147 | 69.4k | track->dec = NULL; |
148 | 69.4k | } |
149 | | |
150 | 69.4k | if (vo_c->filter_src) |
151 | 0 | mp_pin_disconnect(vo_c->filter_src); |
152 | | |
153 | 69.4k | talloc_free(vo_c->filter->f); |
154 | 69.4k | talloc_free(vo_c); |
155 | | // this does not free the VO |
156 | 69.4k | } |
157 | | |
158 | | void uninit_video_chain(struct MPContext *mpctx) |
159 | 2.53M | { |
160 | 2.53M | if (mpctx->vo_chain) { |
161 | 69.4k | reset_video_state(mpctx); |
162 | 69.4k | vo_chain_uninit(mpctx->vo_chain); |
163 | 69.4k | mpctx->vo_chain = NULL; |
164 | | |
165 | 69.4k | mpctx->video_status = STATUS_EOF; |
166 | | |
167 | 69.4k | mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL); |
168 | 69.4k | } |
169 | 2.53M | } |
170 | | |
171 | | int init_video_decoder(struct MPContext *mpctx, struct track *track) |
172 | 69.4k | { |
173 | 69.4k | mp_assert(!track->dec); |
174 | 69.4k | if (!track->stream) |
175 | 0 | goto err_out; |
176 | | |
177 | 69.4k | struct mp_filter *parent = mpctx->filter_root; |
178 | | |
179 | | // If possible, set this as parent so the decoder gets the hwdec and DR |
180 | | // interfaces. |
181 | | // Note: We rely on being able to get rid of all references to the VO by |
182 | | // destroying the VO chain. Thus, decoders not linked to vo_chain |
183 | | // must not use the hwdec context. |
184 | 69.4k | if (track->vo_c) |
185 | 69.4k | parent = track->vo_c->filter->f; |
186 | | |
187 | 69.4k | track->dec = mp_decoder_wrapper_create(parent, track->stream); |
188 | 69.4k | if (!track->dec) |
189 | 0 | goto err_out; |
190 | | |
191 | 69.4k | if (!mp_decoder_wrapper_reinit(track->dec)) |
192 | 12.0k | goto err_out; |
193 | | |
194 | 57.3k | return 1; |
195 | | |
196 | 12.0k | err_out: |
197 | 12.0k | if (track->sink) |
198 | 0 | mp_pin_disconnect(track->sink); |
199 | 12.0k | track->sink = NULL; |
200 | 12.0k | error_on_track(mpctx, track); |
201 | 12.0k | return 0; |
202 | 69.4k | } |
203 | | |
204 | | void reinit_video_chain(struct MPContext *mpctx) |
205 | 126k | { |
206 | 126k | struct track *track = mpctx->current_track[0][STREAM_VIDEO]; |
207 | 126k | if (!track || !track->stream) { |
208 | 56.8k | error_on_track(mpctx, track); |
209 | 56.8k | return; |
210 | 56.8k | } |
211 | 69.6k | reinit_video_chain_src(mpctx, track); |
212 | 69.6k | } |
213 | | |
214 | | static void filter_update_subtitles(void *ctx, double pts) |
215 | 3.73M | { |
216 | 3.73M | struct MPContext *mpctx = ctx; |
217 | | |
218 | 3.73M | if (osd_get_render_subs_in_filter(mpctx->osd)) |
219 | 945 | update_subtitles(mpctx, pts); |
220 | 3.73M | } |
221 | | |
222 | | // (track=NULL creates a blank chain, used for lavfi-complex) |
223 | | void reinit_video_chain_src(struct MPContext *mpctx, struct track *track) |
224 | 69.6k | { |
225 | 69.6k | mp_assert(!mpctx->vo_chain); |
226 | | |
227 | 69.6k | if (!mpctx->video_out) { |
228 | 68.1k | struct vo_extra ex = { |
229 | 68.1k | .input_ctx = mpctx->input, |
230 | 68.1k | .osd = mpctx->osd, |
231 | 68.1k | .encode_lavc_ctx = mpctx->encode_lavc_ctx, |
232 | 68.1k | .wakeup_cb = mp_wakeup_core_cb, |
233 | 68.1k | .wakeup_ctx = mpctx, |
234 | 68.1k | }; |
235 | 68.1k | mpctx->video_out = init_best_video_out(mpctx->global, &ex); |
236 | 68.1k | if (!mpctx->video_out) { |
237 | 235 | MP_FATAL(mpctx, "Error opening/initializing " |
238 | 235 | "the selected video_out (--vo) device.\n"); |
239 | 235 | mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED; |
240 | 235 | goto err_out; |
241 | 235 | } |
242 | 67.8k | mpctx->mouse_cursor_visible = true; |
243 | 67.8k | } |
244 | | |
245 | 69.4k | update_window_title(mpctx, true); |
246 | | |
247 | 69.4k | struct vo_chain *vo_c = talloc_zero(NULL, struct vo_chain); |
248 | 69.4k | mpctx->vo_chain = vo_c; |
249 | 69.4k | vo_c->log = mpctx->log; |
250 | 69.4k | vo_c->vo = mpctx->video_out; |
251 | 69.4k | vo_c->filter = |
252 | 69.4k | mp_output_chain_create(mpctx->filter_root, MP_OUTPUT_CHAIN_VIDEO); |
253 | 69.4k | mp_output_chain_set_vo(vo_c->filter, vo_c->vo); |
254 | 69.4k | vo_c->filter->update_subtitles = filter_update_subtitles; |
255 | 69.4k | vo_c->filter->update_subtitles_ctx = mpctx; |
256 | | |
257 | 69.4k | if (track) { |
258 | 69.4k | vo_c->track = track; |
259 | 69.4k | track->vo_c = vo_c; |
260 | 69.4k | if (!init_video_decoder(mpctx, track)) |
261 | 12.0k | goto err_out; |
262 | | |
263 | 57.3k | vo_c->dec_src = track->dec->f->pins[0]; |
264 | 57.3k | vo_c->filter->container_fps = |
265 | 57.3k | mp_decoder_wrapper_get_container_fps(track->dec); |
266 | 57.3k | vo_c->is_coverart = !!track->attached_picture; |
267 | 57.3k | vo_c->is_sparse = track->stream->still_image || vo_c->is_coverart; |
268 | | |
269 | 57.3k | if (vo_c->is_coverart) |
270 | 481 | mp_decoder_wrapper_set_coverart_flag(track->dec, true); |
271 | | |
272 | 57.3k | track->vo_c = vo_c; |
273 | 57.3k | vo_c->track = track; |
274 | | |
275 | 57.3k | mp_pin_connect(vo_c->filter->f->pins[0], vo_c->dec_src); |
276 | 57.3k | } |
277 | | |
278 | 57.3k | if (!recreate_video_filters(mpctx)) |
279 | 851 | goto err_out; |
280 | | |
281 | 56.5k | update_content_type(mpctx, track); |
282 | 56.5k | update_screensaver_state(mpctx); |
283 | | |
284 | 56.5k | vo_set_paused(vo_c->vo, get_internal_paused(mpctx)); |
285 | | |
286 | 56.5k | reset_video_state(mpctx); |
287 | 56.5k | term_osd_clear_subs(mpctx); |
288 | | |
289 | 56.5k | return; |
290 | | |
291 | 13.1k | err_out: |
292 | 13.1k | uninit_video_chain(mpctx); |
293 | 13.1k | error_on_track(mpctx, track); |
294 | 13.1k | handle_force_window(mpctx, true); |
295 | 13.1k | } |
296 | | |
297 | | // Try to refresh the video by doing a precise seek to the currently displayed |
298 | | // frame. This can go wrong in all sorts of ways, so use sparingly. |
299 | | void mp_force_video_refresh(struct MPContext *mpctx) |
300 | 0 | { |
301 | 0 | struct MPOpts *opts = mpctx->opts; |
302 | 0 | struct vo_chain *vo_c = mpctx->vo_chain; |
303 | |
304 | 0 | if (!vo_c) |
305 | 0 | return; |
306 | | |
307 | | // If not paused, the next frame should come soon enough. |
308 | 0 | if (opts->pause || mpctx->time_frame >= 0.5 || |
309 | 0 | mpctx->video_status == STATUS_EOF) |
310 | 0 | { |
311 | 0 | issue_refresh_seek(mpctx, MPSEEK_VERY_EXACT); |
312 | 0 | } |
313 | 0 | } |
314 | | |
315 | | static void check_framedrop(struct MPContext *mpctx, struct vo_chain *vo_c) |
316 | 3.73M | { |
317 | 3.73M | struct MPOpts *opts = mpctx->opts; |
318 | | // check for frame-drop: |
319 | 3.73M | if (mpctx->video_status == STATUS_PLAYING && !mpctx->paused && |
320 | 3.73M | mpctx->audio_status == STATUS_PLAYING && !ao_untimed(mpctx->ao) && |
321 | 3.73M | vo_c->track && vo_c->track->dec && (opts->frame_dropping & 2)) |
322 | 0 | { |
323 | 0 | float fps = vo_c->filter->container_fps; |
324 | | // it's a crappy heuristic; avoid getting upset by incorrect fps |
325 | 0 | if (fps <= 20 || fps >= 500) |
326 | 0 | return; |
327 | 0 | double frame_time = 1.0 / fps; |
328 | | // try to drop as many frames as we appear to be behind |
329 | 0 | mp_decoder_wrapper_set_frame_drops(vo_c->track->dec, |
330 | 0 | MPCLAMP((mpctx->last_av_difference - 0.010) / frame_time, 0, 100)); |
331 | 0 | } |
332 | 3.73M | } |
333 | | |
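
check_framedrop() above asks the decoder to drop roughly as many frames as audio appears to be ahead of video, ignoring the first 10 ms of desync, distrusting implausible container frame rates, and clamping the request. A standalone sketch of that arithmetic with invented numbers:

    #include <stdio.h>

    static double clampd(double v, double lo, double hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    int main(void)
    {
        double fps = 25.0;            // container fps; only trusted between 20 and 500
        double av_difference = 0.13;  // audio is 130 ms ahead of video (invented)

        if (fps <= 20 || fps >= 500)
            return 0;                 // bogus frame rate, don't drop anything

        double frame_time = 1.0 / fps;
        int drops = (int)clampd((av_difference - 0.010) / frame_time, 0, 100);
        printf("request %d dropped frames\n", drops);   // (0.13 - 0.01) / 0.04 -> 3
        return 0;
    }
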
334 | | /* Modify video timing to match the audio timeline. There are two main |
335 | | * reasons this is needed. First, video and audio can start from different |
336 | | * positions at beginning of file or after a seek (MPlayer starts both |
337 | | * immediately even if they have different pts). Second, the file can have |
338 | | * audio timestamps that are inconsistent with the duration of the audio |
339 | | * packets, for example two consecutive timestamp values differing by |
340 | | * one second but only a packet with enough samples for half a second |
341 | | * of playback between them. |
342 | | */ |
343 | | static void adjust_sync(struct MPContext *mpctx, double v_pts, double frame_time) |
344 | 3.71M | { |
345 | 3.71M | struct MPOpts *opts = mpctx->opts; |
346 | | |
347 | 3.71M | if (mpctx->audio_status != STATUS_PLAYING) |
348 | 3.61M | return; |
349 | | |
350 | 95.4k | double a_pts = written_audio_pts(mpctx) + opts->audio_delay - mpctx->delay; |
351 | 95.4k | double av_delay = a_pts - v_pts; |
352 | | |
353 | 95.4k | double change = av_delay * 0.1; |
354 | 95.4k | double factor = fabs(av_delay) < 0.3 ? 0.1 : 0.4; |
355 | 95.4k | double max_change = opts->default_max_pts_correction >= 0 ? |
356 | 95.4k | opts->default_max_pts_correction : frame_time * factor; |
357 | 95.4k | if (change < -max_change) |
358 | 7.53k | change = -max_change; |
359 | 87.9k | else if (change > max_change) |
360 | 13.0k | change = max_change; |
361 | 95.4k | mpctx->delay += change; |
362 | 95.4k | mpctx->total_avsync_change += change; |
363 | | |
364 | 95.4k | if (mpctx->display_sync_active) |
365 | 0 | mpctx->total_avsync_change = 0; |
366 | 95.4k | } |
367 | | |
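
adjust_sync() above corrects A/V drift gradually: the correction applied per frame is a tenth of the measured delay, but capped at a fraction of the frame duration (10% for small errors, 40% for large ones, unless a user-set maximum via default_max_pts_correction overrides it). A standalone sketch with invented timestamps:

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        double a_pts = 10.30, v_pts = 10.20;   // invented: audio is 100 ms ahead
        double frame_time = 0.04;              // 25 fps

        double av_delay = a_pts - v_pts;
        double change = av_delay * 0.1;
        double factor = fabs(av_delay) < 0.3 ? 0.1 : 0.4;
        double max_change = frame_time * factor;   // the default_max_pts_correction < 0 case
        if (change < -max_change)
            change = -max_change;
        else if (change > max_change)
            change = max_change;

        // Only 4 ms of the 100 ms error is corrected on this frame; the rest is
        // spread over the following frames.
        printf("av_delay=%.3f s, correction this frame=%.3f s\n", av_delay, change);
        return 0;
    }
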
368 | | // Make the frame at position 0 "known" to the playback logic. This must happen |
369 | | // only once for each frame, so this function has to be called carefully. |
370 | | // Generally, if position 0 gets a new frame, this must be called. |
371 | | static void handle_new_frame(struct MPContext *mpctx) |
372 | 3.73M | { |
373 | 3.73M | mp_assert(mpctx->num_next_frames >= 1); |
374 | | |
375 | 3.73M | double frame_time = 0; |
376 | 3.73M | double pts = mpctx->next_frames[0]->pts; |
377 | 3.73M | bool is_sparse = mpctx->vo_chain && mpctx->vo_chain->is_sparse; |
378 | | |
379 | 3.73M | if (mpctx->video_pts != MP_NOPTS_VALUE) { |
380 | 3.71M | frame_time = pts - mpctx->video_pts; |
381 | 3.71M | double tolerance = mpctx->demuxer->ts_resets_possible && |
382 | 3.71M | !is_sparse ? 5 : 1e4; |
383 | 3.71M | if (frame_time <= 0 || frame_time >= tolerance) { |
384 | | // Assume a discontinuity. |
385 | 310k | MP_WARN(mpctx, "Invalid video timestamp: %f -> %f\n", |
386 | 310k | mpctx->video_pts, pts); |
387 | 310k | frame_time = 0; |
388 | 310k | } |
389 | 3.71M | } |
390 | 3.73M | mpctx->time_frame += frame_time / mpctx->video_speed; |
391 | 3.73M | if (mpctx->ao_chain && !mpctx->ao_chain->delaying_audio_start) |
392 | 154k | mpctx->delay -= frame_time; |
393 | 3.73M | if (mpctx->video_status >= STATUS_PLAYING) |
394 | 3.71M | adjust_sync(mpctx, pts, frame_time); |
395 | 3.73M | MP_TRACE(mpctx, "frametime=%5.3f\n", frame_time); |
396 | 3.73M | } |
397 | | |
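
handle_new_frame() above derives the nominal frame time from consecutive PTS values and treats a non-positive or implausibly large step as a discontinuity (the 5-second tolerance applies when the demuxer says timestamp resets are possible and the track is not sparse). A small sketch of just that check, with invented timestamps:

    #include <stdio.h>

    int main(void)
    {
        double steps[][2] = {
            {1.000, 1.040},    // normal 25 fps step
            {1.040, 1.040},    // repeated timestamp -> discontinuity
            {1.080, 9000.0},   // huge jump -> discontinuity
        };
        double tolerance = 5;  // seconds (non-sparse, resets possible)

        for (int i = 0; i < 3; i++) {
            double prev = steps[i][0], next = steps[i][1];
            double frame_time = next - prev;
            if (frame_time <= 0 || frame_time >= tolerance) {
                printf("%.3f -> %.3f: invalid timestamp, using frame_time=0\n", prev, next);
                frame_time = 0;
            } else {
                printf("%.3f -> %.3f: frame_time=%.3f\n", prev, next, frame_time);
            }
        }
        return 0;
    }
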
398 | | // Remove the first frame in mpctx->next_frames |
399 | | static void shift_frames(struct MPContext *mpctx) |
400 | 3.73M | { |
401 | 3.73M | if (mpctx->num_next_frames < 1) |
402 | 0 | return; |
403 | 3.73M | talloc_free(mpctx->next_frames[0]); |
404 | 3.73M | for (int n = 0; n < mpctx->num_next_frames - 1; n++) |
405 | 0 | mpctx->next_frames[n] = mpctx->next_frames[n + 1]; |
406 | 3.73M | mpctx->num_next_frames -= 1; |
407 | 3.73M | } |
408 | | |
409 | | static bool use_video_lookahead(struct MPContext *mpctx) |
410 | 12.0M | { |
411 | 12.0M | return mpctx->video_out && |
412 | 12.0M | !(mpctx->video_out->driver->caps & VO_CAP_NORETAIN) && |
413 | 12.0M | !(mpctx->opts->untimed || (mpctx->video_out->driver->caps & VO_CAP_UNTIMED)) && |
414 | 12.0M | !mpctx->opts->video_latency_hacks; |
415 | 12.0M | } |
416 | | |
417 | | static int get_req_frames(struct MPContext *mpctx, bool eof) |
418 | 12.3M | { |
419 | | // On EOF, drain all frames. |
420 | 12.3M | if (eof) |
421 | 220k | return 1; |
422 | | |
423 | 12.0M | if (!use_video_lookahead(mpctx)) |
424 | 12.0M | return 1; |
425 | | |
426 | 0 | if (mpctx->vo_chain && mpctx->vo_chain->is_sparse) |
427 | 0 | return 1; |
428 | | |
429 | | // Normally require at least 2 frames, so we can compute a frame duration. |
430 | 0 | int min = 2; |
431 | | |
432 | | // On the first frame, output a new frame as quickly as possible. |
433 | 0 | if (mpctx->video_pts == MP_NOPTS_VALUE) |
434 | 0 | return min; |
435 | | |
436 | 0 | int req = vo_get_num_req_frames(mpctx->video_out); |
437 | 0 | return MPCLAMP(req, min, MP_ARRAY_SIZE(mpctx->next_frames) - 1); |
438 | 0 | } |
439 | | |
440 | | // Whether it's fine to call add_new_frame() now. |
441 | | static bool needs_new_frame(struct MPContext *mpctx) |
442 | 4.09M | { |
443 | 4.09M | return mpctx->num_next_frames < get_req_frames(mpctx, false); |
444 | 4.09M | } |
445 | | |
446 | | // Queue a frame to mpctx->next_frames[]. Call only if needs_new_frame() signals ok. |
447 | | static void add_new_frame(struct MPContext *mpctx, struct mp_image *frame) |
448 | 3.73M | { |
449 | 3.73M | mp_assert(mpctx->num_next_frames < MP_ARRAY_SIZE(mpctx->next_frames)); |
450 | 3.73M | mp_assert(frame); |
451 | 3.73M | mpctx->next_frames[mpctx->num_next_frames++] = frame; |
452 | 3.73M | if (mpctx->num_next_frames == 1) |
453 | 3.73M | handle_new_frame(mpctx); |
454 | 3.73M | } |
455 | | |
456 | | // Enough video filtered already to push one frame to the VO? |
457 | | // Set eof to true if no new frames are to be expected. |
458 | | static bool have_new_frame(struct MPContext *mpctx, bool eof) |
459 | 8.21M | { |
460 | 8.21M | return mpctx->num_next_frames >= get_req_frames(mpctx, eof); |
461 | 8.21M | } |
462 | | |
463 | | // Fill mpctx->next_frames[] with a newly filtered or decoded image. |
464 | | // logical_eof: is set to true if there is EOF after currently queued frames |
465 | | // returns VD_* code |
466 | | static int video_output_image(struct MPContext *mpctx, bool *logical_eof) |
467 | 4.11M | { |
468 | 4.11M | struct vo_chain *vo_c = mpctx->vo_chain; |
469 | 4.11M | bool hrseek = false; |
470 | 4.11M | double hrseek_pts = mpctx->hrseek_pts; |
471 | 4.11M | double tolerance = mpctx->hrseek_backstep ? 0 : .005; |
472 | 4.11M | if (mpctx->video_status == STATUS_SYNCING) { |
473 | 182k | hrseek = mpctx->hrseek_active; |
474 | | // playback_pts is normally only set when audio and video have started |
475 | | // playing normally. If video is in syncing mode, then this must mean |
476 | | // video was just enabled via track switching - skip to current time. |
477 | 182k | if (!hrseek && mpctx->playback_pts != MP_NOPTS_VALUE) { |
478 | 0 | hrseek = true; |
479 | 0 | hrseek_pts = mpctx->playback_pts; |
480 | 0 | } |
481 | 182k | } |
482 | | |
483 | 4.11M | if (vo_c->is_coverart) { |
484 | 1.63k | *logical_eof = true; |
485 | 1.63k | if (vo_has_frame(mpctx->video_out)) |
486 | 1.01k | return VD_EOF; |
487 | 627 | hrseek = false; |
488 | 627 | } |
489 | | |
490 | 4.11M | if (have_new_frame(mpctx, false)) |
491 | 19.2k | return VD_NEW_FRAME; |
492 | | |
493 | | // Get a new frame if we need one. |
494 | 4.09M | int r = VD_PROGRESS; |
495 | 4.09M | if (needs_new_frame(mpctx)) { |
496 | | // Filter a new frame. |
497 | 4.09M | struct mp_image *img = NULL; |
498 | 4.09M | struct mp_frame frame = mp_pin_out_read(vo_c->filter->f->pins[1]); |
499 | 4.09M | if (frame.type == MP_FRAME_NONE) { |
500 | 306k | r = vo_c->filter->got_output_eof ? VD_EOF : VD_WAIT; |
501 | 3.79M | } else if (frame.type == MP_FRAME_EOF) { |
502 | 56.3k | r = VD_EOF; |
503 | 3.73M | } else if (frame.type == MP_FRAME_VIDEO) { |
504 | 3.73M | img = frame.data; |
505 | 3.73M | } else { |
506 | 0 | MP_ERR(mpctx, "unexpected frame type %s\n", |
507 | 0 | mp_frame_type_str(frame.type)); |
508 | 0 | mp_frame_unref(&frame); |
509 | 0 | return VD_ERROR; |
510 | 0 | } |
511 | 4.09M | if (img) { |
512 | 3.73M | double endpts = get_play_end_pts(mpctx); |
513 | 3.73M | if (endpts != MP_NOPTS_VALUE) |
514 | 202 | endpts *= mpctx->play_dir; |
515 | 3.73M | if ((endpts != MP_NOPTS_VALUE && img->pts >= endpts) || |
516 | 3.73M | mpctx->max_frames == 0) |
517 | 173 | { |
518 | 173 | mp_pin_out_unread(vo_c->filter->f->pins[1], frame); |
519 | 173 | img = NULL; |
520 | 173 | r = VD_EOF; |
521 | 3.73M | } else if (hrseek && (img->pts < hrseek_pts - tolerance || |
522 | 82 | mpctx->hrseek_lastframe)) |
523 | 77 | { |
524 | | /* just skip - but save in case it was the last frame */ |
525 | 77 | mp_image_setrefp(&mpctx->saved_frame, img); |
526 | 3.73M | } else { |
527 | 3.73M | if (hrseek && mpctx->hrseek_backstep) { |
528 | 0 | if (mpctx->saved_frame) { |
529 | 0 | add_new_frame(mpctx, mpctx->saved_frame); |
530 | 0 | mpctx->saved_frame = NULL; |
531 | 0 | } else { |
532 | 0 | MP_WARN(mpctx, "Backstep failed.\n"); |
533 | 0 | } |
534 | 0 | mpctx->hrseek_backstep = false; |
535 | 0 | } |
536 | 3.73M | mp_image_unrefp(&mpctx->saved_frame); |
537 | 3.73M | add_new_frame(mpctx, img); |
538 | 3.73M | img = NULL; |
539 | 3.73M | } |
540 | 3.73M | talloc_free(img); |
541 | 3.73M | } |
542 | 4.09M | } |
543 | | |
544 | 4.09M | if (!hrseek) |
545 | 4.09M | mp_image_unrefp(&mpctx->saved_frame); |
546 | | |
547 | 4.09M | if (r == VD_EOF) { |
548 | | // If hr-seek went past EOF, use the last frame. |
549 | 220k | if (mpctx->saved_frame) |
550 | 38 | add_new_frame(mpctx, mpctx->saved_frame); |
551 | 220k | mpctx->saved_frame = NULL; |
552 | 220k | *logical_eof = true; |
553 | 220k | } |
554 | | |
555 | 4.09M | return have_new_frame(mpctx, r <= 0) ? VD_NEW_FRAME : r; |
556 | 4.09M | } |
557 | | |
558 | | static bool check_for_hwdec_fallback(struct MPContext *mpctx) |
559 | 221k | { |
560 | 221k | struct vo_chain *vo_c = mpctx->vo_chain; |
561 | | |
562 | 221k | if (!vo_c->filter->failed_output_conversion || !vo_c->track || !vo_c->track->dec) |
563 | 221k | return false; |
564 | | |
565 | 0 | if (mp_decoder_wrapper_control(vo_c->track->dec, |
566 | 0 | VDCTRL_FORCE_HWDEC_FALLBACK, NULL) != CONTROL_OK) |
567 | 0 | return false; |
568 | | |
569 | 0 | mp_output_chain_reset_harder(vo_c->filter); |
570 | 0 | return true; |
571 | 0 | } |
572 | | |
573 | | static bool check_for_forced_eof(struct MPContext *mpctx) |
574 | 221k | { |
575 | 221k | struct vo_chain *vo_c = mpctx->vo_chain; |
576 | | |
577 | 221k | if (!vo_c->track || !vo_c->track->dec) |
578 | 0 | return false; |
579 | | |
580 | 221k | struct mp_decoder_wrapper *dec = vo_c->track->dec; |
581 | 221k | bool forced_eof = false; |
582 | | |
583 | 221k | mp_decoder_wrapper_control(dec, VDCTRL_CHECK_FORCED_EOF, &forced_eof); |
584 | 221k | return forced_eof; |
585 | 221k | } |
586 | | |
587 | | /* Update avsync before a new video frame is displayed. Actually, this can be |
588 | | * called arbitrarily often before the actual display. |
589 | | * This adjusts the time of the next video frame */ |
590 | | static void update_avsync_before_frame(struct MPContext *mpctx) |
591 | 3.75M | { |
592 | 3.75M | struct MPOpts *opts = mpctx->opts; |
593 | 3.75M | struct vo *vo = mpctx->video_out; |
594 | | |
595 | 3.75M | if (mpctx->video_status < STATUS_READY) { |
596 | 25.6k | mpctx->time_frame = 0; |
597 | 3.73M | } else if (mpctx->display_sync_active || vo->opts->video_sync == VS_NONE) { |
598 | | // don't touch the timing |
599 | 3.73M | } else if (mpctx->audio_status == STATUS_PLAYING && |
600 | 3.73M | mpctx->video_status == STATUS_PLAYING && |
601 | 3.73M | !ao_untimed(mpctx->ao)) |
602 | 0 | { |
603 | 0 | double buffered_audio = ao_get_delay(mpctx->ao); |
604 | |
605 | 0 | double predicted = mpctx->delay / mpctx->video_speed + |
606 | 0 | mpctx->time_frame; |
607 | 0 | double difference = buffered_audio - predicted; |
608 | 0 | MP_STATS(mpctx, "value %f audio-diff", difference); |
609 | |
610 | 0 | if (opts->autosync) { |
611 | | /* Smooth reported playback position from AO by averaging |
612 | | * it with the value expected based on previous value and |
613 | | * time elapsed since then. May help smooth video timing |
614 | | * with audio outputs that have inaccurate position reporting. |
615 | | * This is badly implemented; the behavior of the smoothing |
616 | | * now undesirably depends on how often this code runs |
617 | | * (mainly depends on video frame rate). */ |
618 | 0 | buffered_audio = predicted + difference / opts->autosync; |
619 | 0 | } |
620 | |
621 | 0 | mpctx->time_frame = buffered_audio - mpctx->delay / mpctx->video_speed; |
622 | 3.73M | } else { |
623 | | /* If we're more than 200 ms behind the right playback |
624 | | * position, don't try to speed up display of following |
625 | | * frames to catch up; continue with default speed from |
626 | | * the current frame instead. |
627 | | * If untimed is set always output frames immediately |
628 | | * without sleeping. |
629 | | */ |
630 | 3.73M | if (mpctx->time_frame < -0.2 || opts->untimed || |
631 | 3.73M | (vo->driver->caps & VO_CAP_UNTIMED)) |
632 | 3.73M | mpctx->time_frame = 0; |
633 | 3.73M | } |
634 | 3.75M | } |
635 | | |
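
The --autosync branch above damps jitter in the audio output's reported delay by moving only 1/autosync of the way from the predicted value toward the reported one. A standalone sketch of that blend with invented numbers (autosync=30 is just an example value):

    #include <stdio.h>

    int main(void)
    {
        double predicted = 0.200;   // delay expected from our own bookkeeping
        double reported  = 0.230;   // jittery value the audio output just reported
        int autosync     = 30;      // example --autosync value

        double difference = reported - predicted;
        double smoothed = predicted + difference / autosync;

        // The 30 ms jump is reduced to a 1 ms adjustment on this iteration.
        printf("predicted=%.3f reported=%.3f smoothed=%.3f\n",
               predicted, reported, smoothed);
        return 0;
    }
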
636 | | // Update the A/V sync difference when a new video frame is being shown. |
637 | | static void update_av_diff(struct MPContext *mpctx, double offset) |
638 | 3.73M | { |
639 | 3.73M | struct MPOpts *opts = mpctx->opts; |
640 | | |
641 | 3.73M | mpctx->last_av_difference = 0; |
642 | | |
643 | 3.73M | if (mpctx->audio_status != STATUS_PLAYING || |
644 | 3.73M | mpctx->video_status != STATUS_PLAYING) |
645 | 3.64M | return; |
646 | | |
647 | 95.4k | if (mpctx->vo_chain && mpctx->vo_chain->is_sparse) |
648 | 0 | return; |
649 | | |
650 | 95.4k | double a_pos = playing_audio_pts(mpctx); |
651 | 95.4k | if (a_pos != MP_NOPTS_VALUE && mpctx->video_pts != MP_NOPTS_VALUE) { |
652 | 95.4k | mpctx->last_av_difference = a_pos - mpctx->video_pts |
653 | 95.4k | + opts->audio_delay + offset; |
654 | 95.4k | } |
655 | | |
656 | 95.4k | if (fabs(mpctx->last_av_difference) > 0.5 && !mpctx->drop_message_shown) { |
657 | 575 | MP_WARN(mpctx, "%s", av_desync_help_text); |
658 | 575 | mpctx->drop_message_shown = true; |
659 | 575 | } |
660 | 95.4k | } |
661 | | |
662 | | double calc_average_frame_duration(struct MPContext *mpctx) |
663 | 0 | { |
664 | 0 | double total = 0; |
665 | 0 | int num = 0; |
666 | 0 | for (int n = 0; n < mpctx->num_past_frames; n++) { |
667 | 0 | double dur = mpctx->past_frames[n].approx_duration; |
668 | 0 | if (dur <= 0) |
669 | 0 | continue; |
670 | 0 | total += dur; |
671 | 0 | num += 1; |
672 | 0 | } |
673 | 0 | return num > 0 ? total / num : 0; |
674 | 0 | } |
675 | | |
676 | | // Find a speed factor such that the display FPS is an integer multiple of the |
677 | | // effective video FPS. If this is not possible, try to do it for multiples, |
678 | | // which still leads to an improved end result. |
679 | | // Both parameters are durations in seconds. |
680 | | static double calc_best_speed(double vsync, double frame, |
681 | | double max_change, int max_factor) |
682 | 0 | { |
683 | 0 | double ratio = frame / vsync; |
684 | 0 | for (int factor = 1; factor <= max_factor; factor++) { |
685 | 0 | double scale = ratio * factor / rint(ratio * factor); |
686 | 0 | if (fabs(scale - 1) <= max_change) |
687 | 0 | return scale; |
688 | 0 | } |
689 | 0 | return -1; |
690 | 0 | } |
691 | | |
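
calc_best_speed() above searches for a speed scale close to 1 that makes a small number of frames cover an integer number of vsyncs. Worked example: 23.976 fps video on a 60 Hz display is about 2.5025 vsyncs per frame; a single frame cannot be matched within a small tolerance, but two frames (factor 2) give 5.005 vsyncs, so a scale of about 1.001 makes two frames line up with exactly five vsyncs. The same arithmetic standalone (the 1% tolerance and 5 factors are illustrative values in the spirit of the corresponding options):

    #include <stdio.h>
    #include <math.h>

    // Same idea as calc_best_speed() above.
    static double best_speed(double vsync, double frame, double max_change, int max_factor)
    {
        double ratio = frame / vsync;
        for (int factor = 1; factor <= max_factor; factor++) {
            double scale = ratio * factor / rint(ratio * factor);
            if (fabs(scale - 1) <= max_change)
                return scale;
        }
        return -1;
    }

    int main(void)
    {
        double vsync = 1.0 / 60.0;        // 60 Hz display
        double frame = 1001.0 / 24000.0;  // 23.976 fps video
        double s = best_speed(vsync, frame, 0.01, 5);
        printf("speed factor: %f\n", s);  // ~1.001: 2 frames == 5 vsyncs
        return 0;
    }
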
692 | | static double find_best_speed(struct MPContext *mpctx, double vsync) |
693 | 0 | { |
694 | 0 | double total = 0; |
695 | 0 | int num = 0; |
696 | 0 | for (int n = 0; n < mpctx->num_past_frames; n++) { |
697 | 0 | double dur = mpctx->past_frames[n].approx_duration; |
698 | 0 | if (dur <= 0) |
699 | 0 | continue; |
700 | 0 | double best = calc_best_speed(vsync, dur / mpctx->opts->playback_speed, |
701 | 0 | mpctx->opts->sync_max_video_change / 100, |
702 | 0 | mpctx->opts->sync_max_factor); |
703 | 0 | if (best <= 0) |
704 | 0 | continue; |
705 | 0 | total += best; |
706 | 0 | num++; |
707 | 0 | } |
708 | | // If it doesn't work, play at normal speed. |
709 | 0 | return num > 0 ? total / num : 1; |
710 | 0 | } |
711 | | |
712 | | static bool using_spdif_passthrough(struct MPContext *mpctx) |
713 | 0 | { |
714 | 0 | if (mpctx->ao_chain && mpctx->ao_chain->ao) { |
715 | 0 | int samplerate; |
716 | 0 | int format; |
717 | 0 | struct mp_chmap channels; |
718 | 0 | ao_get_format(mpctx->ao_chain->ao, &samplerate, &format, &channels); |
719 | 0 | return !af_fmt_is_pcm(format); |
720 | 0 | } |
721 | 0 | return false; |
722 | 0 | } |
723 | | |
724 | | // Compute the relative audio speed difference by taking A/V dsync into account. |
725 | | static double compute_audio_drift(struct MPContext *mpctx, double vsync) |
726 | 0 | { |
727 | | // Least-squares linear regression, using relative real time for x, and |
728 | | // audio desync for y. Assume speed didn't change for the frames we're |
729 | | // looking at for simplicity. This also should actually use the realtime |
730 | | // (minus paused time) for x, but use vsync scheduling points instead. |
731 | 0 | if (mpctx->num_past_frames <= 10) |
732 | 0 | return NAN; |
733 | 0 | int num = mpctx->num_past_frames - 1; |
734 | 0 | double sum_x = 0, sum_y = 0, sum_xy = 0, sum_xx = 0; |
735 | 0 | double x = 0; |
736 | 0 | for (int n = 0; n < num; n++) { |
737 | 0 | struct frame_info *frame = &mpctx->past_frames[n + 1]; |
738 | 0 | if (frame->num_vsyncs < 0) |
739 | 0 | return NAN; |
740 | 0 | double y = frame->av_diff; |
741 | 0 | sum_x += x; |
742 | 0 | sum_y += y; |
743 | 0 | sum_xy += x * y; |
744 | 0 | sum_xx += x * x; |
745 | 0 | x -= frame->num_vsyncs * vsync; |
746 | 0 | } |
747 | 0 | return (sum_x * sum_y - num * sum_xy) / (sum_x * sum_x - num * sum_xx); |
748 | 0 | } |
749 | | |
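
compute_audio_drift() above is an ordinary least-squares fit of the per-frame audio desync y against elapsed (vsync-aligned) time x; the value returned is the slope, i.e. the desync change per second of playback. In LaTeX notation that slope is

    \text{drift} = \frac{n \sum_i x_i y_i - \sum_i x_i \sum_i y_i}
                        {n \sum_i x_i^2 - \left(\sum_i x_i\right)^2}

The expression in the code has both numerator and denominator negated, which is the same quantity. x is stepped backwards by num_vsyncs * vsync per past frame, and NAN is returned when there are too few frames (<= 10) or a frame has no vsync count, so the caller only uses the result when isnormal() accepts it.
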
750 | | static void adjust_audio_drift_compensation(struct MPContext *mpctx, double vsync) |
751 | 0 | { |
752 | 0 | struct MPOpts *opts = mpctx->opts; |
753 | 0 | int mode = mpctx->video_out->opts->video_sync; |
754 | |
755 | 0 | if ((mode != VS_DISP_RESAMPLE && mode != VS_DISP_TEMPO) || |
756 | 0 | mpctx->audio_status != STATUS_PLAYING) |
757 | 0 | { |
758 | 0 | mpctx->speed_factor_a = mpctx->speed_factor_v; |
759 | 0 | return; |
760 | 0 | } |
761 | | |
762 | | // Try to smooth out audio timing drifts. This can happen if either |
763 | | // video isn't playing at expected speed, or audio is not playing at |
764 | | // the requested speed. Both are unavoidable. |
765 | | // The audio desync is made up of 2 parts: 1. drift due to rounding |
766 | | // errors and imperfect information, and 2. an offset, due to |
767 | | // unaligned audio/video start, or disruptive events halting audio |
768 | | // or video for a small time. |
769 | | // Instead of trying to be clever, just apply an awfully dumb drift |
770 | | // compensation with a constant factor, which does what we want. In |
771 | | // theory we could calculate the exact drift compensation needed, |
772 | | // but it likely would be wrong anyway, and we'd run into the same |
773 | | // issues again, except with more complex code. |
774 | | // 1 means drifts to positive, -1 means drifts to negative |
775 | 0 | double max_drift = vsync / 2; |
776 | 0 | double av_diff = mpctx->last_av_difference; |
777 | 0 | int new = mpctx->display_sync_drift_dir; |
778 | 0 | if (av_diff * -mpctx->display_sync_drift_dir >= 0) |
779 | 0 | new = 0; |
780 | 0 | if (fabs(av_diff) > max_drift) |
781 | 0 | new = av_diff >= 0 ? 1 : -1; |
782 | |
783 | 0 | bool change = mpctx->display_sync_drift_dir != new; |
784 | 0 | if (new || change) { |
785 | 0 | if (change) |
786 | 0 | MP_VERBOSE(mpctx, "Change display sync audio drift: %d\n", new); |
787 | 0 | mpctx->display_sync_drift_dir = new; |
788 | |
789 | 0 | double max_correct = opts->sync_max_audio_change / 100; |
790 | 0 | double audio_factor = 1 + max_correct * -mpctx->display_sync_drift_dir; |
791 | |
792 | 0 | if (new == 0) { |
793 | | // If we're resetting, actually try to be clever and pick a speed |
794 | | // which compensates the general drift we're getting. |
795 | 0 | double drift = compute_audio_drift(mpctx, vsync); |
796 | 0 | if (isnormal(drift)) { |
797 | | // other = will be multiplied with audio_factor for final speed |
798 | 0 | double other = mpctx->opts->playback_speed * mpctx->speed_factor_v; |
799 | 0 | audio_factor = (mpctx->audio_speed - drift) / other; |
800 | 0 | MP_VERBOSE(mpctx, "Compensation factor: %f\n", audio_factor); |
801 | 0 | } |
802 | 0 | } |
803 | |
804 | 0 | audio_factor = MPCLAMP(audio_factor, 1 - max_correct, 1 + max_correct); |
805 | 0 | mpctx->speed_factor_a = audio_factor * mpctx->speed_factor_v; |
806 | 0 | } |
807 | 0 | } |
808 | | |
809 | | // Manipulate frame timing for display sync, or do nothing for normal timing. |
810 | | static void handle_display_sync_frame(struct MPContext *mpctx, |
811 | | struct vo_frame *frame) |
812 | 3.73M | { |
813 | 3.73M | struct MPOpts *opts = mpctx->opts; |
814 | 3.73M | struct vo *vo = mpctx->video_out; |
815 | 3.73M | int mode = vo->opts->video_sync; |
816 | | |
817 | 3.73M | if (!mpctx->display_sync_active) { |
818 | 3.73M | mpctx->display_sync_error = 0.0; |
819 | 3.73M | mpctx->display_sync_drift_dir = 0; |
820 | 3.73M | } |
821 | | |
822 | 3.73M | mpctx->display_sync_active = false; |
823 | | |
824 | 3.73M | if (!VS_IS_DISP(mode) || !vo_is_visible(vo)) |
825 | 3.73M | return; |
826 | | |
827 | 0 | bool resample = mode == VS_DISP_RESAMPLE || mode == VS_DISP_RESAMPLE_VDROP || |
828 | 0 | mode == VS_DISP_RESAMPLE_NONE; |
829 | 0 | bool drop = mode == VS_DISP_VDROP || mode == VS_DISP_RESAMPLE || |
830 | 0 | mode == VS_DISP_ADROP || mode == VS_DISP_RESAMPLE_VDROP || |
831 | 0 | mode == VS_DISP_TEMPO; |
832 | 0 | drop &= frame->can_drop; |
833 | |
834 | 0 | if (resample && using_spdif_passthrough(mpctx)) |
835 | 0 | return; |
836 | | |
837 | 0 | double vsync = vo_get_vsync_interval(vo) / 1e9; |
838 | 0 | if (vsync <= 0) |
839 | 0 | return; |
840 | | |
841 | 0 | double approx_duration = MPMAX(0, mpctx->past_frames[0].approx_duration); |
842 | 0 | double adjusted_duration = approx_duration / opts->playback_speed; |
843 | 0 | if (adjusted_duration > 0.5) |
844 | 0 | return; |
845 | | |
846 | 0 | mpctx->speed_factor_v = 1.0; |
847 | 0 | if (mode != VS_DISP_VDROP) |
848 | 0 | mpctx->speed_factor_v = find_best_speed(mpctx, vsync); |
849 | | |
850 | | // Determine for how many vsyncs a frame should be displayed. This can be |
851 | | // e.g. 2 for 30hz on a 60hz display. It can also be 0 if the video |
852 | | // framerate is higher than the display framerate. |
853 | | // We use the speed-adjusted (i.e. real) frame duration for this. |
854 | 0 | double frame_duration = adjusted_duration / mpctx->speed_factor_v; |
855 | 0 | double ratio = (frame_duration + mpctx->display_sync_error) / vsync; |
856 | 0 | int num_vsyncs = MPMAX(lrint(ratio), 0); |
857 | 0 | double prev_error = mpctx->display_sync_error; |
858 | 0 | mpctx->display_sync_error += frame_duration - num_vsyncs * vsync; |
859 | |
860 | 0 | MP_TRACE(mpctx, "s=%f vsyncs=%d dur=%f ratio=%f err=%.20f (%f/%f)\n", |
861 | 0 | mpctx->speed_factor_v, num_vsyncs, adjusted_duration, ratio, |
862 | 0 | mpctx->display_sync_error, mpctx->display_sync_error / vsync, |
863 | 0 | mpctx->display_sync_error / frame_duration); |
864 | |
865 | 0 | double av_diff = mpctx->last_av_difference; |
866 | 0 | MP_STATS(mpctx, "value %f avdiff", av_diff); |
867 | | |
868 | | // Intended number of additional display frames to drop (<0) or repeat (>0) |
869 | 0 | int drop_repeat = 0; |
870 | | |
871 | | // If we are too far ahead/behind, attempt to drop/repeat frames. |
872 | | // Tolerate some desync to avoid frame dropping due to jitter. |
873 | 0 | if (drop && fabs(av_diff) >= 0.020 && fabs(av_diff) / vsync >= 1) |
874 | 0 | drop_repeat = -av_diff / vsync; // round towards 0 |
875 | | |
876 | | // We can only drop all frames at most. We can repeat much more frames, |
877 | | // but we still limit it to 10 times the original frames so that corner |
878 | | // cases or exceptional situations don't cause too much havoc. |
879 | 0 | drop_repeat = MPCLAMP(drop_repeat, -num_vsyncs, num_vsyncs * 10); |
880 | 0 | num_vsyncs += drop_repeat; |
881 | | |
882 | | // Always show the first frame. |
883 | 0 | if (mpctx->num_past_frames <= 1 && num_vsyncs < 1) |
884 | 0 | num_vsyncs = 1; |
885 | | |
886 | | // Estimate the video position, so we can calculate a good A/V difference |
887 | | // value below. This is used to estimate A/V drift. |
888 | 0 | double time_left = vo_get_delay(vo); |
889 | | |
890 | | // We also know that the timing is (necessarily) off, because we have to |
891 | | // align frame timings on the vsync boundaries. This is unavoidable, and |
892 | | // for the sake of the A/V sync calculations we pretend it's perfect. |
893 | 0 | time_left += prev_error; |
894 | | // Likewise, we know sync is off, but is going to be compensated. |
895 | 0 | time_left += drop_repeat * vsync; |
896 | | |
897 | | // If syncing took too long, disregard timing of the first frame. |
898 | 0 | if (mpctx->num_past_frames == 2 && time_left < 0) { |
899 | 0 | vo_discard_timing_info(vo); |
900 | 0 | time_left = 0; |
901 | 0 | } |
902 | |
903 | 0 | if (drop_repeat) { |
904 | 0 | mpctx->mistimed_frames_total += 1; |
905 | 0 | MP_STATS(mpctx, "mistimed"); |
906 | 0 | } |
907 | |
908 | 0 | mpctx->total_avsync_change = 0; |
909 | 0 | update_av_diff(mpctx, time_left * opts->playback_speed); |
910 | |
911 | 0 | mpctx->past_frames[0].num_vsyncs = num_vsyncs; |
912 | 0 | mpctx->past_frames[0].av_diff = mpctx->last_av_difference; |
913 | |
914 | 0 | if (resample || mode == VS_DISP_ADROP || mode == VS_DISP_TEMPO) { |
915 | 0 | adjust_audio_drift_compensation(mpctx, vsync); |
916 | 0 | } else { |
917 | 0 | mpctx->speed_factor_a = 1.0; |
918 | 0 | } |
919 | | |
920 | | // A bad guess, only needed when reverting to audio sync. |
921 | 0 | mpctx->time_frame = time_left; |
922 | |
923 | 0 | frame->vsync_interval = vsync; |
924 | 0 | frame->vsync_offset = -prev_error; |
925 | 0 | frame->ideal_frame_duration = frame_duration; |
926 | 0 | frame->ideal_frame_vsync = (-prev_error / frame_duration) * approx_duration; |
927 | 0 | frame->ideal_frame_vsync_duration = (vsync / frame_duration) * approx_duration; |
928 | 0 | frame->num_vsyncs = num_vsyncs; |
929 | 0 | frame->display_synced = true; |
930 | 0 | frame->approx_duration = approx_duration; |
931 | | |
932 | | // Adjust frame virtual vsyncs by the repeat count |
933 | 0 | if (drop_repeat > 0) |
934 | 0 | frame->ideal_frame_vsync_duration /= drop_repeat; |
935 | |
936 | 0 | mpctx->display_sync_active = true; |
937 | | // Try to avoid audio underruns that may occur if we update |
938 | | // the playback speed while in the STATUS_SYNCING state. |
939 | 0 | if (mpctx->video_status != STATUS_SYNCING) |
940 | 0 | update_playback_speed(mpctx); |
941 | |
942 | 0 | MP_STATS(mpctx, "value %f aspeed", mpctx->speed_factor_a - 1); |
943 | 0 | MP_STATS(mpctx, "value %f vspeed", mpctx->speed_factor_v - 1); |
944 | 0 | } |
945 | | |
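
The vsync accounting above is the core of display-sync mode: each frame's speed-adjusted duration is expressed in vsyncs, rounded, and the rounding error is carried into the next frame so it cancels out over time. For 24 fps content on a 60 Hz display the ideal value is 2.5 vsyncs per frame, so the rounding alternates between 2 and 3. A standalone sketch of just that loop:

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        double vsync = 1.0 / 60.0;           // 60 Hz display
        double frame_duration = 1.0 / 24.0;  // 24 fps video: 2.5 vsyncs per frame
        double error = 0;                    // carried rounding error

        for (int i = 0; i < 6; i++) {
            double ratio = (frame_duration + error) / vsync;
            long num_vsyncs = lrint(ratio);
            if (num_vsyncs < 0)
                num_vsyncs = 0;
            error += frame_duration - num_vsyncs * vsync;
            printf("frame %d: %ld vsyncs (carried error %+.4f s)\n", i, num_vsyncs, error);
        }
        return 0;
    }

Whether the very first 2.5 rounds to 2 or 3 depends on the floating-point rounding mode; either way the carried error keeps the long-run average at 2.5 vsyncs per frame.
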
946 | | static void schedule_frame(struct MPContext *mpctx, struct vo_frame *frame) |
947 | 3.73M | { |
948 | 3.73M | handle_display_sync_frame(mpctx, frame); |
949 | | |
950 | 3.73M | if (mpctx->num_past_frames > 1 && |
951 | 3.73M | ((mpctx->past_frames[1].num_vsyncs >= 0) != mpctx->display_sync_active)) |
952 | 0 | { |
953 | 0 | MP_VERBOSE(mpctx, "Video sync mode %s.\n", |
954 | 0 | mpctx->display_sync_active ? "enabled" : "disabled"); |
955 | 0 | } |
956 | | |
957 | 3.73M | if (!mpctx->display_sync_active) { |
958 | 3.73M | mpctx->speed_factor_a = 1.0; |
959 | 3.73M | mpctx->speed_factor_v = 1.0; |
960 | 3.73M | update_playback_speed(mpctx); |
961 | | |
962 | 3.73M | update_av_diff(mpctx, mpctx->time_frame > 0 ? |
963 | 3.73M | mpctx->time_frame * mpctx->video_speed : 0); |
964 | 3.73M | } |
965 | 3.73M | } |
966 | | |
967 | | // Determine the mpctx->past_frames[0] frame duration. |
968 | | static void calculate_frame_duration(struct MPContext *mpctx) |
969 | 3.73M | { |
970 | 3.73M | struct vo_chain *vo_c = mpctx->vo_chain; |
971 | 3.73M | mp_assert(mpctx->num_past_frames >= 1 && mpctx->num_next_frames >= 1); |
972 | | |
973 | 3.73M | double demux_duration = vo_c->filter->container_fps > 0 |
974 | 3.73M | ? 1.0 / vo_c->filter->container_fps : -1; |
975 | 3.73M | double duration = demux_duration; |
976 | | |
977 | 3.73M | if (mpctx->num_next_frames >= 2) { |
978 | 0 | double pts0 = mpctx->next_frames[0]->pts; |
979 | 0 | double pts1 = mpctx->next_frames[1]->pts; |
980 | 0 | if (pts0 != MP_NOPTS_VALUE && pts1 != MP_NOPTS_VALUE && pts1 >= pts0) |
981 | 0 | duration = pts1 - pts0; |
982 | 0 | } |
983 | | |
984 | | // The following code tries to compensate for rounded Matroska timestamps |
985 | | // by "unrounding" frame durations, or if not possible, approximating them. |
986 | | // These formats usually round on 1ms. Some muxers do this incorrectly, |
987 | | // and might go off by 1ms more, and compensate for it later by an equal |
988 | | // rounding error into the opposite direction. |
989 | 3.73M | double tolerance = 0.001 * 3 + 0.0001; |
990 | | |
991 | 3.73M | double total = 0; |
992 | 3.73M | int num_dur = 0; |
993 | 347M | for (int n = 1; n < mpctx->num_past_frames; n++) { |
994 | | // Eliminate likely outliers using a really dumb heuristic. |
995 | 344M | double dur = mpctx->past_frames[n].duration; |
996 | 344M | if (dur <= 0 || fabs(dur - duration) >= tolerance) |
997 | 81.6k | break; |
998 | 344M | total += dur; |
999 | 344M | num_dur += 1; |
1000 | 344M | } |
1001 | 3.73M | double approx_duration = num_dur > 0 ? total / num_dur : duration; |
1002 | | |
1003 | | // Try if the demuxer frame rate fits - if so, just take it. |
1004 | 3.73M | if (demux_duration > 0) { |
1005 | | // Note that even if each timestamp is within rounding tolerance, it |
1006 | | // could literally not add up (e.g. if demuxer FPS is rounded itself). |
1007 | 3.64M | if (fabs(duration - demux_duration) < tolerance && |
1008 | 3.64M | fabs(total - demux_duration * num_dur) < tolerance && |
1009 | 3.64M | (num_dur >= 16 || num_dur >= mpctx->num_past_frames - 4)) |
1010 | 3.64M | { |
1011 | 3.64M | approx_duration = demux_duration; |
1012 | 3.64M | } |
1013 | 3.64M | } |
1014 | | |
1015 | 3.73M | mpctx->past_frames[0].duration = duration; |
1016 | 3.73M | mpctx->past_frames[0].approx_duration = approx_duration; |
1017 | | |
1018 | 3.73M | MP_STATS(mpctx, "value %f frame-duration", MPMAX(0, duration)); |
1019 | 3.73M | MP_STATS(mpctx, "value %f frame-duration-approx", MPMAX(0, approx_duration)); |
1020 | 3.73M | } |
1021 | | |
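
The "unrounding" described above can be reproduced in isolation: timestamps rounded to 1 ms make 23.976 fps frames look like alternating 41/42 ms durations, but averaging recent durations and snapping to the container rate when it fits within the tolerance recovers the true ~41.7 ms. A simplified standalone sketch (it averages against the nominal rate directly, unlike the code above, which compares against the most recent frame-to-frame duration):

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        // Durations as they appear after 1 ms timestamp rounding (23.976 fps content).
        double durations[] = {0.042, 0.041, 0.042, 0.042, 0.041, 0.042};
        int n = sizeof(durations) / sizeof(durations[0]);

        double nominal = 1001.0 / 24000.0;      // container frame duration, ~0.0417 s
        double tolerance = 0.001 * 3 + 0.0001;  // same slack as above

        double total = 0;
        int num = 0;
        for (int i = 0; i < n; i++) {
            if (durations[i] <= 0 || fabs(durations[i] - nominal) >= tolerance)
                break;                          // likely outlier; stop averaging
            total += durations[i];
            num++;
        }

        double approx = num ? total / num : nominal;
        if (fabs(approx - nominal) < tolerance)
            approx = nominal;                   // the demuxer frame rate fits; take it
        printf("approx duration: %.6f s (nominal %.6f s)\n", approx, nominal);
        return 0;
    }
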
1022 | | static void apply_video_crop(struct MPContext *mpctx, struct vo *vo) |
1023 | 3.75M | { |
1024 | 7.51M | for (int n = 0; n < mpctx->num_next_frames; n++) { |
1025 | 3.75M | struct m_geometry *gm = &vo->opts->video_crop; |
1026 | 3.75M | struct mp_image_params p = mpctx->next_frames[n]->params; |
1027 | 3.75M | if (gm->xy_valid || (gm->wh_valid && (gm->w > 0 || gm->h > 0))) |
1028 | 0 | { |
1029 | 0 | m_rect_apply(&p.crop, p.w, p.h, gm); |
1030 | 0 | } |
1031 | | |
1032 | 3.75M | if (p.crop.x1 == 0 && p.crop.y1 == 0) |
1033 | 0 | return; |
1034 | | |
1035 | 3.75M | if (!mp_image_crop_valid(&p)) { |
1036 | 0 | char *str = m_option_type_rect.print(NULL, gm); |
1037 | 0 | MP_WARN(vo, "Ignoring invalid --video-crop=%s for %dx%d image\n", |
1038 | 0 | str, p.w, p.h); |
1039 | 0 | talloc_free(str); |
1040 | 0 | *gm = (struct m_geometry){0}; |
1041 | 0 | mp_property_do("video-crop", M_PROPERTY_SET, gm, mpctx); |
1042 | 0 | return; |
1043 | 0 | } |
1044 | 3.75M | mpctx->next_frames[n]->params.crop = p.crop; |
1045 | 3.75M | } |
1046 | 3.75M | } |
1047 | | |
1048 | | void write_video(struct MPContext *mpctx) |
1049 | 5.65M | { |
1050 | 5.65M | struct MPOpts *opts = mpctx->opts; |
1051 | | |
1052 | 5.65M | if (!mpctx->vo_chain) |
1053 | 1.50M | return; |
1054 | 4.14M | struct track *track = mpctx->vo_chain->track; |
1055 | 4.14M | struct vo_chain *vo_c = mpctx->vo_chain; |
1056 | 4.14M | struct vo *vo = vo_c->vo; |
1057 | | |
1058 | 4.14M | if (vo_c->filter->reconfig_happened) { |
1059 | 45.9k | mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL); |
1060 | 45.9k | vo_c->filter->reconfig_happened = false; |
1061 | 45.9k | } |
1062 | | |
1063 | | // Actual playback starts when both audio and video are ready. |
1064 | 4.14M | if (mpctx->video_status == STATUS_READY) |
1065 | 18.3k | return; |
1066 | | |
1067 | 4.12M | if (mpctx->paused && mpctx->video_status >= STATUS_READY) |
1068 | 6.54k | return; |
1069 | | |
1070 | 4.11M | bool logical_eof = false; |
1071 | 4.11M | int r = video_output_image(mpctx, &logical_eof); |
1072 | 4.11M | MP_TRACE(mpctx, "video_output_image: r=%d/eof=%d/st=%s\n", r, logical_eof, |
1073 | 4.11M | mp_status_str(mpctx->video_status)); |
1074 | | |
1075 | 4.11M | if (r < 0) |
1076 | 0 | goto error; |
1077 | | |
1078 | 4.11M | if (r == VD_WAIT) { |
1079 | | // Heuristic to detect underruns. |
1080 | 141k | if (mpctx->video_status == STATUS_PLAYING && !vo_still_displaying(vo) && |
1081 | 141k | !vo_c->underrun_signaled) |
1082 | 6.62k | { |
1083 | 6.62k | vo_c->underrun = true; |
1084 | 6.62k | vo_c->underrun_signaled = true; |
1085 | 6.62k | } |
1086 | | // Demuxer will wake us up for more packets to decode. |
1087 | 141k | return; |
1088 | 141k | } |
1089 | | |
1090 | 3.97M | if (r == VD_EOF) { |
1091 | 221k | if (check_for_hwdec_fallback(mpctx)) |
1092 | 0 | return; |
1093 | 221k | if (check_for_forced_eof(mpctx)) { |
1094 | 1 | uninit_video_chain(mpctx); |
1095 | 1 | handle_force_window(mpctx, true); |
1096 | 1 | return; |
1097 | 1 | } |
1098 | 221k | if (vo_c->filter->failed_output_conversion) |
1099 | 0 | goto error; |
1100 | | |
1101 | 221k | mpctx->delay = 0; |
1102 | 221k | mpctx->last_av_difference = 0; |
1103 | | |
1104 | 221k | if (mpctx->video_status <= STATUS_PLAYING) { |
1105 | 56.3k | mpctx->video_status = STATUS_DRAINING; |
1106 | 56.3k | get_relative_time(mpctx); |
1107 | 56.3k | if (vo_c->is_sparse && !mpctx->ao_chain) { |
1108 | 179 | MP_VERBOSE(mpctx, "assuming this is an image\n"); |
1109 | 179 | mpctx->time_frame += opts->image_display_duration; |
1110 | 56.1k | } else if (mpctx->last_frame_duration > 0) { |
1111 | 13.4k | MP_VERBOSE(mpctx, "using demuxer frame duration for last frame\n"); |
1112 | 13.4k | mpctx->time_frame += mpctx->last_frame_duration; |
1113 | 42.7k | } else { |
1114 | 42.7k | mpctx->time_frame = 0; |
1115 | 42.7k | } |
1116 | | // Encode mode can't honor this; it'll only delay finishing. |
1117 | 56.3k | if (mpctx->encode_lavc_ctx) |
1118 | 0 | mpctx->time_frame = 0; |
1119 | 56.3k | } |
1120 | | |
1121 | | // Wait for the VO to signal actual EOF, then exit if the frame timer |
1122 | | // has expired. |
1123 | 221k | bool has_frame = vo_has_frame(vo); // maybe not configured |
1124 | 221k | if (mpctx->video_status == STATUS_DRAINING && |
1125 | 221k | (vo_is_ready_for_frame(vo, -1) || !has_frame)) |
1126 | 198k | { |
1127 | 198k | mpctx->time_frame -= get_relative_time(mpctx); |
1128 | 198k | mp_set_timeout(mpctx, mpctx->time_frame); |
1129 | 198k | if (mpctx->time_frame <= 0 || !has_frame) { |
1130 | 56.3k | MP_VERBOSE(mpctx, "video EOF reached\n"); |
1131 | 56.3k | mpctx->video_status = STATUS_EOF; |
1132 | 56.3k | } |
1133 | 198k | } |
1134 | | |
1135 | | // Avoid pointlessly spamming the logs every frame. |
1136 | 221k | if (!vo_c->is_sparse || !vo_c->sparse_eof_signalled) { |
1137 | 221k | MP_DBG(mpctx, "video EOF (status=%d)\n", mpctx->video_status); |
1138 | 221k | vo_c->sparse_eof_signalled = vo_c->is_sparse; |
1139 | 221k | } |
1140 | 221k | return; |
1141 | 221k | } |
1142 | | |
1143 | 3.75M | if (mpctx->video_status > STATUS_PLAYING) |
1144 | 6 | mpctx->video_status = STATUS_PLAYING; |
1145 | | |
1146 | 3.75M | if (r != VD_NEW_FRAME) { |
1147 | 77 | mp_wakeup_core(mpctx); // Decode more in next iteration. |
1148 | 77 | return; |
1149 | 77 | } |
1150 | | |
1151 | 3.75M | if (logical_eof && !mpctx->num_past_frames && mpctx->num_next_frames == 1 && |
1152 | 3.75M | use_video_lookahead(mpctx) && !vo_c->is_sparse) |
1153 | 0 | { |
1154 | | // Too much danger to accidentally mark video as sparse when e.g. |
1155 | | // seeking exactly to the last frame, so as a heuristic, do this only |
1156 | | // if it looks like the "first" video frame (unreliable, but often |
1157 | | // works out well). Helps with seeking with single-image video tracks, |
1158 | | // as well as detecting whether as video track is really an image. |
1159 | | // as well as detecting whether a video track is really an image. |
1160 | 0 | MP_VERBOSE(mpctx, "assuming single-image video stream\n"); |
1161 | 0 | vo_c->is_sparse = true; |
1162 | 0 | } |
1163 | 0 | } |
1164 | | |
1165 | | // Inject vo crop to notify and reconfig if needed |
1166 | 3.75M | apply_video_crop(mpctx, vo); |
1167 | | |
1168 | | // Filter output is different from VO input? |
1169 | 3.75M | struct mp_image_params *p = &mpctx->next_frames[0]->params; |
1170 | 3.75M | if (!vo->params || !mp_image_params_static_equal(p, vo->params)) { |
1171 | | // Changing config deletes the current frame; wait until it's finished. |
1172 | 45.8k | if (vo_still_displaying(vo)) { |
1173 | 0 | vo_request_wakeup_on_done(vo); |
1174 | 0 | return; |
1175 | 0 | } |
1176 | | |
1177 | 45.8k | const struct vo_driver *info = mpctx->video_out->driver; |
1178 | 45.8k | char extra[20] = {0}; |
1179 | 45.8k | if (p->p_w != p->p_h) { |
1180 | 4.51k | int d_w, d_h; |
1181 | 4.51k | mp_image_params_get_dsize(p, &d_w, &d_h); |
1182 | 4.51k | snprintf(extra, sizeof(extra), " => %dx%d", d_w, d_h); |
1183 | 4.51k | } |
1184 | 45.8k | char sfmt[20] = {0}; |
1185 | 45.8k | if (p->hw_subfmt) |
1186 | 0 | snprintf(sfmt, sizeof(sfmt), "[%s]", mp_imgfmt_to_name(p->hw_subfmt)); |
1187 | 45.8k | MP_INFO(mpctx, "VO: [%s] %dx%d%s %s%s\n", |
1188 | 45.8k | info->name, p->w, p->h, extra, mp_imgfmt_to_name(p->imgfmt), sfmt); |
1189 | 45.8k | MP_VERBOSE(mpctx, "VO: Description: %s\n", info->description); |
1190 | | |
1191 | 45.8k | int vo_r = vo_reconfig2(vo, mpctx->next_frames[0]); |
1192 | 45.8k | if (vo_r < 0) { |
1193 | 0 | mpctx->error_playing = MPV_ERROR_VO_INIT_FAILED; |
1194 | 0 | goto error; |
1195 | 0 | } |
1196 | 45.8k | mp_notify(mpctx, MPV_EVENT_VIDEO_RECONFIG, NULL); |
1197 | 3.71M | } else { |
1198 | | // Update parameters that don't require reconfiguring the VO. |
1199 | 3.71M | mp_mutex_lock(&vo->params_mutex); |
1200 | 3.71M | mp_image_params_update_dynamic(vo->params, p, vo->has_peak_detect_values); |
1201 | 3.71M | mp_mutex_unlock(&vo->params_mutex); |
1202 | 3.71M | } |
1203 | | |
1204 | 3.75M | mpctx->time_frame -= get_relative_time(mpctx); |
1205 | 3.75M | update_avsync_before_frame(mpctx); |
1206 | | |
1207 | | // Enforce timing subtitles to video frames. |
1208 | 3.75M | osd_set_force_video_pts(mpctx->osd, MP_NOPTS_VALUE); |
1209 | | |
1210 | 3.75M | if (!update_subtitles(mpctx, mpctx->next_frames[0]->pts)) { |
1211 | 0 | MP_VERBOSE(mpctx, "Video frame delayed due to waiting on subtitles.\n"); |
1212 | 0 | return; |
1213 | 0 | } |
1214 | | |
1215 | 3.75M | double time_frame = MPMAX(mpctx->time_frame, -1); |
1216 | 3.75M | int64_t pts = mp_time_ns() + (int64_t)(time_frame * 1e9); |
1217 | | |
1218 | | // wait until VO wakes us up to get more frames |
1219 | | // (NB: in theory, the 1st frame after display sync mode change uses the |
1220 | | // wrong waiting mode) |
1221 | 3.75M | if (!vo_is_ready_for_frame(vo, mpctx->display_sync_active ? -1 : pts)) |
1222 | 19.2k | return; |
1223 | | |
1224 | 3.73M | mp_assert(mpctx->num_next_frames >= 1); |
1225 | | |
1226 | 3.73M | if (mpctx->num_past_frames >= MAX_NUM_VO_PTS) |
1227 | 3.39M | mpctx->num_past_frames--; |
1228 | 3.73M | MP_TARRAY_INSERT_AT(mpctx, mpctx->past_frames, mpctx->num_past_frames, 0, |
1229 | 3.73M | (struct frame_info){0}); |
1230 | 3.73M | mpctx->past_frames[0] = (struct frame_info){ |
1231 | 3.73M | .pts = mpctx->next_frames[0]->pts, |
1232 | 3.73M | .num_vsyncs = -1, |
1233 | 3.73M | }; |
1234 | 3.73M | calculate_frame_duration(mpctx); |
1235 | | |
1236 | 3.73M | int req = vo_get_num_req_frames(mpctx->video_out); |
1237 | 3.73M | mp_assert(req >= 1 && req <= VO_MAX_REQ_FRAMES); |
1238 | 3.73M | struct vo_frame dummy = { |
1239 | 3.73M | .pts = pts, |
1240 | 3.73M | .duration = -1, |
1241 | 3.73M | .still = mpctx->step_frames > 0, |
1242 | 3.73M | .can_drop = opts->frame_dropping & 1, |
1243 | 3.73M | .num_frames = MPMIN(mpctx->num_next_frames, req), |
1244 | 3.73M | .num_vsyncs = 1, |
1245 | 3.73M | }; |
1246 | 7.47M | for (int n = 0; n < dummy.num_frames; n++) |
1247 | 3.73M | dummy.frames[n] = mpctx->next_frames[n]; |
1248 | 3.73M | struct vo_frame *frame = vo_frame_ref(&dummy); |
1249 | | |
1250 | 3.73M | double diff = mpctx->past_frames[0].approx_duration; |
1251 | 3.73M | if (opts->untimed || (vo->driver->caps & VO_CAP_UNTIMED)) |
1252 | 3.73M | diff = -1; // disable frame dropping and aspects of frame timing |
1253 | 3.73M | if (diff >= 0) { |
1254 | 0 | diff /= mpctx->video_speed; |
1255 | 0 | frame->duration = MP_TIME_S_TO_NS(MPCLAMP(diff, 0, 10)); |
1256 | 0 | } |
1257 | | |
1258 | 3.73M | mpctx->video_pts = mpctx->next_frames[0]->pts; |
1259 | 3.73M | mpctx->last_frame_duration = |
1260 | 3.73M | mpctx->next_frames[0]->pkt_duration / mpctx->video_speed; |
1261 | | |
1262 | 3.73M | shift_frames(mpctx); |
1263 | | |
1264 | 3.73M | schedule_frame(mpctx, frame); |
1265 | | |
1266 | 3.73M | mpctx->osd_force_update = true; |
1267 | 3.73M | update_osd_msg(mpctx); |
1268 | | |
1269 | 3.73M | vo_queue_frame(vo, frame); |
1270 | | |
1271 | 3.73M | check_framedrop(mpctx, vo_c); |
1272 | | |
1273 | | // The frames were shifted down; "initialize" the new first entry. |
1274 | 3.73M | if (mpctx->num_next_frames >= 1) |
1275 | 0 | handle_new_frame(mpctx); |
1276 | | |
1277 | 3.73M | mpctx->shown_vframes++; |
1278 | 3.73M | if (mpctx->video_status < STATUS_PLAYING) { |
1279 | 25.6k | mpctx->video_status = STATUS_READY; |
1280 | | // After a seek, make sure to wait until the first frame is visible. |
1281 | 25.6k | if (!opts->video_latency_hacks) { |
1282 | 25.6k | vo_wait_frame(vo); |
1283 | 25.6k | MP_VERBOSE(mpctx, "first video frame after restart shown\n"); |
1284 | 25.6k | } |
1285 | 25.6k | } |
1286 | | |
1287 | 3.73M | mp_notify(mpctx, MPV_EVENT_TICK, NULL); |
1288 | | |
1289 | | // hr-seek past EOF -> returns last frame, but terminates playback. The |
1290 | | // early EOF is needed to trigger the exit before the next seek is executed. |
1291 | | // Always using early EOF breaks other cases, like images. |
1292 | 3.73M | if (logical_eof && !mpctx->num_next_frames && mpctx->ao_chain) |
1293 | 369 | mpctx->video_status = STATUS_EOF; |
1294 | | |
1295 | 3.73M | if (mpctx->video_status != STATUS_EOF) { |
1296 | 3.73M | if (mpctx->step_frames > 0) { |
1297 | 0 | mpctx->step_frames--; |
1298 | 0 | if (!mpctx->step_frames) { |
1299 | 0 | set_pause_state(mpctx, true); |
1300 | 0 | step_frame_mute(mpctx, false); |
1301 | 0 | } |
1302 | 0 | } |
1303 | 3.73M | if (mpctx->max_frames == 0 && !mpctx->stop_play) |
1304 | 0 | mpctx->stop_play = AT_END_OF_FILE; |
1305 | 3.73M | if (mpctx->max_frames > 0) |
1306 | 75 | mpctx->max_frames--; |
1307 | 3.73M | } |
1308 | | |
1309 | 3.73M | vo_c->underrun_signaled = false; |
1310 | | |
1311 | 3.73M | if (mpctx->video_status == STATUS_EOF || mpctx->stop_play) |
1312 | 369 | mp_wakeup_core(mpctx); |
1313 | 3.73M | return; |
1314 | | |
1315 | 0 | error: |
1316 | 0 | MP_FATAL(mpctx, "Could not initialize video chain.\n"); |
1317 | 0 | uninit_video_chain(mpctx); |
1318 | 0 | error_on_track(mpctx, track); |
1319 | 0 | handle_force_window(mpctx, true); |
1320 | 0 | mp_wakeup_core(mpctx); |
1321 | 0 | } |