Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * This file is part of mpv. |
3 | | * |
4 | | * mpv is free software; you can redistribute it and/or |
5 | | * modify it under the terms of the GNU Lesser General Public |
6 | | * License as published by the Free Software Foundation; either |
7 | | * version 2.1 of the License, or (at your option) any later version. |
8 | | * |
9 | | * mpv is distributed in the hope that it will be useful, |
10 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | | * GNU Lesser General Public License for more details. |
13 | | * |
14 | | * You should have received a copy of the GNU Lesser General Public |
15 | | * License along with mpv. If not, see <http://www.gnu.org/licenses/>. |
16 | | */ |
17 | | |
18 | | #include <assert.h> |
19 | | #include <math.h> |
20 | | #include <stdatomic.h> |
21 | | #include <stdbool.h> |
22 | | #include <stdio.h> |
23 | | #include <stdlib.h> |
24 | | #include <string.h> |
25 | | |
26 | | #include "mpv_talloc.h" |
27 | | |
28 | | #include "config.h" |
29 | | #include "osdep/timer.h" |
30 | | #include "osdep/threads.h" |
31 | | #include "misc/dispatch.h" |
32 | | #include "misc/rendezvous.h" |
33 | | #include "options/options.h" |
34 | | #include "misc/bstr.h" |
35 | | #include "vo.h" |
36 | | #include "aspect.h" |
37 | | #include "dr_helper.h" |
38 | | #include "input/input.h" |
39 | | #include "options/m_config.h" |
40 | | #include "common/msg.h" |
41 | | #include "common/global.h" |
42 | | #include "common/stats.h" |
43 | | #include "video/hwdec.h" |
44 | | #include "video/mp_image.h" |
45 | | #include "sub/osd.h" |
46 | | #include "osdep/io.h" |
47 | | #include "osdep/threads.h" |
48 | | |
49 | | extern const struct vo_driver video_out_mediacodec_embed; |
50 | | extern const struct vo_driver video_out_x11; |
51 | | extern const struct vo_driver video_out_vdpau; |
52 | | extern const struct vo_driver video_out_xv; |
53 | | extern const struct vo_driver video_out_gpu; |
54 | | extern const struct vo_driver video_out_gpu_next; |
55 | | extern const struct vo_driver video_out_libmpv; |
56 | | extern const struct vo_driver video_out_null; |
57 | | extern const struct vo_driver video_out_image; |
58 | | extern const struct vo_driver video_out_lavc; |
59 | | extern const struct vo_driver video_out_caca; |
60 | | extern const struct vo_driver video_out_drm; |
61 | | extern const struct vo_driver video_out_direct3d; |
62 | | extern const struct vo_driver video_out_sdl; |
63 | | extern const struct vo_driver video_out_vaapi; |
64 | | extern const struct vo_driver video_out_dmabuf_wayland; |
65 | | extern const struct vo_driver video_out_wlshm; |
66 | | extern const struct vo_driver video_out_tct; |
67 | | extern const struct vo_driver video_out_sixel; |
68 | | extern const struct vo_driver video_out_kitty; |
69 | | |
// Table of all compiled-in VO drivers, in autoprobe priority order.
// init_best_video_out() probes entries in this order and stops before
// video_out_null; entries after it must be requested explicitly.
static const struct vo_driver *const video_out_drivers[] =
{
    // high-quality and well-supported VOs first:
    &video_out_gpu_next,
    &video_out_gpu,

#if HAVE_VDPAU
    &video_out_vdpau,
#endif
#if HAVE_DIRECT3D
    &video_out_direct3d,
#endif
#if HAVE_WAYLAND && HAVE_MEMFD_CREATE
    &video_out_wlshm,
#endif
#if HAVE_XV
    &video_out_xv,
#endif
#if HAVE_ANDROID
    &video_out_mediacodec_embed,
#endif
#if HAVE_SDL2_VIDEO
    &video_out_sdl,
#endif
#if HAVE_DMABUF_WAYLAND
    &video_out_dmabuf_wayland,
#endif
#if HAVE_VAAPI_X11 && HAVE_GPL
    &video_out_vaapi,
#endif
#if HAVE_X11
    &video_out_x11,
#endif
    &video_out_libmpv,
    &video_out_null,

    // should not be auto-selected
    &video_out_image,
    &video_out_tct,
#if HAVE_CACA
    &video_out_caca,
#endif
#if HAVE_DRM
    &video_out_drm,
#endif
#if HAVE_SIXEL
    &video_out_sixel,
#endif
    &video_out_kitty,
    &video_out_lavc,
};
121 | | |
// Private per-VO state, shared between the player core and the VO thread.
struct vo_internal {
    mp_thread thread;                   // the VO thread (runs vo_thread())
    struct mp_dispatch_queue *dispatch; // runs callbacks on the VO thread
    struct dr_helper *dr_helper;        // direct-rendering helper (opaque here)

    // --- The following fields are protected by lock
    mp_mutex lock;
    mp_cond wakeup;

    bool need_wakeup;   // makes the next wait_vo()/vo_wait_default() return at once
    bool terminate;     // request the VO thread to exit

    bool hasframe;          // a frame was queued since the last forget_frames()
    bool hasframe_rendered; // NOTE(review): presumably set once a frame was
                            // actually drawn; the setter is outside this chunk
    bool request_redraw; // redraw request from player to VO
    bool want_redraw;    // redraw request from VO to player
    bool send_reset;     // send VOCTRL_RESET
    bool paused;
    bool visible;        // whether the VO reports the window as visible
    bool wakeup_on_done; // wake the core once still_displaying() turns false
    int queued_events;   // event mask for the user
    int internal_events; // event mask for us

    double nominal_vsync_interval; // ns, derived from display_fps

    double vsync_interval;          // interval in use (nominal or estimated), ns
    int64_t *vsync_samples;         // recent measured intervals, newest at [0]
    int num_vsync_samples;          // number of valid entries in vsync_samples
    int64_t num_total_vsync_samples;// lifetime count of recorded samples
    int64_t prev_vsync;             // timestamp of the most recent vsync
    double base_vsync;              // ideal (drift-corrected) vsync phase
    int drop_point;                 // samples recorded since the last assumed drop
    double estimated_vsync_interval;// average of vsync_samples
    double estimated_vsync_jitter;  // stddev relative to vsync_interval; -1 = unknown
    bool expecting_vsync;           // display-synced mode is active
    int64_t num_successive_vsyncs;  // vsyncs since the last discontinuity

    int64_t flip_queue_offset; // queue flip events at most this much in advance
    int64_t timing_offset; // same (but from options; not VO configured)

    int64_t delayed_count; // count of assumed delayed/skipped vsyncs
    int64_t drop_count;    // count of dropped frames
    bool dropped_frame; // the previous frame was dropped

    struct vo_frame *current_frame; // last frame queued to the VO

    int64_t wakeup_pts; // time at which to pull frame from decoder

    bool rendering; // true if an image is being rendered
    struct vo_frame *frame_queued; // should be drawn next
    int req_frames; // VO's requested value of num_frames
    uint64_t current_frame_id; // bumped for every queued frame

    double display_fps;          // FPS currently assumed for display sync
    double reported_display_fps; // FPS last reported by the VO driver

    struct stats_ctx *stats;
};
180 | | |
181 | | extern const struct m_sub_options gl_video_conf; |
182 | | |
183 | | static void forget_frames(struct vo *vo); |
184 | | static MP_THREAD_VOID vo_thread(void *ptr); |
185 | | |
186 | | static bool get_desc(struct m_obj_desc *dst, int index) |
187 | 3.58M | { |
188 | 3.58M | if (index >= MP_ARRAY_SIZE(video_out_drivers)) |
189 | 200k | return false; |
190 | 3.38M | const struct vo_driver *vo = video_out_drivers[index]; |
191 | 3.38M | *dst = (struct m_obj_desc) { |
192 | 3.38M | .name = vo->name, |
193 | 3.38M | .description = vo->description, |
194 | 3.38M | .priv_size = vo->priv_size, |
195 | 3.38M | .priv_defaults = vo->priv_defaults, |
196 | 3.38M | .options = vo->options, |
197 | 3.38M | .options_prefix = vo->options_prefix, |
198 | 3.38M | .global_opts = vo->global_opts, |
199 | 3.38M | .hidden = vo->encode, |
200 | 3.38M | .p = vo, |
201 | 3.38M | }; |
202 | 3.38M | return true; |
203 | 3.58M | } |
204 | | |
// For the vo option
const struct m_obj_list vo_obj_list = {
    .get_desc = get_desc,
    .description = "video outputs",
    // Historic/renamed VO names mapped to their current equivalents.
    .aliases = {
        {"gl", "gpu"},
        {"direct3d_shaders", "direct3d"},
        {"opengl", "gpu"},
        {"opengl-cb", "libmpv"},
        {0}
    },
    .allow_trailer = true,
    .disallow_positional_parameters = true,
    .use_global_options = true,
};
220 | | |
// Dispatch-queue wakeup hook: forward to the VO's wakeup mechanism.
static void dispatch_wakeup_cb(void *ptr)
{
    vo_wakeup(ptr);
}
226 | | |
// Initialize or update options from vo->opts
static void read_opts(struct vo *vo)
{
    struct vo_internal *in = vo->in;

    mp_mutex_lock(&in->lock);
    // Option is in seconds; the internal field is nanoseconds.
    in->timing_offset = (uint64_t)(MP_TIME_S_TO_NS(vo->opts->timing_offset));
    mp_mutex_unlock(&in->lock);
}
236 | | |
237 | | static void update_opts(void *p) |
238 | 491k | { |
239 | 491k | struct vo *vo = p; |
240 | | |
241 | 491k | if (m_config_cache_update(vo->opts_cache)) { |
242 | 0 | read_opts(vo); |
243 | 0 | if (vo->driver->control) { |
244 | 0 | vo->driver->control(vo, VOCTRL_VO_OPTS_CHANGED, NULL); |
245 | | // "Legacy" update of video position related options. |
246 | | // Unlike VOCTRL_VO_OPTS_CHANGED, often not propagated to backends. |
247 | 0 | vo->driver->control(vo, VOCTRL_SET_PANSCAN, NULL); |
248 | 0 | } |
249 | 0 | } |
250 | 491k | } |
251 | | |
// Does not include thread- and VO uninit.
static void dealloc_vo(struct vo *vo)
{
    forget_frames(vo); // implicitly synchronized

    // These must be free'd before vo->in->dispatch.
    talloc_free(vo->opts_cache);
    talloc_free(vo->gl_opts_cache);
    talloc_free(vo->eq_opts_cache);
    mp_mutex_destroy(&vo->params_mutex);

    mp_mutex_destroy(&vo->in->lock);
    mp_cond_destroy(&vo->in->wakeup);
    // vo->in and everything else talloc-attached to vo is freed with it.
    talloc_free(vo);
}
267 | | |
// Create a VO by driver name, allocate its state, and start the VO thread.
// Returns NULL on failure (unknown name, encode/driver mismatch, priv config
// failure, or the VO thread failing its init barrier). probing marks the
// attempt as an autoprobe (quieter failure handling downstream).
static struct vo *vo_create(bool probing, struct mpv_global *global,
                            struct vo_extra *ex, char *name)
{
    mp_assert(ex->wakeup_cb);

    struct mp_log *log = mp_log_new(NULL, global->log, "vo");
    struct m_obj_desc desc;
    if (!m_obj_list_find(&desc, &vo_obj_list, bstr0(name))) {
        mp_msg(log, MSGL_ERR, "Video output %s not found!\n", name);
        talloc_free(log);
        return NULL;
    };
    struct vo *vo = talloc_ptrtype(NULL, vo);
    *vo = (struct vo) {
        .log = mp_log_new(vo, log, name),
        .driver = desc.p,
        .global = global,
        .encode_lavc_ctx = ex->encode_lavc_ctx,
        .input_ctx = ex->input_ctx,
        .osd = ex->osd,
        .monitor_par = 1,
        .extra = *ex,
        .probing = probing,
        .in = talloc(vo, struct vo_internal),
    };
    mp_mutex_init(&vo->params_mutex);
    // Reparent the "vo" log under vo so it is freed together with it.
    talloc_steal(vo, log);
    *vo->in = (struct vo_internal) {
        .dispatch = mp_dispatch_create(vo),
        .req_frames = 1,
        .estimated_vsync_jitter = -1, // -1 = no estimate yet
        .stats = stats_ctx_create(vo, global, "vo"),
    };
    mp_dispatch_set_wakeup_fn(vo->in->dispatch, dispatch_wakeup_cb, vo);
    mp_mutex_init(&vo->in->lock);
    mp_cond_init(&vo->in->wakeup);

    vo->opts_cache = m_config_cache_alloc(NULL, global, &vo_sub_opts);
    vo->opts = vo->opts_cache->opts;

    // Deliver option change notifications on the VO thread.
    m_config_cache_set_dispatch_change_cb(vo->opts_cache, vo->in->dispatch,
                                          update_opts, vo);

    vo->gl_opts_cache = m_config_cache_alloc(NULL, global, &gl_video_conf);
    vo->eq_opts_cache = m_config_cache_alloc(NULL, global, &mp_csp_equalizer_conf);

    mp_input_set_mouse_transform(vo->input_ctx, NULL, NULL);
    // An encoding VO requires an encode context and vice versa.
    if (vo->driver->encode != !!vo->encode_lavc_ctx)
        goto error;
    vo->priv = m_config_group_from_desc(vo, vo->log, global, &desc, name);
    if (!vo->priv)
        goto error;

    if (mp_thread_create(&vo->in->thread, vo_thread, vo))
        goto error;
    // Wait for the VO thread to signal success/failure of its preinit.
    if (mp_rendezvous(vo, 0) < 0) { // init barrier
        mp_thread_join(vo->in->thread);
        goto error;
    }
    return vo;

error:
    dealloc_vo(vo);
    return NULL;
}
333 | | |
// Create the best working VO: try the user's --vo list first (an empty list
// entry means "fall back to autoprobing"), otherwise autoprobe the drivers in
// video_out_drivers[] order, stopping before video_out_null.
struct vo *init_best_video_out(struct mpv_global *global, struct vo_extra *ex)
{
    struct mp_vo_opts *opts = mp_get_config_group(NULL, global, &vo_sub_opts);
    struct m_obj_settings *vo_list = opts->video_driver_list;
    struct vo *vo = NULL;
    // first try the preferred drivers, with their optional subdevice param:
    if (vo_list && vo_list[0].name) {
        for (int n = 0; vo_list[n].name; n++) {
            // Something like "-vo name," allows fallback to autoprobing.
            if (strlen(vo_list[n].name) == 0)
                goto autoprobe;
            // Every entry except the last is created in probing mode.
            bool p = !!vo_list[n + 1].name;
            vo = vo_create(p, global, ex, vo_list[n].name);
            if (vo)
                goto done;
        }
        goto done;
    }
autoprobe:
    // now try the rest...
    for (int i = 0; i < MP_ARRAY_SIZE(video_out_drivers); i++) {
        const struct vo_driver *driver = video_out_drivers[i];
        if (driver == &video_out_null)
            break;
        vo = vo_create(true, global, ex, (char *)driver->name);
        if (vo)
            goto done;
    }
done:
    talloc_free(opts);
    return vo;
}
366 | | |
367 | | static void terminate_vo(void *p) |
368 | 43.8k | { |
369 | 43.8k | struct vo *vo = p; |
370 | 43.8k | struct vo_internal *in = vo->in; |
371 | 43.8k | in->terminate = true; |
372 | 43.8k | } |
373 | | |
// Terminate and join the VO thread, then free the vo and all its state.
void vo_destroy(struct vo *vo)
{
    struct vo_internal *in = vo->in;
    // Set in->terminate on the VO thread; the thread loop is expected to
    // exit once it sees the flag (vo_thread() body is outside this chunk).
    mp_dispatch_run(in->dispatch, terminate_vo, vo);
    mp_thread_join(vo->in->thread);
    dealloc_vo(vo);
}
381 | | |
382 | | // Wakeup the playloop to queue new video frames etc. |
383 | | static void wakeup_core(struct vo *vo) |
384 | 779k | { |
385 | 779k | vo->extra.wakeup_cb(vo->extra.wakeup_ctx); |
386 | 779k | } |
387 | | |
388 | | // Drop timing information on discontinuities like seeking. |
389 | | // Always called locked. |
390 | | static void reset_vsync_timings(struct vo *vo) |
391 | 467k | { |
392 | 467k | struct vo_internal *in = vo->in; |
393 | 467k | in->drop_point = 0; |
394 | 467k | in->base_vsync = 0; |
395 | 467k | in->expecting_vsync = false; |
396 | 467k | in->num_successive_vsyncs = 0; |
397 | 467k | } |
398 | | |
399 | | static double vsync_stddef(struct vo *vo, double ref_vsync) |
400 | 0 | { |
401 | 0 | struct vo_internal *in = vo->in; |
402 | 0 | double jitter = 0; |
403 | 0 | for (int n = 0; n < in->num_vsync_samples; n++) { |
404 | 0 | double diff = in->vsync_samples[n] - ref_vsync; |
405 | 0 | jitter += diff * diff; |
406 | 0 | } |
407 | 0 | return sqrt(jitter / in->num_vsync_samples); |
408 | 0 | } |
409 | | |
410 | 0 | #define MAX_VSYNC_SAMPLES 1000 |
411 | 0 | #define DELAY_VSYNC_SAMPLES 10 |
412 | | |
// Check if we should switch to measured average display FPS if it seems
// "better" then the system-reported one. (Note that small differences are
// handled as drift instead.)
static void check_estimated_display_fps(struct vo *vo)
{
    struct vo_internal *in = vo->in;

    bool use_estimated = false;
    // Only trust the estimate with enough samples and within a sane
    // 20..400 Hz range (intervals are in nanoseconds).
    if (in->num_total_vsync_samples >= MAX_VSYNC_SAMPLES / 2 &&
        in->estimated_vsync_interval <= 1e9 / 20.0 &&
        in->estimated_vsync_interval >= 1e9 / 400.0)
    {
        // Reject the estimate if any one sample deviates by 25% or more.
        for (int n = 0; n < in->num_vsync_samples; n++) {
            if (fabs(in->vsync_samples[n] - in->estimated_vsync_interval)
                >= in->estimated_vsync_interval / 4)
                goto done;
        }
        // Use the estimate only if it fits the samples with clearly (>1%)
        // lower jitter than the nominal interval does.
        double mjitter = vsync_stddef(vo, in->estimated_vsync_interval);
        double njitter = vsync_stddef(vo, in->nominal_vsync_interval);
        if (mjitter * 1.01 < njitter)
            use_estimated = true;
        done: ;
    }
    // Log only when the mode changes. NOTE(review): "< 1e9" effectively
    // tests "currently (close to) the nominal interval"; since intervals
    // are ~1e7 ns the threshold looks generous — verify intent.
    if (use_estimated == (fabs(in->vsync_interval - in->nominal_vsync_interval) < 1e9)) {
        if (use_estimated) {
            MP_TRACE(vo, "adjusting display FPS to a value closer to %.3f Hz\n",
                     1e9 / in->estimated_vsync_interval);
        } else {
            MP_TRACE(vo, "switching back to assuming display fps = %.3f Hz\n",
                     1e9 / in->nominal_vsync_interval);
        }
    }
    in->vsync_interval = use_estimated ? in->estimated_vsync_interval
                                       : in->nominal_vsync_interval;
}
448 | | |
// Attempt to detect vsyncs delayed/skipped by the driver. This tries to deal
// with strong jitter too, because some drivers have crap vsync timing.
static void vsync_skip_detection(struct vo *vo)
{
    struct vo_internal *in = vo->in;

    int window = 4;
    // Walk backwards from the latest vsync, accumulating the difference
    // between real (t_r) and ideal/expected (t_e) vsync times.
    double t_r = in->prev_vsync, t_e = in->base_vsync, diff = 0.0, desync_early = 0.0;
    for (int n = 0; n < in->drop_point; n++) {
        diff += t_r - t_e;
        t_r -= in->vsync_samples[n];
        t_e -= in->vsync_interval;
        if (n == window + 1)
            desync_early = diff / window; // average desync over the recent window
    }
    double desync = diff / in->num_vsync_samples;
    // A jump of >= 3/4 vsync interval between long-term and recent desync
    // suggests the driver delayed or skipped a vsync.
    if (in->drop_point > window * 2 &&
        fabs(desync - desync_early) >= in->vsync_interval * 3 / 4)
    {
        // Assume a drop. An underflow can technically speaking not be a drop
        // (it's up to the driver what this is supposed to mean), but no reason
        // to treat it differently.
        in->base_vsync = in->prev_vsync; // resync the ideal vsync phase
        in->delayed_count += 1;
        in->drop_point = 0;
        MP_STATS(vo, "vo-delayed");
    }
    if (in->drop_point > 10)
        in->base_vsync += desync / 10; // smooth out drift
}
479 | | |
// Record the vsync timestamp reported for the last buffer swap and update
// the interval/jitter estimates. Always called locked.
static void update_vsync_timing_after_swap(struct vo *vo,
                                           struct vo_vsync_info *vsync)
{
    struct vo_internal *in = vo->in;

    int64_t vsync_time = vsync->last_queue_display_time;
    int64_t prev_vsync = in->prev_vsync;
    in->prev_vsync = vsync_time;

    // Not in display-synced mode: keep only prev_vsync, drop the rest.
    if (!in->expecting_vsync) {
        reset_vsync_timings(vo);
        return;
    }

    in->num_successive_vsyncs++;
    // Skip the first few vsyncs after a discontinuity; only track the phase.
    if (in->num_successive_vsyncs <= DELAY_VSYNC_SAMPLES) {
        in->base_vsync = vsync_time;
        return;
    }

    // Invalid or non-monotonic timestamp: restart measurement.
    if (vsync_time <= 0 || vsync_time <= prev_vsync) {
        in->prev_vsync = 0;
        in->base_vsync = 0;
        return;
    }

    if (prev_vsync <= 0)
        return;

    // Bounded sliding window of measured intervals, newest at index 0.
    if (in->num_vsync_samples >= MAX_VSYNC_SAMPLES)
        in->num_vsync_samples -= 1;
    MP_TARRAY_INSERT_AT(in, in->vsync_samples, in->num_vsync_samples, 0,
                        vsync_time - prev_vsync);
    in->drop_point = MPMIN(in->drop_point + 1, in->num_vsync_samples);
    in->num_total_vsync_samples += 1;
    // Advance the ideal vsync phase by one interval (or initialize it).
    if (in->base_vsync) {
        in->base_vsync += in->vsync_interval;
    } else {
        in->base_vsync = vsync_time;
    }

    double avg = 0;
    for (int n = 0; n < in->num_vsync_samples; n++) {
        mp_assert(in->vsync_samples[n] > 0);
        avg += in->vsync_samples[n];
    }
    in->estimated_vsync_interval = avg / in->num_vsync_samples;
    // Jitter is the stddev normalized to the interval currently in use.
    in->estimated_vsync_jitter =
        vsync_stddef(vo, in->vsync_interval) / in->vsync_interval;

    check_estimated_display_fps(vo);
    vsync_skip_detection(vo);

    MP_STATS(vo, "value %f jitter", in->estimated_vsync_jitter);
    MP_STATS(vo, "value %f vsync-diff", MP_TIME_NS_TO_S(in->vsync_samples[0]));
}
537 | | |
// Refresh the display FPS (driver-reported or overridden by options) and the
// derived vsync intervals. to be called from VO thread only
static void update_display_fps(struct vo *vo)
{
    struct vo_internal *in = vo->in;
    mp_mutex_lock(&in->lock);
    if (in->internal_events & VO_EVENT_WIN_STATE) {
        in->internal_events &= ~(unsigned)VO_EVENT_WIN_STATE;

        // Drop the lock around the driver call; controls must not run
        // while in->lock is held.
        mp_mutex_unlock(&in->lock);

        double fps = 0;
        vo->driver->control(vo, VOCTRL_GET_DISPLAY_FPS, &fps);

        mp_mutex_lock(&in->lock);

        in->reported_display_fps = fps;
    }

    // The option override takes precedence over the reported value.
    double display_fps = vo->opts->display_fps_override;
    if (display_fps <= 0)
        display_fps = in->reported_display_fps;

    if (in->display_fps != display_fps) {
        // Intervals are in nanoseconds; clamp to >= 1 to avoid a 0 interval.
        in->nominal_vsync_interval = display_fps > 0 ? 1e9 / display_fps : 0;
        in->vsync_interval = MPMAX(in->nominal_vsync_interval, 1);
        in->display_fps = display_fps;

        MP_VERBOSE(vo, "Assuming %f FPS for display sync.\n", display_fps);

        // make sure to update the player
        in->queued_events |= VO_EVENT_WIN_STATE;
        wakeup_core(vo);
    }

    mp_mutex_unlock(&in->lock);
}
574 | | |
575 | | static void check_vo_caps(struct vo *vo) |
576 | 20.7k | { |
577 | 20.7k | int rot = vo->params->rotate; |
578 | 20.7k | if (rot) { |
579 | 85 | bool ok = rot % 90 ? false : (vo->driver->caps & VO_CAP_ROTATE90); |
580 | 85 | if (!ok) { |
581 | 85 | MP_WARN(vo, "Video is flagged as rotated by %d degrees, but the " |
582 | 85 | "video output does not support this.\n", rot); |
583 | 85 | } |
584 | 85 | } |
585 | 20.7k | if (vo->params->vflip && !(vo->driver->caps & VO_CAP_VFLIP)) |
586 | 20.7k | MP_WARN(vo, "Video is flagged as vertically flipped, but the " |
587 | 20.7k | "video output does not support this.\n"); |
588 | 20.7k | } |
589 | | |
// Dispatch target for vo_reconfig()/vo_reconfig2(); runs on the VO thread.
// p is {struct vo *, struct mp_image * (template frame), int * (result)}.
static void run_reconfig(void *p)
{
    void **pp = p;
    struct vo *vo = pp[0];
    struct mp_image *img = pp[1];
    int *ret = pp[2];

    struct mp_image_params *params = &img->params;

    struct vo_internal *in = vo->in;

    MP_VERBOSE(vo, "reconfig to %s\n", mp_image_params_to_str(params));

    update_opts(vo);

    mp_image_params_get_dsize(params, &vo->dwidth, &vo->dheight);

    // Swap in the new params under params_mutex so concurrent readers never
    // see a half-updated state.
    mp_mutex_lock(&vo->params_mutex);
    talloc_free(vo->params);
    vo->params = talloc_dup(vo, params);
    vo->has_peak_detect_values = false;
    mp_mutex_unlock(&vo->params_mutex);

    // Prefer the newer reconfig2 entry point (takes the whole image).
    if (vo->driver->reconfig2) {
        *ret = vo->driver->reconfig2(vo, img);
    } else {
        *ret = vo->driver->reconfig(vo, vo->params);
    }
    vo->config_ok = *ret >= 0;
    if (vo->config_ok) {
        check_vo_caps(vo);
    } else {
        // Reconfig failed: drop the stored params again.
        mp_mutex_lock(&vo->params_mutex);
        talloc_free(vo->params);
        vo->params = NULL;
        vo->has_peak_detect_values = false;
        mp_mutex_unlock(&vo->params_mutex);
    }

    // Frames queued for the previous configuration are no longer valid.
    mp_mutex_lock(&in->lock);
    talloc_free(in->current_frame);
    in->current_frame = NULL;
    forget_frames(vo);
    reset_vsync_timings(vo);
    mp_mutex_unlock(&in->lock);

    update_display_fps(vo);
}
638 | | |
639 | | int vo_reconfig(struct vo *vo, struct mp_image_params *params) |
640 | 11 | { |
641 | 11 | int ret; |
642 | 11 | struct mp_image dummy = {0}; |
643 | 11 | mp_image_set_params(&dummy, params); |
644 | 11 | void *p[] = {vo, &dummy, &ret}; |
645 | 11 | mp_dispatch_run(vo->in->dispatch, run_reconfig, p); |
646 | 11 | return ret; |
647 | 11 | } |
648 | | |
649 | | int vo_reconfig2(struct vo *vo, struct mp_image *img) |
650 | 20.7k | { |
651 | 20.7k | int ret; |
652 | 20.7k | void *p[] = {vo, img, &ret}; |
653 | 20.7k | mp_dispatch_run(vo->in->dispatch, run_reconfig, p); |
654 | 20.7k | return ret; |
655 | 20.7k | } |
656 | | |
657 | | static void run_control(void *p) |
658 | 470k | { |
659 | 470k | void **pp = p; |
660 | 470k | struct vo *vo = pp[0]; |
661 | 470k | int request = (intptr_t)pp[1]; |
662 | 470k | void *data = pp[2]; |
663 | 470k | update_opts(vo); |
664 | 470k | int ret = vo->driver->control(vo, request, data); |
665 | 470k | if (pp[3]) |
666 | 126k | *(int *)pp[3] = ret; |
667 | 470k | } |
668 | | |
669 | | int vo_control(struct vo *vo, int request, void *data) |
670 | 126k | { |
671 | 126k | int ret; |
672 | 126k | void *p[] = {vo, (void *)(intptr_t)request, data, &ret}; |
673 | 126k | mp_dispatch_run(vo->in->dispatch, run_control, p); |
674 | 126k | return ret; |
675 | 126k | } |
676 | | |
677 | | // Run vo_control() without waiting for a reply. |
678 | | // (Only works for some VOCTRLs.) |
679 | | void vo_control_async(struct vo *vo, int request, void *data) |
680 | 344k | { |
681 | 344k | void *p[4] = {vo, (void *)(intptr_t)request, NULL, NULL}; |
682 | 344k | void **d = talloc_memdup(NULL, p, sizeof(p)); |
683 | | |
684 | 344k | switch (request) { |
685 | 228k | case VOCTRL_UPDATE_PLAYBACK_STATE: |
686 | 228k | d[2] = talloc_dup(d, (struct voctrl_playback_state *)data); |
687 | 228k | break; |
688 | 22.3k | case VOCTRL_KILL_SCREENSAVER: |
689 | 115k | case VOCTRL_RESTORE_SCREENSAVER: |
690 | 115k | break; |
691 | 0 | default: |
692 | 0 | abort(); // requires explicit support |
693 | 344k | } |
694 | | |
695 | 344k | mp_dispatch_enqueue_autofree(vo->in->dispatch, run_control, d); |
696 | 344k | } |
697 | | |
698 | | // must be called locked |
699 | | static void forget_frames(struct vo *vo) |
700 | 193k | { |
701 | 193k | struct vo_internal *in = vo->in; |
702 | 193k | in->hasframe = false; |
703 | 193k | in->hasframe_rendered = false; |
704 | 193k | in->drop_count = 0; |
705 | 193k | in->delayed_count = 0; |
706 | 193k | talloc_free(in->frame_queued); |
707 | 193k | in->frame_queued = NULL; |
708 | 193k | in->current_frame_id += VO_MAX_REQ_FRAMES + 1; |
709 | | // don't unref current_frame; we always want to be able to redraw it |
710 | 193k | if (in->current_frame) { |
711 | 36.1k | in->current_frame->num_vsyncs = 0; // but reset future repeats |
712 | 36.1k | in->current_frame->display_synced = false; // mark discontinuity |
713 | 36.1k | } |
714 | 193k | } |
715 | | |
716 | | // VOs which have no special requirements on UI event loops etc. can set the |
717 | | // vo_driver.wait_events callback to this (and leave vo_driver.wakeup unset). |
718 | | // This function must not be used or called for other purposes. |
719 | | void vo_wait_default(struct vo *vo, int64_t until_time) |
720 | 835k | { |
721 | 835k | struct vo_internal *in = vo->in; |
722 | | |
723 | 835k | mp_mutex_lock(&in->lock); |
724 | 835k | if (!in->need_wakeup) |
725 | 788k | mp_cond_timedwait_until(&in->wakeup, &in->lock, until_time); |
726 | 835k | mp_mutex_unlock(&in->lock); |
727 | 835k | } |
728 | | |
729 | | // Called unlocked. |
730 | | static void wait_vo(struct vo *vo, int64_t until_time) |
731 | 835k | { |
732 | 835k | struct vo_internal *in = vo->in; |
733 | | |
734 | 835k | if (vo->driver->wait_events) { |
735 | 0 | vo->driver->wait_events(vo, until_time); |
736 | 835k | } else { |
737 | 835k | vo_wait_default(vo, until_time); |
738 | 835k | } |
739 | 835k | mp_mutex_lock(&in->lock); |
740 | 835k | in->need_wakeup = false; |
741 | 835k | mp_mutex_unlock(&in->lock); |
742 | 835k | } |
743 | | |
744 | | static void wakeup_locked(struct vo *vo) |
745 | 1.14M | { |
746 | 1.14M | struct vo_internal *in = vo->in; |
747 | | |
748 | 1.14M | mp_cond_broadcast(&in->wakeup); |
749 | 1.14M | if (vo->driver->wakeup) |
750 | 0 | vo->driver->wakeup(vo); |
751 | 1.14M | in->need_wakeup = true; |
752 | 1.14M | } |
753 | | |
754 | | // Wakeup VO thread, and make it check for new events with VOCTRL_CHECK_EVENTS. |
755 | | // To be used by threaded VO backends. |
756 | | void vo_wakeup(struct vo *vo) |
757 | 635k | { |
758 | 635k | struct vo_internal *in = vo->in; |
759 | | |
760 | 635k | mp_mutex_lock(&in->lock); |
761 | 635k | wakeup_locked(vo); |
762 | 635k | mp_mutex_unlock(&in->lock); |
763 | 635k | } |
764 | | |
765 | | static int64_t get_current_frame_end(struct vo *vo) |
766 | 24.7k | { |
767 | 24.7k | struct vo_internal *in = vo->in; |
768 | 24.7k | if (!in->current_frame) |
769 | 18.0k | return -1; |
770 | 6.69k | return in->current_frame->pts + MPMAX(in->current_frame->duration, 0); |
771 | 24.7k | } |
772 | | |
773 | | static int64_t get_display_synced_frame_end(struct vo *vo) |
774 | 24.7k | { |
775 | 24.7k | struct vo_internal *in = vo->in; |
776 | 24.7k | mp_assert(!in->frame_queued); |
777 | 24.7k | int64_t res = 0; |
778 | 24.7k | if (in->base_vsync && in->vsync_interval > 1 && in->current_frame) { |
779 | 0 | res = in->base_vsync; |
780 | 0 | int extra = !!in->rendering; |
781 | 0 | res += (in->current_frame->num_vsyncs + extra) * in->vsync_interval; |
782 | 0 | if (!in->current_frame->display_synced) |
783 | 0 | res = 0; |
784 | 0 | } |
785 | 24.7k | return res; |
786 | 24.7k | } |
787 | | |
788 | | static bool still_displaying(struct vo *vo) |
789 | 24.8k | { |
790 | 24.8k | struct vo_internal *in = vo->in; |
791 | 24.8k | bool working = in->rendering || in->frame_queued; |
792 | 24.8k | if (working) |
793 | 57 | goto done; |
794 | | |
795 | 24.7k | int64_t frame_end = get_display_synced_frame_end(vo); |
796 | 24.7k | if (frame_end > 0) { |
797 | 0 | working = frame_end > in->base_vsync; |
798 | 0 | goto done; |
799 | 0 | } |
800 | | |
801 | 24.7k | frame_end = get_current_frame_end(vo); |
802 | 24.7k | if (frame_end < 0) |
803 | 18.0k | goto done; |
804 | 6.69k | working = mp_time_ns() < frame_end; |
805 | | |
806 | 24.8k | done: |
807 | 24.8k | return working && in->hasframe; |
808 | 6.69k | } |
809 | | |
810 | | // Return true if there is still a frame being displayed (or queued). |
811 | | bool vo_still_displaying(struct vo *vo) |
812 | 24.8k | { |
813 | 24.8k | mp_mutex_lock(&vo->in->lock); |
814 | 24.8k | bool res = still_displaying(vo); |
815 | 24.8k | mp_mutex_unlock(&vo->in->lock); |
816 | 24.8k | return res; |
817 | 24.8k | } |
818 | | |
819 | | // Make vo issue a wakeup once vo_still_displaying() becomes false. |
820 | | void vo_request_wakeup_on_done(struct vo *vo) |
821 | 0 | { |
822 | 0 | struct vo_internal *in = vo->in; |
823 | 0 | mp_mutex_lock(&vo->in->lock); |
824 | 0 | if (still_displaying(vo)) { |
825 | 0 | in->wakeup_on_done = true; |
826 | 0 | } else { |
827 | 0 | wakeup_core(vo); |
828 | 0 | } |
829 | 0 | mp_mutex_unlock(&vo->in->lock); |
830 | 0 | } |
831 | | |
// Whether vo_queue_frame() can be called. If the VO is not ready yet, the
// function will return false, and the VO will call the wakeup callback once
// it's ready.
// next_pts is the exact time when the next frame should be displayed. If the
// VO is ready, but the time is too "early", return false, and call the wakeup
// callback once the time is right.
// If next_pts is negative, disable any timing and draw the frame as fast as
// possible.
bool vo_is_ready_for_frame(struct vo *vo, int64_t next_pts)
{
    struct vo_internal *in = vo->in;
    mp_mutex_lock(&in->lock);
    // Ready only if configured, nothing is queued, and the current frame
    // has no repeat vsyncs left to show.
    bool r = vo->config_ok && !in->frame_queued &&
             (!in->current_frame || in->current_frame->num_vsyncs < 1);
    if (r && next_pts >= 0) {
        // Don't show the frame too early - it would basically freeze the
        // display by disallowing OSD redrawing or VO interaction.
        // Actually render the frame at earliest the given offset before target
        // time.
        next_pts -= in->timing_offset;
        next_pts -= in->flip_queue_offset;
        int64_t now = mp_time_ns();
        if (next_pts > now)
            r = false;
        if (!in->wakeup_pts || next_pts < in->wakeup_pts) {
            in->wakeup_pts = next_pts;
            // If we have to wait, update the vo thread's timer.
            if (!r)
                wakeup_locked(vo);
        }
    }
    mp_mutex_unlock(&in->lock);
    return r;
}
866 | | |
867 | | // Check if the VO reports that the mpv window is visible. |
868 | | bool vo_is_visible(struct vo *vo) |
869 | 0 | { |
870 | 0 | struct vo_internal *in = vo->in; |
871 | 0 | mp_mutex_lock(&in->lock); |
872 | 0 | bool r = in->visible; |
873 | 0 | mp_mutex_unlock(&in->lock); |
874 | 0 | return r; |
875 | 0 | } |
876 | | |
// Direct the VO thread to put the currently queued image on the screen.
// vo_is_ready_for_frame() must have returned true before this call.
// Ownership of frame is handed to the vo.
void vo_queue_frame(struct vo *vo, struct vo_frame *frame)
{
    struct vo_internal *in = vo->in;
    mp_mutex_lock(&in->lock);
    // The same readiness condition vo_is_ready_for_frame() checked must
    // still hold; callers violating the protocol hit this assert.
    mp_assert(vo->config_ok && !in->frame_queued &&
              (!in->current_frame || in->current_frame->num_vsyncs < 1));
    in->hasframe = true;
    frame->frame_id = ++(in->current_frame_id);
    in->frame_queued = frame;
    // Display-synced frames are timed by vsync (no core wakeup deadline);
    // otherwise wake the core when the frame's display duration elapses.
    in->wakeup_pts = frame->display_synced
                   ? 0 : frame->pts + MPMAX(frame->duration, 0);
    wakeup_locked(vo);
    mp_mutex_unlock(&in->lock);
}
894 | | |
895 | | // If a frame is currently being rendered (or queued), wait until it's done. |
896 | | // Otherwise, return immediately. |
897 | | void vo_wait_frame(struct vo *vo) |
898 | 18.0k | { |
899 | 18.0k | struct vo_internal *in = vo->in; |
900 | 18.0k | mp_mutex_lock(&in->lock); |
901 | 36.1k | while (in->frame_queued || in->rendering) |
902 | 18.0k | mp_cond_wait(&in->wakeup, &in->lock); |
903 | 18.0k | mp_mutex_unlock(&in->lock); |
904 | 18.0k | } |
905 | | |
906 | | // Wait until realtime is >= ts |
907 | | // called without lock |
908 | | static void wait_until(struct vo *vo, int64_t target) |
909 | 366k | { |
910 | 366k | struct vo_internal *in = vo->in; |
911 | 366k | mp_mutex_lock(&in->lock); |
912 | 366k | while (target > mp_time_ns()) { |
913 | 0 | if (in->queued_events & VO_EVENT_LIVE_RESIZING) |
914 | 0 | break; |
915 | 0 | if (mp_cond_timedwait_until(&in->wakeup, &in->lock, target)) |
916 | 0 | break; |
917 | 0 | } |
918 | 366k | mp_mutex_unlock(&in->lock); |
919 | 366k | } |
920 | | |
// Render (or drop) the next due frame, flip the page, and update vsync
// bookkeeping. Returns true if more display-synced vsyncs are pending, i.e.
// the caller should loop again without sleeping.
static bool render_frame(struct vo *vo)
{
    struct vo_internal *in = vo->in;
    struct vo_frame *frame = NULL;
    bool more_frames = false;

    update_display_fps(vo);

    mp_mutex_lock(&in->lock);

    // Promote a newly queued frame to current; otherwise only continue if the
    // current display-synced frame still has vsyncs left to repeat.
    if (in->frame_queued) {
        talloc_free(in->current_frame);
        in->current_frame = in->frame_queued;
        in->frame_queued = NULL;
    } else if (in->paused || !in->current_frame || !in->hasframe ||
               (in->current_frame->display_synced && in->current_frame->num_vsyncs < 1) ||
               !in->current_frame->display_synced)
    {
        goto done;
    }

    frame = vo_frame_ref(in->current_frame);
    mp_assert(frame);

    // Display-synced frames are timed purely by vsync, not by pts/duration.
    if (frame->display_synced) {
        frame->pts = 0;
        frame->duration = -1;
    }

    int64_t now = mp_time_ns();
    int64_t pts = frame->pts;
    int64_t duration = frame->duration;
    int64_t end_time = pts + duration;

    // Time at which we should flip_page on the VO.
    int64_t target = frame->display_synced ? 0 : pts - in->flip_queue_offset;

    // "normal" strict drop threshold.
    in->dropped_frame = duration >= 0 && end_time < now;

    in->dropped_frame &= !frame->display_synced;
    in->dropped_frame &= !(vo->driver->caps & VO_CAP_FRAMEDROP);
    in->dropped_frame &= frame->can_drop;
    // Even if we're hopelessly behind, rather degrade to 10 FPS playback,
    // instead of just freezing the display forever.
    in->dropped_frame &= now - in->prev_vsync < MP_TIME_MS_TO_NS(100);
    in->dropped_frame &= in->hasframe_rendered;

    // Setup parameters for the next time this frame is drawn. ("frame" is the
    // frame currently drawn, while in->current_frame is the potentially next.)
    in->current_frame->repeat = true;
    if (frame->display_synced) {
        // Increment the offset only if it's not the last vsync. The current_frame
        // can still be reused. This is mostly important for redraws that might
        // overshoot the target vsync point.
        if (in->current_frame->num_vsyncs > 1) {
            in->current_frame->vsync_offset += in->current_frame->vsync_interval;
            in->current_frame->ideal_frame_vsync += in->current_frame->ideal_frame_vsync_duration;
        }
        in->dropped_frame |= in->current_frame->num_vsyncs < 1;
    }
    if (in->current_frame->num_vsyncs > 0)
        in->current_frame->num_vsyncs -= 1;

    // Always render when paused (it's typically the last frame for a while).
    in->dropped_frame &= !in->paused;

    bool use_vsync = in->current_frame->display_synced && !in->paused;
    if (use_vsync && !in->expecting_vsync) // first DS frame in a row
        in->prev_vsync = now;
    in->expecting_vsync = use_vsync;

    // Store the initial value before we unlock.
    bool request_redraw = in->request_redraw;

    if (in->dropped_frame) {
        in->drop_count += 1;
        wakeup_core(vo);
    } else {
        in->rendering = true;
        in->hasframe_rendered = true;
        int64_t prev_drop_count = vo->in->drop_count;
        // Can the core queue new video now? Non-display-sync uses a separate
        // timer instead, but possibly benefits from preparing a frame early.
        bool can_queue = !in->frame_queued &&
            (in->current_frame->num_vsyncs < 1 || !use_vsync);
        mp_mutex_unlock(&in->lock);

        if (can_queue)
            wakeup_core(vo);

        stats_time_start(in->stats, "video-draw");

        // draw_frame() also reports whether the window is visible.
        in->visible = vo->driver->draw_frame(vo, frame);

        stats_time_end(in->stats, "video-draw");

        // Sleep (interruptibly) until the frame's target display time.
        wait_until(vo, target);

        stats_time_start(in->stats, "video-flip");

        vo->driver->flip_page(vo);

        struct vo_vsync_info vsync = {
            .last_queue_display_time = -1,
            .skipped_vsyncs = -1,
        };
        if (vo->driver->get_vsync)
            vo->driver->get_vsync(vo, &vsync);

        // Make up some crap if presentation feedback is missing.
        if (vsync.last_queue_display_time <= 0)
            vsync.last_queue_display_time = mp_time_ns();

        stats_time_end(in->stats, "video-flip");

        mp_mutex_lock(&in->lock);
        // The driver may have bumped drop_count while the lock was released.
        in->dropped_frame = prev_drop_count < vo->in->drop_count;
        in->rendering = false;

        update_vsync_timing_after_swap(vo, &vsync);
    }

    // Drivers that can't re-draw a retained frame get it freed immediately.
    if (vo->driver->caps & VO_CAP_NORETAIN) {
        talloc_free(in->current_frame);
        in->current_frame = NULL;
    }

    if (in->dropped_frame) {
        MP_STATS(vo, "drop-vo");
    } else {
        // If the initial redraw request was true or mpv is still playing,
        // then we can clear it here since we just performed a redraw, or the
        // next loop will draw what we need. However if there initially is
        // no redraw request, then something can change this (i.e. the OSD)
        // while the vo was unlocked. If we are paused, don't touch
        // in->request_redraw in that case.
        if (request_redraw || !in->paused)
            in->request_redraw = false;
    }

    // Report pending display-synced vsyncs on either the current or the
    // queued frame so the caller keeps iterating.
    if (in->current_frame && in->current_frame->num_vsyncs &&
        in->current_frame->display_synced)
        more_frames = true;

    if (in->frame_queued && in->frame_queued->display_synced)
        more_frames = true;

    mp_cond_broadcast(&in->wakeup); // for vo_wait_frame()

done:
    if (!(vo->driver->caps & VO_CAP_FRAMEOWNER) || in->dropped_frame)
        talloc_free(frame);
    mp_mutex_unlock(&in->lock);

    return more_frames;
}
1078 | | |
// Redraw the current frame (e.g. for OSD changes) without advancing video
// timing. Falls back to a blank dummy frame if no frame is retained.
static void do_redraw(struct vo *vo)
{
    struct vo_internal *in = vo->in;

    if (!vo->config_ok)
        return;

    mp_mutex_lock(&in->lock);
    in->request_redraw = false;

    // Drivers that don't retain frames (or are untimed) can't redraw.
    if (vo->driver->caps & (VO_CAP_NORETAIN | VO_CAP_UNTIMED)) {
        mp_mutex_unlock(&in->lock);
        return;
    }

    bool full_redraw = in->dropped_frame;
    struct vo_frame *frame = vo_frame_ref(in->current_frame);
    if (frame)
        in->dropped_frame = false;
    // Stack-allocated dummy: used when there is no current frame, so the
    // driver can still clear/redraw the window.
    struct vo_frame dummy = {0};
    if (!frame)
        frame = &dummy;
    frame->redraw = !full_redraw; // unconditionally redraw if it was dropped
    frame->repeat = false;
    frame->still = true;
    frame->pts = 0;
    frame->duration = -1;
    mp_mutex_unlock(&in->lock);

    vo->driver->draw_frame(vo, frame);
    vo->driver->flip_page(vo);

    // Never talloc_free the stack dummy; FRAMEOWNER drivers take ownership.
    if (frame != &dummy && !(vo->driver->caps & VO_CAP_FRAMEOWNER))
        talloc_free(frame);
}
1114 | | |
1115 | | static struct mp_image *get_image_vo(void *ctx, int imgfmt, int w, int h, |
1116 | | int stride_align, int flags) |
1117 | 0 | { |
1118 | 0 | struct vo *vo = ctx; |
1119 | 0 | return vo->driver->get_image(vo, imgfmt, w, h, stride_align, flags); |
1120 | 0 | } |
1121 | | |
// Main loop of the dedicated VO thread: runs driver preinit (synchronized
// with the creator via a rendezvous barrier), then alternates between
// dispatch processing, driver event polling, frame rendering, redraws, and
// timed sleeping, until termination is requested.
static MP_THREAD_VOID vo_thread(void *ptr)
{
    struct vo *vo = ptr;
    struct vo_internal *in = vo->in;
    bool vo_paused = false;

    mp_thread_set_name("vo");

    // Set up the direct-rendering helper if the driver can allocate images.
    if (vo->driver->get_image) {
        in->dr_helper = dr_helper_create(in->dispatch, get_image_vo, vo);
        dr_helper_acquire_thread(in->dr_helper);
    }

    int r = vo->driver->preinit(vo) ? -1 : 0;
    mp_rendezvous(vo, r); // init barrier
    if (r < 0)
        goto done;

    read_opts(vo);
    update_display_fps(vo);
    vo_event(vo, VO_EVENT_WIN_STATE);

    while (1) {
        mp_dispatch_queue_process(vo->in->dispatch, 0);
        if (in->terminate)
            break;
        stats_event(in->stats, "iterations");
        vo->driver->control(vo, VOCTRL_CHECK_EVENTS, NULL);
        bool working = render_frame(vo);
        int64_t now = mp_time_ns();
        // If still working, poll again immediately; otherwise sleep (almost)
        // indefinitely until something wakes us.
        int64_t wait_until = now + MP_TIME_S_TO_NS(working ? 0 : 1000);
        bool wakeup_on_done = false;
        int64_t wakeup_core_after = 0;

        mp_mutex_lock(&in->lock);
        if (in->wakeup_pts) {
            if (in->wakeup_pts > now) {
                wait_until = MPMIN(wait_until, in->wakeup_pts);
            } else {
                // Deadline passed: clear it and notify the playback core.
                in->wakeup_pts = 0;
                wakeup_core(vo);
            }
        }
        if (vo->want_redraw && !in->want_redraw) {
            in->want_redraw = true;
            wakeup_core(vo);
        }
        // Honor vo_request_wakeup_on_done() only once the VO is truly idle.
        if ((!working && !in->rendering && !in->frame_queued) && in->wakeup_on_done) {
            // At this point we know VO is going to sleep
            int64_t frame_end = get_current_frame_end(vo);
            if (frame_end >= 0)
                wakeup_core_after = frame_end;
            wakeup_on_done = true;
            in->wakeup_on_done = false;
        }
        vo->want_redraw = false;
        bool redraw = in->request_redraw;
        bool send_reset = in->send_reset;
        in->send_reset = false;
        bool send_pause = in->paused != vo_paused;
        vo_paused = in->paused;
        mp_mutex_unlock(&in->lock);

        // Deliver deferred driver controls outside the lock.
        if (send_reset)
            vo->driver->control(vo, VOCTRL_RESET, NULL);
        if (send_pause)
            vo->driver->control(vo, vo_paused ? VOCTRL_PAUSE : VOCTRL_RESUME, NULL);
        if (wait_until > now && redraw) {
            vo->driver->control(vo, VOCTRL_REDRAW, NULL);
            do_redraw(vo); // now is a good time
            continue;
        }
        if (vo->want_redraw) // might have been set by VOCTRLs
            wait_until = 0;

        if (wait_until <= now)
            continue;

        if (wakeup_on_done) {
            // At this point wait_until should be longer than frame duration
            if (wakeup_core_after >= 0 && wait_until >= wakeup_core_after) {
                wait_vo(vo, wakeup_core_after);
                mp_mutex_lock(&in->lock);
                in->need_wakeup = true;
                mp_mutex_unlock(&in->lock);
            }
            wakeup_core(vo);
        }

        wait_vo(vo, wait_until);
    }
    forget_frames(vo); // implicitly synchronized
    talloc_free(in->current_frame);
    in->current_frame = NULL;
    vo->driver->uninit(vo);
done:
    TA_FREEP(&in->dr_helper);
    MP_THREAD_RETURN();
}
1221 | | |
1222 | | void vo_set_paused(struct vo *vo, bool paused) |
1223 | 36.4k | { |
1224 | 36.4k | struct vo_internal *in = vo->in; |
1225 | 36.4k | mp_mutex_lock(&in->lock); |
1226 | 36.4k | if (in->paused != paused) { |
1227 | 879 | in->paused = paused; |
1228 | 879 | if (in->paused && in->dropped_frame) { |
1229 | 0 | in->request_redraw = true; |
1230 | 0 | wakeup_core(vo); |
1231 | 0 | } |
1232 | 879 | reset_vsync_timings(vo); |
1233 | 879 | wakeup_locked(vo); |
1234 | 879 | } |
1235 | 36.4k | mp_mutex_unlock(&in->lock); |
1236 | 36.4k | } |
1237 | | |
1238 | | int64_t vo_get_drop_count(struct vo *vo) |
1239 | 24 | { |
1240 | 24 | mp_mutex_lock(&vo->in->lock); |
1241 | 24 | int64_t r = vo->in->drop_count; |
1242 | 24 | mp_mutex_unlock(&vo->in->lock); |
1243 | 24 | return r; |
1244 | 24 | } |
1245 | | |
1246 | | void vo_increment_drop_count(struct vo *vo, int64_t n) |
1247 | 0 | { |
1248 | 0 | mp_mutex_lock(&vo->in->lock); |
1249 | 0 | vo->in->drop_count += n; |
1250 | 0 | mp_mutex_unlock(&vo->in->lock); |
1251 | 0 | } |
1252 | | |
1253 | | // Make the VO redraw the OSD at some point in the future. |
1254 | | void vo_redraw(struct vo *vo) |
1255 | 15.9k | { |
1256 | 15.9k | struct vo_internal *in = vo->in; |
1257 | 15.9k | mp_mutex_lock(&in->lock); |
1258 | 15.9k | if (!in->request_redraw) { |
1259 | 15.9k | in->request_redraw = true; |
1260 | 15.9k | in->want_redraw = false; |
1261 | 15.9k | wakeup_locked(vo); |
1262 | 15.9k | } |
1263 | 15.9k | mp_mutex_unlock(&in->lock); |
1264 | 15.9k | } |
1265 | | |
1266 | | bool vo_want_redraw(struct vo *vo) |
1267 | 268k | { |
1268 | 268k | struct vo_internal *in = vo->in; |
1269 | 268k | mp_mutex_lock(&in->lock); |
1270 | 268k | bool r = in->want_redraw; |
1271 | 268k | mp_mutex_unlock(&in->lock); |
1272 | 268k | return r; |
1273 | 268k | } |
1274 | | |
1275 | | void vo_seek_reset(struct vo *vo) |
1276 | 79.5k | { |
1277 | 79.5k | struct vo_internal *in = vo->in; |
1278 | 79.5k | mp_mutex_lock(&in->lock); |
1279 | 79.5k | forget_frames(vo); |
1280 | 79.5k | reset_vsync_timings(vo); |
1281 | 79.5k | in->send_reset = true; |
1282 | 79.5k | wakeup_locked(vo); |
1283 | 79.5k | mp_mutex_unlock(&in->lock); |
1284 | 79.5k | } |
1285 | | |
1286 | | // Whether at least 1 frame was queued or rendered since last seek or reconfig. |
1287 | | bool vo_has_frame(struct vo *vo) |
1288 | 304k | { |
1289 | 304k | return vo->in->hasframe; |
1290 | 304k | } |
1291 | | |
1292 | | static void run_query_format(void *p) |
1293 | 100k | { |
1294 | 100k | void **pp = p; |
1295 | 100k | struct vo *vo = pp[0]; |
1296 | 100k | uint8_t *list = pp[1]; |
1297 | 55.9M | for (int format = IMGFMT_START; format < IMGFMT_END; format++) |
1298 | 55.8M | list[format - IMGFMT_START] = vo->driver->query_format(vo, format); |
1299 | 100k | } |
1300 | | |
1301 | | // For each item in the list (allocated as uint8_t[IMGFMT_END - IMGFMT_START]), |
1302 | | // set the supported format flags. |
1303 | | void vo_query_formats(struct vo *vo, uint8_t *list) |
1304 | 100k | { |
1305 | 100k | void *p[] = {vo, list}; |
1306 | 100k | mp_dispatch_run(vo->in->dispatch, run_query_format, p); |
1307 | 100k | } |
1308 | | |
1309 | | // Calculate the appropriate source and destination rectangle to |
1310 | | // get a correctly scaled picture, including pan-scan. |
1311 | | // out_src: visible part of the video |
1312 | | // out_dst: area of screen covered by the video source rectangle |
1313 | | // out_osd: OSD size, OSD margins, etc. |
1314 | | // Must be called from the VO thread only. |
1315 | | void vo_get_src_dst_rects(struct vo *vo, struct mp_rect *out_src, |
1316 | | struct mp_rect *out_dst, struct mp_osd_res *out_osd) |
1317 | 13 | { |
1318 | 13 | if (!vo->params) { |
1319 | 0 | *out_src = *out_dst = (struct mp_rect){0}; |
1320 | 0 | *out_osd = (struct mp_osd_res){0}; |
1321 | 0 | return; |
1322 | 0 | } |
1323 | 13 | mp_get_src_dst_rects(vo->log, vo->opts, vo->driver->caps, vo->params, |
1324 | 13 | vo->dwidth, vo->dheight, vo->monitor_par, |
1325 | 13 | out_src, out_dst, out_osd); |
1326 | 13 | } |
1327 | | |
1328 | | // flip_page[_timed] will be called offset_us nanoseconds too early. |
1329 | | // (For vo_vdpau, which does its own timing.) |
1330 | | // num_req_frames set the requested number of requested vo_frame.frames. |
1331 | | // (For vo_gpu interpolation.) |
1332 | | void vo_set_queue_params(struct vo *vo, int64_t offset_ns, int num_req_frames) |
1333 | 0 | { |
1334 | 0 | struct vo_internal *in = vo->in; |
1335 | 0 | mp_mutex_lock(&in->lock); |
1336 | 0 | in->flip_queue_offset = offset_ns; |
1337 | 0 | in->req_frames = MPCLAMP(num_req_frames, 1, VO_MAX_REQ_FRAMES); |
1338 | 0 | mp_mutex_unlock(&in->lock); |
1339 | 0 | } |
1340 | | |
1341 | | int vo_get_num_req_frames(struct vo *vo) |
1342 | 366k | { |
1343 | 366k | struct vo_internal *in = vo->in; |
1344 | 366k | mp_mutex_lock(&in->lock); |
1345 | 366k | int res = in->req_frames; |
1346 | 366k | mp_mutex_unlock(&in->lock); |
1347 | 366k | return res; |
1348 | 366k | } |
1349 | | |
1350 | | double vo_get_vsync_interval(struct vo *vo) |
1351 | 0 | { |
1352 | 0 | struct vo_internal *in = vo->in; |
1353 | 0 | mp_mutex_lock(&in->lock); |
1354 | 0 | double res = vo->in->vsync_interval > 1 ? vo->in->vsync_interval : -1; |
1355 | 0 | mp_mutex_unlock(&in->lock); |
1356 | 0 | return res; |
1357 | 0 | } |
1358 | | |
1359 | | double vo_get_estimated_vsync_interval(struct vo *vo) |
1360 | 0 | { |
1361 | 0 | struct vo_internal *in = vo->in; |
1362 | 0 | mp_mutex_lock(&in->lock); |
1363 | 0 | double res = in->estimated_vsync_interval; |
1364 | 0 | mp_mutex_unlock(&in->lock); |
1365 | 0 | return res; |
1366 | 0 | } |
1367 | | |
1368 | | double vo_get_estimated_vsync_jitter(struct vo *vo) |
1369 | 0 | { |
1370 | 0 | struct vo_internal *in = vo->in; |
1371 | 0 | mp_mutex_lock(&in->lock); |
1372 | 0 | double res = in->estimated_vsync_jitter; |
1373 | 0 | mp_mutex_unlock(&in->lock); |
1374 | 0 | return res; |
1375 | 0 | } |
1376 | | |
1377 | | // Get the time in seconds at after which the currently rendering frame will |
1378 | | // end. Returns positive values if the frame is yet to be finished, negative |
1379 | | // values if it already finished. |
1380 | | // This can only be called while no new frame is queued (after |
1381 | | // vo_is_ready_for_frame). Returns 0 for non-display synced frames, or if the |
1382 | | // deadline for continuous display was missed. |
1383 | | double vo_get_delay(struct vo *vo) |
1384 | 0 | { |
1385 | 0 | struct vo_internal *in = vo->in; |
1386 | 0 | mp_mutex_lock(&in->lock); |
1387 | 0 | int64_t res = get_display_synced_frame_end(vo); |
1388 | 0 | mp_mutex_unlock(&in->lock); |
1389 | 0 | return res ? MP_TIME_NS_TO_S(res - mp_time_ns()) : 0; |
1390 | 0 | } |
1391 | | |
1392 | | void vo_discard_timing_info(struct vo *vo) |
1393 | 0 | { |
1394 | 0 | struct vo_internal *in = vo->in; |
1395 | 0 | mp_mutex_lock(&in->lock); |
1396 | 0 | reset_vsync_timings(vo); |
1397 | 0 | mp_mutex_unlock(&in->lock); |
1398 | 0 | } |
1399 | | |
1400 | | int64_t vo_get_delayed_count(struct vo *vo) |
1401 | 0 | { |
1402 | 0 | struct vo_internal *in = vo->in; |
1403 | 0 | mp_mutex_lock(&in->lock); |
1404 | 0 | int64_t res = vo->in->delayed_count; |
1405 | 0 | mp_mutex_unlock(&in->lock); |
1406 | 0 | return res; |
1407 | 0 | } |
1408 | | |
1409 | | double vo_get_display_fps(struct vo *vo) |
1410 | 0 | { |
1411 | 0 | struct vo_internal *in = vo->in; |
1412 | 0 | mp_mutex_lock(&in->lock); |
1413 | 0 | double res = vo->in->display_fps; |
1414 | 0 | mp_mutex_unlock(&in->lock); |
1415 | 0 | return res; |
1416 | 0 | } |
1417 | | |
1418 | | void * vo_get_display_swapchain(struct vo *vo) |
1419 | 0 | { |
1420 | 0 | return vo->display_swapchain; |
1421 | 0 | } |
1422 | | |
1423 | | // Set specific event flags, and wakeup the playback core if needed. |
1424 | | // vo_query_and_reset_events() can retrieve the events again. |
1425 | | void vo_event(struct vo *vo, int event) |
1426 | 43.8k | { |
1427 | 43.8k | struct vo_internal *in = vo->in; |
1428 | 43.8k | mp_mutex_lock(&in->lock); |
1429 | 43.8k | if ((in->queued_events & event & VO_EVENTS_USER) != (event & VO_EVENTS_USER)) |
1430 | 43.8k | wakeup_core(vo); |
1431 | 43.8k | if (event) |
1432 | 43.8k | wakeup_locked(vo); |
1433 | 43.8k | in->queued_events |= event; |
1434 | 43.8k | in->internal_events |= event; |
1435 | 43.8k | mp_mutex_unlock(&in->lock); |
1436 | 43.8k | } |
1437 | | |
1438 | | // Check event flags set with vo_event(). Return the mask of events that was |
1439 | | // set and included in the events parameter. Clear the returned events. |
1440 | | int vo_query_and_reset_events(struct vo *vo, int events) |
1441 | 881k | { |
1442 | 881k | struct vo_internal *in = vo->in; |
1443 | 881k | mp_mutex_lock(&in->lock); |
1444 | 881k | int r = in->queued_events & events; |
1445 | 881k | in->queued_events &= ~(unsigned)r; |
1446 | 881k | mp_mutex_unlock(&in->lock); |
1447 | 881k | return r; |
1448 | 881k | } |
1449 | | |
1450 | | struct mp_image *vo_get_current_frame(struct vo *vo) |
1451 | 0 | { |
1452 | 0 | struct vo_internal *in = vo->in; |
1453 | 0 | mp_mutex_lock(&in->lock); |
1454 | 0 | struct mp_image *r = NULL; |
1455 | 0 | if (vo->in->current_frame) |
1456 | 0 | r = mp_image_new_ref(vo->in->current_frame->current); |
1457 | 0 | mp_mutex_unlock(&in->lock); |
1458 | 0 | return r; |
1459 | 0 | } |
1460 | | |
1461 | | struct vo_frame *vo_get_current_vo_frame(struct vo *vo) |
1462 | 0 | { |
1463 | 0 | struct vo_internal *in = vo->in; |
1464 | 0 | mp_mutex_lock(&in->lock); |
1465 | 0 | struct vo_frame *r = vo_frame_ref(vo->in->current_frame); |
1466 | 0 | mp_mutex_unlock(&in->lock); |
1467 | 0 | return r; |
1468 | 0 | } |
1469 | | |
1470 | | struct mp_image *vo_get_image(struct vo *vo, int imgfmt, int w, int h, |
1471 | | int stride_align, int flags) |
1472 | 13.2k | { |
1473 | 13.2k | if (vo->driver->get_image_ts) |
1474 | 0 | return vo->driver->get_image_ts(vo, imgfmt, w, h, stride_align, flags); |
1475 | 13.2k | if (vo->in->dr_helper) |
1476 | 0 | return dr_helper_get_image(vo->in->dr_helper, imgfmt, w, h, stride_align, flags); |
1477 | 13.2k | return NULL; |
1478 | 13.2k | } |
1479 | | |
1480 | | static void destroy_frame(void *p) |
1481 | 748k | { |
1482 | 748k | struct vo_frame *frame = p; |
1483 | 1.49M | for (int n = 0; n < frame->num_frames; n++) |
1484 | 748k | talloc_free(frame->frames[n]); |
1485 | 748k | } |
1486 | | |
1487 | | // Return a new reference to the given frame. The image pointers are also new |
1488 | | // references. Calling talloc_free() on the frame unrefs all currently set |
1489 | | // image references. (Assuming current==frames[0].) |
1490 | | struct vo_frame *vo_frame_ref(struct vo_frame *frame) |
1491 | 748k | { |
1492 | 748k | if (!frame) |
1493 | 339 | return NULL; |
1494 | | |
1495 | 748k | struct vo_frame *new = talloc_ptrtype(NULL, new); |
1496 | 748k | talloc_set_destructor(new, destroy_frame); |
1497 | 748k | *new = *frame; |
1498 | 1.49M | for (int n = 0; n < frame->num_frames; n++) |
1499 | 748k | new->frames[n] = mp_image_new_ref(frame->frames[n]); |
1500 | 748k | new->current = new->num_frames ? new->frames[0] : NULL; |
1501 | 748k | return new; |
1502 | 748k | } |
1503 | | |
1504 | | /* |
1505 | | * lookup an integer in a table, table must have 0 as the last key |
1506 | | * param: key key to search for |
1507 | | * returns translation corresponding to key or "to" value of last mapping |
1508 | | * if not found. |
1509 | | */ |
1510 | | int lookup_keymap_table(const struct mp_keymap *map, int key) |
1511 | 0 | { |
1512 | 0 | while (map->from && map->from != key) |
1513 | 0 | map++; |
1514 | 0 | return map->to; |
1515 | 0 | } |
1516 | | |
1517 | | struct mp_image_params vo_get_current_params(struct vo *vo) |
1518 | 3.73k | { |
1519 | 3.73k | struct mp_image_params p = {0}; |
1520 | 3.73k | mp_mutex_lock(&vo->params_mutex); |
1521 | 3.73k | if (vo->params) |
1522 | 3.51k | p = *vo->params; |
1523 | 3.73k | mp_mutex_unlock(&vo->params_mutex); |
1524 | 3.73k | return p; |
1525 | 3.73k | } |
1526 | | |
1527 | | struct mp_image_params vo_get_target_params(struct vo *vo) |
1528 | 0 | { |
1529 | 0 | struct mp_image_params p = {0}; |
1530 | 0 | mp_mutex_lock(&vo->params_mutex); |
1531 | 0 | if (vo->target_params) |
1532 | 0 | p = *vo->target_params; |
1533 | 0 | mp_mutex_unlock(&vo->params_mutex); |
1534 | 0 | return p; |
1535 | 0 | } |