Coverage Report

Created: 2025-06-24 07:38

/src/mpv/video/out/vo_gpu_next.c
Line | Count | Source
1
/*
2
 * Copyright (C) 2021 Niklas Haas
3
 *
4
 * This file is part of mpv.
5
 *
6
 * mpv is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2.1 of the License, or (at your option) any later version.
10
 *
11
 * mpv is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
 * GNU Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with mpv.  If not, see <http://www.gnu.org/licenses/>.
18
 */
19
20
#include <sys/stat.h>
21
#include <time.h>
22
23
#include <libplacebo/colorspace.h>
24
#include <libplacebo/options.h>
25
#include <libplacebo/renderer.h>
26
#include <libplacebo/shaders/lut.h>
27
#include <libplacebo/shaders/icc.h>
28
#include <libplacebo/utils/libav.h>
29
#include <libplacebo/utils/frame_queue.h>
30
31
#include "config.h"
32
#include "common/common.h"
33
#include "misc/io_utils.h"
34
#include "options/m_config.h"
35
#include "options/options.h"
36
#include "options/path.h"
37
#include "osdep/io.h"
38
#include "osdep/threads.h"
39
#include "stream/stream.h"
40
#include "sub/draw_bmp.h"
41
#include "video/fmt-conversion.h"
42
#include "video/mp_image.h"
43
#include "video/out/placebo/ra_pl.h"
44
#include "placebo/utils.h"
45
#include "gpu/context.h"
46
#include "gpu/hwdec.h"
47
#include "gpu/video.h"
48
#include "gpu/video_shaders.h"
49
#include "sub/osd.h"
50
#include "gpu_next/context.h"
51
52
#if HAVE_GL && defined(PL_HAVE_OPENGL)
53
#include <libplacebo/opengl.h>
54
#include "video/out/opengl/ra_gl.h"
55
#endif
56
57
#if HAVE_D3D11 && defined(PL_HAVE_D3D11)
58
#include <libplacebo/d3d11.h>
59
#include "video/out/d3d11/ra_d3d11.h"
60
#include "osdep/windows_utils.h"
61
#endif
62
63
64
struct osd_entry {
65
    pl_tex tex;
66
    struct pl_overlay_part *parts;
67
    int num_parts;
68
};
69
70
struct osd_state {
71
    struct osd_entry entries[MAX_OSD_PARTS];
72
    struct pl_overlay overlays[MAX_OSD_PARTS];
73
};
74
75
struct scaler_params {
76
    struct pl_filter_config config;
77
};
78
79
struct user_hook {
80
    char *path;
81
    const struct pl_hook *hook;
82
};
83
84
struct user_lut {
85
    char *opt;
86
    char *path;
87
    int type;
88
    struct pl_custom_lut *lut;
89
};
90
91
struct frame_info {
92
    int count;
93
    struct pl_dispatch_info info[VO_PASS_PERF_MAX];
94
};
95
96
struct cache {
97
    struct mp_log *log;
98
    struct mpv_global *global;
99
    char *dir;
100
    const char *name;
101
    size_t size_limit;
102
    pl_cache cache;
103
};
104
105
struct priv {
106
    struct mp_log *log;
107
    struct mpv_global *global;
108
    struct ra_ctx *ra_ctx;
109
    struct gpu_ctx *context;
110
    struct ra_hwdec_ctx hwdec_ctx;
111
    struct ra_hwdec_mapper *hwdec_mapper;
112
113
    // Allocated DR buffers
114
    mp_mutex dr_lock;
115
    pl_buf *dr_buffers;
116
    int num_dr_buffers;
117
118
    pl_log pllog;
119
    pl_gpu gpu;
120
    pl_renderer rr;
121
    pl_queue queue;
122
    pl_swapchain sw;
123
    pl_fmt osd_fmt[SUBBITMAP_COUNT];
124
    pl_tex *sub_tex;
125
    int num_sub_tex;
126
127
    struct mp_rect src, dst;
128
    struct mp_osd_res osd_res;
129
    struct osd_state osd_state;
130
131
    uint64_t last_id;
132
    uint64_t osd_sync;
133
    double last_pts;
134
    bool is_interpolated;
135
    bool want_reset;
136
    bool frame_pending;
137
138
    pl_options pars;
139
    struct m_config_cache *opts_cache;
140
    struct m_config_cache *next_opts_cache;
141
    struct gl_next_opts *next_opts;
142
    struct cache shader_cache, icc_cache;
143
    struct mp_csp_equalizer_state *video_eq;
144
    struct scaler_params scalers[SCALER_COUNT];
145
    const struct pl_hook **hooks; // storage for `params.hooks`
146
    enum pl_color_levels output_levels;
147
148
    struct pl_icc_params icc_params;
149
    char *icc_path;
150
    pl_icc_object icc_profile;
151
152
    // Cached shaders, preserved across options updates
153
    struct user_hook *user_hooks;
154
    int num_user_hooks;
155
156
    // Performance data of last frame
157
    struct frame_info perf_fresh;
158
    struct frame_info perf_redraw;
159
160
    struct mp_image_params target_params;
161
};
162
163
static void update_render_options(struct vo *vo);
164
static void update_lut(struct priv *p, struct user_lut *lut);
165
166
struct gl_next_opts {
167
    bool delayed_peak;
168
    int border_background;
169
    float corner_rounding;
170
    bool inter_preserve;
171
    struct user_lut lut;
172
    struct user_lut image_lut;
173
    struct user_lut target_lut;
174
    int target_hint;
175
    char **raw_opts;
176
};
177
178
const struct m_opt_choice_alternatives lut_types[] = {
179
    {"auto",        PL_LUT_UNKNOWN},
180
    {"native",      PL_LUT_NATIVE},
181
    {"normalized",  PL_LUT_NORMALIZED},
182
    {"conversion",  PL_LUT_CONVERSION},
183
    {0}
184
};
185
186
#define OPT_BASE_STRUCT struct gl_next_opts
187
const struct m_sub_options gl_next_conf = {
188
    .opts = (const struct m_option[]) {
189
        {"allow-delayed-peak-detect", OPT_BOOL(delayed_peak)},
190
        {"border-background", OPT_CHOICE(border_background,
191
            {"none",  BACKGROUND_NONE},
192
            {"color", BACKGROUND_COLOR},
193
            {"tiles", BACKGROUND_TILES})},
194
        {"corner-rounding", OPT_FLOAT(corner_rounding), M_RANGE(0, 1)},
195
        {"interpolation-preserve", OPT_BOOL(inter_preserve)},
196
        {"lut", OPT_STRING(lut.opt), .flags = M_OPT_FILE},
197
        {"lut-type", OPT_CHOICE_C(lut.type, lut_types)},
198
        {"image-lut", OPT_STRING(image_lut.opt), .flags = M_OPT_FILE},
199
        {"image-lut-type", OPT_CHOICE_C(image_lut.type, lut_types)},
200
        {"target-lut", OPT_STRING(target_lut.opt), .flags = M_OPT_FILE},
201
        {"target-colorspace-hint", OPT_CHOICE(target_hint, {"auto", -1}, {"no", 0}, {"yes", 1})},
202
        // No `target-lut-type` because we don't support non-RGB targets
203
        {"libplacebo-opts", OPT_KEYVALUELIST(raw_opts)},
204
        {0},
205
    },
206
    .defaults = &(struct gl_next_opts) {
207
        .border_background = BACKGROUND_COLOR,
208
        .inter_preserve = true,
209
    },
210
    .size = sizeof(struct gl_next_opts),
211
    .change_flags = UPDATE_VIDEO,
212
};
213
214
static pl_buf get_dr_buf(struct priv *p, const uint8_t *ptr)
215
0
{
216
0
    mp_mutex_lock(&p->dr_lock);
217
218
0
    for (int i = 0; i < p->num_dr_buffers; i++) {
219
0
        pl_buf buf = p->dr_buffers[i];
220
0
        if (ptr >= buf->data && ptr < buf->data + buf->params.size) {
221
0
            mp_mutex_unlock(&p->dr_lock);
222
0
            return buf;
223
0
        }
224
0
    }
225
226
0
    mp_mutex_unlock(&p->dr_lock);
227
0
    return NULL;
228
0
}
229
230
static void free_dr_buf(void *opaque, uint8_t *data)
231
0
{
232
0
    struct priv *p = opaque;
233
0
    mp_mutex_lock(&p->dr_lock);
234
235
0
    for (int i = 0; i < p->num_dr_buffers; i++) {
236
0
        if (p->dr_buffers[i]->data == data) {
237
0
            pl_buf_destroy(p->gpu, &p->dr_buffers[i]);
238
0
            MP_TARRAY_REMOVE_AT(p->dr_buffers, p->num_dr_buffers, i);
239
0
            mp_mutex_unlock(&p->dr_lock);
240
0
            return;
241
0
        }
242
0
    }
243
244
0
    MP_ASSERT_UNREACHABLE();
245
0
}
246
247
static struct mp_image *get_image(struct vo *vo, int imgfmt, int w, int h,
248
                                  int stride_align, int flags)
249
0
{
250
0
    struct priv *p = vo->priv;
251
0
    pl_gpu gpu = p->gpu;
252
0
    if (!gpu->limits.thread_safe || !gpu->limits.max_mapped_size)
253
0
        return NULL;
254
255
0
    if ((flags & VO_DR_FLAG_HOST_CACHED) && !gpu->limits.host_cached)
256
0
        return NULL;
257
258
0
    stride_align = mp_lcm(stride_align, gpu->limits.align_tex_xfer_pitch);
259
0
    stride_align = mp_lcm(stride_align, gpu->limits.align_tex_xfer_offset);
260
0
    int size = mp_image_get_alloc_size(imgfmt, w, h, stride_align);
261
0
    if (size < 0)
262
0
        return NULL;
263
264
0
    pl_buf buf = pl_buf_create(gpu, &(struct pl_buf_params) {
265
0
        .memory_type = PL_BUF_MEM_HOST,
266
0
        .host_mapped = true,
267
0
        .size = size + stride_align,
268
0
    });
269
270
0
    if (!buf)
271
0
        return NULL;
272
273
0
    struct mp_image *mpi = mp_image_from_buffer(imgfmt, w, h, stride_align,
274
0
                                                buf->data, buf->params.size,
275
0
                                                p, free_dr_buf);
276
0
    if (!mpi) {
277
0
        pl_buf_destroy(gpu, &buf);
278
0
        return NULL;
279
0
    }
280
281
0
    mp_mutex_lock(&p->dr_lock);
282
0
    MP_TARRAY_APPEND(p, p->dr_buffers, p->num_dr_buffers, buf);
283
0
    mp_mutex_unlock(&p->dr_lock);
284
285
0
    return mpi;
286
0
}
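As a concrete illustration of the alignment logic above (numbers are made up): if the caller requests a 64-byte stride alignment and the GPU reports align_tex_xfer_pitch = 256, then mp_lcm(64, 256) = 256 is used, and the host-mapped buffer is over-allocated by stride_align bytes on top of the size reported by mp_image_get_alloc_size().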
287
288
static void update_overlays(struct vo *vo, struct mp_osd_res res,
289
                            int flags, enum pl_overlay_coords coords,
290
                            struct osd_state *state, struct pl_frame *frame,
291
                            struct mp_image *src)
292
0
{
293
0
    struct priv *p = vo->priv;
294
0
    double pts = src ? src->pts : 0;
295
0
    struct sub_bitmap_list *subs = osd_render(vo->osd, res, pts, flags, mp_draw_sub_formats);
296
297
0
    frame->overlays = state->overlays;
298
0
    frame->num_overlays = 0;
299
300
0
    for (int n = 0; n < subs->num_items; n++) {
301
0
        const struct sub_bitmaps *item = subs->items[n];
302
0
        if (!item->num_parts || !item->packed)
303
0
            continue;
304
0
        struct osd_entry *entry = &state->entries[item->render_index];
305
0
        pl_fmt tex_fmt = p->osd_fmt[item->format];
306
0
        if (!entry->tex)
307
0
            MP_TARRAY_POP(p->sub_tex, p->num_sub_tex, &entry->tex);
308
0
        bool ok = pl_tex_recreate(p->gpu, &entry->tex, &(struct pl_tex_params) {
309
0
            .format = tex_fmt,
310
0
            .w = MPMAX(item->packed_w, entry->tex ? entry->tex->params.w : 0),
311
0
            .h = MPMAX(item->packed_h, entry->tex ? entry->tex->params.h : 0),
312
0
            .host_writable = true,
313
0
            .sampleable = true,
314
0
        });
315
0
        if (!ok) {
316
0
            MP_ERR(vo, "Failed recreating OSD texture!\n");
317
0
            break;
318
0
        }
319
0
        ok = pl_tex_upload(p->gpu, &(struct pl_tex_transfer_params) {
320
0
            .tex        = entry->tex,
321
0
            .rc         = { .x1 = item->packed_w, .y1 = item->packed_h, },
322
0
            .row_pitch  = item->packed->stride[0],
323
0
            .ptr        = item->packed->planes[0],
324
0
        });
325
0
        if (!ok) {
326
0
            MP_ERR(vo, "Failed uploading OSD texture!\n");
327
0
            break;
328
0
        }
329
330
0
        entry->num_parts = 0;
331
0
        for (int i = 0; i < item->num_parts; i++) {
332
0
            const struct sub_bitmap *b = &item->parts[i];
333
0
            if (b->dw == 0 || b->dh == 0)
334
0
                continue;
335
0
            uint32_t c = b->libass.color;
336
0
            struct pl_overlay_part part = {
337
0
                .src = { b->src_x, b->src_y, b->src_x + b->w, b->src_y + b->h },
338
0
                .dst = { b->x, b->y, b->x + b->dw, b->y + b->dh },
339
0
                .color = {
340
0
                    (c >> 24) / 255.0,
341
0
                    ((c >> 16) & 0xFF) / 255.0,
342
0
                    ((c >> 8) & 0xFF) / 255.0,
343
0
                    1.0 - (c & 0xFF) / 255.0,
344
0
                }
345
0
            };
346
0
            MP_TARRAY_APPEND(p, entry->parts, entry->num_parts, part);
347
0
        }
348
349
0
        struct pl_overlay *ol = &state->overlays[frame->num_overlays++];
350
0
        *ol = (struct pl_overlay) {
351
0
            .tex = entry->tex,
352
0
            .parts = entry->parts,
353
0
            .num_parts = entry->num_parts,
354
0
            .color = {
355
0
                .primaries = PL_COLOR_PRIM_BT_709,
356
0
                .transfer = PL_COLOR_TRC_SRGB,
357
0
            },
358
0
            .coords = coords,
359
0
        };
360
361
0
        switch (item->format) {
362
0
        case SUBBITMAP_BGRA:
363
0
            ol->mode = PL_OVERLAY_NORMAL;
364
0
            ol->repr.alpha = PL_ALPHA_PREMULTIPLIED;
365
            // Infer bitmap colorspace from source
366
0
            if (src) {
367
0
                ol->color = src->params.color;
368
                // Seems like HDR subtitles are targeting SDR white
369
0
                if (pl_color_transfer_is_hdr(ol->color.transfer)) {
370
0
                    ol->color.hdr = (struct pl_hdr_metadata) {
371
0
                        .max_luma = PL_COLOR_SDR_WHITE,
372
0
                    };
373
0
                }
374
0
            }
375
0
            break;
376
0
        case SUBBITMAP_LIBASS:
377
0
            if (src && item->video_color_space && !pl_color_space_is_hdr(&src->params.color))
378
0
                ol->color = src->params.color;
379
0
            ol->mode = PL_OVERLAY_MONOCHROME;
380
0
            ol->repr.alpha = PL_ALPHA_INDEPENDENT;
381
0
            break;
382
0
        }
383
0
    }
384
385
0
    talloc_free(subs);
386
0
}
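Following the unpacking arithmetic above, a packed subtitle color of c = 0x00FF007F becomes (r, g, b) = (0.0, 1.0, 0.0) with alpha = 1.0 - 0x7F/255 ≈ 0.50, i.e. pure green at roughly half opacity; the lowest byte holds transparency rather than opacity, which is why it is inverted.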
387
388
struct frame_priv {
389
    struct vo *vo;
390
    struct osd_state subs;
391
    uint64_t osd_sync;
392
    struct ra_hwdec *hwdec;
393
};
394
395
static int plane_data_from_imgfmt(struct pl_plane_data out_data[4],
396
                                  struct pl_bit_encoding *out_bits,
397
                                  enum mp_imgfmt imgfmt)
398
0
{
399
0
    struct mp_imgfmt_desc desc = mp_imgfmt_get_desc(imgfmt);
400
0
    if (!desc.num_planes || !(desc.flags & MP_IMGFLAG_HAS_COMPS))
401
0
        return 0;
402
403
0
    if (desc.flags & MP_IMGFLAG_HWACCEL)
404
0
        return 0; // HW-accelerated frames need to be mapped differently
405
406
0
    if (!(desc.flags & MP_IMGFLAG_NE))
407
0
        return 0; // GPU endianness follows the host's
408
409
0
    if (desc.flags & MP_IMGFLAG_PAL)
410
0
        return 0; // Palette formats (currently) not supported in libplacebo
411
412
0
    if ((desc.flags & MP_IMGFLAG_TYPE_FLOAT) && (desc.flags & MP_IMGFLAG_YUV))
413
0
        return 0; // Floating-point YUV (currently) unsupported
414
415
0
    bool has_bits = false;
416
0
    bool any_padded = false;
417
418
0
    for (int p = 0; p < desc.num_planes; p++) {
419
0
        struct pl_plane_data *data = &out_data[p];
420
0
        struct mp_imgfmt_comp_desc sorted[MP_NUM_COMPONENTS];
421
0
        int num_comps = 0;
422
0
        if (desc.bpp[p] % 8)
423
0
            return 0; // Pixel size is not byte-aligned
424
425
0
        for (int c = 0; c < mp_imgfmt_desc_get_num_comps(&desc); c++) {
426
0
            if (desc.comps[c].plane != p)
427
0
                continue;
428
429
0
            data->component_map[num_comps] = c;
430
0
            sorted[num_comps] = desc.comps[c];
431
0
            num_comps++;
432
433
            // Sort components by offset order, while keeping track of the
434
            // semantic mapping in `data->component_map`
435
0
            for (int i = num_comps - 1; i > 0; i--) {
436
0
                if (sorted[i].offset >= sorted[i - 1].offset)
437
0
                    break;
438
0
                MPSWAP(struct mp_imgfmt_comp_desc, sorted[i], sorted[i - 1]);
439
0
                MPSWAP(int, data->component_map[i], data->component_map[i - 1]);
440
0
            }
441
0
        }
442
443
0
        uint64_t total_bits = 0;
444
445
        // Fill in the pl_plane_data fields for each component
446
0
        memset(data->component_size, 0, sizeof(data->component_size));
447
0
        for (int c = 0; c < num_comps; c++) {
448
0
            data->component_size[c] = sorted[c].size;
449
0
            data->component_pad[c] = sorted[c].offset - total_bits;
450
0
            total_bits += data->component_pad[c] + data->component_size[c];
451
0
            any_padded |= sorted[c].pad;
452
453
            // Ignore bit encoding of alpha channel
454
0
            if (!out_bits || data->component_map[c] == PL_CHANNEL_A)
455
0
                continue;
456
457
0
            struct pl_bit_encoding bits = {
458
0
                .sample_depth = data->component_size[c],
459
0
                .color_depth = sorted[c].size - abs(sorted[c].pad),
460
0
                .bit_shift = MPMAX(sorted[c].pad, 0),
461
0
            };
462
463
0
            if (!has_bits) {
464
0
                *out_bits = bits;
465
0
                has_bits = true;
466
0
            } else {
467
0
                if (!pl_bit_encoding_equal(out_bits, &bits)) {
468
                    // Bit encoding differs between components/planes,
469
                    // cannot handle this
470
0
                    *out_bits = (struct pl_bit_encoding) {0};
471
0
                    out_bits = NULL;
472
0
                }
473
0
            }
474
0
        }
475
476
0
        data->pixel_stride = desc.bpp[p] / 8;
477
0
        data->type = (desc.flags & MP_IMGFLAG_TYPE_FLOAT)
478
0
                            ? PL_FMT_FLOAT
479
0
                            : PL_FMT_UNORM;
480
0
    }
481
482
0
    if (any_padded && !out_bits)
483
0
        return 0; // can't handle padded components without `pl_bit_encoding`
484
485
0
    return desc.num_planes;
486
0
}
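The sorting step commented above ("Sort components by offset order ...") is an insertion sort that keeps a parallel map from storage order back to the semantic component index. A minimal self-contained sketch of the same idea, using plain ints and made-up offsets instead of mpv's descriptor types:

#include <stdio.h>

int main(void)
{
    // Hypothetical plane: semantic components 0, 1, 2 stored at bit offsets
    // 16, 0 and 8 respectively.
    int offset[3] = {16, 0, 8};
    int map[3]    = {0, 1, 2}; // semantic index of each stored component

    for (int n = 1; n < 3; n++) {
        for (int i = n; i > 0 && offset[i] < offset[i - 1]; i--) {
            int t = offset[i]; offset[i] = offset[i - 1]; offset[i - 1] = t;
            t = map[i]; map[i] = map[i - 1]; map[i - 1] = t;
        }
    }

    // Prints offsets 0, 8, 16 mapped back to semantic components 1, 2, 0.
    for (int i = 0; i < 3; i++)
        printf("offset %2d -> semantic component %d\n", offset[i], map[i]);
    return 0;
}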
487
488
static bool hwdec_reconfig(struct priv *p, struct ra_hwdec *hwdec,
489
                           const struct mp_image_params *par)
490
0
{
491
0
    if (p->hwdec_mapper) {
492
0
        if (mp_image_params_static_equal(par, &p->hwdec_mapper->src_params)) {
493
0
            p->hwdec_mapper->src_params.repr.dovi = par->repr.dovi;
494
0
            p->hwdec_mapper->dst_params.repr.dovi = par->repr.dovi;
495
0
            p->hwdec_mapper->src_params.color.hdr = par->color.hdr;
496
0
            p->hwdec_mapper->dst_params.color.hdr = par->color.hdr;
497
0
            return p->hwdec_mapper;
498
0
        } else {
499
0
            ra_hwdec_mapper_free(&p->hwdec_mapper);
500
0
        }
501
0
    }
502
503
0
    p->hwdec_mapper = ra_hwdec_mapper_create(hwdec, par);
504
0
    if (!p->hwdec_mapper) {
505
0
        MP_ERR(p, "Initializing texture for hardware decoding failed.\n");
506
0
        return NULL;
507
0
    }
508
509
0
    return p->hwdec_mapper;
510
0
}
511
512
// For RAs not based on ra_pl, this creates a new pl_tex wrapper
513
static pl_tex hwdec_get_tex(struct priv *p, int n)
514
0
{
515
0
    struct ra_tex *ratex = p->hwdec_mapper->tex[n];
516
0
    struct ra *ra = p->hwdec_mapper->ra;
517
0
    if (ra_pl_get(ra))
518
0
        return (pl_tex) ratex->priv;
519
520
0
#if HAVE_GL && defined(PL_HAVE_OPENGL)
521
0
    if (ra_is_gl(ra) && pl_opengl_get(p->gpu)) {
522
0
        struct pl_opengl_wrap_params par = {
523
0
            .width = ratex->params.w,
524
0
            .height = ratex->params.h,
525
0
        };
526
527
0
        ra_gl_get_format(ratex->params.format, &par.iformat,
528
0
                         &(GLenum){0}, &(GLenum){0});
529
0
        ra_gl_get_raw_tex(ra, ratex, &par.texture, &par.target);
530
0
        return pl_opengl_wrap(p->gpu, &par);
531
0
    }
532
0
#endif
533
534
#if HAVE_D3D11 && defined(PL_HAVE_D3D11)
535
    if (ra_is_d3d11(ra)) {
536
        int array_slice = 0;
537
        ID3D11Resource *res = ra_d3d11_get_raw_tex(ra, ratex, &array_slice);
538
        pl_tex tex = pl_d3d11_wrap(p->gpu, pl_d3d11_wrap_params(
539
            .tex = res,
540
            .array_slice = array_slice,
541
            .fmt = ra_d3d11_get_format(ratex->params.format),
542
            .w = ratex->params.w,
543
            .h = ratex->params.h,
544
        ));
545
        SAFE_RELEASE(res);
546
        return tex;
547
    }
548
#endif
549
550
0
    MP_ERR(p, "Failed mapping hwdec frame? Open a bug!\n");
551
0
    return false;
552
0
}
553
554
static bool hwdec_acquire(pl_gpu gpu, struct pl_frame *frame)
555
0
{
556
0
    struct mp_image *mpi = frame->user_data;
557
0
    struct frame_priv *fp = mpi->priv;
558
0
    struct priv *p = fp->vo->priv;
559
0
    if (!hwdec_reconfig(p, fp->hwdec, &mpi->params))
560
0
        return false;
561
562
0
    if (ra_hwdec_mapper_map(p->hwdec_mapper, mpi) < 0) {
563
0
        MP_ERR(p, "Mapping hardware decoded surface failed.\n");
564
0
        return false;
565
0
    }
566
567
0
    for (int n = 0; n < frame->num_planes; n++) {
568
0
        if (!(frame->planes[n].texture = hwdec_get_tex(p, n)))
569
0
            return false;
570
0
    }
571
572
0
    return true;
573
0
}
574
575
static void hwdec_release(pl_gpu gpu, struct pl_frame *frame)
576
0
{
577
0
    struct mp_image *mpi = frame->user_data;
578
0
    struct frame_priv *fp = mpi->priv;
579
0
    struct priv *p = fp->vo->priv;
580
0
    if (!ra_pl_get(p->hwdec_mapper->ra)) {
581
0
        for (int n = 0; n < frame->num_planes; n++)
582
0
            pl_tex_destroy(p->gpu, &frame->planes[n].texture);
583
0
    }
584
585
0
    ra_hwdec_mapper_unmap(p->hwdec_mapper);
586
0
}
587
588
static bool map_frame(pl_gpu gpu, pl_tex *tex, const struct pl_source_frame *src,
589
                      struct pl_frame *frame)
590
0
{
591
0
    struct mp_image *mpi = src->frame_data;
592
0
    struct mp_image_params par = mpi->params;
593
0
    struct frame_priv *fp = mpi->priv;
594
0
    struct vo *vo = fp->vo;
595
0
    struct priv *p = vo->priv;
596
597
0
    fp->hwdec = ra_hwdec_get(&p->hwdec_ctx, mpi->imgfmt);
598
0
    if (fp->hwdec) {
599
        // Note: We don't actually need the mapper to map the frame yet; we
600
        // only reconfigure the mapper here (potentially creating it) to access
601
        // `dst_params`. In practice, though, this should not matter unless the
602
        // image format changes mid-stream.
603
0
        if (!hwdec_reconfig(p, fp->hwdec, &mpi->params)) {
604
0
            talloc_free(mpi);
605
0
            return false;
606
0
        }
607
608
0
        par = p->hwdec_mapper->dst_params;
609
0
    }
610
611
0
    mp_image_params_guess_csp(&par);
612
613
0
    *frame = (struct pl_frame) {
614
0
        .color = par.color,
615
0
        .repr = par.repr,
616
0
        .profile = {
617
0
            .data = mpi->icc_profile ? mpi->icc_profile->data : NULL,
618
0
            .len = mpi->icc_profile ? mpi->icc_profile->size : 0,
619
0
        },
620
0
        .rotation = par.rotate / 90,
621
0
        .user_data = mpi,
622
0
    };
623
624
0
    if (fp->hwdec) {
625
626
0
        struct mp_imgfmt_desc desc = mp_imgfmt_get_desc(par.imgfmt);
627
0
        frame->acquire = hwdec_acquire;
628
0
        frame->release = hwdec_release;
629
0
        frame->num_planes = desc.num_planes;
630
0
        for (int n = 0; n < frame->num_planes; n++) {
631
0
            struct pl_plane *plane = &frame->planes[n];
632
0
            int *map = plane->component_mapping;
633
0
            for (int c = 0; c < mp_imgfmt_desc_get_num_comps(&desc); c++) {
634
0
                if (desc.comps[c].plane != n)
635
0
                    continue;
636
637
                // Sort by component offset
638
0
                uint8_t offset = desc.comps[c].offset;
639
0
                int index = plane->components++;
640
0
                while (index > 0 && desc.comps[map[index - 1]].offset > offset) {
641
0
                    map[index] = map[index - 1];
642
0
                    index--;
643
0
                }
644
0
                map[index] = c;
645
0
            }
646
0
        }
647
648
0
    } else { // swdec
649
650
0
        struct pl_plane_data data[4] = {0};
651
0
        frame->num_planes = plane_data_from_imgfmt(data, &frame->repr.bits, mpi->imgfmt);
652
0
        for (int n = 0; n < frame->num_planes; n++) {
653
0
            struct pl_plane *plane = &frame->planes[n];
654
0
            data[n].width = mp_image_plane_w(mpi, n);
655
0
            data[n].height = mp_image_plane_h(mpi, n);
656
0
            if (mpi->stride[n] < 0) {
657
0
                data[n].pixels = mpi->planes[n] + (data[n].height - 1) * mpi->stride[n];
658
0
                data[n].row_stride = -mpi->stride[n];
659
0
                plane->flipped = true;
660
0
            } else {
661
0
                data[n].pixels = mpi->planes[n];
662
0
                data[n].row_stride = mpi->stride[n];
663
0
            }
664
665
0
            pl_buf buf = get_dr_buf(p, data[n].pixels);
666
0
            if (buf) {
667
0
                data[n].buf = buf;
668
0
                data[n].buf_offset = (uint8_t *) data[n].pixels - buf->data;
669
0
                data[n].pixels = NULL;
670
0
            } else if (gpu->limits.callbacks) {
671
0
                data[n].callback = talloc_free;
672
0
                data[n].priv = mp_image_new_ref(mpi);
673
0
            }
674
675
0
            if (!pl_upload_plane(gpu, plane, &tex[n], &data[n])) {
676
0
                MP_ERR(vo, "Failed uploading frame!\n");
677
0
                talloc_free(data[n].priv);
678
0
                talloc_free(mpi);
679
0
                return false;
680
0
            }
681
0
        }
682
683
0
    }
684
685
    // Update chroma location, must be done after initializing planes
686
0
    pl_frame_set_chroma_location(frame, par.chroma_location);
687
688
0
    if (mpi->film_grain)
689
0
        pl_film_grain_from_av(&frame->film_grain, (AVFilmGrainParams *) mpi->film_grain->data);
690
691
    // Compute a unique signature for any attached ICC profile. Wasteful in
692
    // theory if the ICC profile is the same for multiple frames, but in
693
    // practice ICC profiles are overwhelmingly going to be attached to
694
    // still images so it shouldn't matter.
695
0
    pl_icc_profile_compute_signature(&frame->profile);
696
697
    // Update LUT attached to this frame
698
0
    update_lut(p, &p->next_opts->image_lut);
699
0
    frame->lut = p->next_opts->image_lut.lut;
700
0
    frame->lut_type = p->next_opts->image_lut.type;
701
0
    return true;
702
0
}
703
704
static void unmap_frame(pl_gpu gpu, struct pl_frame *frame,
705
                        const struct pl_source_frame *src)
706
0
{
707
0
    struct mp_image *mpi = src->frame_data;
708
0
    struct frame_priv *fp = mpi->priv;
709
0
    struct priv *p = fp->vo->priv;
710
0
    for (int i = 0; i < MP_ARRAY_SIZE(fp->subs.entries); i++) {
711
0
        pl_tex tex = fp->subs.entries[i].tex;
712
0
        if (tex)
713
0
            MP_TARRAY_APPEND(p, p->sub_tex, p->num_sub_tex, tex);
714
0
    }
715
0
    talloc_free(mpi);
716
0
}
717
718
static void discard_frame(const struct pl_source_frame *src)
719
0
{
720
0
    struct mp_image *mpi = src->frame_data;
721
0
    talloc_free(mpi);
722
0
}
723
724
static void info_callback(void *priv, const struct pl_render_info *info)
725
0
{
726
0
    struct vo *vo = priv;
727
0
    struct priv *p = vo->priv;
728
0
    if (info->index >= VO_PASS_PERF_MAX)
729
0
        return; // silently ignore clipped passes, whatever
730
731
0
    struct frame_info *frame;
732
0
    switch (info->stage) {
733
0
    case PL_RENDER_STAGE_FRAME: frame = &p->perf_fresh; break;
734
0
    case PL_RENDER_STAGE_BLEND: frame = &p->perf_redraw; break;
735
0
    default: abort();
736
0
    }
737
738
0
    frame->count = info->index + 1;
739
0
    pl_dispatch_info_move(&frame->info[info->index], info->pass);
740
0
}
741
742
static void update_options(struct vo *vo)
743
0
{
744
0
    struct priv *p = vo->priv;
745
0
    pl_options pars = p->pars;
746
0
    bool changed = m_config_cache_update(p->opts_cache);
747
0
    changed = m_config_cache_update(p->next_opts_cache) || changed;
748
0
    if (changed)
749
0
        update_render_options(vo);
750
751
0
    update_lut(p, &p->next_opts->lut);
752
0
    pars->params.lut = p->next_opts->lut.lut;
753
0
    pars->params.lut_type = p->next_opts->lut.type;
754
755
    // Update equalizer state
756
0
    struct mp_csp_params cparams = MP_CSP_PARAMS_DEFAULTS;
757
0
    const struct gl_video_opts *opts = p->opts_cache->opts;
758
0
    mp_csp_equalizer_state_get(p->video_eq, &cparams);
759
0
    pars->color_adjustment.brightness = cparams.brightness;
760
0
    pars->color_adjustment.contrast = cparams.contrast;
761
0
    pars->color_adjustment.hue = cparams.hue;
762
0
    pars->color_adjustment.saturation = cparams.saturation;
763
0
    pars->color_adjustment.gamma = cparams.gamma * opts->gamma;
764
0
    p->output_levels = cparams.levels_out;
765
766
0
    for (char **kv = p->next_opts->raw_opts; kv && kv[0]; kv += 2)
767
0
        pl_options_set_str(pars, kv[0], kv[1]);
768
0
}
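Since pl_options_set_str() is invoked once per raw key/value pair, an option string along the lines of --libplacebo-opts=deband=yes,deband_iterations=2 (key names here are purely illustrative) turns into the calls pl_options_set_str(pars, "deband", "yes") and pl_options_set_str(pars, "deband_iterations", "2"), leaving parsing and validation of each value to libplacebo.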
769
770
static void apply_target_contrast(struct priv *p, struct pl_color_space *color, float min_luma)
771
0
{
772
0
    const struct gl_video_opts *opts = p->opts_cache->opts;
773
774
    // Auto mode, use target value if available
775
0
    if (!opts->target_contrast) {
776
0
        color->hdr.min_luma = min_luma;
777
0
        return;
778
0
    }
779
780
    // Infinite contrast
781
0
    if (opts->target_contrast == -1) {
782
0
        color->hdr.min_luma = 1e-7;
783
0
        return;
784
0
    }
785
786
    // Infer max_luma for current pl_color_space
787
0
    pl_color_space_nominal_luma_ex(pl_nominal_luma_params(
788
0
        .color = color,
789
        // use HDR10 metadata so that an already-set value is respected
790
0
        .metadata = PL_HDR_METADATA_HDR10,
791
0
        .scaling = PL_HDR_NITS,
792
0
        .out_max = &color->hdr.max_luma
793
0
    ));
794
795
0
    color->hdr.min_luma = color->hdr.max_luma / opts->target_contrast;
796
0
}
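Worked example for the contrast handling above, with illustrative numbers: target-contrast=1000 together with an inferred max_luma of 203 nits yields min_luma = 203 / 1000 ≈ 0.2 nits; target-contrast=-1 pins min_luma to 1e-7 instead, and with the option unset the min_luma passed in by the caller (typically the swapchain-reported value) is used as-is.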
797
798
static void apply_target_options(struct priv *p, struct pl_frame *target,
799
                                 float target_peak, float min_luma)
800
0
{
801
0
    update_lut(p, &p->next_opts->target_lut);
802
0
    target->lut = p->next_opts->target_lut.lut;
803
0
    target->lut_type = p->next_opts->target_lut.type;
804
805
    // Colorspace overrides
806
0
    const struct gl_video_opts *opts = p->opts_cache->opts;
807
0
    if (p->output_levels)
808
0
        target->repr.levels = p->output_levels;
809
0
    if (opts->target_prim)
810
0
        target->color.primaries = opts->target_prim;
811
0
    if (opts->target_trc)
812
0
        target->color.transfer = opts->target_trc;
813
    // Keep the swapchain's value if it set one; the override was already applied via the hint
814
0
    if (target_peak && !target->color.hdr.max_luma)
815
0
        target->color.hdr.max_luma = target_peak;
816
0
    if (!target->color.hdr.min_luma)
817
0
        apply_target_contrast(p, &target->color, min_luma);
818
0
    if (opts->target_gamut) {
819
        // Ensure resulting gamut still fits inside container
820
0
        const struct pl_raw_primaries *gamut, *container;
821
0
        gamut = pl_raw_primaries_get(opts->target_gamut);
822
0
        container = pl_raw_primaries_get(target->color.primaries);
823
0
        target->color.hdr.prim = pl_primaries_clip(gamut, container);
824
0
    }
825
0
    int dither_depth = opts->dither_depth;
826
0
    if (dither_depth == 0) {
827
0
        struct ra_swapchain *sw = p->ra_ctx->swapchain;
828
0
        if (sw->fns->color_depth && sw->fns->color_depth(sw) != -1) {
829
0
            dither_depth = sw->fns->color_depth(sw);
830
0
        } else if (!pl_color_transfer_is_hdr(target->color.transfer)) {
831
0
            dither_depth = 8;
832
0
        }
833
0
    }
834
0
    if (dither_depth > 0) {
835
0
        struct pl_bit_encoding *tbits = &target->repr.bits;
836
0
        tbits->color_depth += dither_depth - tbits->sample_depth;
837
0
        tbits->sample_depth = dither_depth;
838
0
    }
839
840
0
    if (opts->icc_opts->icc_use_luma) {
841
0
        p->icc_params.max_luma = 0.0f;
842
0
    } else {
843
0
        pl_color_space_nominal_luma_ex(pl_nominal_luma_params(
844
0
            .color    = &target->color,
845
0
            .metadata = PL_HDR_METADATA_HDR10, // use only static HDR nits
846
0
            .scaling  = PL_HDR_NITS,
847
0
            .out_max  = &p->icc_params.max_luma,
848
0
        ));
849
0
    }
850
851
0
    pl_icc_update(p->pllog, &p->icc_profile, NULL, &p->icc_params);
852
0
    target->icc = p->icc_profile;
853
0
}
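To make the dithering setup above concrete: with dither_depth at 0 (auto) and a swapchain that reports a 10-bit surface, a target that started out as 16-bit sample depth / 16-bit color depth is rewritten to color_depth = 16 + (10 - 16) = 10 and sample_depth = 10; if no depth is reported and the target transfer is not HDR, 8 bits are assumed.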
854
855
static void apply_crop(struct pl_frame *frame, struct mp_rect crop,
856
                       int width, int height)
857
0
{
858
0
    frame->crop = (struct pl_rect2df) {
859
0
        .x0 = crop.x0,
860
0
        .y0 = crop.y0,
861
0
        .x1 = crop.x1,
862
0
        .y1 = crop.y1,
863
0
    };
864
865
    // mpv gives us rotated/flipped rects, libplacebo expects unrotated
866
0
    pl_rect2df_rotate(&frame->crop, -frame->rotation);
867
0
    if (frame->crop.x1 < frame->crop.x0) {
868
0
        frame->crop.x0 = width - frame->crop.x0;
869
0
        frame->crop.x1 = width - frame->crop.x1;
870
0
    }
871
872
0
    if (frame->crop.y1 < frame->crop.y0) {
873
0
        frame->crop.y0 = height - frame->crop.y0;
874
0
        frame->crop.y1 = height - frame->crop.y1;
875
0
    }
876
0
}
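A concrete case of the mirroring fix above: if un-rotating leaves the horizontal crop reversed, say x0 = 1920 and x1 = 1280 on a 1920-pixel-wide frame, it is re-anchored against the width to x0 = 1920 - 1920 = 0 and x1 = 1920 - 1280 = 640, restoring x0 < x1; the vertical branch does the same against the height.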
877
878
static void update_tm_viz(struct pl_color_map_params *params,
879
                          const struct pl_frame *target)
880
0
{
881
0
    if (!params->visualize_lut)
882
0
        return;
883
884
    // Use right half of screen for TM visualization, constrain to 1:1 AR
885
0
    const float out_w = fabsf(pl_rect_w(target->crop));
886
0
    const float out_h = fabsf(pl_rect_h(target->crop));
887
0
    const float size = MPMIN(out_w / 2.0f, out_h);
888
0
    params->visualize_rect = (pl_rect2df) {
889
0
        .x0 = 1.0f - size / out_w,
890
0
        .x1 = 1.0f,
891
0
        .y0 = 0.0f,
892
0
        .y1 = size / out_h,
893
0
    };
894
895
    // Visualize red-blue plane
896
0
    params->visualize_hue = M_PI / 4.0;
897
0
}
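Plugging numbers into the layout above: for a 1920x1080 target crop, out_w = 1920, out_h = 1080 and size = MPMIN(960, 1080) = 960, so visualize_rect becomes {x0 = 0.5, x1 = 1.0, y0 = 0.0, y1 ≈ 0.889}, i.e. a square 960x960-pixel region anchored to the right edge.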
898
899
static void update_hook_opts_dynamic(struct priv *p, const struct pl_hook *hook,
900
                                     const struct mp_image *mpi);
901
902
static bool draw_frame(struct vo *vo, struct vo_frame *frame)
903
0
{
904
0
    struct priv *p = vo->priv;
905
0
    pl_options pars = p->pars;
906
0
    pl_gpu gpu = p->gpu;
907
0
    update_options(vo);
908
909
0
    struct pl_render_params params = pars->params;
910
0
    const struct gl_video_opts *opts = p->opts_cache->opts;
911
0
    bool will_redraw = frame->display_synced && frame->num_vsyncs > 1;
912
0
    bool cache_frame = will_redraw || frame->still;
913
0
    bool can_interpolate = opts->interpolation && frame->display_synced &&
914
0
                           !frame->still && frame->num_frames > 1;
915
0
    double pts_offset = can_interpolate ? frame->ideal_frame_vsync : 0;
916
0
    params.info_callback = info_callback;
917
0
    params.info_priv = vo;
918
0
    params.skip_caching_single_frame = !cache_frame;
919
0
    params.preserve_mixing_cache = p->next_opts->inter_preserve && !frame->still;
920
0
    if (frame->still)
921
0
        params.frame_mixer = NULL;
922
923
0
    if (frame->current && frame->current->params.vflip) {
924
0
        pl_matrix2x2 m = { .m = {{1, 0}, {0, -1}}, };
925
0
        pars->distort_params.transform.mat = m;
926
0
        params.distort_params = &pars->distort_params;
927
0
    } else {
928
0
        params.distort_params = NULL;
929
0
    }
930
931
    // pl_queue advances its internal virtual PTS and culls available frames
932
    // based on this value and the VPS/FPS ratio. Requesting a non-monotonic PTS
933
    // is an invalid use of pl_queue. Reset it if this happens in an attempt to
934
    // recover as much as possible. Ideally, this should never occur, and if it
935
    // does, it should be corrected. The ideal_frame_vsync may be negative if
936
    // the last draw did not align perfectly with the vsync. In this case, we
937
    // should have the previous frame available in pl_queue, or a reset is
938
    // already requested. Clamp the check to 0, as we don't have the previous
939
    // frame in vo_frame anyway.
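    // Example with illustrative numbers: if the earliest frame still queued
    // has pts 10.500 while frame->current has pts 10.000 and ideal_frame_vsync
    // is -0.004 (clamped to 0 here), then 10.000 < 10.500 and the check below
    // sets want_reset, so the queue is rebuilt instead of being asked to step
    // backwards.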
940
0
    struct pl_source_frame vpts;
941
0
    if (frame->current && !p->want_reset) {
942
0
        if (pl_queue_peek(p->queue, 0, &vpts) &&
943
0
            frame->current->pts + MPMAX(0, pts_offset) < vpts.pts)
944
0
        {
945
0
            MP_VERBOSE(vo, "Forcing queue refill, PTS(%f + %f | %f) < VPTS(%f)\n",
946
0
                       frame->current->pts, pts_offset,
947
0
                       frame->ideal_frame_vsync_duration, vpts.pts);
948
0
            p->want_reset = true;
949
0
        }
950
0
    }
951
952
    // Push all incoming frames into the frame queue
953
0
    for (int n = 0; n < frame->num_frames; n++) {
954
0
        int id = frame->frame_id + n;
955
956
0
        if (p->want_reset) {
957
0
            pl_renderer_flush_cache(p->rr);
958
0
            pl_queue_reset(p->queue);
959
0
            p->last_pts = 0.0;
960
0
            p->last_id = 0;
961
0
            p->want_reset = false;
962
0
        }
963
964
0
        if (id <= p->last_id)
965
0
            continue; // ignore already seen frames
966
967
0
        struct mp_image *mpi = mp_image_new_ref(frame->frames[n]);
968
0
        struct frame_priv *fp = talloc_zero(mpi, struct frame_priv);
969
0
        mpi->priv = fp;
970
0
        fp->vo = vo;
971
972
0
        pl_queue_push(p->queue, &(struct pl_source_frame) {
973
0
            .pts = mpi->pts,
974
0
            .duration = can_interpolate ? frame->approx_duration : 0,
975
0
            .frame_data = mpi,
976
0
            .map = map_frame,
977
0
            .unmap = unmap_frame,
978
0
            .discard = discard_frame,
979
0
        });
980
981
0
        p->last_id = id;
982
0
    }
983
984
0
    struct ra_swapchain *sw = p->ra_ctx->swapchain;
985
986
0
    bool pass_colorspace = false;
987
0
    struct pl_color_space target_csp;
988
    // Assume HDR is supported if the query is not available
989
    // TODO: Implement this for all backends
990
0
    target_csp = sw->fns->target_csp
991
0
                     ? sw->fns->target_csp(sw)
992
0
                     : (struct pl_color_space){ .transfer = PL_COLOR_TRC_PQ };
993
0
    if (!pl_color_transfer_is_hdr(target_csp.transfer)) {
994
0
        target_csp.hdr.max_luma = 0;
995
0
        target_csp.hdr.min_luma = 0;
996
0
    }
997
998
0
    float target_peak = opts->target_peak ? opts->target_peak : target_csp.hdr.max_luma;
999
0
    struct pl_color_space hint;
1000
0
    bool target_hint = p->next_opts->target_hint == 1 ||
1001
0
                       (p->next_opts->target_hint == -1 &&
1002
0
                        pl_color_transfer_is_hdr(target_csp.transfer));
1003
0
    if (target_hint && frame->current) {
1004
0
        hint = frame->current->params.color;
1005
0
        if (p->ra_ctx->fns->pass_colorspace && p->ra_ctx->fns->pass_colorspace(p->ra_ctx))
1006
0
            pass_colorspace = true;
1007
0
        if (opts->target_prim)
1008
0
            hint.primaries = opts->target_prim;
1009
0
        if (opts->target_trc)
1010
0
            hint.transfer = opts->target_trc;
1011
0
        if (target_peak)
1012
0
            hint.hdr.max_luma = target_peak;
1013
0
        apply_target_contrast(p, &hint, target_csp.hdr.min_luma);
1014
0
        if (!pass_colorspace)
1015
0
            pl_swapchain_colorspace_hint(p->sw, &hint);
1016
0
    } else if (!target_hint) {
1017
0
        pl_swapchain_colorspace_hint(p->sw, NULL);
1018
0
    }
1019
1020
0
    struct pl_swapchain_frame swframe;
1021
0
    bool should_draw = sw->fns->start_frame(sw, NULL); // for wayland logic
1022
0
    if (!should_draw || !pl_swapchain_start_frame(p->sw, &swframe)) {
1023
0
        if (frame->current) {
1024
            // Advance the queue state to the current PTS to discard unused frames
1025
0
            struct pl_queue_params qparams = *pl_queue_params(
1026
0
                .pts = frame->current->pts + pts_offset,
1027
0
                .radius = pl_frame_mix_radius(&params),
1028
0
                .vsync_duration = can_interpolate ? frame->ideal_frame_vsync_duration : 0,
1029
0
            );
1030
0
#if PL_API_VER >= 340
1031
0
            qparams.drift_compensation = 0;
1032
0
#endif
1033
0
            pl_queue_update(p->queue, NULL, &qparams);
1034
0
        }
1035
0
        return VO_FALSE;
1036
0
    }
1037
1038
0
    bool valid = false;
1039
0
    p->is_interpolated = false;
1040
1041
    // Calculate target
1042
0
    struct pl_frame target;
1043
0
    pl_frame_from_swapchain(&target, &swframe);
1044
0
    apply_target_options(p, &target, target_peak, target_csp.hdr.min_luma);
1045
0
    update_overlays(vo, p->osd_res,
1046
0
                    (frame->current && opts->blend_subs) ? OSD_DRAW_OSD_ONLY : 0,
1047
0
                    PL_OVERLAY_COORDS_DST_FRAME, &p->osd_state, &target, frame->current);
1048
0
    apply_crop(&target, p->dst, swframe.fbo->params.w, swframe.fbo->params.h);
1049
0
    update_tm_viz(&pars->color_map_params, &target);
1050
1051
0
    struct pl_frame_mix mix = {0};
1052
0
    if (frame->current) {
1053
        // Update queue state
1054
0
        struct pl_queue_params qparams = *pl_queue_params(
1055
0
            .pts = frame->current->pts + pts_offset,
1056
0
            .radius = pl_frame_mix_radius(&params),
1057
0
            .vsync_duration = can_interpolate ? frame->ideal_frame_vsync_duration : 0,
1058
0
            .interpolation_threshold = opts->interpolation_threshold,
1059
0
        );
1060
0
#if PL_API_VER >= 340
1061
0
        qparams.drift_compensation = 0;
1062
0
#endif
1063
1064
        // Depending on the vsync ratio, we may be up to half of the vsync
1065
        // duration before the current frame time. This works fine because
1066
        // pl_queue will have this frame, unless it's after a reset event. In
1067
        // this case, start from the first available frame.
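        // (At a 60 Hz display, for instance, that is up to roughly 8 ms
        // before frame->current->pts.)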
1068
0
        struct pl_source_frame first;
1069
0
        if (pl_queue_peek(p->queue, 0, &first) && qparams.pts < first.pts) {
1070
0
            if (first.pts != frame->current->pts)
1071
0
                MP_VERBOSE(vo, "Current PTS(%f) != VPTS(%f)\n", frame->current->pts, first.pts);
1072
0
            MP_VERBOSE(vo, "Clamping first frame PTS from %f to %f\n", qparams.pts, first.pts);
1073
0
            qparams.pts = first.pts;
1074
0
        }
1075
0
        p->last_pts = qparams.pts;
1076
1077
0
        switch (pl_queue_update(p->queue, &mix, &qparams)) {
1078
0
        case PL_QUEUE_ERR:
1079
0
            MP_ERR(vo, "Failed updating frames!\n");
1080
0
            goto done;
1081
0
        case PL_QUEUE_EOF:
1082
0
            abort(); // we never signal EOF
1083
0
        case PL_QUEUE_MORE:
1084
            // This is expected to happen semi-frequently near the start and
1085
            // end of a file, so only log it at high verbosity and move on.
1086
0
            MP_DBG(vo, "Render queue underrun.\n");
1087
0
            break;
1088
0
        case PL_QUEUE_OK:
1089
0
            break;
1090
0
        }
1091
1092
        // Update source crop and overlays on all existing frames. We
1093
        // technically own the `pl_frame` struct so this is kosher. This could
1094
        // be partially avoided by instead flushing the queue on resizes, but
1095
        // doing it this way avoids unnecessarily re-uploading frames.
1096
0
        for (int i = 0; i < mix.num_frames; i++) {
1097
0
            struct pl_frame *image = (struct pl_frame *) mix.frames[i];
1098
0
            struct mp_image *mpi = image->user_data;
1099
0
            struct frame_priv *fp = mpi->priv;
1100
0
            apply_crop(image, p->src, vo->params->w, vo->params->h);
1101
0
            if (opts->blend_subs) {
1102
0
                if (frame->redraw)
1103
0
                    p->osd_sync++;
1104
0
                if (fp->osd_sync < p->osd_sync) {
1105
0
                    float rx = pl_rect_w(p->dst) / pl_rect_w(image->crop);
1106
0
                    float ry = pl_rect_h(p->dst) / pl_rect_h(image->crop);
1107
0
                    struct mp_osd_res res = {
1108
0
                        .w = pl_rect_w(p->dst),
1109
0
                        .h = pl_rect_h(p->dst),
1110
0
                        .ml = -image->crop.x0 * rx,
1111
0
                        .mr = (image->crop.x1 - vo->params->w) * rx,
1112
0
                        .mt = -image->crop.y0 * ry,
1113
0
                        .mb = (image->crop.y1 - vo->params->h) * ry,
1114
0
                        .display_par = 1.0,
1115
0
                    };
1116
0
                    update_overlays(vo, res, OSD_DRAW_SUB_ONLY,
1117
0
                                    PL_OVERLAY_COORDS_DST_CROP,
1118
0
                                    &fp->subs, image, mpi);
1119
0
                    fp->osd_sync = p->osd_sync;
1120
0
                }
1121
0
            } else {
1122
                // Disable overlays when blend_subs is disabled
1123
0
                image->num_overlays = 0;
1124
0
                fp->osd_sync = 0;
1125
0
            }
1126
1127
            // Update the frame signature to include the current OSD sync
1128
            // value, in order to disambiguate between identical frames with
1129
            // modified OSD. Shift the OSD sync value by a lot to avoid
1130
            // collisions with low signature values.
1131
            //
1132
            // This is safe to do because `pl_frame_mix.signature` lives in
1133
            // temporary memory that is only valid for this `pl_queue_update`.
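            // For instance, fp->osd_sync == 3 toggles bits 48 and 49 of the
            // signature, so an otherwise identical frame whose OSD has been
            // redrawn no longer hashes the same.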
1134
0
            ((uint64_t *) mix.signatures)[i] ^= fp->osd_sync << 48;
1135
0
        }
1136
1137
        // Update dynamic hook parameters
1138
0
        for (int i = 0; i < pars->params.num_hooks; i++)
1139
0
            update_hook_opts_dynamic(p, p->hooks[i], frame->current);
1140
0
    }
1141
1142
    // Render frame
1143
0
    if (!pl_render_image_mix(p->rr, &mix, &target, &params)) {
1144
0
        MP_ERR(vo, "Failed rendering frame!\n");
1145
0
        goto done;
1146
0
    }
1147
1148
0
    struct pl_frame ref_frame;
1149
0
    pl_frames_infer_mix(p->rr, &mix, &target, &ref_frame);
1150
1151
0
    mp_mutex_lock(&vo->params_mutex);
1152
0
    p->target_params = (struct mp_image_params){
1153
0
        .imgfmt_name = swframe.fbo->params.format
1154
0
                        ? swframe.fbo->params.format->name : NULL,
1155
0
        .w = mp_rect_w(p->dst),
1156
0
        .h = mp_rect_h(p->dst),
1157
0
        .color = pass_colorspace ? hint : target.color,
1158
0
        .repr = target.repr,
1159
0
        .rotate = target.rotation,
1160
0
    };
1161
0
    vo->target_params = &p->target_params;
1162
1163
0
    if (vo->params) {
1164
        // Augment metadata with peak detection max_pq_y / avg_pq_y
1165
0
        vo->has_peak_detect_values = pl_renderer_get_hdr_metadata(p->rr, &vo->params->color.hdr);
1166
0
    }
1167
0
    mp_mutex_unlock(&vo->params_mutex);
1168
1169
0
    p->is_interpolated = pts_offset != 0 && mix.num_frames > 1;
1170
0
    valid = true;
1171
    // fall through
1172
1173
0
done:
1174
0
    if (!valid) // clear with purple to indicate error
1175
0
        pl_tex_clear(gpu, swframe.fbo, (float[4]){ 0.5, 0.0, 1.0, 1.0 });
1176
1177
0
    pl_gpu_flush(gpu);
1178
0
    p->frame_pending = true;
1179
0
    return VO_TRUE;
1180
0
}
1181
1182
static void flip_page(struct vo *vo)
1183
0
{
1184
0
    struct priv *p = vo->priv;
1185
0
    struct ra_swapchain *sw = p->ra_ctx->swapchain;
1186
1187
0
    if (p->frame_pending) {
1188
0
        if (!pl_swapchain_submit_frame(p->sw))
1189
0
            MP_ERR(vo, "Failed presenting frame!\n");
1190
0
        p->frame_pending = false;
1191
0
    }
1192
1193
0
    sw->fns->swap_buffers(sw);
1194
0
}
1195
1196
static void get_vsync(struct vo *vo, struct vo_vsync_info *info)
1197
0
{
1198
0
    struct priv *p = vo->priv;
1199
0
    struct ra_swapchain *sw = p->ra_ctx->swapchain;
1200
0
    if (sw->fns->get_vsync)
1201
0
        sw->fns->get_vsync(sw, info);
1202
0
}
1203
1204
static int query_format(struct vo *vo, int format)
1205
0
{
1206
0
    struct priv *p = vo->priv;
1207
0
    if (ra_hwdec_get(&p->hwdec_ctx, format))
1208
0
        return true;
1209
1210
0
    struct pl_bit_encoding bits;
1211
0
    struct pl_plane_data data[4] = {0};
1212
0
    int planes = plane_data_from_imgfmt(data, &bits, format);
1213
0
    if (!planes)
1214
0
        return false;
1215
1216
0
    for (int i = 0; i < planes; i++) {
1217
0
        if (!pl_plane_find_fmt(p->gpu, NULL, &data[i]))
1218
0
            return false;
1219
0
    }
1220
1221
0
    return true;
1222
0
}
1223
1224
static void resize(struct vo *vo)
1225
0
{
1226
0
    struct priv *p = vo->priv;
1227
0
    struct mp_rect src, dst;
1228
0
    struct mp_osd_res osd;
1229
0
    vo_get_src_dst_rects(vo, &src, &dst, &osd);
1230
0
    if (vo->dwidth && vo->dheight) {
1231
0
        gpu_ctx_resize(p->context, vo->dwidth, vo->dheight);
1232
0
        vo->want_redraw = true;
1233
0
    }
1234
1235
0
    if (mp_rect_equals(&p->src, &src) &&
1236
0
        mp_rect_equals(&p->dst, &dst) &&
1237
0
        osd_res_equals(p->osd_res, osd))
1238
0
        return;
1239
1240
0
    p->osd_sync++;
1241
0
    p->osd_res = osd;
1242
0
    p->src = src;
1243
0
    p->dst = dst;
1244
0
}
1245
1246
static int reconfig(struct vo *vo, struct mp_image_params *params)
1247
0
{
1248
0
    struct priv *p = vo->priv;
1249
0
    if (!p->ra_ctx->fns->reconfig(p->ra_ctx))
1250
0
        return -1;
1251
1252
0
    resize(vo);
1253
0
    mp_mutex_lock(&vo->params_mutex);
1254
0
    vo->target_params = NULL;
1255
0
    mp_mutex_unlock(&vo->params_mutex);
1256
0
    return 0;
1257
0
}
1258
1259
// Takes over ownership of `icc`. Can be used to unload profile (icc.len == 0)
1260
static bool update_icc(struct priv *p, struct bstr icc)
1261
0
{
1262
0
    struct pl_icc_profile profile = {
1263
0
        .data = icc.start,
1264
0
        .len  = icc.len,
1265
0
    };
1266
1267
0
    pl_icc_profile_compute_signature(&profile);
1268
1269
0
    bool ok = pl_icc_update(p->pllog, &p->icc_profile, &profile, &p->icc_params);
1270
0
    talloc_free(icc.start);
1271
0
    return ok;
1272
0
}
1273
1274
// Returns whether the ICC profile was updated (even on failure)
1275
static bool update_auto_profile(struct priv *p, int *events)
1276
0
{
1277
0
    const struct gl_video_opts *opts = p->opts_cache->opts;
1278
0
    if (!opts->icc_opts || !opts->icc_opts->profile_auto || p->icc_path)
1279
0
        return false;
1280
1281
0
    MP_VERBOSE(p, "Querying ICC profile...\n");
1282
0
    bstr icc = {0};
1283
0
    int r = p->ra_ctx->fns->control(p->ra_ctx, events, VOCTRL_GET_ICC_PROFILE, &icc);
1284
1285
0
    if (r != VO_NOTAVAIL) {
1286
0
        if (r == VO_FALSE) {
1287
0
            MP_WARN(p, "Could not retrieve an ICC profile.\n");
1288
0
        } else if (r == VO_NOTIMPL) {
1289
0
            MP_ERR(p, "icc-profile-auto not implemented on this platform.\n");
1290
0
        }
1291
1292
0
        update_icc(p, icc);
1293
0
        return true;
1294
0
    }
1295
1296
0
    return false;
1297
0
}
1298
1299
static void video_screenshot(struct vo *vo, struct voctrl_screenshot *args)
1300
0
{
1301
0
    struct priv *p = vo->priv;
1302
0
    pl_options pars = p->pars;
1303
0
    pl_gpu gpu = p->gpu;
1304
0
    pl_tex fbo = NULL;
1305
0
    args->res = NULL;
1306
1307
0
    update_options(vo);
1308
0
    struct pl_render_params params = pars->params;
1309
0
    params.info_callback = NULL;
1310
0
    params.skip_caching_single_frame = true;
1311
0
    params.preserve_mixing_cache = false;
1312
0
    params.frame_mixer = NULL;
1313
1314
0
    struct pl_peak_detect_params peak_params;
1315
0
    if (params.peak_detect_params) {
1316
0
        peak_params = *params.peak_detect_params;
1317
0
        params.peak_detect_params = &peak_params;
1318
0
        peak_params.allow_delayed = false;
1319
0
    }
1320
1321
    // Retrieve the current frame from the frame queue
1322
0
    struct pl_frame_mix mix;
1323
0
    enum pl_queue_status status;
1324
0
    struct pl_queue_params qparams = *pl_queue_params(
1325
0
        .pts = p->last_pts,
1326
0
    );
1327
0
#if PL_API_VER >= 340
1328
0
        qparams.drift_compensation = 0;
1329
0
#endif
1330
0
    status = pl_queue_update(p->queue, &mix, &qparams);
1331
0
    mp_assert(status != PL_QUEUE_EOF);
1332
0
    if (status == PL_QUEUE_ERR) {
1333
0
        MP_ERR(vo, "Unknown error occurred while trying to take screenshot!\n");
1334
0
        return;
1335
0
    }
1336
0
    if (!mix.num_frames) {
1337
0
        MP_ERR(vo, "No frames available to take screenshot of, is a file loaded?\n");
1338
0
        return;
1339
0
    }
1340
1341
    // Passing an interpolation radius of 0 guarantees that the first frame in
1342
    // the resulting mix is the correct frame for this PTS
1343
0
    struct pl_frame image = *(struct pl_frame *) mix.frames[0];
1344
0
    struct mp_image *mpi = image.user_data;
1345
0
    struct mp_rect src = p->src, dst = p->dst;
1346
0
    struct mp_osd_res osd = p->osd_res;
1347
0
    if (!args->scaled) {
1348
0
        int w, h;
1349
0
        mp_image_params_get_dsize(&mpi->params, &w, &h);
1350
0
        if (w < 1 || h < 1)
1351
0
            return;
1352
1353
0
        int src_w = mpi->params.w;
1354
0
        int src_h = mpi->params.h;
1355
0
        src = (struct mp_rect) {0, 0, src_w, src_h};
1356
0
        dst = (struct mp_rect) {0, 0, w, h};
1357
1358
0
        if (mp_image_crop_valid(&mpi->params))
1359
0
            src = mpi->params.crop;
1360
1361
0
        if (mpi->params.rotate % 180 == 90) {
1362
0
            MPSWAP(int, w, h);
1363
0
            MPSWAP(int, src_w, src_h);
1364
0
        }
1365
0
        mp_rect_rotate(&src, src_w, src_h, mpi->params.rotate);
1366
0
        mp_rect_rotate(&dst, w, h, mpi->params.rotate);
1367
1368
0
        osd = (struct mp_osd_res) {
1369
0
            .display_par = 1.0,
1370
0
            .w = mp_rect_w(dst),
1371
0
            .h = mp_rect_h(dst),
1372
0
        };
1373
0
    }
1374
1375
    // Create target FBO, try high bit depth first
1376
0
    int mpfmt;
1377
0
    for (int depth = args->high_bit_depth ? 16 : 8; depth; depth -= 8) {
1378
0
        if (depth == 16) {
1379
0
            mpfmt = IMGFMT_RGBA64;
1380
0
        } else {
1381
0
            mpfmt = p->ra_ctx->opts.want_alpha ? IMGFMT_RGBA : IMGFMT_RGB0;
1382
0
        }
1383
0
        pl_fmt fmt = pl_find_fmt(gpu, PL_FMT_UNORM, 4, depth, depth,
1384
0
                                 PL_FMT_CAP_RENDERABLE | PL_FMT_CAP_HOST_READABLE);
1385
0
        if (!fmt)
1386
0
            continue;
1387
1388
0
        fbo = pl_tex_create(gpu, pl_tex_params(
1389
0
            .w = osd.w,
1390
0
            .h = osd.h,
1391
0
            .format = fmt,
1392
0
            .blit_dst = true,
1393
0
            .renderable = true,
1394
0
            .host_readable = true,
1395
0
            .storable = fmt->caps & PL_FMT_CAP_STORABLE,
1396
0
        ));
1397
0
        if (fbo)
1398
0
            break;
1399
0
    }
1400
1401
0
    if (!fbo) {
1402
0
        MP_ERR(vo, "Failed creating target FBO for screenshot!\n");
1403
0
        return;
1404
0
    }
1405
1406
0
    struct pl_frame target = {
1407
0
        .repr = pl_color_repr_rgb,
1408
0
        .num_planes = 1,
1409
0
        .planes[0] = {
1410
0
            .texture = fbo,
1411
0
            .components = 4,
1412
0
            .component_mapping = {0, 1, 2, 3},
1413
0
        },
1414
0
    };
1415
1416
0
    const struct gl_video_opts *opts = p->opts_cache->opts;
1417
0
    if (args->scaled) {
1418
        // Apply target LUT, ICC profile and CSP override only in window mode
1419
0
        apply_target_options(p, &target, opts->target_peak, 0);
1420
0
    } else if (args->native_csp) {
1421
0
        target.color = image.color;
1422
0
    } else {
1423
0
        target.color = pl_color_space_srgb;
1424
0
    }
1425
1426
0
    apply_crop(&image, src, mpi->params.w, mpi->params.h);
1427
0
    apply_crop(&target, dst, fbo->params.w, fbo->params.h);
1428
0
    update_tm_viz(&pars->color_map_params, &target);
1429
1430
0
    int osd_flags = 0;
1431
0
    if (!args->subs)
1432
0
        osd_flags |= OSD_DRAW_OSD_ONLY;
1433
0
    if (!args->osd)
1434
0
        osd_flags |= OSD_DRAW_SUB_ONLY;
1435
1436
0
    struct frame_priv *fp = mpi->priv;
1437
0
    if (opts->blend_subs) {
1438
0
        float rx = pl_rect_w(dst) / pl_rect_w(image.crop);
1439
0
        float ry = pl_rect_h(dst) / pl_rect_h(image.crop);
1440
0
        struct mp_osd_res res = {
1441
0
            .w = pl_rect_w(dst),
1442
0
            .h = pl_rect_h(dst),
1443
0
            .ml = -image.crop.x0 * rx,
1444
0
            .mr = (image.crop.x1 - vo->params->w) * rx,
1445
0
            .mt = -image.crop.y0 * ry,
1446
0
            .mb = (image.crop.y1 - vo->params->h) * ry,
1447
0
            .display_par = 1.0,
1448
0
        };
1449
0
        update_overlays(vo, res, osd_flags,
1450
0
                        PL_OVERLAY_COORDS_DST_CROP,
1451
0
                        &fp->subs, &image, mpi);
1452
0
    } else {
1453
        // blend_subs disabled: draw OSD/subs onto the target frame instead and drop any source-image overlays
1454
0
        update_overlays(vo, osd, osd_flags, PL_OVERLAY_COORDS_DST_FRAME,
1455
0
                        &p->osd_state, &target, mpi);
1456
0
        image.num_overlays = 0;
1457
0
    }
1458
1459
0
    if (!pl_render_image(p->rr, &image, &target, &params)) {
1460
0
        MP_ERR(vo, "Failed rendering frame!\n");
1461
0
        goto done;
1462
0
    }
1463
1464
0
    args->res = mp_image_alloc(mpfmt, fbo->params.w, fbo->params.h);
1465
0
    if (!args->res)
1466
0
        goto done;
1467
1468
0
    args->res->params.color.primaries = target.color.primaries;
1469
0
    args->res->params.color.transfer = target.color.transfer;
1470
0
    args->res->params.repr.levels = target.repr.levels;
1471
0
    args->res->params.color.hdr = target.color.hdr;
1472
0
    if (args->scaled)
1473
0
        args->res->params.p_w = args->res->params.p_h = 1;
1474
1475
0
    bool ok = pl_tex_download(gpu, pl_tex_transfer_params(
1476
0
        .tex = fbo,
1477
0
        .ptr = args->res->planes[0],
1478
0
        .row_pitch = args->res->stride[0],
1479
0
    ));
1480
1481
0
    if (!ok)
1482
0
        TA_FREEP(&args->res);
1483
1484
    // fall through
1485
0
done:
1486
0
    pl_tex_destroy(gpu, &fbo);
1487
0
}
1488
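The readback at the end of video_screenshot() works by handing libplacebo the destination buffer and its stride directly. A minimal sketch of that pattern, assuming the rendered texture and the allocated mp_image already exist (only calls that appear above are used):

static bool download_to_mp_image(pl_gpu gpu, pl_tex fbo, struct mp_image *img)
{
    // Copy the rendered texture into system memory; row_pitch tells
    // libplacebo to honour mp_image's (possibly padded) stride.
    return pl_tex_download(gpu, pl_tex_transfer_params(
        .tex       = fbo,
        .ptr       = img->planes[0],
        .row_pitch = img->stride[0],
    ));
}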
1489
static inline void copy_frame_info_to_mp(struct frame_info *pl,
1490
0
                                         struct mp_frame_perf *mp) {
1491
0
    static_assert(MP_ARRAY_SIZE(pl->info) == MP_ARRAY_SIZE(mp->perf), "");
1492
0
    mp_assert(pl->count <= VO_PASS_PERF_MAX);
1493
0
    mp->count = MPMIN(pl->count, VO_PASS_PERF_MAX);
1494
1495
0
    for (int i = 0; i < mp->count; ++i) {
1496
0
        const struct pl_dispatch_info *pass = &pl->info[i];
1497
1498
0
        static_assert(VO_PERF_SAMPLE_COUNT >= MP_ARRAY_SIZE(pass->samples), "");
1499
0
        mp_assert(pass->num_samples <= MP_ARRAY_SIZE(pass->samples));
1500
1501
0
        struct mp_pass_perf *perf = &mp->perf[i];
1502
0
        perf->count = MPMIN(pass->num_samples, VO_PERF_SAMPLE_COUNT);
1503
0
        memcpy(perf->samples, pass->samples, perf->count * sizeof(pass->samples[0]));
1504
0
        perf->last = pass->last;
1505
0
        perf->peak = pass->peak;
1506
0
        perf->avg = pass->average;
1507
1508
0
        strncpy(mp->desc[i], pass->shader->description, sizeof(mp->desc[i]) - 1);
1509
0
        mp->desc[i][sizeof(mp->desc[i]) - 1] = '\0';
1510
0
    }
1511
0
}
1512
1513
static void update_ra_ctx_options(struct vo *vo, struct ra_ctx_opts *ctx_opts)
1514
160
{
1515
160
    struct priv *p = vo->priv;
1516
160
    struct gl_video_opts *gl_opts = p->opts_cache->opts;
1517
160
    bool border_alpha = (p->next_opts->border_background == BACKGROUND_COLOR &&
1518
160
                         gl_opts->background_color.a != 255) ||
1519
160
                         p->next_opts->border_background == BACKGROUND_NONE;
1520
160
    ctx_opts->want_alpha = (gl_opts->background == BACKGROUND_COLOR &&
1521
160
                            gl_opts->background_color.a != 255) ||
1522
160
                            gl_opts->background == BACKGROUND_NONE ||
1523
160
                            border_alpha;
1524
160
}
1525
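Restating the dense condition above as a checklist:

// want_alpha ends up true whenever the frame or the border can let the
// desktop show through:
//   - background == BACKGROUND_NONE, or
//   - background == BACKGROUND_COLOR with background_color.a != 255, or
//   - border_background matching either of the two cases above.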
1526
static int control(struct vo *vo, uint32_t request, void *data)
1527
0
{
1528
0
    struct priv *p = vo->priv;
1529
1530
0
    switch (request) {
1531
0
    case VOCTRL_SET_PANSCAN:
1532
0
        resize(vo);
1533
0
        return VO_TRUE;
1534
0
    case VOCTRL_PAUSE:
1535
0
        if (p->is_interpolated)
1536
0
            vo->want_redraw = true;
1537
0
        return VO_TRUE;
1538
1539
0
    case VOCTRL_UPDATE_RENDER_OPTS: {
1540
0
        update_ra_ctx_options(vo, &p->ra_ctx->opts);
1541
0
        if (p->ra_ctx->fns->update_render_opts)
1542
0
            p->ra_ctx->fns->update_render_opts(p->ra_ctx);
1543
0
        vo->want_redraw = true;
1544
1545
        // Special case for --image-lut which requires a full reset.
1546
0
        int old_type = p->next_opts->image_lut.type;
1547
0
        update_options(vo);
1548
0
        struct user_lut image_lut = p->next_opts->image_lut;
1549
0
        p->want_reset |= image_lut.opt && ((!image_lut.path && image_lut.opt) ||
1550
0
                         (image_lut.path && strcmp(image_lut.path, image_lut.opt)) ||
1551
0
                         (old_type != image_lut.type));
1552
1553
        // Also re-query the auto profile, in case `update_render_options`
1554
        // unloaded a manually specified icc profile in favor of
1555
        // icc-profile-auto
1556
0
        int events = 0;
1557
0
        update_auto_profile(p, &events);
1558
0
        vo_event(vo, events);
1559
0
        return VO_TRUE;
1560
0
    }
1561
1562
0
    case VOCTRL_RESET:
1563
        // Defer until the first new frame (unique ID) actually arrives
1564
0
        p->want_reset = true;
1565
0
        return VO_TRUE;
1566
1567
0
    case VOCTRL_PERFORMANCE_DATA: {
1568
0
        struct voctrl_performance_data *perf = data;
1569
0
        copy_frame_info_to_mp(&p->perf_fresh, &perf->fresh);
1570
0
        copy_frame_info_to_mp(&p->perf_redraw, &perf->redraw);
1571
0
        return true;
1572
0
    }
1573
1574
0
    case VOCTRL_SCREENSHOT:
1575
0
        video_screenshot(vo, data);
1576
0
        return true;
1577
1578
0
    case VOCTRL_EXTERNAL_RESIZE:
1579
0
        reconfig(vo, NULL);
1580
0
        return true;
1581
1582
0
    case VOCTRL_LOAD_HWDEC_API:
1583
0
        ra_hwdec_ctx_load_fmt(&p->hwdec_ctx, vo->hwdec_devs, data);
1584
0
        return true;
1585
0
    }
1586
1587
0
    int events = 0;
1588
0
    int r = p->ra_ctx->fns->control(p->ra_ctx, &events, request, data);
1589
0
    if (events & VO_EVENT_ICC_PROFILE_CHANGED) {
1590
0
        if (update_auto_profile(p, &events))
1591
0
            vo->want_redraw = true;
1592
0
    }
1593
0
    if (events & VO_EVENT_RESIZE)
1594
0
        resize(vo);
1595
0
    if (events & VO_EVENT_EXPOSE)
1596
0
        vo->want_redraw = true;
1597
0
    vo_event(vo, events);
1598
1599
0
    return r;
1600
0
}
1601
1602
static void wakeup(struct vo *vo)
1603
0
{
1604
0
    struct priv *p = vo->priv;
1605
0
    if (p->ra_ctx && p->ra_ctx->fns->wakeup)
1606
0
        p->ra_ctx->fns->wakeup(p->ra_ctx);
1607
0
}
1608
1609
static void wait_events(struct vo *vo, int64_t until_time_ns)
1610
0
{
1611
0
    struct priv *p = vo->priv;
1612
0
    if (p->ra_ctx && p->ra_ctx->fns->wait_events) {
1613
0
        p->ra_ctx->fns->wait_events(p->ra_ctx, until_time_ns);
1614
0
    } else {
1615
0
        vo_wait_default(vo, until_time_ns);
1616
0
    }
1617
0
}
1618
1619
static char *cache_filepath(void *ta_ctx, char *dir, const char *prefix, uint64_t key)
1620
0
{
1621
0
    bstr filename = {0};
1622
0
    bstr_xappend_asprintf(ta_ctx, &filename, "%s_%016" PRIx64, prefix, key);
1623
0
    return mp_path_join_bstr(ta_ctx, bstr0(dir), filename);
1624
0
}
1625
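cache_filepath() produces names of the form "<prefix>_<key as 16 hex digits>" inside the cache directory. A self-contained sketch of the same naming scheme with a hypothetical key:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t key = 0xdeadbeefcafebabeULL;   // hypothetical cache key
    char name[32];
    // Same format string as cache_filepath(): "%s_%016" PRIx64
    snprintf(name, sizeof(name), "%s_%016" PRIx64, "shader", key);
    puts(name);                             // prints "shader_deadbeefcafebabe"
    return 0;
}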
1626
static pl_cache_obj cache_load_obj(void *p, uint64_t key)
1627
0
{
1628
0
    struct cache *c = p;
1629
0
    void *ta_ctx = talloc_new(NULL);
1630
0
    pl_cache_obj obj = {0};
1631
1632
0
    if (!c->dir)
1633
0
        goto done;
1634
1635
0
    char *filepath = cache_filepath(ta_ctx, c->dir, c->name, key);
1636
0
    if (!filepath)
1637
0
        goto done;
1638
1639
0
    if (stat(filepath, &(struct stat){0}))
1640
0
        goto done;
1641
1642
0
    int64_t load_start = mp_time_ns();
1643
0
    struct bstr data = stream_read_file(filepath, ta_ctx, c->global, STREAM_MAX_READ_SIZE);
1644
0
    int64_t load_end = mp_time_ns();
1645
0
    MP_DBG(c, "%s: key(%" PRIx64 "), size(%zu), load time(%.3f ms)\n",
1646
0
           __func__, key, data.len,
1647
0
           MP_TIME_NS_TO_MS(load_end - load_start));
1648
1649
0
    obj = (pl_cache_obj){
1650
0
        .key = key,
1651
0
        .data = talloc_steal(NULL, data.start),
1652
0
        .size = data.len,
1653
0
        .free = talloc_free,
1654
0
    };
1655
1656
0
done:
1657
0
    talloc_free(ta_ctx);
1658
0
    return obj;
1659
0
}
1660
1661
static void cache_save_obj(void *p, pl_cache_obj obj)
1662
0
{
1663
0
    const struct cache *c = p;
1664
0
    void *ta_ctx = talloc_new(NULL);
1665
1666
0
    if (!c->dir)
1667
0
        goto done;
1668
1669
0
    char *filepath = cache_filepath(ta_ctx, c->dir, c->name, obj.key);
1670
0
    if (!filepath)
1671
0
        goto done;
1672
1673
0
    if (!obj.data || !obj.size) {
1674
0
        unlink(filepath);
1675
0
        goto done;
1676
0
    }
1677
1678
    // Don't re-save if a file with the same size already exists
1679
0
    struct stat st;
1680
0
    if (!stat(filepath, &st) && st.st_size == obj.size) {
1681
0
        MP_DBG(c, "%s: key(%"PRIx64"), size(%zu)\n", __func__, obj.key, obj.size);
1682
0
        goto done;
1683
0
    }
1684
1685
0
    int64_t save_start = mp_time_ns();
1686
0
    mp_save_to_file(filepath, obj.data, obj.size);
1687
0
    int64_t save_end = mp_time_ns();
1688
0
    MP_DBG(c, "%s: key(%" PRIx64 "), size(%zu), save time(%.3f ms)\n",
1689
0
           __func__, obj.key, obj.size,
1690
0
           MP_TIME_NS_TO_MS(save_end - save_start));
1691
1692
0
done:
1693
0
    talloc_free(ta_ctx);
1694
0
}
1695
1696
static void cache_init(struct vo *vo, struct cache *cache, size_t max_size,
1697
                       const char *dir_opt)
1698
0
{
1699
0
    struct priv *p = vo->priv;
1700
0
    const char *name = cache == &p->shader_cache ? "shader" : "icc";
1701
0
    const size_t limit = cache == &p->shader_cache ? 128 << 20 : 1536 << 20;
1702
1703
0
    char *dir;
1704
0
    if (dir_opt && dir_opt[0]) {
1705
0
        dir = mp_get_user_path(vo, p->global, dir_opt);
1706
0
    } else {
1707
0
        dir = mp_find_user_file(vo, p->global, "cache", "");
1708
0
    }
1709
0
    if (!dir || !dir[0])
1710
0
        return;
1711
1712
0
    mp_mkdirp(dir);
1713
0
    *cache = (struct cache){
1714
0
        .log        = p->log,
1715
0
        .global     = p->global,
1716
0
        .dir        = dir,
1717
0
        .name       = name,
1718
0
        .size_limit = limit,
1719
0
        .cache = pl_cache_create(pl_cache_params(
1720
0
            .log = p->pllog,
1721
0
            .get = cache_load_obj,
1722
0
            .set = cache_save_obj,
1723
0
            .priv = cache
1724
0
        )),
1725
0
    };
1726
0
}
1727
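cache_init() only creates the pl_cache object; it becomes effective once attached to the GPU, which preinit() does further below with pl_gpu_set_cache(). A condensed, hedged sketch of that wiring (error handling omitted; callback behaviour as suggested by cache_load_obj()/cache_save_obj() above):

static void attach_disk_cache(struct cache *c, pl_log pllog, pl_gpu gpu)
{
    // Cache misses are served from disk via cache_load_obj(); new or
    // updated objects are written back through cache_save_obj().
    c->cache = pl_cache_create(pl_cache_params(
        .log  = pllog,
        .get  = cache_load_obj,
        .set  = cache_save_obj,
        .priv = c,
    ));
    // Attach to the GPU so its objects (e.g. compiled shaders) persist.
    pl_gpu_set_cache(gpu, c->cache);
}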
1728
struct file_entry {
1729
    char *filepath;
1730
    size_t size;
1731
    time_t atime;
1732
};
1733
1734
static int compare_atime(const void *a, const void *b)
1735
0
{
1736
0
    return (((struct file_entry *)b)->atime - ((struct file_entry *)a)->atime);
1737
0
}
1738
1739
static void cache_uninit(struct priv *p, struct cache *cache)
1740
320
{
1741
320
    if (!cache->cache)
1742
320
        return;
1743
1744
0
    void *ta_ctx = talloc_new(NULL);
1745
0
    struct file_entry *files = NULL;
1746
0
    size_t num_files = 0;
1747
0
    mp_assert(cache->dir);
1748
0
    mp_assert(cache->name);
1749
1750
0
    DIR *d = opendir(cache->dir);
1751
0
    if (!d)
1752
0
        goto done;
1753
1754
0
    struct dirent *dir;
1755
0
    while ((dir = readdir(d)) != NULL) {
1756
0
        char *filepath = mp_path_join(ta_ctx, cache->dir, dir->d_name);
1757
0
        if (!filepath)
1758
0
            continue;
1759
0
        struct stat filestat;
1760
0
        if (stat(filepath, &filestat))
1761
0
            continue;
1762
0
        if (!S_ISREG(filestat.st_mode))
1763
0
            continue;
1764
0
        bstr fname = bstr0(dir->d_name);
1765
0
        if (!bstr_eatstart0(&fname, cache->name))
1766
0
            continue;
1767
0
        if (!bstr_eatstart0(&fname, "_"))
1768
0
            continue;
1769
0
        if (fname.len != 16) // %016x
1770
0
            continue;
1771
0
        MP_TARRAY_APPEND(ta_ctx, files, num_files,
1772
0
                         (struct file_entry){
1773
0
                             .filepath = filepath,
1774
0
                             .size     = filestat.st_size,
1775
0
                             .atime    = filestat.st_atime,
1776
0
                         });
1777
0
    }
1778
0
    closedir(d);
1779
1780
0
    if (!num_files)
1781
0
        goto done;
1782
1783
0
    qsort(files, num_files, sizeof(struct file_entry), compare_atime);
1784
1785
0
    time_t t = time(NULL);
1786
0
    size_t cache_size = 0;
1787
0
    size_t cache_limit = cache->size_limit ? cache->size_limit : SIZE_MAX;
1788
0
    for (int i = 0; i < num_files; i++) {
1789
        // Remove files that push the cache over its size limit, but only if
1790
        // they haven't been used for more than one day. This allows temporarily
1791
        // keeping a larger cache while adjusting the configuration; unused
1792
        // entries are then cleared the next day. We don't need to be overly
1793
        // aggressive with cache cleaning: in most cases it will not grow much,
1794
        // and in others it may actually be useful to cache more.
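        // Worked example (file sizes hypothetical; 128 MiB is the shader
        // cache limit set in cache_init): with entries sorted newest-first
        // at 90 MiB + 50 MiB + 10 MiB, the running total first exceeds the
        // limit at the second file, so it and the third file are unlinked,
        // but only if they have been unused for more than 24 hours.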
1795
0
        cache_size += files[i].size;
1796
0
        double rel_use = difftime(t, files[i].atime);
1797
0
        if (cache_size > cache_limit && rel_use > 60 * 60 * 24) {
1798
0
            MP_VERBOSE(p, "Removing %s | size: %9zu bytes | last used: %9d seconds ago\n",
1799
0
                       files[i].filepath, files[i].size, (int)rel_use);
1800
0
            unlink(files[i].filepath);
1801
0
        }
1802
0
    }
1803
1804
0
done:
1805
0
    talloc_free(ta_ctx);
1806
0
    pl_cache_destroy(&cache->cache);
1807
0
}
1808
1809
static void uninit(struct vo *vo)
1810
160
{
1811
160
    struct priv *p = vo->priv;
1812
160
    pl_queue_destroy(&p->queue); // destroy this first
1813
960
    for (int i = 0; i < MP_ARRAY_SIZE(p->osd_state.entries); i++)
1814
800
        pl_tex_destroy(p->gpu, &p->osd_state.entries[i].tex);
1815
160
    for (int i = 0; i < p->num_sub_tex; i++)
1816
0
        pl_tex_destroy(p->gpu, &p->sub_tex[i]);
1817
160
    for (int i = 0; i < p->num_user_hooks; i++)
1818
0
        pl_mpv_user_shader_destroy(&p->user_hooks[i].hook);
1819
1820
160
    if (vo->hwdec_devs) {
1821
0
        ra_hwdec_mapper_free(&p->hwdec_mapper);
1822
0
        ra_hwdec_ctx_uninit(&p->hwdec_ctx);
1823
0
        hwdec_devices_set_loader(vo->hwdec_devs, NULL, NULL);
1824
0
        hwdec_devices_destroy(vo->hwdec_devs);
1825
0
    }
1826
1827
160
    mp_assert(p->num_dr_buffers == 0);
1828
160
    mp_mutex_destroy(&p->dr_lock);
1829
1830
160
    cache_uninit(p, &p->shader_cache);
1831
160
    cache_uninit(p, &p->icc_cache);
1832
1833
160
    pl_lut_free(&p->next_opts->image_lut.lut);
1834
160
    pl_lut_free(&p->next_opts->lut.lut);
1835
160
    pl_lut_free(&p->next_opts->target_lut.lut);
1836
1837
160
    pl_icc_close(&p->icc_profile);
1838
160
    pl_renderer_destroy(&p->rr);
1839
1840
10.4k
    for (int i = 0; i < VO_PASS_PERF_MAX; ++i) {
1841
10.2k
        pl_shader_info_deref(&p->perf_fresh.info[i].shader);
1842
10.2k
        pl_shader_info_deref(&p->perf_redraw.info[i].shader);
1843
10.2k
    }
1844
1845
160
    pl_options_free(&p->pars);
1846
1847
160
    p->ra_ctx = NULL;
1848
160
    p->pllog = NULL;
1849
160
    p->gpu = NULL;
1850
160
    p->sw = NULL;
1851
160
    gpu_ctx_destroy(&p->context);
1852
160
}
1853
1854
static void load_hwdec_api(void *ctx, struct hwdec_imgfmt_request *params)
1855
0
{
1856
0
    vo_control(ctx, VOCTRL_LOAD_HWDEC_API, params);
1857
0
}
1858
1859
static int preinit(struct vo *vo)
1860
160
{
1861
160
    struct priv *p = vo->priv;
1862
160
    p->opts_cache = m_config_cache_alloc(p, vo->global, &gl_video_conf);
1863
160
    p->next_opts_cache = m_config_cache_alloc(p, vo->global, &gl_next_conf);
1864
160
    p->next_opts = p->next_opts_cache->opts;
1865
160
    p->video_eq = mp_csp_equalizer_create(p, vo->global);
1866
160
    p->global = vo->global;
1867
160
    p->log = vo->log;
1868
1869
160
    struct gl_video_opts *gl_opts = p->opts_cache->opts;
1870
160
    struct ra_ctx_opts *ctx_opts = mp_get_config_group(vo, vo->global, &ra_ctx_conf);
1871
160
    update_ra_ctx_options(vo, ctx_opts);
1872
160
    p->context = gpu_ctx_create(vo, ctx_opts);
1873
160
    talloc_free(ctx_opts);
1874
160
    if (!p->context)
1875
160
        goto err_out;
1876
    // For the time being, simply alias the gpu_ctx's members
1877
0
    p->ra_ctx = p->context->ra_ctx;
1878
0
    p->pllog = p->context->pllog;
1879
0
    p->gpu = p->context->gpu;
1880
0
    p->sw = p->context->swapchain;
1881
0
    p->hwdec_ctx = (struct ra_hwdec_ctx) {
1882
0
        .log = p->log,
1883
0
        .global = p->global,
1884
0
        .ra_ctx = p->ra_ctx,
1885
0
    };
1886
1887
0
    vo->hwdec_devs = hwdec_devices_create();
1888
0
    hwdec_devices_set_loader(vo->hwdec_devs, load_hwdec_api, vo);
1889
0
    ra_hwdec_ctx_init(&p->hwdec_ctx, vo->hwdec_devs, gl_opts->hwdec_interop, false);
1890
0
    mp_mutex_init(&p->dr_lock);
1891
1892
0
    if (gl_opts->shader_cache)
1893
0
        cache_init(vo, &p->shader_cache, 10 << 20, gl_opts->shader_cache_dir);
1894
0
    if (gl_opts->icc_opts->cache)
1895
0
        cache_init(vo, &p->icc_cache, 20 << 20, gl_opts->icc_opts->cache_dir);
1896
1897
0
    pl_gpu_set_cache(p->gpu, p->shader_cache.cache);
1898
0
    p->rr = pl_renderer_create(p->pllog, p->gpu);
1899
0
    p->queue = pl_queue_create(p->gpu);
1900
0
    p->osd_fmt[SUBBITMAP_LIBASS] = pl_find_named_fmt(p->gpu, "r8");
1901
0
    p->osd_fmt[SUBBITMAP_BGRA] = pl_find_named_fmt(p->gpu, "bgra8");
1902
0
    p->osd_sync = 1;
1903
1904
0
    p->pars = pl_options_alloc(p->pllog);
1905
0
    update_render_options(vo);
1906
0
    return 0;
1907
1908
160
err_out:
1909
160
    uninit(vo);
1910
160
    return -1;
1911
160
}
1912
1913
static const struct pl_filter_config *map_scaler(struct priv *p,
1914
                                                 enum scaler_unit unit)
1915
0
{
1916
0
    const struct pl_filter_preset fixed_scalers[] = {
1917
0
        { "bilinear",       &pl_filter_bilinear },
1918
0
        { "bicubic_fast",   &pl_filter_bicubic },
1919
0
        { "nearest",        &pl_filter_nearest },
1920
0
        { "oversample",     &pl_filter_oversample },
1921
0
        {0},
1922
0
    };
1923
1924
0
    const struct pl_filter_preset fixed_frame_mixers[] = {
1925
0
        { "linear",         &pl_filter_bilinear },
1926
0
        { "oversample",     &pl_filter_oversample },
1927
0
        {0},
1928
0
    };
1929
1930
0
    const struct pl_filter_preset *fixed_presets =
1931
0
        unit == SCALER_TSCALE ? fixed_frame_mixers : fixed_scalers;
1932
1933
0
    const struct gl_video_opts *opts = p->opts_cache->opts;
1934
0
    const struct scaler_config *cfg = &opts->scaler[unit];
1935
0
    if (cfg->kernel.function == SCALER_INHERIT)
1936
0
        cfg = &opts->scaler[SCALER_SCALE];
1937
0
    const char *kernel_name = m_opt_choice_str(cfg->kernel.functions,
1938
0
                                               cfg->kernel.function);
1939
1940
0
    for (int i = 0; fixed_presets[i].name; i++) {
1941
0
        if (strcmp(kernel_name, fixed_presets[i].name) == 0)
1942
0
            return fixed_presets[i].filter;
1943
0
    }
1944
1945
    // Attempt loading filter preset first, fall back to raw filter function
1946
0
    struct scaler_params *par = &p->scalers[unit];
1947
0
    const struct pl_filter_preset *preset;
1948
0
    const struct pl_filter_function_preset *fpreset;
1949
0
    if ((preset = pl_find_filter_preset(kernel_name))) {
1950
0
        par->config = *preset->filter;
1951
0
    } else if ((fpreset = pl_find_filter_function_preset(kernel_name))) {
1952
0
        par->config = (struct pl_filter_config) {
1953
0
            .kernel = fpreset->function,
1954
0
            .params[0] = fpreset->function->params[0],
1955
0
            .params[1] = fpreset->function->params[1],
1956
0
        };
1957
0
    } else {
1958
0
        MP_ERR(p, "Failed mapping filter function '%s', no libplacebo analog?\n",
1959
0
               kernel_name);
1960
0
        return &pl_filter_bilinear;
1961
0
    }
1962
1963
0
    const struct pl_filter_function_preset *wpreset;
1964
0
    if ((wpreset = pl_find_filter_function_preset(
1965
0
             m_opt_choice_str(cfg->window.functions, cfg->window.function)))) {
1966
0
        par->config.window = wpreset->function;
1967
0
        par->config.wparams[0] = wpreset->function->params[0];
1968
0
        par->config.wparams[1] = wpreset->function->params[1];
1969
0
    }
1970
1971
0
    for (int i = 0; i < 2; i++) {
1972
0
        if (!isnan(cfg->kernel.params[i]))
1973
0
            par->config.params[i] = cfg->kernel.params[i];
1974
0
        if (!isnan(cfg->window.params[i]))
1975
0
            par->config.wparams[i] = cfg->window.params[i];
1976
0
    }
1977
1978
0
    par->config.clamp = cfg->clamp;
1979
0
    if (cfg->antiring > 0.0)
1980
0
        par->config.antiring = cfg->antiring;
1981
0
    if (cfg->kernel.blur > 0.0)
1982
0
        par->config.blur = cfg->kernel.blur;
1983
0
    if (cfg->kernel.taper > 0.0)
1984
0
        par->config.taper = cfg->kernel.taper;
1985
0
    if (cfg->radius > 0.0) {
1986
0
        if (par->config.kernel->resizable) {
1987
0
            par->config.radius = cfg->radius;
1988
0
        } else {
1989
0
            MP_WARN(p, "Filter radius specified but filter '%s' is not "
1990
0
                    "resizable, ignoring\n", kernel_name);
1991
0
        }
1992
0
    }
1993
1994
0
    return &par->config;
1995
0
}
1996
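map_scaler() resolves a scaler name in three stages: the fixed tables above, then a libplacebo filter preset, then a bare filter function. A hedged sketch of that lookup order in isolation (the fallback mirrors the MP_ERR branch above):

static struct pl_filter_config resolve_filter_name(const char *name)
{
    // 1. Full preset: kernel plus its default window and parameters.
    const struct pl_filter_preset *preset = pl_find_filter_preset(name);
    if (preset)
        return *preset->filter;

    // 2. Bare kernel function; window and tuning stay at their defaults.
    const struct pl_filter_function_preset *fpreset =
        pl_find_filter_function_preset(name);
    if (fpreset)
        return (struct pl_filter_config) { .kernel = fpreset->function };

    // 3. No libplacebo analog: fall back to bilinear, as map_scaler() does.
    return pl_filter_bilinear;
}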
1997
static const struct pl_hook *load_hook(struct priv *p, const char *path)
1998
0
{
1999
0
    if (!path || !path[0])
2000
0
        return NULL;
2001
2002
0
    for (int i = 0; i < p->num_user_hooks; i++) {
2003
0
        if (strcmp(p->user_hooks[i].path, path) == 0)
2004
0
            return p->user_hooks[i].hook;
2005
0
    }
2006
2007
0
    char *fname = mp_get_user_path(NULL, p->global, path);
2008
0
    bstr shader = stream_read_file(fname, p, p->global, 1000000000); // 1GB
2009
0
    talloc_free(fname);
2010
2011
0
    const struct pl_hook *hook = NULL;
2012
0
    if (shader.len)
2013
0
        hook = pl_mpv_user_shader_parse(p->gpu, shader.start, shader.len);
2014
2015
0
    MP_TARRAY_APPEND(p, p->user_hooks, p->num_user_hooks, (struct user_hook) {
2016
0
        .path = talloc_strdup(p, path),
2017
0
        .hook = hook,
2018
0
    });
2019
2020
0
    return hook;
2021
0
}
2022
2023
static void update_icc_opts(struct priv *p, const struct mp_icc_opts *opts)
2024
0
{
2025
0
    if (!opts)
2026
0
        return;
2027
2028
0
    if (!opts->profile_auto && !p->icc_path) {
2029
        // Un-set any auto-loaded profiles if icc-profile-auto was disabled
2030
0
        update_icc(p, (bstr) {0});
2031
0
    }
2032
2033
0
    int s_r = 0, s_g = 0, s_b = 0;
2034
0
    gl_parse_3dlut_size(opts->size_str, &s_r, &s_g, &s_b);
2035
0
    p->icc_params = pl_icc_default_params;
2036
0
    p->icc_params.intent = opts->intent;
2037
0
    p->icc_params.size_r = s_r;
2038
0
    p->icc_params.size_g = s_g;
2039
0
    p->icc_params.size_b = s_b;
2040
0
    p->icc_params.cache = p->icc_cache.cache;
2041
2042
0
    if (!opts->profile || !opts->profile[0]) {
2043
        // No profile enabled, un-load any existing profiles
2044
0
        update_icc(p, (bstr) {0});
2045
0
        TA_FREEP(&p->icc_path);
2046
0
        return;
2047
0
    }
2048
2049
0
    if (p->icc_path && strcmp(opts->profile, p->icc_path) == 0)
2050
0
        return; // ICC profile hasn't changed
2051
2052
0
    char *fname = mp_get_user_path(NULL, p->global, opts->profile);
2053
0
    MP_VERBOSE(p, "Opening ICC profile '%s'\n", fname);
2054
0
    struct bstr icc = stream_read_file(fname, p, p->global, 100000000); // 100 MB
2055
0
    talloc_free(fname);
2056
0
    update_icc(p, icc);
2057
2058
    // Update cached path
2059
0
    talloc_replace(p, p->icc_path, opts->profile);
2060
0
}
2061
2062
static void update_lut(struct priv *p, struct user_lut *lut)
2063
0
{
2064
0
    if (!lut->opt) {
2065
0
        pl_lut_free(&lut->lut);
2066
0
        TA_FREEP(&lut->path);
2067
0
        return;
2068
0
    }
2069
2070
0
    if (lut->path && strcmp(lut->path, lut->opt) == 0)
2071
0
        return; // no change
2072
2073
    // Update cached path
2074
0
    pl_lut_free(&lut->lut);
2075
0
    talloc_replace(p, lut->path, lut->opt);
2076
2077
    // Load LUT file
2078
0
    char *fname = mp_get_user_path(NULL, p->global, lut->path);
2079
0
    MP_VERBOSE(p, "Loading custom LUT '%s'\n", fname);
2080
0
    const int lut_max_size = 1536 << 20; // 1.5 GiB, matches lut cache limit
2081
0
    struct bstr lutdata = stream_read_file(fname, NULL, p->global, lut_max_size);
2082
0
    if (!lutdata.len) {
2083
0
        MP_ERR(p, "Failed to read LUT data from %s, make sure it's a valid file "
2084
0
                  "and smaller or equal to %d bytes\n", fname, lut_max_size);
2085
0
    } else {
2086
0
        lut->lut = pl_lut_parse_cube(p->pllog, lutdata.start, lutdata.len);
2087
0
    }
2088
0
    talloc_free(fname);
2089
0
    talloc_free(lutdata.start);
2090
0
}
2091
2092
static void update_hook_opts_dynamic(struct priv *p, const struct pl_hook *hook,
2093
                                     const struct mp_image *mpi)
2094
0
{
2095
0
    float chroma_offset_x, chroma_offset_y;
2096
0
    pl_chroma_location_offset(mpi->params.chroma_location,
2097
0
                              &chroma_offset_x, &chroma_offset_y);
2098
0
    const struct {
2099
0
        const char *name;
2100
0
        double value;
2101
0
    } opts[] = {
2102
0
        {             "PTS", mpi->pts                           },
2103
0
        { "chroma_offset_x", chroma_offset_x                    },
2104
0
        { "chroma_offset_y", chroma_offset_y                    },
2105
0
        {        "min_luma", mpi->params.color.hdr.min_luma     },
2106
0
        {        "max_luma", mpi->params.color.hdr.max_luma     },
2107
0
        {         "max_cll", mpi->params.color.hdr.max_cll      },
2108
0
        {        "max_fall", mpi->params.color.hdr.max_fall     },
2109
0
        {     "scene_max_r", mpi->params.color.hdr.scene_max[0] },
2110
0
        {     "scene_max_g", mpi->params.color.hdr.scene_max[1] },
2111
0
        {     "scene_max_b", mpi->params.color.hdr.scene_max[2] },
2112
0
        {       "scene_avg", mpi->params.color.hdr.scene_avg    },
2113
0
        {        "max_pq_y", mpi->params.color.hdr.max_pq_y     },
2114
0
        {        "avg_pq_y", mpi->params.color.hdr.avg_pq_y     },
2115
0
    };
2116
2117
0
    for (int i = 0; i < hook->num_parameters; i++) {
2118
0
        const struct pl_hook_par *hp = &hook->parameters[i];
2119
0
        for (int n = 0; n < MP_ARRAY_SIZE(opts); n++) {
2120
0
            if (strcmp(hp->name, opts[n].name) != 0)
2121
0
                continue;
2122
2123
0
            switch (hp->type) {
2124
0
                case PL_VAR_FLOAT: hp->data->f = opts[n].value; break;
2125
0
                case PL_VAR_SINT:  hp->data->i = lrint(opts[n].value); break;
2126
0
                case PL_VAR_UINT:  hp->data->u = lrint(opts[n].value); break;
2127
0
            }
2128
0
        }
2129
0
    }
2130
0
}
2131
2132
static void update_hook_opts(struct priv *p, char **opts, const char *shaderpath,
2133
                             const struct pl_hook *hook)
2134
0
{
2135
0
    if (!opts)
2136
0
        return;
2137
2138
0
    const char *basename = mp_basename(shaderpath);
2139
0
    struct bstr shadername;
2140
0
    if (!mp_splitext(basename, &shadername))
2141
0
        shadername = bstr0(basename);
2142
2143
0
    for (int i = 0; i < hook->num_parameters; i++) {
2144
0
        const struct pl_hook_par *hp = &hook->parameters[i];
2145
0
        memcpy(hp->data, &hp->initial, sizeof(*hp->data));
2146
0
    }
2147
2148
0
    for (int n = 0; opts[n * 2]; n++) {
2149
0
        struct bstr k = bstr0(opts[n * 2 + 0]);
2150
0
        struct bstr v = bstr0(opts[n * 2 + 1]);
2151
0
        int pos;
2152
0
        if ((pos = bstrchr(k, '/')) >= 0) {
2153
0
            if (!bstr_equals(bstr_splice(k, 0, pos), shadername))
2154
0
                continue;
2155
0
            k = bstr_cut(k, pos + 1);
2156
0
        }
2157
2158
0
        for (int i = 0; i < hook->num_parameters; i++) {
2159
0
            const struct pl_hook_par *hp = &hook->parameters[i];
2160
0
            if (!bstr_equals0(k, hp->name))
2161
0
                continue;
2162
2163
0
            m_option_t opt = {
2164
0
                .name = hp->name,
2165
0
            };
2166
2167
0
            if (hp->names) {
2168
0
                for (int j = hp->minimum.i; j <= hp->maximum.i; j++) {
2169
0
                    if (bstr_equals0(v, hp->names[j])) {
2170
0
                        hp->data->i = j;
2171
0
                        goto next_hook;
2172
0
                    }
2173
0
                }
2174
0
            }
2175
2176
0
            switch (hp->type) {
2177
0
            case PL_VAR_FLOAT:
2178
0
                opt.type = &m_option_type_float;
2179
0
                opt.min = hp->minimum.f;
2180
0
                opt.max = hp->maximum.f;
2181
0
                break;
2182
0
            case PL_VAR_SINT:
2183
0
                opt.type = &m_option_type_int;
2184
0
                opt.min = hp->minimum.i;
2185
0
                opt.max = hp->maximum.i;
2186
0
                break;
2187
0
            case PL_VAR_UINT:
2188
0
                opt.type = &m_option_type_int;
2189
0
                opt.min = MPMIN(hp->minimum.u, INT_MAX);
2190
0
                opt.max = MPMIN(hp->maximum.u, INT_MAX);
2191
0
                break;
2192
0
            }
2193
2194
0
            if (!opt.type)
2195
0
                goto next_hook;
2196
2197
0
            opt.type->parse(p->log, &opt, k, v, hp->data);
2198
0
            goto next_hook;
2199
0
        }
2200
2201
0
    next_hook:;
2202
0
    }
2203
0
}
2204
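For reference, how a user shader parameter override would be routed by the loop above (names and values are hypothetical):

// Flattened key/value pairs as they arrive in `opts`, NULL-terminated:
//   { "mysharpen/strength", "0.7",   // prefixed: only for mysharpen.glsl
//     "strength",           "0.5",   // unprefixed: any hook with "strength"
//     NULL }
// The prefix before '/' must equal the shader's basename without extension;
// the remaining key is matched against hook->parameters[i].name and the
// value parsed with an m_option of the matching PL_VAR_* type and range.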
2205
static void update_render_options(struct vo *vo)
2206
0
{
2207
0
    struct priv *p = vo->priv;
2208
0
    pl_options pars = p->pars;
2209
0
    const struct gl_video_opts *opts = p->opts_cache->opts;
2210
0
    pars->params.background_color[0] = opts->background_color.r / 255.0;
2211
0
    pars->params.background_color[1] = opts->background_color.g / 255.0;
2212
0
    pars->params.background_color[2] = opts->background_color.b / 255.0;
2213
0
    pars->params.background_transparency = 1 - opts->background_color.a / 255.0;
2214
0
    pars->params.skip_anti_aliasing = !opts->correct_downscaling;
2215
0
    pars->params.disable_linear_scaling = !opts->linear_downscaling && !opts->linear_upscaling;
2216
0
    pars->params.disable_fbos = opts->dumb_mode == 1;
2217
2218
0
#if PL_API_VER >= 346
2219
0
    int map_background_types[3] = {
2220
0
        PL_CLEAR_SKIP,  // BACKGROUND_NONE
2221
0
        PL_CLEAR_COLOR, // BACKGROUND_COLOR
2222
0
        PL_CLEAR_TILES, // BACKGROUND_TILES
2223
0
    };
2224
0
    pars->params.background = map_background_types[opts->background];
2225
0
    pars->params.border = map_background_types[p->next_opts->border_background];
2226
#else
2227
    pars->params.blend_against_tiles = opts->background == BACKGROUND_TILES;
2228
#endif
2229
2230
0
    pars->params.corner_rounding = p->next_opts->corner_rounding;
2231
0
    pars->params.correct_subpixel_offsets = !opts->scaler_resizes_only;
2232
2233
    // Map scaler options as best we can
2234
0
    pars->params.upscaler = map_scaler(p, SCALER_SCALE);
2235
0
    pars->params.downscaler = map_scaler(p, SCALER_DSCALE);
2236
0
    pars->params.plane_upscaler = map_scaler(p, SCALER_CSCALE);
2237
0
    pars->params.frame_mixer = opts->interpolation ? map_scaler(p, SCALER_TSCALE) : NULL;
2238
2239
    // Request as many frames from the decoder as libplacebo may need: the
2240
    // lower the vsync/FPS ratio, the more source frames the mixer covers.
2241
    // Support ratios down to 1/2, but only when anti-aliasing is enabled.
2242
0
    int req_frames = 2;
2243
0
    if (pars->params.frame_mixer) {
2244
0
        req_frames += ceilf(pars->params.frame_mixer->kernel->radius) *
2245
0
                      (pars->params.skip_anti_aliasing ? 1 : 2);
2246
0
    }
2247
0
    vo_set_queue_params(vo, 0, MPMIN(VO_MAX_REQ_FRAMES, req_frames));
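    // Worked example of the formula above (radius value hypothetical):
    //   req_frames = 2 + ceil(radius) * (skip_anti_aliasing ? 1 : 2)
    //   radius = 2 with anti-aliasing on  -> 2 + 2 * 2 = 6
    //   radius = 2 with anti-aliasing off -> 2 + 2 * 1 = 4
    // The request is clamped to VO_MAX_REQ_FRAMES via MPMIN() above.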
2248
2249
0
    pars->params.deband_params = opts->deband ? &pars->deband_params : NULL;
2250
0
    pars->deband_params.iterations = opts->deband_opts->iterations;
2251
0
    pars->deband_params.radius = opts->deband_opts->range;
2252
0
    pars->deband_params.threshold = opts->deband_opts->threshold / 16.384;
2253
0
    pars->deband_params.grain = opts->deband_opts->grain / 8.192;
2254
2255
0
    pars->params.sigmoid_params = opts->sigmoid_upscaling ? &pars->sigmoid_params : NULL;
2256
0
    pars->sigmoid_params.center = opts->sigmoid_center;
2257
0
    pars->sigmoid_params.slope = opts->sigmoid_slope;
2258
2259
0
    pars->params.peak_detect_params = opts->tone_map.compute_peak >= 0 ? &pars->peak_detect_params : NULL;
2260
0
    pars->peak_detect_params.smoothing_period = opts->tone_map.decay_rate;
2261
0
    pars->peak_detect_params.scene_threshold_low = opts->tone_map.scene_threshold_low;
2262
0
    pars->peak_detect_params.scene_threshold_high = opts->tone_map.scene_threshold_high;
2263
0
    pars->peak_detect_params.percentile = opts->tone_map.peak_percentile;
2264
0
    pars->peak_detect_params.allow_delayed = p->next_opts->delayed_peak;
2265
2266
0
    const struct pl_tone_map_function * const tone_map_funs[] = {
2267
0
        [TONE_MAPPING_AUTO]     = &pl_tone_map_auto,
2268
0
        [TONE_MAPPING_CLIP]     = &pl_tone_map_clip,
2269
0
        [TONE_MAPPING_MOBIUS]   = &pl_tone_map_mobius,
2270
0
        [TONE_MAPPING_REINHARD] = &pl_tone_map_reinhard,
2271
0
        [TONE_MAPPING_HABLE]    = &pl_tone_map_hable,
2272
0
        [TONE_MAPPING_GAMMA]    = &pl_tone_map_gamma,
2273
0
        [TONE_MAPPING_LINEAR]   = &pl_tone_map_linear,
2274
0
        [TONE_MAPPING_SPLINE]   = &pl_tone_map_spline,
2275
0
        [TONE_MAPPING_BT_2390]  = &pl_tone_map_bt2390,
2276
0
        [TONE_MAPPING_BT_2446A] = &pl_tone_map_bt2446a,
2277
0
        [TONE_MAPPING_ST2094_40] = &pl_tone_map_st2094_40,
2278
0
        [TONE_MAPPING_ST2094_10] = &pl_tone_map_st2094_10,
2279
0
    };
2280
2281
0
    const struct pl_gamut_map_function * const gamut_modes[] = {
2282
0
        [GAMUT_AUTO]            = pl_color_map_default_params.gamut_mapping,
2283
0
        [GAMUT_CLIP]            = &pl_gamut_map_clip,
2284
0
        [GAMUT_PERCEPTUAL]      = &pl_gamut_map_perceptual,
2285
0
        [GAMUT_RELATIVE]        = &pl_gamut_map_relative,
2286
0
        [GAMUT_SATURATION]      = &pl_gamut_map_saturation,
2287
0
        [GAMUT_ABSOLUTE]        = &pl_gamut_map_absolute,
2288
0
        [GAMUT_DESATURATE]      = &pl_gamut_map_desaturate,
2289
0
        [GAMUT_DARKEN]          = &pl_gamut_map_darken,
2290
0
        [GAMUT_WARN]            = &pl_gamut_map_highlight,
2291
0
        [GAMUT_LINEAR]          = &pl_gamut_map_linear,
2292
0
    };
2293
2294
0
    pars->color_map_params.tone_mapping_function = tone_map_funs[opts->tone_map.curve];
2295
0
AV_NOWARN_DEPRECATED(
2296
0
    pars->color_map_params.tone_mapping_param = opts->tone_map.curve_param;
2297
0
    if (isnan(pars->color_map_params.tone_mapping_param)) // vo_gpu compatibility
2298
0
        pars->color_map_params.tone_mapping_param = 0.0;
2299
0
)
2300
0
    pars->color_map_params.inverse_tone_mapping = opts->tone_map.inverse;
2301
0
    pars->color_map_params.contrast_recovery = opts->tone_map.contrast_recovery;
2302
0
    pars->color_map_params.visualize_lut = opts->tone_map.visualize;
2303
0
    pars->color_map_params.contrast_smoothness = opts->tone_map.contrast_smoothness;
2304
0
    pars->color_map_params.gamut_mapping = gamut_modes[opts->tone_map.gamut_mode];
2305
2306
0
    pars->params.dither_params = NULL;
2307
0
    pars->params.error_diffusion = NULL;
2308
2309
0
    switch (opts->dither_algo) {
2310
0
    case DITHER_ERROR_DIFFUSION:
2311
0
        pars->params.error_diffusion = pl_find_error_diffusion_kernel(opts->error_diffusion);
2312
0
        if (!pars->params.error_diffusion) {
2313
0
            MP_WARN(p, "Could not find error diffusion kernel '%s', falling "
2314
0
                    "back to fruit.\n", opts->error_diffusion);
2315
0
        }
2316
0
        MP_FALLTHROUGH;
2317
0
    case DITHER_ORDERED:
2318
0
    case DITHER_FRUIT:
2319
0
        pars->params.dither_params = &pars->dither_params;
2320
0
        pars->dither_params.method = opts->dither_algo == DITHER_ORDERED
2321
0
                                ? PL_DITHER_ORDERED_FIXED
2322
0
                                : PL_DITHER_BLUE_NOISE;
2323
0
        pars->dither_params.lut_size = opts->dither_size;
2324
0
        pars->dither_params.temporal = opts->temporal_dither;
2325
0
        break;
2326
0
    }
2327
2328
0
    if (opts->dither_depth < 0) {
2329
0
        pars->params.dither_params = NULL;
2330
0
        pars->params.error_diffusion = NULL;
2331
0
    }
2332
2333
0
    update_icc_opts(p, opts->icc_opts);
2334
2335
0
    pars->params.num_hooks = 0;
2336
0
    const struct pl_hook *hook;
2337
0
    for (int i = 0; opts->user_shaders && opts->user_shaders[i]; i++) {
2338
0
        if ((hook = load_hook(p, opts->user_shaders[i]))) {
2339
0
            MP_TARRAY_APPEND(p, p->hooks, pars->params.num_hooks, hook);
2340
0
            update_hook_opts(p, opts->user_shader_opts, opts->user_shaders[i], hook);
2341
0
        }
2342
0
    }
2343
2344
0
    pars->params.hooks = p->hooks;
2345
2346
0
    MP_DBG(p, "Render options updated, resetting render state.\n");
2347
0
    p->want_reset = true;
2348
0
}
2349
2350
const struct vo_driver video_out_gpu_next = {
2351
    .description = "Video output based on libplacebo",
2352
    .name = "gpu-next",
2353
    .caps = VO_CAP_ROTATE90 |
2354
            VO_CAP_FILM_GRAIN |
2355
            VO_CAP_VFLIP |
2356
            0x0,
2357
    .preinit = preinit,
2358
    .query_format = query_format,
2359
    .reconfig = reconfig,
2360
    .control = control,
2361
    .get_image_ts = get_image,
2362
    .draw_frame = draw_frame,
2363
    .flip_page = flip_page,
2364
    .get_vsync = get_vsync,
2365
    .wait_events = wait_events,
2366
    .wakeup = wakeup,
2367
    .uninit = uninit,
2368
    .priv_size = sizeof(struct priv),
2369
};