Coverage Report

Created: 2026-03-12 07:20

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/mpv/video/out/gpu/video.c
Line
Count
Source
1
/*
2
 * This file is part of mpv.
3
 *
4
 * mpv is free software; you can redistribute it and/or
5
 * modify it under the terms of the GNU Lesser General Public
6
 * License as published by the Free Software Foundation; either
7
 * version 2.1 of the License, or (at your option) any later version.
8
 *
9
 * mpv is distributed in the hope that it will be useful,
10
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12
 * GNU Lesser General Public License for more details.
13
 *
14
 * You should have received a copy of the GNU Lesser General Public
15
 * License along with mpv.  If not, see <http://www.gnu.org/licenses/>.
16
 */
17
18
#include <assert.h>
19
#include <float.h>
20
#include <math.h>
21
#include <stdarg.h>
22
#include <stdbool.h>
23
#include <string.h>
24
25
#include <libavutil/common.h>
26
#include <libavutil/lfg.h>
27
28
#include "video.h"
29
30
#include "misc/bstr.h"
31
#include "options/m_config.h"
32
#include "options/path.h"
33
#include "options/options.h"
34
#include "utils.h"
35
#include "hwdec.h"
36
#include "osd.h"
37
#include "ra.h"
38
#include "stream/stream.h"
39
#include "video_shaders.h"
40
#include "user_shaders.h"
41
#include "error_diffusion.h"
42
#include "video/out/filter_kernels.h"
43
#include "video/out/aspect.h"
44
#include "video/out/dither.h"
45
#include "video/out/vo.h"
46
47
// must be sorted, and terminated with 0
48
int filter_sizes[] =
49
    {2, 4, 6, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 0};
50
int tscale_sizes[] = {2, 4, 6, 8, 0};
51
52
struct vertex_pt {
53
    float x, y;
54
};
55
56
struct texplane {
57
    struct ra_tex *tex;
58
    int w, h;
59
    bool flipped;
60
};
61
62
struct video_image {
63
    struct texplane planes[4];
64
    struct mp_image *mpi;       // original input image
65
    uint64_t id;                // unique ID identifying mpi contents
66
    bool hwdec_mapped;
67
};
68
69
enum plane_type {
70
    PLANE_NONE = 0,
71
    PLANE_RGB,
72
    PLANE_LUMA,
73
    PLANE_CHROMA,
74
    PLANE_ALPHA,
75
    PLANE_XYZ,
76
};
77
78
static const char *plane_names[] = {
79
    [PLANE_NONE] = "unknown",
80
    [PLANE_RGB] = "rgb",
81
    [PLANE_LUMA] = "luma",
82
    [PLANE_CHROMA] = "chroma",
83
    [PLANE_ALPHA] = "alpha",
84
    [PLANE_XYZ] = "xyz",
85
};
86
87
// A self-contained description of a source image which can be bound to a
88
// texture unit and sampled from. Contains metadata about how it's to be used
89
struct image {
90
    enum plane_type type; // must be set to something non-zero
91
    int components; // number of relevant coordinates
92
    float multiplier; // multiplier to be used when sampling
93
    struct ra_tex *tex;
94
    int w, h; // logical size (after transformation)
95
    struct gl_transform transform; // rendering transformation
96
    int padding; // number of leading padding components (e.g. 2 = rg is padding)
97
};
98
99
// A named image, for user scripting purposes
100
struct saved_img {
101
    const char *name;
102
    struct image img;
103
};
104
105
// A texture hook. This is some operation that transforms a named texture as
106
// soon as it's generated
107
struct tex_hook {
108
    const char *save_tex;
109
    const char *hook_tex[SHADER_MAX_HOOKS];
110
    const char *bind_tex[SHADER_MAX_BINDS];
111
    int components; // how many components are relevant (0 = same as input)
112
    bool align_offset; // whether to align hooked tex with reference.
113
    void *priv; // this gets talloc_freed when the tex_hook is removed
114
    void (*hook)(struct gl_video *p, struct image img, // generates GLSL
115
                 struct gl_transform *trans, void *priv);
116
    bool (*cond)(struct gl_video *p, struct image img, void *priv);
117
};
118
119
struct surface {
120
    struct ra_tex *tex;
121
    uint64_t id;
122
    double pts;
123
};
124
125
0
#define SURFACES_MAX 10
126
127
struct cached_file {
128
    char *path;
129
    struct bstr body;
130
};
131
132
struct pass_info {
133
    struct bstr desc;
134
    struct mp_pass_perf perf;
135
};
136
137
struct dr_buffer {
138
    struct ra_buf *buf;
139
    // The mpi reference will keep the data from being recycled (or from other
140
    // references gaining write access) while the GPU is accessing the buffer.
141
    struct mp_image *mpi;
142
};
143
144
struct gl_video {
145
    struct ra *ra;
146
147
    struct mpv_global *global;
148
    struct mp_log *log;
149
    struct gl_video_opts opts;
150
    struct m_config_cache *opts_cache;
151
    struct gl_lcms *cms;
152
153
    int fb_depth;               // actual bits available in GL main framebuffer
154
    struct m_color clear_color;
155
    bool force_clear_color;
156
157
    struct gl_shader_cache *sc;
158
159
    struct osd_state *osd_state;
160
    struct mpgl_osd *osd;
161
    double osd_pts;
162
163
    struct ra_tex *lut_3d_texture;
164
    bool use_lut_3d;
165
    int lut_3d_size[3];
166
167
    struct ra_tex *dither_texture;
168
169
    struct mp_image_params real_image_params;   // configured format
170
    struct mp_image_params image_params;        // texture format (mind hwdec case)
171
    struct mp_image_params target_params;       // target format
172
    struct ra_imgfmt_desc ra_format;            // texture format
173
    int plane_count;
174
175
    bool is_gray;
176
    bool has_alpha;
177
    char color_swizzle[5];
178
    bool use_integer_conversion;
179
180
    struct video_image image;
181
182
    struct dr_buffer *dr_buffers;
183
    int num_dr_buffers;
184
185
    bool using_dr_path;
186
187
    bool dumb_mode;
188
    bool forced_dumb_mode;
189
190
    // Cached vertex array, to avoid re-allocation per frame. For simplicity,
191
    // our vertex format is simply a list of `vertex_pt`s, since this greatly
192
    // simplifies offset calculation at the cost of (unneeded) flexibility.
193
    struct vertex_pt *tmp_vertex;
194
    struct ra_renderpass_input *vao;
195
    int vao_len;
196
197
    const struct ra_format *fbo_format;
198
    struct ra_tex *merge_tex[4];
199
    struct ra_tex *scale_tex[4];
200
    struct ra_tex *integer_tex[4];
201
    struct ra_tex *chroma_tex[4];
202
    struct ra_tex *indirect_tex;
203
    struct ra_tex *blend_subs_tex;
204
    struct ra_tex *error_diffusion_tex[2];
205
    struct ra_tex *screen_tex;
206
    struct ra_tex *output_tex;
207
    struct ra_tex **hook_textures;
208
    int num_hook_textures;
209
    int idx_hook_textures;
210
211
    struct ra_buf *hdr_peak_ssbo;
212
    struct surface surfaces[SURFACES_MAX];
213
214
    // user pass descriptions and textures
215
    struct tex_hook *tex_hooks;
216
    int num_tex_hooks;
217
    struct gl_user_shader_tex *user_textures;
218
    int num_user_textures;
219
220
    int surface_idx;
221
    int surface_now;
222
    int frames_drawn;
223
    bool is_interpolated;
224
    bool output_tex_valid;
225
226
    // state for configured scalers
227
    struct scaler scaler[SCALER_COUNT];
228
229
    struct mp_csp_equalizer_state *video_eq;
230
231
    struct mp_rect src_rect;    // displayed part of the source video
232
    struct mp_rect dst_rect;    // video rectangle on output window
233
    struct mp_osd_res osd_rect; // OSD size/margins
234
235
    // temporary during rendering
236
    struct compute_info pass_compute; // compute shader metadata for this pass
237
    struct image *pass_imgs;          // bound images for this pass
238
    int num_pass_imgs;
239
    struct saved_img *saved_imgs;     // saved (named) images for this frame
240
    int num_saved_imgs;
241
242
    // effective current texture metadata - this will essentially affect the
243
    // next render pass target, as well as implicitly tracking what needs to
244
    // be done with the image
245
    int texture_w, texture_h;
246
    struct gl_transform texture_offset; // texture transform without rotation
247
    int components;
248
    bool use_linear;
249
    float user_gamma;
250
251
    // pass info / metrics
252
    struct pass_info pass_fresh[VO_PASS_PERF_MAX];
253
    struct pass_info pass_redraw[VO_PASS_PERF_MAX];
254
    struct pass_info *pass;
255
    int pass_idx;
256
    struct timer_pool *upload_timer;
257
    struct timer_pool *blit_timer;
258
    struct timer_pool *osd_timer;
259
260
    int frames_uploaded;
261
    int frames_rendered;
262
    AVLFG lfg;
263
264
    // Cached because computing it can take relatively long
265
    int last_dither_matrix_size;
266
    float *last_dither_matrix;
267
268
    struct cached_file *files;
269
    int num_files;
270
271
    struct ra_hwdec_ctx hwdec_ctx;
272
    struct ra_hwdec_mapper *hwdec_mapper;
273
    struct ra_hwdec *hwdec_overlay;
274
    bool hwdec_active;
275
276
    bool dsi_warned;
277
    bool broken_frame; // temporary error state
278
279
    bool colorspace_override_warned;
280
    bool correct_downscaling_warned;
281
};
282
283
#define FIXED_SCALE_KERNELS \
284
    {"bilinear",             SCALER_BILINEAR}, \
285
    {"bicubic_fast",         SCALER_BICUBIC_FAST}, \
286
    {"oversample",           SCALER_OVERSAMPLE}, \
287
288
#define NON_POLAR_FILTER_KERNELS \
289
    {"spline16",      SCALER_SPLINE16}, \
290
    {"spline36",      SCALER_SPLINE36}, \
291
    {"spline64",      SCALER_SPLINE64}, \
292
    {"sinc",          SCALER_SINC}, \
293
    {"lanczos",       SCALER_LANCZOS}, \
294
    {"ginseng",       SCALER_GINSENG}, \
295
    {"bicubic",       SCALER_BICUBIC}, \
296
    {"hermite",       SCALER_HERMITE}, \
297
    {"catmull_rom",   SCALER_CATMULL_ROM}, \
298
    {"mitchell",      SCALER_MITCHELL}, \
299
    {"robidoux",      SCALER_ROBIDOUX}, \
300
    {"robidouxsharp", SCALER_ROBIDOUXSHARP}, \
301
    {"box",           SCALER_BOX}, \
302
    {"nearest",       SCALER_NEAREST}, \
303
    {"triangle",      SCALER_TRIANGLE}, \
304
    {"gaussian",      SCALER_GAUSSIAN}, \
305
306
#define POLAR_FILTER_KERNELS \
307
    {"jinc",                 SCALER_JINC}, \
308
    {"ewa_lanczos",          SCALER_EWA_LANCZOS}, \
309
    {"ewa_hanning",          SCALER_EWA_HANNING}, \
310
    {"ewa_ginseng",          SCALER_EWA_GINSENG}, \
311
    {"ewa_lanczossharp",     SCALER_EWA_LANCZOSSHARP}, \
312
    {"ewa_lanczos4sharpest", SCALER_EWA_LANCZOS4SHARPEST}, \
313
    {"ewa_lanczossoft",      SCALER_EWA_LANCZOSSOFT}, \
314
    {"haasnsoft",            SCALER_HAASNSOFT}, \
315
    {"ewa_robidoux",         SCALER_EWA_ROBIDOUX}, \
316
    {"ewa_robidouxsharp",    SCALER_EWA_ROBIDOUXSHARP}, \
317
318
#define FILTER_WINDOWS \
319
    {"bartlett", WINDOW_BARTLETT}, \
320
    {"cosine",   WINDOW_COSINE}, \
321
    {"hanning",  WINDOW_HANNING}, \
322
    {"tukey",    WINDOW_TUKEY}, \
323
    {"hamming",  WINDOW_HAMMING}, \
324
    {"quadric",  WINDOW_QUADRIC}, \
325
    {"welch",    WINDOW_WELCH}, \
326
    {"kaiser",   WINDOW_KAISER}, \
327
    {"blackman", WINDOW_BLACKMAN}, \
328
    {"sphinx",   WINDOW_SPHINX}, \
329
330
static const struct m_opt_choice_alternatives scale_filters[] = {
331
    FIXED_SCALE_KERNELS
332
    NON_POLAR_FILTER_KERNELS
333
    POLAR_FILTER_KERNELS
334
    FILTER_WINDOWS
335
    {0},
336
};
337
338
static const struct m_opt_choice_alternatives cdscale_filters[] = {
339
    {"", SCALER_INHERIT},
340
    FIXED_SCALE_KERNELS
341
    NON_POLAR_FILTER_KERNELS
342
    POLAR_FILTER_KERNELS
343
    FILTER_WINDOWS
344
    {0},
345
};
346
347
static const struct m_opt_choice_alternatives tscale_filters[] = {
348
    {"oversample", SCALER_OVERSAMPLE},
349
    {"linear",     SCALER_LINEAR},
350
    NON_POLAR_FILTER_KERNELS
351
    FILTER_WINDOWS
352
    {"jinc",       WINDOW_JINC},
353
    {0},
354
};
355
356
static const struct m_opt_choice_alternatives filter_windows[] = {
357
    {"",     WINDOW_PREFERRED},
358
    FILTER_WINDOWS
359
    {"jinc", WINDOW_JINC},
360
    {0},
361
};
362
363
static const struct gl_video_opts gl_video_opts_def = {
364
    .dither_algo = DITHER_FRUIT,
365
    .dither_size = 6,
366
    .temporal_dither_period = 1,
367
    .error_diffusion = "sierra-lite",
368
    .fbo_format = "auto",
369
    .sigmoid_center = 0.75,
370
    .sigmoid_slope = 6.5,
371
    .scaler = {
372
        [SCALER_SCALE] =  {
373
            {SCALER_LANCZOS, .params = {NAN, NAN}, .functions = scale_filters},
374
            {WINDOW_PREFERRED, .params = {NAN, NAN}, .functions = filter_windows},
375
        },
376
        [SCALER_DSCALE] = {
377
            {SCALER_HERMITE, .params = {NAN, NAN}, .functions = cdscale_filters},
378
            {WINDOW_PREFERRED, .params = {NAN, NAN}, .functions = filter_windows},
379
        },
380
        [SCALER_CSCALE] = {
381
            {SCALER_INHERIT, .params = {NAN, NAN}, .functions = cdscale_filters},
382
            {WINDOW_PREFERRED, .params = {NAN, NAN}, .functions = filter_windows},
383
        },
384
        [SCALER_TSCALE] = {
385
            {SCALER_OVERSAMPLE, .params = {NAN, NAN}, .functions = tscale_filters},
386
            {WINDOW_PREFERRED, .params = {NAN, NAN}, .functions = filter_windows},
387
        },
388
    },
389
    .scaler_resizes_only = true,
390
    .correct_downscaling = true,
391
    .linear_downscaling = true,
392
    .sigmoid_upscaling = true,
393
    .interpolation_threshold = 0.01,
394
    .background = BACKGROUND_TILES,
395
    .background_color = {0, 0, 0, 255},
396
    .background_tile_color = {{237, 237, 237, 255},
397
                              {222, 222, 222, 255}},
398
    .background_tile_size = 16,
399
    .gamma = 1.0f,
400
    .tone_map = {
401
        .curve = TONE_MAPPING_AUTO,
402
        .curve_param = NAN,
403
        .max_boost = 1.0,
404
        .decay_rate = 20.0,
405
        .scene_threshold_low = 1.0,
406
        .scene_threshold_high = 3.0,
407
        .contrast_smoothness = 3.5,
408
    },
409
    .early_flush = -1,
410
    .shader_cache = true,
411
    .hwdec_interop = "auto",
412
    .treat_srgb_as_power22 = 1|2|4, // auto
413
};
414
415
static OPT_STRING_VALIDATE_FUNC(validate_error_diffusion_opt);
416
417
#define OPT_BASE_STRUCT struct gl_video_opts
418
419
// Use for options which use NAN for defaults.
420
#define OPT_FLOATDEF(field) \
421
    OPT_FLOAT(field), \
422
    .flags = M_OPT_DEFAULT_NAN
423
424
#define SCALER_OPTS(n, i) \
425
    {n"-param1", OPT_FLOATDEF(scaler[i].kernel.params[0])},                \
426
    {n"-param2", OPT_FLOATDEF(scaler[i].kernel.params[1])},                \
427
    {n"-blur",   OPT_FLOAT(scaler[i].kernel.blur)},                        \
428
    {n"-taper",  OPT_FLOAT(scaler[i].kernel.taper), M_RANGE(0.0, 1.0)},    \
429
    {n"-wparam", OPT_FLOATDEF(scaler[i].window.params[0])},                \
430
    {n"-wtaper", OPT_FLOAT(scaler[i].window.taper), M_RANGE(0.0, 1.0)},    \
431
    {n"-clamp",  OPT_FLOAT(scaler[i].clamp), M_RANGE(0.0, 1.0)},           \
432
    {n"-radius", OPT_FLOAT(scaler[i].radius), M_RANGE(0.5, 16.0)},         \
433
    {n"-antiring", OPT_FLOAT(scaler[i].antiring), M_RANGE(0.0, 1.0)},      \
434
    {n"-window", OPT_CHOICE_C(scaler[i].window.function, filter_windows)}
435
436
const struct m_sub_options gl_video_conf = {
437
    .opts = (const m_option_t[]) {
438
        {"gpu-dumb-mode", OPT_CHOICE(dumb_mode,
439
            {"auto", 0}, {"yes", 1}, {"no", -1})},
440
        {"gamma-factor", OPT_FLOAT(gamma), M_RANGE(0.1, 2.0)},
441
        {"gamma-auto", OPT_BOOL(gamma_auto),
442
            .deprecation_message = "replacement: gamma-auto.lua"},
443
        {"target-prim", OPT_CHOICE_C(target_prim, pl_csp_prim_names)},
444
        {"target-trc", OPT_CHOICE_C(target_trc, pl_csp_trc_names)},
445
        {"target-peak", OPT_CHOICE(target_peak, {"auto", 0}),
446
            M_RANGE(10, 10000)},
447
        {"hdr-reference-white", OPT_CHOICE(hdr_reference_white, {"auto", 0}),
448
            M_RANGE(10, 10000)},
449
        {"sdr-adjust-gamma", OPT_CHOICE(sdr_adjust_gamma,
450
            {"auto", 0}, {"yes", 1}, {"no", -1})},
451
        {"treat-srgb-as-power22", OPT_CHOICE(treat_srgb_as_power22,
452
            {"no", 0}, {"input", 1}, {"output", 2}, {"both", 1|2}, {"auto", 1|2|4})},
453
        {"target-contrast", OPT_CHOICE(target_contrast, {"auto", 0}, {"inf", -1}),
454
            M_RANGE(10, 10 / PL_COLOR_HDR_BLACK)},
455
        {"target-gamut", OPT_CHOICE_C(target_gamut, pl_csp_prim_names)},
456
        {"tone-mapping", OPT_CHOICE(tone_map.curve,
457
            {"auto",     TONE_MAPPING_AUTO},
458
            {"clip",     TONE_MAPPING_CLIP},
459
            {"mobius",   TONE_MAPPING_MOBIUS},
460
            {"reinhard", TONE_MAPPING_REINHARD},
461
            {"hable",    TONE_MAPPING_HABLE},
462
            {"gamma",    TONE_MAPPING_GAMMA},
463
            {"linear",   TONE_MAPPING_LINEAR},
464
            {"spline",   TONE_MAPPING_SPLINE},
465
            {"bt.2390",  TONE_MAPPING_BT_2390},
466
            {"bt.2446a", TONE_MAPPING_BT_2446A},
467
            {"st2094-40", TONE_MAPPING_ST2094_40},
468
            {"st2094-10", TONE_MAPPING_ST2094_10})},
469
        {"tone-mapping-param", OPT_FLOATDEF(tone_map.curve_param)},
470
        {"inverse-tone-mapping", OPT_BOOL(tone_map.inverse)},
471
        {"tone-mapping-max-boost", OPT_FLOAT(tone_map.max_boost),
472
            M_RANGE(1.0, 10.0)},
473
        {"tone-mapping-visualize", OPT_BOOL(tone_map.visualize)},
474
        {"gamut-mapping-mode", OPT_CHOICE(tone_map.gamut_mode,
475
            {"auto",        GAMUT_AUTO},
476
            {"clip",        GAMUT_CLIP},
477
            {"perceptual",  GAMUT_PERCEPTUAL},
478
            {"relative",    GAMUT_RELATIVE},
479
            {"saturation",  GAMUT_SATURATION},
480
            {"absolute",    GAMUT_ABSOLUTE},
481
            {"desaturate",  GAMUT_DESATURATE},
482
            {"darken",      GAMUT_DARKEN},
483
            {"warn",        GAMUT_WARN},
484
            {"linear",      GAMUT_LINEAR})},
485
        {"hdr-compute-peak", OPT_CHOICE(tone_map.compute_peak,
486
            {"auto", 0},
487
            {"yes", 1},
488
            {"no", -1})},
489
        {"hdr-peak-percentile", OPT_FLOAT(tone_map.peak_percentile),
490
            M_RANGE(0.0, 100.0)},
491
        {"hdr-peak-decay-rate", OPT_FLOAT(tone_map.decay_rate),
492
            M_RANGE(0.0, 1000.0)},
493
        {"hdr-scene-threshold-low", OPT_FLOAT(tone_map.scene_threshold_low),
494
            M_RANGE(0, 20.0)},
495
        {"hdr-scene-threshold-high", OPT_FLOAT(tone_map.scene_threshold_high),
496
            M_RANGE(0, 20.0)},
497
        {"hdr-contrast-recovery", OPT_FLOAT(tone_map.contrast_recovery),
498
            M_RANGE(0, 2.0)},
499
        {"hdr-contrast-smoothness", OPT_FLOAT(tone_map.contrast_smoothness),
500
            M_RANGE(1.0, 100.0)},
501
        {"opengl-pbo", OPT_BOOL(pbo)},
502
        {"scale", OPT_CHOICE_C(scaler[SCALER_SCALE].kernel.function, scale_filters)},
503
        SCALER_OPTS("scale",  SCALER_SCALE),
504
        {"dscale", OPT_CHOICE_C(scaler[SCALER_DSCALE].kernel.function, cdscale_filters)},
505
        SCALER_OPTS("dscale", SCALER_DSCALE),
506
        {"cscale", OPT_CHOICE_C(scaler[SCALER_CSCALE].kernel.function, cdscale_filters)},
507
        SCALER_OPTS("cscale", SCALER_CSCALE),
508
        {"tscale", OPT_CHOICE_C(scaler[SCALER_TSCALE].kernel.function, tscale_filters)},
509
        SCALER_OPTS("tscale", SCALER_TSCALE),
510
        {"scaler-resizes-only", OPT_BOOL(scaler_resizes_only)},
511
        {"correct-downscaling", OPT_BOOL(correct_downscaling)},
512
        {"linear-downscaling", OPT_BOOL(linear_downscaling)},
513
        {"linear-upscaling", OPT_BOOL(linear_upscaling)},
514
        {"sigmoid-upscaling", OPT_BOOL(sigmoid_upscaling)},
515
        {"sigmoid-center", OPT_FLOAT(sigmoid_center), M_RANGE(0.0, 1.0)},
516
        {"sigmoid-slope", OPT_FLOAT(sigmoid_slope), M_RANGE(1.0, 20.0)},
517
        {"fbo-format", OPT_STRING(fbo_format)},
518
        {"dither-depth", OPT_CHOICE(dither_depth, {"no", -1}, {"auto", 0}),
519
            M_RANGE(-1, 16)},
520
        {"dither", OPT_CHOICE(dither_algo,
521
            {"fruit", DITHER_FRUIT},
522
            {"ordered", DITHER_ORDERED},
523
            {"error-diffusion", DITHER_ERROR_DIFFUSION},
524
            {"no", DITHER_NONE})},
525
        {"dither-size-fruit", OPT_INT(dither_size), M_RANGE(2, 8)},
526
        {"temporal-dither", OPT_BOOL(temporal_dither)},
527
        {"temporal-dither-period", OPT_INT(temporal_dither_period),
528
            M_RANGE(1, 128)},
529
        {"error-diffusion",
530
            OPT_STRING_VALIDATE(error_diffusion, validate_error_diffusion_opt)},
531
        {"background", OPT_CHOICE(background,
532
            {"none", BACKGROUND_NONE},
533
            {"color", BACKGROUND_COLOR},
534
            {"tiles", BACKGROUND_TILES})},
535
        {"opengl-rectangle-textures", OPT_BOOL(use_rectangle)},
536
        {"background-color", OPT_COLOR(background_color)},
537
        {"background-tile-color-0", OPT_COLOR(background_tile_color[0])},
538
        {"background-tile-color-1", OPT_COLOR(background_tile_color[1])},
539
        {"background-tile-size", OPT_INT(background_tile_size), M_RANGE(1, 4096)},
540
        {"interpolation", OPT_BOOL(interpolation)},
541
        {"interpolation-threshold", OPT_FLOAT(interpolation_threshold)},
542
        {"blend-subtitles", OPT_CHOICE(blend_subs,
543
            {"no", BLEND_SUBS_NO},
544
            {"yes", BLEND_SUBS_YES},
545
            {"video", BLEND_SUBS_VIDEO})},
546
        {"glsl-shaders", OPT_PATHLIST(user_shaders), .flags = M_OPT_FILE},
547
        {"glsl-shader", OPT_CLI_ALIAS("glsl-shaders-append")},
548
        {"glsl-shader-opts", OPT_KEYVALUELIST(user_shader_opts)},
549
        {"deband", OPT_BOOL(deband)},
550
        {"deband", OPT_SUBSTRUCT(deband_opts, deband_conf)},
551
        {"sharpen", OPT_FLOAT(unsharp)},
552
        {"gpu-tex-pad-x", OPT_INT(tex_pad_x), M_RANGE(0, 4096)},
553
        {"gpu-tex-pad-y", OPT_INT(tex_pad_y), M_RANGE(0, 4096)},
554
        {"", OPT_SUBSTRUCT(icc_opts, mp_icc_conf)},
555
        {"gpu-shader-cache", OPT_BOOL(shader_cache)},
556
        {"gpu-shader-cache-dir", OPT_STRING(shader_cache_dir), .flags = M_OPT_FILE},
557
        {"gpu-hwdec-interop",
558
            OPT_STRING_VALIDATE(hwdec_interop, ra_hwdec_validate_opt)},
559
        {"gamut-warning", OPT_REMOVED("Replaced by --gamut-mapping-mode=warn")},
560
        {"gamut-clipping", OPT_REMOVED("Replaced by --gamut-mapping-mode=desaturate")},
561
        {"tone-mapping-desaturate", OPT_REMOVED("Replaced by --tone-mapping-mode")},
562
        {"tone-mapping-desaturate-exponent", OPT_REMOVED("Replaced by --tone-mapping-mode")},
563
        {0}
564
    },
565
    .size = sizeof(struct gl_video_opts),
566
    .defaults = &gl_video_opts_def,
567
    .change_flags = UPDATE_VIDEO,
568
};
569
570
static void uninit_rendering(struct gl_video *p);
571
static void uninit_scaler(struct gl_video *p, struct scaler *scaler);
572
static void check_gl_features(struct gl_video *p);
573
static bool pass_upload_image(struct gl_video *p, struct mp_image *mpi, uint64_t id);
574
static void reinit_from_options(struct gl_video *p);
575
static void get_scale_factors(struct gl_video *p, bool transpose_rot, double xy[2]);
576
static void gl_video_setup_hooks(struct gl_video *p);
577
static void gl_video_update_options(struct gl_video *p);
578
579
0
#define GLSL(x) gl_sc_add(p->sc, #x "\n");
580
0
#define GLSLF(...) gl_sc_addf(p->sc, __VA_ARGS__)
581
0
#define GLSLHF(...) gl_sc_haddf(p->sc, __VA_ARGS__)
582
0
#define PRELUDE(...) gl_sc_paddf(p->sc, __VA_ARGS__)
583
584
static struct bstr load_cached_file(struct gl_video *p, const char *path)
585
0
{
586
0
    if (!path || !path[0])
587
0
        return (struct bstr){0};
588
0
    for (int n = 0; n < p->num_files; n++) {
589
0
        if (strcmp(p->files[n].path, path) == 0)
590
0
            return p->files[n].body;
591
0
    }
592
    // not found -> load it
593
0
    char *fname = mp_get_user_path(NULL, p->global, path);
594
0
    struct bstr s = stream_read_file(fname, p, p->global, 1000000000); // 1GB
595
0
    talloc_free(fname);
596
0
    if (s.len) {
597
0
        struct cached_file new = {
598
0
            .path = talloc_strdup(p, path),
599
0
            .body = s,
600
0
        };
601
0
        MP_TARRAY_APPEND(p, p->files, p->num_files, new);
602
0
        return new.body;
603
0
    }
604
0
    return (struct bstr){0};
605
0
}
606
607
static void debug_check_gl(struct gl_video *p, const char *msg)
608
0
{
609
0
    if (p->ra->fns->debug_marker)
610
0
        p->ra->fns->debug_marker(p->ra, msg);
611
0
}
612
613
static void gl_video_reset_surfaces(struct gl_video *p)
614
0
{
615
0
    for (int i = 0; i < SURFACES_MAX; i++) {
616
0
        p->surfaces[i].id = 0;
617
0
        p->surfaces[i].pts = MP_NOPTS_VALUE;
618
0
    }
619
0
    p->surface_idx = 0;
620
0
    p->surface_now = 0;
621
0
    p->frames_drawn = 0;
622
0
    p->output_tex_valid = false;
623
0
}
624
625
static void gl_video_reset_hooks(struct gl_video *p)
626
0
{
627
0
    for (int i = 0; i < p->num_tex_hooks; i++)
628
0
        talloc_free(p->tex_hooks[i].priv);
629
630
0
    for (int i = 0; i < p->num_user_textures; i++)
631
0
        ra_tex_free(p->ra, &p->user_textures[i].tex);
632
633
0
    p->num_tex_hooks = 0;
634
0
    p->num_user_textures = 0;
635
0
}
636
637
static inline int surface_wrap(int id)
638
0
{
639
0
    id = id % SURFACES_MAX;
640
0
    return id < 0 ? id + SURFACES_MAX : id;
641
0
}
642
643
static void reinit_osd(struct gl_video *p)
644
0
{
645
0
    mpgl_osd_destroy(p->osd);
646
0
    p->osd = NULL;
647
0
    if (p->osd_state)
648
0
        p->osd = mpgl_osd_init(p->ra, p->log, p->osd_state);
649
0
}
650
651
static void uninit_rendering(struct gl_video *p)
652
0
{
653
0
    for (int n = 0; n < SCALER_COUNT; n++)
654
0
        uninit_scaler(p, &p->scaler[n]);
655
656
0
    ra_tex_free(p->ra, &p->dither_texture);
657
658
0
    for (int n = 0; n < 4; n++) {
659
0
        ra_tex_free(p->ra, &p->merge_tex[n]);
660
0
        ra_tex_free(p->ra, &p->scale_tex[n]);
661
0
        ra_tex_free(p->ra, &p->integer_tex[n]);
662
0
        ra_tex_free(p->ra, &p->chroma_tex[n]);
663
0
    }
664
665
0
    ra_tex_free(p->ra, &p->indirect_tex);
666
0
    ra_tex_free(p->ra, &p->blend_subs_tex);
667
0
    ra_tex_free(p->ra, &p->screen_tex);
668
0
    ra_tex_free(p->ra, &p->output_tex);
669
670
0
    for (int n = 0; n < 2; n++)
671
0
        ra_tex_free(p->ra, &p->error_diffusion_tex[n]);
672
673
0
    for (int n = 0; n < SURFACES_MAX; n++)
674
0
        ra_tex_free(p->ra, &p->surfaces[n].tex);
675
676
0
    for (int n = 0; n < p->num_hook_textures; n++)
677
0
        ra_tex_free(p->ra, &p->hook_textures[n]);
678
679
0
    gl_video_reset_surfaces(p);
680
0
    gl_video_reset_hooks(p);
681
682
0
    gl_sc_reset_error(p->sc);
683
0
}
684
685
bool gl_video_gamma_auto_enabled(struct gl_video *p)
686
0
{
687
0
    return p->opts.gamma_auto;
688
0
}
689
690
// Warning: profile.start must point to a ta allocation, and the function
691
//          takes over ownership.
692
void gl_video_set_icc_profile(struct gl_video *p, bstr icc_data)
693
0
{
694
0
    if (gl_lcms_set_memory_profile(p->cms, icc_data))
695
0
        reinit_from_options(p);
696
0
}
697
698
bool gl_video_icc_auto_enabled(struct gl_video *p)
699
0
{
700
0
    return p->opts.icc_opts ? p->opts.icc_opts->profile_auto : false;
701
0
}
702
703
static bool gl_video_get_lut3d(struct gl_video *p, enum pl_color_primaries prim,
704
                               enum pl_color_transfer trc)
705
0
{
706
0
    if (!p->use_lut_3d)
707
0
        return false;
708
709
0
    struct AVBufferRef *icc = NULL;
710
0
    if (p->image.mpi)
711
0
        icc = p->image.mpi->icc_profile;
712
713
0
    if (p->lut_3d_texture && !gl_lcms_has_changed(p->cms, prim, trc, icc))
714
0
        return true;
715
716
    // GLES3 doesn't provide filtered 16 bit integer textures
717
    // GLES2 doesn't even provide 3D textures
718
0
    const struct ra_format *fmt = ra_find_unorm_format(p->ra, 2, 4);
719
0
    if (!fmt || !(p->ra->caps & RA_CAP_TEX_3D)) {
720
0
        p->use_lut_3d = false;
721
0
        MP_WARN(p, "Disabling color management (no RGBA16 3D textures).\n");
722
0
        return false;
723
0
    }
724
725
0
    struct lut3d *lut3d = NULL;
726
0
    if (!fmt || !gl_lcms_get_lut3d(p->cms, &lut3d, prim, trc, icc) || !lut3d) {
727
0
        p->use_lut_3d = false;
728
0
        return false;
729
0
    }
730
731
0
    ra_tex_free(p->ra, &p->lut_3d_texture);
732
733
0
    struct ra_tex_params params = {
734
0
        .dimensions = 3,
735
0
        .w = lut3d->size[0],
736
0
        .h = lut3d->size[1],
737
0
        .d = lut3d->size[2],
738
0
        .format = fmt,
739
0
        .render_src = true,
740
0
        .src_linear = true,
741
0
        .initial_data = lut3d->data,
742
0
    };
743
0
    p->lut_3d_texture = ra_tex_create(p->ra, &params);
744
745
0
    debug_check_gl(p, "after 3d lut creation");
746
747
0
    for (int i = 0; i < 3; i++)
748
0
        p->lut_3d_size[i] = lut3d->size[i];
749
750
0
    talloc_free(lut3d);
751
752
0
    if (!p->lut_3d_texture) {
753
0
        p->use_lut_3d = false;
754
0
        return false;
755
0
    }
756
757
0
    return true;
758
0
}
759
760
// Fill an image struct from a ra_tex + some metadata
761
static struct image image_wrap(struct ra_tex *tex, enum plane_type type,
762
                               int components)
763
0
{
764
0
    mp_assert(type != PLANE_NONE);
765
0
    return (struct image){
766
0
        .type = type,
767
0
        .tex = tex,
768
0
        .multiplier = 1.0,
769
0
        .w = tex ? tex->params.w : 1,
770
0
        .h = tex ? tex->params.h : 1,
771
0
        .transform = identity_trans,
772
0
        .components = components,
773
0
    };
774
0
}
775
776
// Bind an image to a free texture unit and return its ID.
777
static int pass_bind(struct gl_video *p, struct image img)
778
0
{
779
0
    int idx = p->num_pass_imgs;
780
0
    MP_TARRAY_APPEND(p, p->pass_imgs, p->num_pass_imgs, img);
781
0
    return idx;
782
0
}
783
784
// Rotation by 90° and flipping.
785
// w/h is used for recentering.
786
static void get_transform(float w, float h, int rotate, bool flip,
787
                          struct gl_transform *out_tr)
788
0
{
789
0
    int a = rotate % 90 ? 0 : (rotate / 90) % 4;
790
0
    int sin90[4] = {0, 1, 0, -1}; // just to avoid rounding issues etc.
791
0
    int cos90[4] = {1, 0, -1, 0};
792
0
    struct gl_transform tr = {{{ cos90[a], sin90[a]},
793
0
                               {-sin90[a], cos90[a]}}};
794
795
    // basically, recenter to keep the whole image in view
796
0
    float b[2] = {1, 1};
797
0
    gl_transform_vec(tr, &b[0], &b[1]);
798
0
    tr.t[0] += b[0] < 0 ? w : 0;
799
0
    tr.t[1] += b[1] < 0 ? h : 0;
800
801
0
    if (flip) {
802
0
        struct gl_transform fliptr = {{{1, 0}, {0, -1}}, {0, h}};
803
0
        gl_transform_trans(fliptr, &tr);
804
0
    }
805
806
0
    *out_tr = tr;
807
0
}
808
809
// Return the chroma plane upscaled to luma size, but with additional padding
810
// for image sizes not aligned to subsampling.
811
static int chroma_upsize(int size, int pixel)
812
0
{
813
0
    return (size + pixel - 1) / pixel * pixel;
814
0
}
815
816
// If a and b are on the same plane, return what plane type should be used.
817
// If a or b are none, the other type always wins.
818
// Usually: LUMA/RGB/XYZ > CHROMA > ALPHA
819
static enum plane_type merge_plane_types(enum plane_type a, enum plane_type b)
820
0
{
821
0
    if (a == PLANE_NONE)
822
0
        return b;
823
0
    if (b == PLANE_LUMA || b == PLANE_RGB || b == PLANE_XYZ)
824
0
        return b;
825
0
    if (b != PLANE_NONE && a == PLANE_ALPHA)
826
0
        return b;
827
0
    return a;
828
0
}
829
830
// Places a video_image's image textures + associated metadata into img[]. The
831
// number of textures is equal to p->plane_count. Any necessary plane offsets
832
// are stored in off. (e.g. chroma position)
833
static void pass_get_images(struct gl_video *p, struct video_image *vimg,
834
                            struct image img[4], struct gl_transform off[4])
835
0
{
836
0
    mp_assert(vimg->mpi);
837
838
0
    int w = p->image_params.w;
839
0
    int h = p->image_params.h;
840
841
    // Determine the chroma offset
842
0
    float ls_w = 1.0 / p->ra_format.chroma_w;
843
0
    float ls_h = 1.0 / p->ra_format.chroma_h;
844
845
0
    struct gl_transform chroma = {{{ls_w, 0.0}, {0.0, ls_h}}};
846
847
0
    if (p->image_params.chroma_location != PL_CHROMA_CENTER) {
848
0
        float cx, cy;
849
0
        pl_chroma_location_offset(p->image_params.chroma_location, &cx, &cy);
850
        // By default texture coordinates are such that chroma is centered with
851
        // any chroma subsampling. If a specific direction is given, make it
852
        // so that the luma and chroma sample line up exactly.
853
        // For 4:4:4, setting chroma location should have no effect at all.
854
        // luma sample size (in chroma coord. space)
855
0
        chroma.t[0] = ls_w < 1 ? ls_w * -cx : 0;
856
0
        chroma.t[1] = ls_h < 1 ? ls_h * -cy : 0;
857
0
    }
858
859
0
    memset(img, 0, 4 * sizeof(img[0]));
860
0
    for (int n = 0; n < p->plane_count; n++) {
861
0
        struct texplane *t = &vimg->planes[n];
862
863
0
        enum plane_type type = PLANE_NONE;
864
0
        int padding = 0;
865
0
        for (int i = 0; i < 4; i++) {
866
0
            int c = p->ra_format.components[n][i];
867
0
            enum plane_type ctype;
868
0
            if (c == 0) {
869
0
                ctype = PLANE_NONE;
870
0
            } else if (c == 4) {
871
0
                ctype = PLANE_ALPHA;
872
0
            } else if (p->image_params.repr.sys == PL_COLOR_SYSTEM_RGB) {
873
0
                ctype = PLANE_RGB;
874
0
            } else if (p->image_params.repr.sys == PL_COLOR_SYSTEM_XYZ) {
875
0
                ctype = PLANE_XYZ;
876
0
            } else {
877
0
                ctype = c == 1 ? PLANE_LUMA : PLANE_CHROMA;
878
0
            }
879
0
            type = merge_plane_types(type, ctype);
880
0
            if (!c && padding == i)
881
0
                padding = i + 1;
882
0
        }
883
884
0
        int msb_valid_bits =
885
0
            p->ra_format.component_bits + MPMIN(p->ra_format.component_pad, 0);
886
0
        int csp = type == PLANE_ALPHA ? PL_COLOR_SYSTEM_RGB : p->image_params.repr.sys;
887
0
        float tex_mul =
888
0
            1.0 / mp_get_csp_mul(csp, msb_valid_bits, p->ra_format.component_bits);
889
0
        if (p->ra_format.component_type == RA_CTYPE_FLOAT)
890
0
            tex_mul = 1.0;
891
892
0
        img[n] = (struct image){
893
0
            .type = type,
894
0
            .tex = t->tex,
895
0
            .multiplier = tex_mul,
896
0
            .w = t->w,
897
0
            .h = t->h,
898
0
            .padding = padding,
899
0
        };
900
901
0
        for (int i = 0; i < 4; i++)
902
0
            img[n].components += !!p->ra_format.components[n][i];
903
904
0
        get_transform(t->w, t->h, p->image_params.rotate, t->flipped,
905
0
                      &img[n].transform);
906
0
        if (p->image_params.rotate % 180 == 90)
907
0
            MPSWAP(int, img[n].w, img[n].h);
908
909
0
        off[n] = identity_trans;
910
911
0
        if (type == PLANE_CHROMA) {
912
0
            struct gl_transform rot;
913
            // Reverse the rotation direction here because the different
914
            // coordinate system of chroma offset results in rotation
915
            // in the opposite direction.
916
0
            get_transform(0, 0, 360 - p->image_params.rotate, t->flipped, &rot);
917
918
0
            struct gl_transform tr = chroma;
919
0
            gl_transform_vec(rot, &tr.t[0], &tr.t[1]);
920
921
0
            float dx = (chroma_upsize(w, p->ra_format.chroma_w) - w) * ls_w;
922
0
            float dy = (chroma_upsize(h, p->ra_format.chroma_h) - h) * ls_h;
923
924
            // Adjust the chroma offset if the real chroma size is fractional
925
            // due image sizes not aligned to chroma subsampling.
926
0
            if (rot.m[0][0] < 0)
927
0
                tr.t[0] += dx;
928
0
            if (rot.m[1][0] < 0)
929
0
                tr.t[0] += dy;
930
0
            if (rot.m[0][1] < 0)
931
0
                tr.t[1] += dx;
932
0
            if (rot.m[1][1] < 0)
933
0
                tr.t[1] += dy;
934
935
0
            off[n] = tr;
936
0
        }
937
0
    }
938
0
}
939
940
// Return the index of the given component (assuming all non-padding components
941
// of all planes are concatenated into a linear list).
942
static int find_comp(struct ra_imgfmt_desc *desc, int component)
943
0
{
944
0
    int cur = 0;
945
0
    for (int n = 0; n < desc->num_planes; n++) {
946
0
        for (int i = 0; i < 4; i++) {
947
0
            if (desc->components[n][i]) {
948
0
                if (desc->components[n][i] == component)
949
0
                    return cur;
950
0
                cur++;
951
0
            }
952
0
        }
953
0
    }
954
0
    return -1;
955
0
}
956
957
static void init_video(struct gl_video *p)
958
0
{
959
0
    p->use_integer_conversion = false;
960
961
0
    struct ra_hwdec *hwdec = ra_hwdec_get(&p->hwdec_ctx, p->image_params.imgfmt);
962
0
    if (hwdec) {
963
0
        if (hwdec->driver->overlay_frame) {
964
0
            MP_WARN(p, "Using HW-overlay mode. No GL filtering is performed "
965
0
                       "on the video!\n");
966
0
            p->hwdec_overlay = hwdec;
967
0
        } else {
968
0
            p->hwdec_mapper = ra_hwdec_mapper_create(hwdec, &p->image_params);
969
0
            if (!p->hwdec_mapper)
970
0
                MP_ERR(p, "Initializing texture for hardware decoding failed.\n");
971
0
        }
972
0
        if (p->hwdec_mapper)
973
0
            p->image_params = p->hwdec_mapper->dst_params;
974
0
        const char **exts = hwdec->glsl_extensions;
975
0
        for (int n = 0; exts && exts[n]; n++)
976
0
            gl_sc_enable_extension(p->sc, (char *)exts[n]);
977
0
        p->hwdec_active = true;
978
0
    }
979
980
0
    p->ra_format = (struct ra_imgfmt_desc){0};
981
0
    ra_get_imgfmt_desc(p->ra, p->image_params.imgfmt, &p->ra_format);
982
983
0
    p->plane_count = p->ra_format.num_planes;
984
985
0
    p->has_alpha = false;
986
0
    p->is_gray = true;
987
988
0
    for (int n = 0; n < p->ra_format.num_planes; n++) {
989
0
        for (int i = 0; i < 4; i++) {
990
0
            if (p->ra_format.components[n][i]) {
991
0
                p->has_alpha |= p->ra_format.components[n][i] == 4;
992
0
                p->is_gray &= p->ra_format.components[n][i] == 1 ||
993
0
                              p->ra_format.components[n][i] == 4;
994
0
            }
995
0
        }
996
0
    }
997
998
0
    for (int c = 0; c < 4; c++) {
999
0
        int loc = find_comp(&p->ra_format, c + 1);
1000
0
        p->color_swizzle[c] = "rgba"[loc >= 0 && loc < 4 ? loc : 0];
1001
0
    }
1002
0
    p->color_swizzle[4] = '\0';
1003
1004
0
    mp_image_params_restore_dovi_mapping(&p->image_params);
1005
0
    mp_image_params_guess_csp(&p->image_params);
1006
1007
0
    av_lfg_init(&p->lfg, 1);
1008
1009
0
    debug_check_gl(p, "before video texture creation");
1010
1011
0
    if (!p->hwdec_active) {
1012
0
        struct video_image *vimg = &p->image;
1013
1014
0
        struct mp_image layout = {0};
1015
0
        mp_image_set_params(&layout, &p->image_params);
1016
1017
0
        for (int n = 0; n < p->plane_count; n++) {
1018
0
            struct texplane *plane = &vimg->planes[n];
1019
0
            const struct ra_format *format = p->ra_format.planes[n];
1020
1021
0
            plane->w = mp_image_plane_w(&layout, n);
1022
0
            plane->h = mp_image_plane_h(&layout, n);
1023
1024
0
            struct ra_tex_params params = {
1025
0
                .dimensions = 2,
1026
0
                .w = plane->w + p->opts.tex_pad_x,
1027
0
                .h = plane->h + p->opts.tex_pad_y,
1028
0
                .d = 1,
1029
0
                .format = format,
1030
0
                .render_src = true,
1031
0
                .src_linear = format->linear_filter,
1032
0
                .non_normalized = p->opts.use_rectangle,
1033
0
                .host_mutable = true,
1034
0
            };
1035
1036
0
            MP_VERBOSE(p, "Texture for plane %d: %dx%d\n", n,
1037
0
                       params.w, params.h);
1038
1039
0
            plane->tex = ra_tex_create(p->ra, &params);
1040
0
            p->use_integer_conversion |= format->ctype == RA_CTYPE_UINT;
1041
0
        }
1042
0
    }
1043
1044
0
    debug_check_gl(p, "after video texture creation");
1045
1046
    // Format-dependent checks.
1047
0
    check_gl_features(p);
1048
1049
0
    gl_video_setup_hooks(p);
1050
0
}
1051
1052
static struct dr_buffer *gl_find_dr_buffer(struct gl_video *p, uint8_t *ptr)
1053
0
{
1054
0
    for (int i = 0; i < p->num_dr_buffers; i++) {
1055
0
        struct dr_buffer *buffer = &p->dr_buffers[i];
1056
0
        uint8_t *bufptr = buffer->buf->data;
1057
0
        size_t size = buffer->buf->params.size;
1058
0
        if (ptr >= bufptr && ptr < bufptr + size)
1059
0
            return buffer;
1060
0
    }
1061
1062
0
    return NULL;
1063
0
}
1064
1065
static void gc_pending_dr_fences(struct gl_video *p, bool force)
1066
0
{
1067
0
again:;
1068
0
    for (int n = 0; n < p->num_dr_buffers; n++) {
1069
0
        struct dr_buffer *buffer = &p->dr_buffers[n];
1070
0
        if (!buffer->mpi)
1071
0
            continue;
1072
1073
0
        bool res = p->ra->fns->buf_poll(p->ra, buffer->buf);
1074
0
        if (res || force) {
1075
            // Unreferencing the image could cause gl_video_dr_free_buffer()
1076
            // to be called by the talloc destructor (if it was the last
1077
            // reference). This will implicitly invalidate the buffer pointer
1078
            // and change the p->dr_buffers array. To make it worse, it could
1079
            // free multiple dr_buffers due to weird theoretical corner cases.
1080
            // This is also why we use the goto to iterate again from the
1081
            // start, because everything gets fucked up. Hail satan!
1082
0
            struct mp_image *ref = buffer->mpi;
1083
0
            buffer->mpi = NULL;
1084
0
            talloc_free(ref);
1085
0
            goto again;
1086
0
        }
1087
0
    }
1088
0
}
1089
1090
static void unref_current_image(struct gl_video *p)
1091
0
{
1092
0
    struct video_image *vimg = &p->image;
1093
1094
0
    if (vimg->hwdec_mapped) {
1095
0
        mp_assert(p->hwdec_active && p->hwdec_mapper);
1096
0
        ra_hwdec_mapper_unmap(p->hwdec_mapper);
1097
0
        memset(vimg->planes, 0, sizeof(vimg->planes));
1098
0
        vimg->hwdec_mapped = false;
1099
0
    }
1100
1101
0
    vimg->id = 0;
1102
1103
0
    mp_image_unrefp(&vimg->mpi);
1104
1105
    // While we're at it, also garbage collect pending fences in here to
1106
    // get it out of the way.
1107
0
    gc_pending_dr_fences(p, false);
1108
0
}
1109
1110
// If overlay mode is used, make sure to remove the overlay.
1111
// Be careful with this. Removing the overlay and adding another one will
1112
// lead to flickering artifacts.
1113
static void unmap_overlay(struct gl_video *p)
1114
0
{
1115
0
    if (p->hwdec_overlay)
1116
0
        p->hwdec_overlay->driver->overlay_frame(p->hwdec_overlay, NULL, NULL, NULL, true);
1117
0
}
1118
1119
static void uninit_video(struct gl_video *p)
1120
0
{
1121
0
    uninit_rendering(p);
1122
1123
0
    struct video_image *vimg = &p->image;
1124
1125
0
    unmap_overlay(p);
1126
0
    unref_current_image(p);
1127
1128
0
    for (int n = 0; n < p->plane_count; n++) {
1129
0
        struct texplane *plane = &vimg->planes[n];
1130
0
        ra_tex_free(p->ra, &plane->tex);
1131
0
    }
1132
0
    *vimg = (struct video_image){0};
1133
1134
    // Invalidate image_params to ensure that gl_video_config() will call
1135
    // init_video() on uninitialized gl_video.
1136
0
    p->real_image_params = (struct mp_image_params){0};
1137
0
    p->image_params = p->real_image_params;
1138
0
    p->hwdec_active = false;
1139
0
    p->hwdec_overlay = NULL;
1140
0
    ra_hwdec_mapper_free(&p->hwdec_mapper);
1141
0
}
1142
1143
static void pass_record(struct gl_video *p, const struct mp_pass_perf *perf)
1144
0
{
1145
0
    if (!p->pass || p->pass_idx == VO_PASS_PERF_MAX)
1146
0
        return;
1147
1148
0
    struct pass_info *pass = &p->pass[p->pass_idx];
1149
0
    pass->perf = *perf;
1150
1151
0
    if (pass->desc.len == 0)
1152
0
        bstr_xappend(p, &pass->desc, bstr0("(unknown)"));
1153
1154
0
    p->pass_idx++;
1155
0
}
1156
1157
MP_PRINTF_ATTRIBUTE(2, 3)
1158
static void pass_describe(struct gl_video *p, const char *textf, ...)
1159
0
{
1160
0
    if (!p->pass || p->pass_idx == VO_PASS_PERF_MAX)
1161
0
        return;
1162
1163
0
    struct pass_info *pass = &p->pass[p->pass_idx];
1164
1165
0
    if (pass->desc.len > 0)
1166
0
        bstr_xappend(p, &pass->desc, bstr0(" + "));
1167
1168
0
    va_list ap;
1169
0
    va_start(ap, textf);
1170
0
    bstr_xappend_vasprintf(p, &pass->desc, textf, ap);
1171
0
    va_end(ap);
1172
0
}
1173
1174
static void pass_info_reset(struct gl_video *p, bool is_redraw)
1175
0
{
1176
0
    p->pass = is_redraw ? p->pass_redraw : p->pass_fresh;
1177
0
    p->pass_idx = 0;
1178
1179
0
    for (int i = 0; i < VO_PASS_PERF_MAX; i++) {
1180
0
        p->pass[i].desc.len = 0;
1181
0
    }
1182
0
}
1183
1184
static void pass_report_performance(struct gl_video *p)
1185
0
{
1186
0
    if (!p->pass)
1187
0
        return;
1188
1189
0
    for (int i = 0; i < VO_PASS_PERF_MAX; i++) {
1190
0
        struct pass_info *pass = &p->pass[i];
1191
0
        if (!pass->desc.len)
1192
0
            break;
1193
0
        MP_TRACE(p, "pass '%.*s': last %dus avg %dus peak %dus\n",
1194
0
                 BSTR_P(pass->desc),
1195
0
                 (int)pass->perf.last/1000,
1196
0
                 (int)pass->perf.avg/1000,
1197
0
                 (int)pass->perf.peak/1000);
1198
0
    }
1199
0
}
1200
1201
static void pass_prepare_src_tex(struct gl_video *p)
1202
0
{
1203
0
    struct gl_shader_cache *sc = p->sc;
1204
1205
0
    for (int n = 0; n < p->num_pass_imgs; n++) {
1206
0
        struct image *s = &p->pass_imgs[n];
1207
0
        if (!s->tex)
1208
0
            continue;
1209
1210
0
        char *texture_name = mp_tprintf(32, "texture%d", n);
1211
0
        char *texture_size = mp_tprintf(32, "texture_size%d", n);
1212
0
        char *texture_rot = mp_tprintf(32, "texture_rot%d", n);
1213
0
        char *texture_off = mp_tprintf(32, "texture_off%d", n);
1214
0
        char *pixel_size = mp_tprintf(32, "pixel_size%d", n);
1215
1216
0
        gl_sc_uniform_texture(sc, texture_name, s->tex);
1217
0
        float f[2] = {1, 1};
1218
0
        if (!s->tex->params.non_normalized) {
1219
0
            f[0] = s->tex->params.w;
1220
0
            f[1] = s->tex->params.h;
1221
0
        }
1222
0
        gl_sc_uniform_vec2(sc, texture_size, f);
1223
0
        gl_sc_uniform_mat2(sc, texture_rot, true, (float *)s->transform.m);
1224
0
        gl_sc_uniform_vec2(sc, texture_off, (float *)s->transform.t);
1225
0
        gl_sc_uniform_vec2(sc, pixel_size, (float[]){1.0f / f[0],
1226
0
                                                     1.0f / f[1]});
1227
0
    }
1228
0
}
1229
1230
static void cleanup_binds(struct gl_video *p)
1231
0
{
1232
0
    p->num_pass_imgs = 0;
1233
0
}
1234
1235
// Sets the appropriate compute shader metadata for an implicit compute pass
1236
// bw/bh: block size
1237
static void pass_is_compute(struct gl_video *p, int bw, int bh, bool flexible)
1238
0
{
1239
0
    if (p->pass_compute.active && flexible) {
1240
        // Avoid overwriting existing block sizes when using a flexible pass
1241
0
        bw = p->pass_compute.block_w;
1242
0
        bh = p->pass_compute.block_h;
1243
0
    }
1244
1245
0
    p->pass_compute = (struct compute_info){
1246
0
        .active = true,
1247
0
        .block_w = bw,
1248
0
        .block_h = bh,
1249
0
    };
1250
0
}
1251
1252
// w/h: the width/height of the compute shader's operating domain (e.g. the
1253
// target target that needs to be written, or the source texture that needs to
1254
// be reduced)
1255
static void dispatch_compute(struct gl_video *p, int w, int h,
1256
                             struct compute_info info)
1257
0
{
1258
0
    PRELUDE("layout (local_size_x = %d, local_size_y = %d) in;\n",
1259
0
            info.threads_w > 0 ? info.threads_w : info.block_w,
1260
0
            info.threads_h > 0 ? info.threads_h : info.block_h);
1261
1262
0
    pass_prepare_src_tex(p);
1263
1264
    // Since we don't actually have vertices, we pretend for convenience
1265
    // reasons that we do and calculate the right texture coordinates based on
1266
    // the output sample ID
1267
0
    gl_sc_uniform_vec2(p->sc, "out_scale", (float[2]){ 1.0 / w, 1.0 / h });
1268
0
    PRELUDE("#define outcoord(id) (out_scale * (vec2(id) + vec2(0.5)))\n");
1269
1270
0
    for (int n = 0; n < p->num_pass_imgs; n++) {
1271
0
        struct image *s = &p->pass_imgs[n];
1272
0
        if (!s->tex)
1273
0
            continue;
1274
1275
0
        PRELUDE("#define texmap%d(id) (texture_rot%d * outcoord(id) + "
1276
0
               "pixel_size%d * texture_off%d)\n", n, n, n, n);
1277
0
        PRELUDE("#define texcoord%d texmap%d(gl_GlobalInvocationID)\n", n, n);
1278
0
    }
1279
1280
    // always round up when dividing to make sure we don't leave off a part of
1281
    // the image
1282
0
    int num_x = info.block_w > 0 ? (w + info.block_w - 1) / info.block_w : 1,
1283
0
        num_y = info.block_h > 0 ? (h + info.block_h - 1) / info.block_h : 1;
1284
1285
0
    if (!(p->ra->caps & RA_CAP_NUM_GROUPS))
1286
0
        PRELUDE("#define gl_NumWorkGroups uvec3(%d, %d, 1)\n", num_x, num_y);
1287
1288
0
    struct mp_pass_perf perf = gl_sc_dispatch_compute(p->sc, num_x, num_y, 1);
1289
0
    pass_record(p, &perf);
1290
0
    cleanup_binds(p);
1291
0
}
1292
1293
static struct mp_pass_perf render_pass_quad(struct gl_video *p,
1294
                                            const struct ra_fbo *fbo, bool discard,
1295
                                            const struct mp_rect *dst)
1296
0
{
1297
    // The first element is reserved for `vec2 position`
1298
0
    int num_vertex_attribs = 1 + p->num_pass_imgs;
1299
0
    size_t vertex_stride = num_vertex_attribs * sizeof(struct vertex_pt);
1300
1301
    // Expand the VAO if necessary
1302
0
    while (p->vao_len < num_vertex_attribs) {
1303
0
        MP_TARRAY_APPEND(p, p->vao, p->vao_len, (struct ra_renderpass_input) {
1304
0
            .name = talloc_asprintf(p, "texcoord%d", p->vao_len - 1),
1305
0
            .type = RA_VARTYPE_FLOAT,
1306
0
            .dim_v = 2,
1307
0
            .dim_m = 1,
1308
0
            .offset = p->vao_len * sizeof(struct vertex_pt),
1309
0
        });
1310
0
    }
1311
1312
0
    int num_vertices = 6; // quad as triangle list
1313
0
    int num_attribs_total = num_vertices * num_vertex_attribs;
1314
0
    MP_TARRAY_GROW(p, p->tmp_vertex, num_attribs_total);
1315
1316
0
    struct gl_transform t;
1317
0
    gl_transform_ortho_fbo(&t, fbo);
1318
1319
0
    float x[2] = {dst->x0, dst->x1};
1320
0
    float y[2] = {dst->y0, dst->y1};
1321
0
    gl_transform_vec(t, &x[0], &y[0]);
1322
0
    gl_transform_vec(t, &x[1], &y[1]);
1323
1324
0
    for (int n = 0; n < 4; n++) {
1325
0
        struct vertex_pt *vs = &p->tmp_vertex[num_vertex_attribs * n];
1326
        // vec2 position in idx 0
1327
0
        vs[0].x = x[n / 2];
1328
0
        vs[0].y = y[n % 2];
1329
0
        for (int i = 0; i < p->num_pass_imgs; i++) {
1330
0
            struct image *s = &p->pass_imgs[i];
1331
0
            if (!s->tex)
1332
0
                continue;
1333
0
            struct gl_transform tr = s->transform;
1334
0
            float tx = (n / 2) * s->w;
1335
0
            float ty = (n % 2) * s->h;
1336
0
            gl_transform_vec(tr, &tx, &ty);
1337
0
            bool rect = s->tex->params.non_normalized;
1338
            // vec2 texcoordN in idx N+1
1339
0
            vs[i + 1].x = tx / (rect ? 1 : s->tex->params.w);
1340
0
            vs[i + 1].y = ty / (rect ? 1 : s->tex->params.h);
1341
0
        }
1342
0
    }
1343
1344
0
    memmove(&p->tmp_vertex[num_vertex_attribs * 4],
1345
0
            &p->tmp_vertex[num_vertex_attribs * 2],
1346
0
            vertex_stride);
1347
1348
0
    memmove(&p->tmp_vertex[num_vertex_attribs * 5],
1349
0
            &p->tmp_vertex[num_vertex_attribs * 1],
1350
0
            vertex_stride);
1351
1352
0
    return gl_sc_dispatch_draw(p->sc, fbo->tex, discard, p->vao, num_vertex_attribs,
1353
0
                               vertex_stride, p->tmp_vertex, num_vertices);
1354
0
}
1355
1356
static void finish_pass_fbo(struct gl_video *p, const struct ra_fbo *fbo,
1357
                            bool discard, const struct mp_rect *dst)
1358
0
{
1359
0
    pass_prepare_src_tex(p);
1360
0
    struct mp_pass_perf perf = render_pass_quad(p, fbo, discard, dst);
1361
0
    pass_record(p, &perf);
1362
0
    debug_check_gl(p, "after rendering");
1363
0
    cleanup_binds(p);
1364
0
}
1365
1366
// dst_fbo: this will be used for rendering; possibly reallocating the whole
1367
//          FBO, if the required parameters have changed
1368
// w, h: required FBO target dimension, and also defines the target rectangle
1369
//       used for rasterization
1370
static void finish_pass_tex(struct gl_video *p, struct ra_tex **dst_tex,
1371
                            int w, int h)
1372
0
{
1373
0
    if (!ra_tex_resize(p->ra, p->log, dst_tex, w, h, p->fbo_format)) {
1374
0
        cleanup_binds(p);
1375
0
        gl_sc_reset(p->sc);
1376
0
        return;
1377
0
    }
1378
1379
    // If RA_CAP_PARALLEL_COMPUTE is set, try to prefer compute shaders
1380
    // over fragment shaders wherever possible.
1381
0
    if (!p->pass_compute.active && (p->ra->caps & RA_CAP_PARALLEL_COMPUTE) &&
1382
0
        (*dst_tex)->params.storage_dst)
1383
0
    {
1384
0
        pass_is_compute(p, 16, 16, true);
1385
0
    }
1386
1387
0
    if (p->pass_compute.active) {
1388
0
        gl_sc_uniform_image2D_wo(p->sc, "out_image", *dst_tex);
1389
0
        if (!p->pass_compute.directly_writes)
1390
0
            GLSL(imageStore(out_image, ivec2(gl_GlobalInvocationID), color);)
1391
1392
0
        dispatch_compute(p, w, h, p->pass_compute);
1393
0
        p->pass_compute = (struct compute_info){0};
1394
1395
0
        debug_check_gl(p, "after dispatching compute shader");
1396
0
    } else {
1397
0
        struct ra_fbo fbo = { .tex = *dst_tex, };
1398
0
        finish_pass_fbo(p, &fbo, true, &(struct mp_rect){0, 0, w, h});
1399
0
    }
1400
0
}
1401
1402
static const char *get_tex_swizzle(struct image *img)
1403
0
{
1404
0
    if (!img->tex)
1405
0
        return "rgba";
1406
0
    if (img->tex->params.format->luminance_alpha)
1407
0
        return "raaa";
1408
0
    return img->tex->params.format->ordered ? "rgba" : "bgra";
1409
0
}
1410
1411
// Copy a texture to the vec4 color, while increasing offset. Also applies
1412
// the texture multiplier to the sampled color
1413
static void copy_image(struct gl_video *p, unsigned int *offset, struct image img)
1414
0
{
1415
0
    const unsigned int count = img.components;
1416
0
    char src[5] = {0};
1417
0
    char dst[5] = {0};
1418
1419
0
    mp_assert(*offset + count < sizeof(dst));
1420
0
    mp_assert(img.padding + count < sizeof(src));
1421
1422
0
    int id = pass_bind(p, img);
1423
1424
0
    const char *tex_fmt = get_tex_swizzle(&img);
1425
0
    const char *dst_fmt = "rgba";
1426
0
    for (unsigned int i = 0; i < count; i++) {
1427
0
        src[i] = tex_fmt[img.padding + i];
1428
0
        dst[i] = dst_fmt[*offset + i];
1429
0
    }
1430
1431
0
    if (img.tex && img.tex->params.format->ctype == RA_CTYPE_UINT) {
1432
0
        uint64_t tex_max = 1ull << p->ra_format.component_bits;
1433
0
        img.multiplier *= 1.0 / (tex_max - 1);
1434
0
    }
1435
1436
0
    GLSLF("color.%s = %f * vec4(texture(texture%d, texcoord%d)).%s;\n",
1437
0
          dst, img.multiplier, id, id, src);
1438
1439
0
    *offset += count;
1440
0
}
1441
1442
static void skip_unused(struct gl_video *p, int num_components)
1443
0
{
1444
0
    for (int i = num_components; i < 4; i++)
1445
0
        GLSLF("color.%c = %f;\n", "rgba"[i], i < 3 ? 0.0 : 1.0);
1446
0
}
1447
1448
static void uninit_scaler(struct gl_video *p, struct scaler *scaler)
1449
0
{
1450
0
    ra_tex_free(p->ra, &scaler->sep_fbo);
1451
0
    ra_tex_free(p->ra, &scaler->lut);
1452
0
    scaler->kernel = NULL;
1453
0
    scaler->initialized = false;
1454
0
}
1455
1456
static void hook_prelude(struct gl_video *p, const char *name, int id,
1457
                         struct image img)
1458
0
{
1459
0
    GLSLHF("#define %s_raw texture%d\n", name, id);
1460
0
    GLSLHF("#define %s_pos texcoord%d\n", name, id);
1461
0
    GLSLHF("#define %s_size texture_size%d\n", name, id);
1462
0
    GLSLHF("#define %s_rot texture_rot%d\n", name, id);
1463
0
    GLSLHF("#define %s_off texture_off%d\n", name, id);
1464
0
    GLSLHF("#define %s_pt pixel_size%d\n", name, id);
1465
0
    GLSLHF("#define %s_map texmap%d\n", name, id);
1466
0
    GLSLHF("#define %s_mul %f\n", name, img.multiplier);
1467
1468
0
    char crap[5] = "";
1469
0
    snprintf(crap, sizeof(crap), "%s", get_tex_swizzle(&img));
1470
1471
    // Remove leading padding by rotating the swizzle mask.
1472
0
    int len = strlen(crap);
1473
0
    for (int n = 0; n < img.padding; n++) {
1474
0
        if (len) {
1475
0
            char f = crap[0];
1476
0
            memmove(crap, crap + 1, len - 1);
1477
0
            crap[len - 1] = f;
1478
0
        }
1479
0
    }
1480
1481
    // Set up the sampling functions
1482
0
    GLSLHF("#define %s_tex(pos) (%s_mul * vec4(texture(%s_raw, pos)).%s)\n",
1483
0
           name, name, name, crap);
1484
1485
0
    if (p->ra->caps & RA_CAP_GATHER) {
1486
0
        GLSLHF("#define %s_gather(pos, c) (%s_mul * vec4("
1487
0
               "textureGather(%s_raw, pos, c)))\n", name, name, name);
1488
0
    }
1489
1490
    // Since the extra matrix multiplication impacts performance,
1491
    // skip it unless the texture was actually rotated
1492
0
    if (gl_transform_eq(img.transform, identity_trans)) {
1493
0
        GLSLHF("#define %s_texOff(off) %s_tex(%s_pos + %s_pt * vec2(off))\n",
1494
0
               name, name, name, name);
1495
0
    } else {
1496
0
        GLSLHF("#define %s_texOff(off) "
1497
0
                   "%s_tex(%s_pos + %s_rot * vec2(off)/%s_size)\n",
1498
0
               name, name, name, name, name);
1499
0
    }
1500
0
}
1501
1502
static bool saved_img_find(struct gl_video *p, const char *name,
1503
                           struct image *out)
1504
0
{
1505
0
    if (!name || !out)
1506
0
        return false;
1507
1508
0
    for (int i = 0; i < p->num_saved_imgs; i++) {
1509
0
        if (strcmp(p->saved_imgs[i].name, name) == 0) {
1510
0
            *out = p->saved_imgs[i].img;
1511
0
            return true;
1512
0
        }
1513
0
    }
1514
1515
0
    return false;
1516
0
}
1517
1518
static void saved_img_store(struct gl_video *p, const char *name,
1519
                            struct image img)
1520
0
{
1521
0
    mp_assert(name);
1522
1523
0
    for (int i = 0; i < p->num_saved_imgs; i++) {
1524
0
        if (strcmp(p->saved_imgs[i].name, name) == 0) {
1525
0
            p->saved_imgs[i].img = img;
1526
0
            return;
1527
0
        }
1528
0
    }
1529
1530
0
    MP_TARRAY_APPEND(p, p->saved_imgs, p->num_saved_imgs, (struct saved_img) {
1531
0
        .name = name,
1532
0
        .img = img
1533
0
    });
1534
0
}
1535
1536
static bool pass_hook_setup_binds(struct gl_video *p, const char *name,
1537
                                  struct image img, struct tex_hook *hook)
1538
0
{
1539
0
    for (int t = 0; t < SHADER_MAX_BINDS; t++) {
1540
0
        char *bind_name = (char *)hook->bind_tex[t];
1541
1542
0
        if (!bind_name)
1543
0
            continue;
1544
1545
        // This is a special name that means "currently hooked texture"
1546
0
        if (strcmp(bind_name, "HOOKED") == 0) {
1547
0
            int id = pass_bind(p, img);
1548
0
            hook_prelude(p, "HOOKED", id, img);
1549
0
            hook_prelude(p, name, id, img);
1550
0
            continue;
1551
0
        }
1552
1553
        // BIND can also be used to load user-defined textures, in which
1554
        // case we will directly load them as a uniform instead of
1555
        // generating the hook_prelude boilerplate
1556
0
        for (int u = 0; u < p->num_user_textures; u++) {
1557
0
            struct gl_user_shader_tex *utex = &p->user_textures[u];
1558
0
            if (bstr_equals0(utex->name, bind_name)) {
1559
0
                gl_sc_uniform_texture(p->sc, bind_name, utex->tex);
1560
0
                goto next_bind;
1561
0
            }
1562
0
        }
1563
1564
0
        struct image bind_img;
1565
0
        if (!saved_img_find(p, bind_name, &bind_img)) {
1566
            // Clean up texture bindings and move on to the next hook
1567
0
            MP_TRACE(p, "Skipping hook on %s due to no texture named %s.\n",
1568
0
                     name, bind_name);
1569
0
            p->num_pass_imgs -= t;
1570
0
            return false;
1571
0
        }
1572
1573
0
        hook_prelude(p, bind_name, pass_bind(p, bind_img), bind_img);
1574
1575
0
next_bind: ;
1576
0
    }
1577
1578
0
    return true;
1579
0
}
1580
1581
static struct ra_tex **next_hook_tex(struct gl_video *p)
1582
0
{
1583
0
    if (p->idx_hook_textures == p->num_hook_textures)
1584
0
        MP_TARRAY_APPEND(p, p->hook_textures, p->num_hook_textures, NULL);
1585
1586
0
    return &p->hook_textures[p->idx_hook_textures++];
1587
0
}
1588
1589
// Process hooks for a plane, saving the result and returning a new image
1590
// If 'trans' is NULL, the shader is forbidden from transforming img
1591
static struct image pass_hook(struct gl_video *p, const char *name,
1592
                              struct image img, struct gl_transform *trans)
1593
0
{
1594
0
    if (!name)
1595
0
        return img;
1596
1597
0
    saved_img_store(p, name, img);
1598
1599
0
    MP_TRACE(p, "Running hooks for %s\n", name);
1600
0
    for (int i = 0; i < p->num_tex_hooks; i++) {
1601
0
        struct tex_hook *hook = &p->tex_hooks[i];
1602
1603
        // Figure out if this pass hooks this texture
1604
0
        for (int h = 0; h < SHADER_MAX_HOOKS; h++) {
1605
0
            if (hook->hook_tex[h] && strcmp(hook->hook_tex[h], name) == 0)
1606
0
                goto found;
1607
0
        }
1608
1609
0
        continue;
1610
1611
0
found:
1612
        // Check the hook's condition
1613
0
        if (hook->cond && !hook->cond(p, img, hook->priv)) {
1614
0
            MP_TRACE(p, "Skipping hook on %s due to condition.\n", name);
1615
0
            continue;
1616
0
        }
1617
1618
0
        const char *store_name = hook->save_tex ? hook->save_tex : name;
1619
0
        bool is_overwrite = strcmp(store_name, name) == 0;
1620
1621
        // If user shader is set to align HOOKED with reference and fix its
1622
        // offset, it requires HOOKED to be resizable and overwritten.
1623
0
        if (is_overwrite && hook->align_offset) {
1624
0
            if (!trans) {
1625
0
                MP_ERR(p, "Hook tried to align unresizable texture %s!\n",
1626
0
                       name);
1627
0
                return img;
1628
0
            }
1629
1630
0
            struct gl_transform align_off = identity_trans;
1631
0
            align_off.t[0] = trans->t[0];
1632
0
            align_off.t[1] = trans->t[1];
1633
1634
0
            gl_transform_trans(align_off, &img.transform);
1635
0
        }
1636
1637
0
        if (!pass_hook_setup_binds(p, name, img, hook))
1638
0
            continue;
1639
1640
        // Run the actual hook. This generates a series of GLSL shader
1641
        // instructions sufficient for drawing the hook's output
1642
0
        struct gl_transform hook_off = identity_trans;
1643
0
        hook->hook(p, img, &hook_off, hook->priv);
1644
1645
0
        int comps = hook->components ? hook->components : img.components;
1646
0
        skip_unused(p, comps);
1647
1648
        // Compute the updated FBO dimensions and store the result
1649
0
        struct mp_rect_f sz = {0, 0, img.w, img.h};
1650
0
        gl_transform_rect(hook_off, &sz);
1651
0
        int w = lroundf(fabs(sz.x1 - sz.x0));
1652
0
        int h = lroundf(fabs(sz.y1 - sz.y0));
1653
1654
0
        struct ra_tex **tex = next_hook_tex(p);
1655
0
        finish_pass_tex(p, tex, w, h);
1656
0
        struct image saved_img = image_wrap(*tex, img.type, comps);
1657
1658
        // If the texture we're saving overwrites the "current" texture, also
1659
        // update the tex parameter so that the future loop cycles will use the
1660
        // updated values, and export the offset
1661
0
        if (is_overwrite) {
1662
0
            if (!trans && !gl_transform_eq(hook_off, identity_trans)) {
1663
0
                MP_ERR(p, "Hook tried changing size of unscalable texture %s!\n",
1664
0
                       name);
1665
0
                return img;
1666
0
            }
1667
1668
0
            img = saved_img;
1669
0
            if (trans) {
1670
0
                gl_transform_trans(hook_off, trans);
1671
1672
                // If user shader is set to align HOOKED, the offset it produces
1673
                // is dynamic (with static resizing factor though).
1674
                // Align it with reference manually to get offset fixed.
1675
0
                if (hook->align_offset) {
1676
0
                    trans->t[0] = 0.0;
1677
0
                    trans->t[1] = 0.0;
1678
0
                }
1679
0
            }
1680
0
        }
1681
1682
0
        saved_img_store(p, store_name, saved_img);
1683
0
    }
1684
1685
0
    return img;
1686
0
}
1687
1688
// This can be used at any time in the middle of rendering to specify an
1689
// optional hook point, which if triggered will render out to a new FBO and
1690
// load the result back into vec4 color. Offsets applied by the hooks are
1691
// accumulated in tex_trans, and the FBO is dimensioned according
1692
// to p->texture_w/h
1693
static void pass_opt_hook_point(struct gl_video *p, const char *name,
1694
                                struct gl_transform *tex_trans)
1695
0
{
1696
0
    if (!name)
1697
0
        return;
1698
1699
0
    for (int i = 0; i < p->num_tex_hooks; i++) {
1700
0
        struct tex_hook *hook = &p->tex_hooks[i];
1701
1702
0
        for (int h = 0; h < SHADER_MAX_HOOKS; h++) {
1703
0
            if (hook->hook_tex[h] && strcmp(hook->hook_tex[h], name) == 0)
1704
0
                goto found;
1705
0
        }
1706
1707
0
        for (int b = 0; b < SHADER_MAX_BINDS; b++) {
1708
0
            if (hook->bind_tex[b] && strcmp(hook->bind_tex[b], name) == 0)
1709
0
                goto found;
1710
0
        }
1711
0
    }
1712
1713
    // Nothing uses this texture, don't bother storing it
1714
0
    return;
1715
1716
0
found: ;
1717
0
    struct ra_tex **tex = next_hook_tex(p);
1718
0
    finish_pass_tex(p, tex, p->texture_w, p->texture_h);
1719
0
    struct image img = image_wrap(*tex, PLANE_RGB, p->components);
1720
0
    img = pass_hook(p, name, img, tex_trans);
1721
0
    copy_image(p, &(int){0}, img);
1722
0
    p->texture_w = img.w;
1723
0
    p->texture_h = img.h;
1724
0
    p->components = img.components;
1725
0
    pass_describe(p, "(remainder pass)");
1726
0
}
1727
1728
static void load_shader(struct gl_video *p, struct bstr body)
1729
0
{
1730
0
    gl_sc_hadd_bstr(p->sc, body);
1731
0
    gl_sc_uniform_dynamic(p->sc);
1732
0
    gl_sc_uniform_f(p->sc, "random", (double)av_lfg_get(&p->lfg) / UINT32_MAX);
1733
0
    gl_sc_uniform_dynamic(p->sc);
1734
0
    gl_sc_uniform_i(p->sc, "frame", p->frames_uploaded);
1735
0
    gl_sc_uniform_vec2(p->sc, "input_size",
1736
0
                       (float[]){(p->src_rect.x1 - p->src_rect.x0) *
1737
0
                                  p->texture_offset.m[0][0],
1738
0
                                  (p->src_rect.y1 - p->src_rect.y0) *
1739
0
                                  p->texture_offset.m[1][1]});
1740
0
    gl_sc_uniform_vec2(p->sc, "target_size",
1741
0
                       (float[]){p->dst_rect.x1 - p->dst_rect.x0,
1742
0
                                 p->dst_rect.y1 - p->dst_rect.y0});
1743
0
    gl_sc_uniform_vec2(p->sc, "tex_offset",
1744
0
                       (float[]){p->src_rect.x0 * p->texture_offset.m[0][0] +
1745
0
                                 p->texture_offset.t[0],
1746
0
                                 p->src_rect.y0 * p->texture_offset.m[1][1] +
1747
0
                                 p->texture_offset.t[1]});
1748
0
}
1749
1750
// Semantic equality
1751
static bool double_seq(double a, double b)
1752
0
{
1753
0
    return (isnan(a) && isnan(b)) || a == b;
1754
0
}
1755
1756
static bool scaler_fun_eq(struct scaler_fun a, struct scaler_fun b)
1757
0
{
1758
0
    return a.function == b.function &&
1759
0
           double_seq(a.params[0], b.params[0]) &&
1760
0
           double_seq(a.params[1], b.params[1]) &&
1761
0
           a.blur == b.blur &&
1762
0
           a.taper == b.taper;
1763
0
}
1764
1765
static bool scaler_conf_eq(struct scaler_config a, struct scaler_config b)
1766
0
{
1767
    // Note: antiring isn't compared because it doesn't affect LUT
1768
    // generation
1769
0
    return scaler_fun_eq(a.kernel, b.kernel) &&
1770
0
           scaler_fun_eq(a.window, b.window) &&
1771
0
           a.radius == b.radius &&
1772
0
           a.clamp == b.clamp;
1773
0
}
1774
1775
void scaler_conf_merge(struct scaler_config *dst, const struct scaler_config *src,
1776
                       enum scaler_unit unit)
1777
0
{
1778
0
    if (dst->kernel.function != SCALER_INHERIT)
1779
0
        return;
1780
0
    mp_assert(src->kernel.function != SCALER_INHERIT);
1781
0
    dst->kernel.function = src->kernel.function;
1782
1783
0
    const struct scaler_config *def = &gl_video_opts_def.scaler[unit];
1784
0
    for (int i = 0; i < MP_ARRAY_SIZE(dst->kernel.params); i++) {
1785
0
        if (isnan(dst->kernel.params[i]) || dst->kernel.params[i] == def->kernel.params[i])
1786
0
            dst->kernel.params[i] = src->kernel.params[i];
1787
0
        if (isnan(dst->window.params[i]) || dst->window.params[i] == def->window.params[i])
1788
0
            dst->window.params[i] = src->window.params[i];
1789
0
    }
1790
0
    if (dst->kernel.blur == def->kernel.blur)
1791
0
        dst->kernel.blur = src->kernel.blur;
1792
0
    if (dst->kernel.taper == def->kernel.taper)
1793
0
        dst->kernel.taper = src->kernel.taper;
1794
0
    if (dst->window.taper == def->window.taper)
1795
0
        dst->window.taper = src->window.taper;
1796
0
    if (dst->clamp == def->clamp)
1797
0
        dst->clamp = src->clamp;
1798
0
    if (dst->radius == def->radius)
1799
0
        dst->radius = src->radius;
1800
0
    if (dst->antiring == def->antiring)
1801
0
        dst->antiring = src->antiring;
1802
0
    if (dst->window.function == def->window.function)
1803
0
        dst->window.function = src->window.function;
1804
0
}
1805
1806
static void reinit_scaler(struct gl_video *p, struct scaler *scaler,
1807
                          const struct scaler_config *conf,
1808
                          double scale_factor,
1809
                          int sizes[])
1810
0
{
1811
0
    mp_assert(conf);
1812
0
    mp_assert(conf->kernel.function != SCALER_INHERIT);
1813
0
    if (scaler_conf_eq(scaler->conf, *conf) &&
1814
0
        scaler->scale_factor == scale_factor &&
1815
0
        scaler->initialized)
1816
0
        return;
1817
1818
0
    uninit_scaler(p, scaler);
1819
1820
0
    struct filter_kernel bare_window;
1821
0
    const struct filter_kernel *t_kernel = mp_find_filter_kernel(conf->kernel.function);
1822
0
    const struct filter_window *t_window = mp_find_filter_window(conf->window.function);
1823
0
    if (!t_kernel) {
1824
0
        const struct filter_window *window = mp_find_filter_window(conf->kernel.function);
1825
0
        if (window) {
1826
0
            bare_window = (struct filter_kernel) { .f = *window };
1827
0
            t_kernel = &bare_window;
1828
0
        }
1829
0
    }
1830
1831
0
    scaler->conf = *conf;
1832
0
    scaler->scale_factor = scale_factor;
1833
0
    scaler->insufficient = false;
1834
0
    scaler->initialized = true;
1835
0
    if (!t_kernel)
1836
0
        return;
1837
1838
0
    scaler->kernel_storage = *t_kernel;
1839
0
    scaler->kernel = &scaler->kernel_storage;
1840
1841
0
    if (!t_window) {
1842
        // fall back to the scaler's default window if available
1843
0
        t_window = mp_find_filter_window(t_kernel->window);
1844
0
    }
1845
0
    if (t_window)
1846
0
        scaler->kernel->w = *t_window;
1847
1848
0
    for (int n = 0; n < 2; n++) {
1849
0
        if (!isnan(conf->kernel.params[n]))
1850
0
            scaler->kernel->f.params[n] = conf->kernel.params[n];
1851
0
        if (!isnan(conf->window.params[n]))
1852
0
            scaler->kernel->w.params[n] = conf->window.params[n];
1853
0
    }
1854
1855
0
    if (conf->kernel.blur > 0.0)
1856
0
        scaler->kernel->f.blur = conf->kernel.blur;
1857
0
    if (conf->window.blur > 0.0)
1858
0
        scaler->kernel->w.blur = conf->window.blur;
1859
1860
0
    if (conf->kernel.taper > 0.0)
1861
0
        scaler->kernel->f.taper = conf->kernel.taper;
1862
0
    if (conf->window.taper > 0.0)
1863
0
        scaler->kernel->w.taper = conf->window.taper;
1864
1865
0
    if (scaler->kernel->f.resizable && conf->radius > 0.0)
1866
0
        scaler->kernel->f.radius = conf->radius;
1867
1868
0
    scaler->kernel->clamp = conf->clamp;
1869
0
    scaler->insufficient = !mp_init_filter(scaler->kernel, sizes, scale_factor);
1870
1871
0
    int size = scaler->kernel->size;
1872
0
    int num_components = size > 2 ? 4 : size;
1873
0
    const struct ra_format *fmt = ra_find_float16_format(p->ra, num_components);
1874
0
    mp_assert(fmt);
1875
1876
0
    int width = (size + num_components - 1) / num_components; // round up
1877
0
    int stride = width * num_components;
1878
0
    mp_assert(size <= stride);
1879
1880
0
    static const int lut_size = 256;
1881
0
    float *weights = talloc_array(NULL, float, lut_size * stride);
1882
0
    mp_compute_lut(scaler->kernel, lut_size, stride, weights);
1883
1884
0
    bool use_1d = scaler->kernel->polar && (p->ra->caps & RA_CAP_TEX_1D);
1885
1886
0
    struct ra_tex_params lut_params = {
1887
0
        .dimensions = use_1d ? 1 : 2,
1888
0
        .w = use_1d ? lut_size : width,
1889
0
        .h = use_1d ? 1 : lut_size,
1890
0
        .d = 1,
1891
0
        .format = fmt,
1892
0
        .render_src = true,
1893
0
        .src_linear = true,
1894
0
        .initial_data = weights,
1895
0
    };
1896
0
    scaler->lut = ra_tex_create(p->ra, &lut_params);
1897
1898
0
    talloc_free(weights);
1899
1900
0
    debug_check_gl(p, "after initializing scaler");
1901
0
}
1902
1903
// Special helper for sampling from two separated stages
1904
static void pass_sample_separated(struct gl_video *p, struct image src,
1905
                                  struct scaler *scaler, int w, int h)
1906
0
{
1907
    // Separate the transformation into x and y components, per pass
1908
0
    struct gl_transform t_x = {
1909
0
        .m = {{src.transform.m[0][0], 0.0}, {src.transform.m[1][0], 1.0}},
1910
0
        .t = {src.transform.t[0], 0.0},
1911
0
    };
1912
0
    struct gl_transform t_y = {
1913
0
        .m = {{1.0, src.transform.m[0][1]}, {0.0, src.transform.m[1][1]}},
1914
0
        .t = {0.0, src.transform.t[1]},
1915
0
    };
1916
1917
    // First pass (scale only in the y dir)
1918
0
    src.transform = t_y;
1919
0
    sampler_prelude(p->sc, pass_bind(p, src));
1920
0
    GLSLF("// first pass\n");
1921
0
    pass_sample_separated_gen(p->sc, scaler, 0, 1);
1922
0
    GLSLF("color *= %f;\n", src.multiplier);
1923
0
    finish_pass_tex(p, &scaler->sep_fbo, src.w, h);
1924
1925
    // Second pass (scale only in the x dir)
1926
0
    src = image_wrap(scaler->sep_fbo, src.type, src.components);
1927
0
    src.transform = t_x;
1928
0
    pass_describe(p, "%s second pass",
1929
0
                  m_opt_choice_str(scaler->conf.kernel.functions,
1930
0
                                   scaler->conf.kernel.function));
1931
0
    sampler_prelude(p->sc, pass_bind(p, src));
1932
0
    pass_sample_separated_gen(p->sc, scaler, 1, 0);
1933
0
}
1934
1935
// Picks either the compute shader version or the regular sampler version
1936
// depending on hardware support
1937
static void pass_dispatch_sample_polar(struct gl_video *p, struct scaler *scaler,
1938
                                       struct image img, int w, int h)
1939
0
{
1940
0
    uint64_t reqs = RA_CAP_COMPUTE;
1941
0
    if ((p->ra->caps & reqs) != reqs)
1942
0
        goto fallback;
1943
1944
0
    int bound = ceil(scaler->kernel->radius_cutoff);
1945
0
    int offset = bound - 1; // padding top/left
1946
0
    int padding = offset + bound; // total padding
1947
1948
0
    float ratiox = (float)w / img.w,
1949
0
          ratioy = (float)h / img.h;
1950
1951
    // For performance we want to load at least as many pixels
1952
    // horizontally as there are threads in a warp (32 for nvidia), as
1953
    // well as enough to take advantage of shmem parallelism
1954
0
    const int warp_size = 32, threads = 256;
1955
0
    int bw = warp_size;
1956
0
    int bh = threads / bw;
1957
1958
    // We need to sample everything from base_min to base_max, so make sure
1959
    // we have enough room in shmem
1960
0
    int iw = (int)ceil(bw / ratiox) + padding + 1,
1961
0
        ih = (int)ceil(bh / ratioy) + padding + 1;
1962
1963
0
    int shmem_req = iw * ih * img.components * sizeof(float);
1964
0
    if (shmem_req > p->ra->max_shmem)
1965
0
        goto fallback;
1966
1967
0
    pass_is_compute(p, bw, bh, false);
1968
0
    pass_compute_polar(p->sc, scaler, img.components, bw, bh, iw, ih);
1969
0
    return;
1970
1971
0
fallback:
1972
    // Fall back to regular polar shader when compute shaders are unsupported
1973
    // or the kernel is too big for shmem
1974
0
    pass_sample_polar(p->sc, scaler, img.components,
1975
0
                      p->ra->caps & RA_CAP_GATHER);
1976
0
}
1977
1978
// Sample from image, with the src rectangle given by it.
1979
// The dst rectangle is implicit by what the caller will do next, but w and h
1980
// must still be what is going to be used (to dimension FBOs correctly).
1981
// This will write the scaled contents to the vec4 "color".
1982
// The scaler unit is initialized by this function; in order to avoid cache
1983
// thrashing, the scaler unit should usually use the same parameters.
1984
static void pass_sample(struct gl_video *p, struct image img,
1985
                        struct scaler *scaler, const struct scaler_config *conf,
1986
                        double scale_factor, int w, int h)
1987
0
{
1988
0
    reinit_scaler(p, scaler, conf, scale_factor, filter_sizes);
1989
1990
    // Describe scaler
1991
0
    const char *scaler_opt[] = {
1992
0
        [SCALER_SCALE] = "scale",
1993
0
        [SCALER_DSCALE] = "dscale",
1994
0
        [SCALER_CSCALE] = "cscale",
1995
0
        [SCALER_TSCALE] = "tscale",
1996
0
    };
1997
1998
0
    pass_describe(p, "%s=%s (%s)", scaler_opt[scaler->index],
1999
0
                  m_opt_choice_str(scaler->conf.kernel.functions,
2000
0
                                   scaler->conf.kernel.function),
2001
0
                  plane_names[img.type]);
2002
2003
0
    bool is_separated = scaler->kernel && !scaler->kernel->polar;
2004
2005
    // Set up the transformation+prelude and bind the texture, for everything
2006
    // other than separated scaling (which does this in the subfunction)
2007
0
    if (!is_separated)
2008
0
        sampler_prelude(p->sc, pass_bind(p, img));
2009
2010
    // Dispatch the scaler. They're all wildly different.
2011
0
    if (scaler->conf.kernel.function == SCALER_BILINEAR) {
2012
0
        GLSL(color = texture(tex, pos);)
2013
0
    } else if (scaler->conf.kernel.function == SCALER_BICUBIC_FAST) {
2014
0
        pass_sample_bicubic_fast(p->sc);
2015
0
    } else if (scaler->conf.kernel.function == SCALER_OVERSAMPLE) {
2016
0
        pass_sample_oversample(p->sc, scaler, w, h);
2017
0
    } else if (scaler->kernel && scaler->kernel->polar) {
2018
0
        pass_dispatch_sample_polar(p, scaler, img, w, h);
2019
0
    } else if (scaler->kernel) {
2020
0
        pass_sample_separated(p, img, scaler, w, h);
2021
0
    } else {
2022
0
        MP_ASSERT_UNREACHABLE(); // should never happen
2023
0
    }
2024
2025
    // Apply any required multipliers. Separated scaling already does this in
2026
    // its first stage
2027
0
    if (!is_separated)
2028
0
        GLSLF("color *= %f;\n", img.multiplier);
2029
2030
    // Micro-optimization: Avoid scaling unneeded channels
2031
0
    skip_unused(p, img.components);
2032
0
}
2033
2034
// Returns true if two images are semantically equivalent (same metadata)
2035
static bool image_equiv(struct image a, struct image b)
2036
0
{
2037
0
    return a.type == b.type &&
2038
0
           a.components == b.components &&
2039
0
           a.multiplier == b.multiplier &&
2040
0
           a.tex->params.format == b.tex->params.format &&
2041
0
           a.tex->params.w == b.tex->params.w &&
2042
0
           a.tex->params.h == b.tex->params.h &&
2043
0
           a.w == b.w &&
2044
0
           a.h == b.h &&
2045
0
           gl_transform_eq(a.transform, b.transform);
2046
0
}
2047
2048
static void deband_hook(struct gl_video *p, struct image img,
2049
                        struct gl_transform *trans, void *priv)
2050
0
{
2051
0
    pass_describe(p, "debanding (%s)", plane_names[img.type]);
2052
0
    pass_sample_deband(p->sc, p->opts.deband_opts, &p->lfg,
2053
0
                       p->image_params.color.transfer);
2054
0
}
2055
2056
static void unsharp_hook(struct gl_video *p, struct image img,
2057
                         struct gl_transform *trans, void *priv)
2058
0
{
2059
0
    pass_describe(p, "unsharp masking");
2060
0
    pass_sample_unsharp(p->sc, p->opts.unsharp);
2061
0
}
2062
2063
struct szexp_ctx {
2064
    struct gl_video *p;
2065
    struct image img;
2066
};
2067
2068
static bool szexp_lookup(void *priv, struct bstr var, float size[2])
2069
0
{
2070
0
    struct szexp_ctx *ctx = priv;
2071
0
    struct gl_video *p = ctx->p;
2072
2073
0
    if (bstr_equals0(var, "NATIVE_CROPPED")) {
2074
0
        size[0] = (p->src_rect.x1 - p->src_rect.x0) * p->texture_offset.m[0][0];
2075
0
        size[1] = (p->src_rect.y1 - p->src_rect.y0) * p->texture_offset.m[1][1];
2076
0
        return true;
2077
0
    }
2078
2079
    // The size of OUTPUT is determined. It could be useful for certain
2080
    // user shaders to skip passes.
2081
0
    if (bstr_equals0(var, "OUTPUT")) {
2082
0
        size[0] = p->dst_rect.x1 - p->dst_rect.x0;
2083
0
        size[1] = p->dst_rect.y1 - p->dst_rect.y0;
2084
0
        return true;
2085
0
    }
2086
2087
    // HOOKED is a special case
2088
0
    if (bstr_equals0(var, "HOOKED")) {
2089
0
        size[0] = ctx->img.w;
2090
0
        size[1] = ctx->img.h;
2091
0
        return true;
2092
0
    }
2093
2094
0
    for (int o = 0; o < p->num_saved_imgs; o++) {
2095
0
        if (bstr_equals0(var, p->saved_imgs[o].name)) {
2096
0
            size[0] = p->saved_imgs[o].img.w;
2097
0
            size[1] = p->saved_imgs[o].img.h;
2098
0
            return true;
2099
0
        }
2100
0
    }
2101
2102
0
    return false;
2103
0
}
2104
2105
static bool user_hook_cond(struct gl_video *p, struct image img, void *priv)
2106
0
{
2107
0
    struct gl_user_shader_hook *shader = priv;
2108
0
    mp_assert(shader);
2109
2110
0
    float res = false;
2111
0
    struct szexp_ctx ctx = {p, img};
2112
0
    eval_szexpr(p->log, &ctx, szexp_lookup, shader->cond, &res);
2113
0
    return res;
2114
0
}
2115
2116
static void user_hook(struct gl_video *p, struct image img,
2117
                      struct gl_transform *trans, void *priv)
2118
0
{
2119
0
    struct gl_user_shader_hook *shader = priv;
2120
0
    mp_assert(shader);
2121
0
    load_shader(p, shader->pass_body);
2122
2123
0
    pass_describe(p, "user shader: %.*s (%s)", BSTR_P(shader->pass_desc),
2124
0
                  plane_names[img.type]);
2125
2126
0
    if (shader->compute.active) {
2127
0
        p->pass_compute = shader->compute;
2128
0
        GLSLF("hook();\n");
2129
0
    } else {
2130
0
        GLSLF("color = hook();\n");
2131
0
    }
2132
2133
    // Make sure we at least create a legal FBO on failure, since it's better
2134
    // to do this and display an error message than just crash OpenGL
2135
0
    float w = 1.0, h = 1.0;
2136
2137
0
    eval_szexpr(p->log, &(struct szexp_ctx){p, img}, szexp_lookup, shader->width, &w);
2138
0
    eval_szexpr(p->log, &(struct szexp_ctx){p, img}, szexp_lookup, shader->height, &h);
2139
2140
0
    *trans = (struct gl_transform){{{w / img.w, 0}, {0, h / img.h}}};
2141
0
    gl_transform_trans(shader->offset, trans);
2142
0
}
2143
2144
static bool add_user_hook(void *priv, const struct gl_user_shader_hook *hook)
2145
0
{
2146
0
    struct gl_video *p = priv;
2147
0
    struct gl_user_shader_hook *copy = talloc_dup(p, (struct gl_user_shader_hook *)hook);
2148
0
    struct tex_hook texhook = {
2149
0
        .save_tex = bstrdup0(copy, copy->save_tex),
2150
0
        .components = copy->components,
2151
0
        .align_offset = copy->align_offset,
2152
0
        .hook = user_hook,
2153
0
        .cond = user_hook_cond,
2154
0
        .priv = copy,
2155
0
    };
2156
2157
0
    for (int h = 0; h < SHADER_MAX_HOOKS; h++)
2158
0
        texhook.hook_tex[h] = bstrdup0(copy, copy->hook_tex[h]);
2159
0
    for (int h = 0; h < SHADER_MAX_BINDS; h++)
2160
0
        texhook.bind_tex[h] = bstrdup0(copy, copy->bind_tex[h]);
2161
2162
0
    MP_TARRAY_APPEND(p, p->tex_hooks, p->num_tex_hooks, texhook);
2163
0
    return true;
2164
0
}
2165
2166
static bool add_user_tex(void *priv, struct gl_user_shader_tex tex)
2167
0
{
2168
0
    struct gl_video *p = priv;
2169
2170
0
    tex.tex = ra_tex_create(p->ra, &tex.params);
2171
0
    TA_FREEP(&tex.params.initial_data);
2172
2173
0
    if (!tex.tex)
2174
0
        return false;
2175
2176
0
    MP_TARRAY_APPEND(p, p->user_textures, p->num_user_textures, tex);
2177
0
    return true;
2178
0
}
2179
2180
static void load_user_shaders(struct gl_video *p, char **shaders)
2181
0
{
2182
0
    if (!shaders)
2183
0
        return;
2184
2185
0
    for (int n = 0; shaders[n] != NULL; n++) {
2186
0
        struct bstr file = load_cached_file(p, shaders[n]);
2187
0
        parse_user_shader(p->log, p->ra, file, p, add_user_hook, add_user_tex);
2188
0
    }
2189
0
}
2190
2191
static void gl_video_setup_hooks(struct gl_video *p)
2192
0
{
2193
0
    gl_video_reset_hooks(p);
2194
2195
0
    if (p->opts.deband) {
2196
0
        MP_TARRAY_APPEND(p, p->tex_hooks, p->num_tex_hooks, (struct tex_hook) {
2197
0
            .hook_tex = {"LUMA", "CHROMA", "RGB", "XYZ"},
2198
0
            .bind_tex = {"HOOKED"},
2199
0
            .hook = deband_hook,
2200
0
        });
2201
0
    }
2202
2203
0
    if (p->opts.unsharp != 0.0) {
2204
0
        MP_TARRAY_APPEND(p, p->tex_hooks, p->num_tex_hooks, (struct tex_hook) {
2205
0
            .hook_tex = {"MAIN"},
2206
0
            .bind_tex = {"HOOKED"},
2207
0
            .hook = unsharp_hook,
2208
0
        });
2209
0
    }
2210
2211
0
    load_user_shaders(p, p->opts.user_shaders);
2212
0
}
2213
2214
// sample from video textures, set "color" variable to yuv value
2215
static void pass_read_video(struct gl_video *p)
2216
0
{
2217
0
    struct image img[4];
2218
0
    struct gl_transform offsets[4];
2219
0
    pass_get_images(p, &p->image, img, offsets);
2220
2221
    // To keep the code as simple as possibly, we currently run all shader
2222
    // stages even if they would be unnecessary (e.g. no hooks for a texture).
2223
    // In the future, deferred image should optimize this away.
2224
2225
    // Merge semantically identical textures. This loop is done from back
2226
    // to front so that merged textures end up in the right order while
2227
    // simultaneously allowing us to skip unnecessary merges
2228
0
    for (int n = 3; n >= 0; n--) {
2229
0
        if (img[n].type == PLANE_NONE)
2230
0
            continue;
2231
2232
0
        int first = n;
2233
0
        int num = 0;
2234
2235
0
        for (int i = 0; i < n; i++) {
2236
0
            if (image_equiv(img[n], img[i]) &&
2237
0
                gl_transform_eq(offsets[n], offsets[i]))
2238
0
            {
2239
0
                GLSLF("// merging plane %d ...\n", i);
2240
0
                copy_image(p, &num, img[i]);
2241
0
                first = MPMIN(first, i);
2242
0
                img[i] = (struct image){0};
2243
0
            }
2244
0
        }
2245
2246
0
        if (num > 0) {
2247
0
            GLSLF("// merging plane %d ... into %d\n", n, first);
2248
0
            copy_image(p, &num, img[n]);
2249
0
            pass_describe(p, "merging planes");
2250
0
            finish_pass_tex(p, &p->merge_tex[n], img[n].w, img[n].h);
2251
0
            img[first] = image_wrap(p->merge_tex[n], img[n].type, num);
2252
0
            img[n] = (struct image){0};
2253
0
        }
2254
0
    }
2255
2256
    // If any textures are still in integer format by this point, we need
2257
    // to introduce an explicit conversion pass to avoid breaking hooks/scaling
2258
0
    for (int n = 0; n < 4; n++) {
2259
0
        if (img[n].tex && img[n].tex->params.format->ctype == RA_CTYPE_UINT) {
2260
0
            GLSLF("// use_integer fix for plane %d\n", n);
2261
0
            copy_image(p, &(int){0}, img[n]);
2262
0
            pass_describe(p, "use_integer fix");
2263
0
            finish_pass_tex(p, &p->integer_tex[n], img[n].w, img[n].h);
2264
0
            img[n] = image_wrap(p->integer_tex[n], img[n].type,
2265
0
                                img[n].components);
2266
0
        }
2267
0
    }
2268
2269
    // The basic idea is we assume the rgb/luma texture is the "reference" and
2270
    // scale everything else to match, after all planes are finalized.
2271
    // We find the reference texture first, in order to maintain texture offset
2272
    // between hooks on different type of planes.
2273
0
    int reference_tex_num = 0;
2274
0
    for (int n = 0; n < 4; n++) {
2275
0
        switch (img[n].type) {
2276
0
        case PLANE_RGB:
2277
0
        case PLANE_XYZ:
2278
0
        case PLANE_LUMA: break;
2279
0
        default: continue;
2280
0
        }
2281
2282
0
        reference_tex_num = n;
2283
0
        break;
2284
0
    }
2285
2286
    // Dispatch the hooks for all of these textures, saving and perhaps
2287
    // modifying them in the process
2288
0
    for (int n = 0; n < 4; n++) {
2289
0
        const char *name;
2290
0
        switch (img[n].type) {
2291
0
        case PLANE_RGB:    name = "RGB";    break;
2292
0
        case PLANE_LUMA:   name = "LUMA";   break;
2293
0
        case PLANE_CHROMA: name = "CHROMA"; break;
2294
0
        case PLANE_ALPHA:  name = "ALPHA";  break;
2295
0
        case PLANE_XYZ:    name = "XYZ";    break;
2296
0
        default: continue;
2297
0
        }
2298
2299
0
        img[n] = pass_hook(p, name, img[n], &offsets[n]);
2300
2301
0
        if (reference_tex_num == n) {
2302
            // The reference texture is finalized now.
2303
0
            p->texture_w = img[n].w;
2304
0
            p->texture_h = img[n].h;
2305
0
            p->texture_offset = offsets[n];
2306
0
        }
2307
0
    }
2308
2309
    // If chroma textures are in a subsampled semi-planar format and rotated,
2310
    // introduce an explicit conversion pass to avoid breaking chroma scalers.
2311
0
    for (int n = 0; n < 4; n++) {
2312
0
        if (img[n].tex && img[n].type == PLANE_CHROMA &&
2313
0
            img[n].tex->params.format->num_components == 2 &&
2314
0
            p->image_params.rotate % 180 == 90 &&
2315
0
            p->ra_format.chroma_w != 1)
2316
0
        {
2317
0
            GLSLF("// chroma fix for rotated plane %d\n", n);
2318
0
            copy_image(p, &(int){0}, img[n]);
2319
0
            pass_describe(p, "chroma fix for rotated plane");
2320
0
            finish_pass_tex(p, &p->chroma_tex[n], img[n].w, img[n].h);
2321
0
            img[n] = image_wrap(p->chroma_tex[n], img[n].type,
2322
0
                                img[n].components);
2323
0
        }
2324
0
    }
2325
2326
    // At this point all planes are finalized but they may not be at the
2327
    // required size yet. Furthermore, they may have texture offsets that
2328
    // require realignment.
2329
2330
    // Compute the reference rect
2331
0
    struct mp_rect_f src = {0.0, 0.0, p->image_params.w, p->image_params.h};
2332
0
    struct mp_rect_f ref = src;
2333
0
    gl_transform_rect(p->texture_offset, &ref);
2334
2335
    // Explicitly scale all of the textures that don't match
2336
0
    for (int n = 0; n < 4; n++) {
2337
0
        if (img[n].type == PLANE_NONE)
2338
0
            continue;
2339
2340
        // If the planes are aligned identically, we will end up with the
2341
        // exact same source rectangle.
2342
0
        struct mp_rect_f rect = src;
2343
0
        gl_transform_rect(offsets[n], &rect);
2344
0
        if (mp_rect_f_seq(ref, rect))
2345
0
            continue;
2346
2347
        // If the rectangles differ, then our planes have a different
2348
        // alignment and/or size. First of all, we have to compute the
2349
        // corrections required to meet the target rectangle
2350
0
        struct gl_transform fix = {
2351
0
            .m = {{(ref.x1 - ref.x0) / (rect.x1 - rect.x0), 0.0},
2352
0
                  {0.0, (ref.y1 - ref.y0) / (rect.y1 - rect.y0)}},
2353
0
            .t = {ref.x0, ref.y0},
2354
0
        };
2355
2356
        // Since the scale in texture space is different from the scale in
2357
        // absolute terms, we have to scale the coefficients down to be
2358
        // relative to the texture's physical dimensions and local offset
2359
0
        struct gl_transform scale = {
2360
0
            .m = {{(float)img[n].w / p->texture_w, 0.0},
2361
0
                  {0.0, (float)img[n].h / p->texture_h}},
2362
0
            .t = {-rect.x0, -rect.y0},
2363
0
        };
2364
0
        if (p->image_params.rotate % 180 == 90)
2365
0
            MPSWAP(double, scale.m[0][0], scale.m[1][1]);
2366
2367
0
        gl_transform_trans(scale, &fix);
2368
2369
        // Since the texture transform is a function of the texture coordinates
2370
        // to texture space, rather than the other way around, we have to
2371
        // actually apply the *inverse* of this. Fortunately, calculating
2372
        // the inverse is relatively easy here.
2373
0
        fix.m[0][0] = 1.0 / fix.m[0][0];
2374
0
        fix.m[1][1] = 1.0 / fix.m[1][1];
2375
0
        fix.t[0] = fix.m[0][0] * -fix.t[0];
2376
0
        fix.t[1] = fix.m[1][1] * -fix.t[1];
2377
0
        gl_transform_trans(fix, &img[n].transform);
2378
2379
0
        int scaler_id = -1;
2380
0
        const char *name = NULL;
2381
0
        switch (img[n].type) {
2382
0
        case PLANE_RGB:
2383
0
        case PLANE_LUMA:
2384
0
        case PLANE_XYZ:
2385
0
            scaler_id = SCALER_SCALE;
2386
            // these aren't worth hooking, fringe hypothetical cases only
2387
0
            break;
2388
0
        case PLANE_CHROMA:
2389
0
            scaler_id = SCALER_CSCALE;
2390
0
            name = "CHROMA_SCALED";
2391
0
            break;
2392
0
        case PLANE_ALPHA:
2393
            // alpha always uses bilinear
2394
0
            name = "ALPHA_SCALED";
2395
0
        }
2396
2397
0
        if (scaler_id < 0)
2398
0
            continue;
2399
2400
0
        const struct scaler_config *conf = &p->opts.scaler[scaler_id];
2401
0
        struct scaler_config tmp;
2402
0
        if (conf->kernel.function == SCALER_INHERIT) {
2403
0
            tmp = *conf;
2404
0
            scaler_conf_merge(&tmp, &p->opts.scaler[SCALER_SCALE], scaler_id);
2405
0
            conf = &tmp;
2406
0
        }
2407
2408
0
        struct scaler *scaler = &p->scaler[scaler_id];
2409
2410
        // bilinear scaling is a free no-op thanks to GPU sampling
2411
0
        if (conf->kernel.function != SCALER_BILINEAR) {
2412
0
            GLSLF("// upscaling plane %d\n", n);
2413
0
            pass_sample(p, img[n], scaler, conf, 1.0, p->texture_w, p->texture_h);
2414
0
            finish_pass_tex(p, &p->scale_tex[n], p->texture_w, p->texture_h);
2415
0
            img[n] = image_wrap(p->scale_tex[n], img[n].type, img[n].components);
2416
0
        }
2417
2418
        // Run any post-scaling hooks
2419
0
        img[n] = pass_hook(p, name, img[n], NULL);
2420
0
    }
2421
2422
    // All planes are of the same size and properly aligned at this point
2423
0
    pass_describe(p, "combining planes");
2424
0
    int coord = 0;
2425
0
    for (int i = 0; i < 4; i++) {
2426
0
        if (img[i].type != PLANE_NONE)
2427
0
            copy_image(p, &coord, img[i]);
2428
0
    }
2429
0
    p->components = coord;
2430
0
}
2431
2432
// Utility function that simply binds a texture and reads from it, without any
2433
// transformations.
2434
static void pass_read_tex(struct gl_video *p, struct ra_tex *tex)
2435
0
{
2436
0
    struct image img = image_wrap(tex, PLANE_RGB, p->components);
2437
0
    copy_image(p, &(int){0}, img);
2438
0
}
2439
2440
// yuv conversion, and any other conversions before main up/down-scaling
2441
static void pass_convert_yuv(struct gl_video *p)
2442
0
{
2443
0
    struct gl_shader_cache *sc = p->sc;
2444
2445
0
    struct mp_csp_params cparams = MP_CSP_PARAMS_DEFAULTS;
2446
0
    cparams.gray = p->is_gray;
2447
0
    cparams.is_float = p->ra_format.component_type == RA_CTYPE_FLOAT;
2448
0
    mp_csp_set_image_params(&cparams, &p->image_params);
2449
0
    mp_csp_equalizer_state_get(p->video_eq, &cparams);
2450
0
    p->user_gamma = 1.0 / (cparams.gamma * p->opts.gamma);
2451
2452
0
    pass_describe(p, "color conversion");
2453
2454
0
    if (p->color_swizzle[0])
2455
0
        GLSLF("color = color.%s;\n", p->color_swizzle);
2456
2457
    // Pre-colormatrix input gamma correction
2458
0
    if (cparams.repr.sys == PL_COLOR_SYSTEM_XYZ)
2459
0
        pass_linearize(p->sc, p->image_params.color.transfer);
2460
2461
    // We always explicitly normalize the range in pass_read_video
2462
0
    cparams.input_bits = cparams.texture_bits = 0;
2463
2464
    // Conversion to RGB. For RGB itself, this still applies e.g. brightness
2465
    // and contrast controls, or expansion of e.g. LSB-packed 10 bit data.
2466
0
    struct pl_transform3x3 m = {0};
2467
0
    mp_get_csp_matrix(&cparams, &m);
2468
0
    gl_sc_uniform_mat3(sc, "colormatrix", true, &m.mat.m[0][0]);
2469
0
    gl_sc_uniform_vec3(sc, "colormatrix_c", m.c);
2470
2471
0
    GLSL(color.rgb = mat3(colormatrix) * color.rgb + colormatrix_c;)
2472
2473
0
    if (cparams.repr.sys == PL_COLOR_SYSTEM_XYZ) {
2474
0
        pass_delinearize(p->sc, p->image_params.color.transfer);
2475
        // mp_get_csp_matrix implicitly converts XYZ to DCI-P3
2476
0
        p->image_params.repr.sys = PL_COLOR_SYSTEM_RGB;
2477
0
        p->image_params.color.primaries = PL_COLOR_PRIM_DCI_P3;
2478
0
    }
2479
2480
0
    if (p->image_params.repr.sys == PL_COLOR_SYSTEM_BT_2020_C) {
2481
        // Conversion for C'rcY'cC'bc via the BT.2020 CL system:
2482
        // C'bc = (B'-Y'c) / 1.9404  | C'bc <= 0
2483
        //      = (B'-Y'c) / 1.5816  | C'bc >  0
2484
        //
2485
        // C'rc = (R'-Y'c) / 1.7184  | C'rc <= 0
2486
        //      = (R'-Y'c) / 0.9936  | C'rc >  0
2487
        //
2488
        // as per the BT.2020 specification, table 4. This is a non-linear
2489
        // transformation because (constant) luminance receives non-equal
2490
        // contributions from the three different channels.
2491
0
        GLSLF("// constant luminance conversion \n"
2492
0
              "color.br = color.br * mix(vec2(1.5816, 0.9936),              \n"
2493
0
              "                         vec2(1.9404, 1.7184),               \n"
2494
0
              "                         %s(lessThanEqual(color.br, vec2(0))))\n"
2495
0
              "          + color.gg;                                        \n",
2496
0
              gl_sc_bvec(p->sc, 2));
2497
        // Expand channels to camera-linear light. This shader currently just
2498
        // assumes everything uses the BT.2020 12-bit gamma function, since the
2499
        // difference between 10 and 12-bit is negligible for anything other
2500
        // than 12-bit content.
2501
0
        GLSLF("color.rgb = mix(color.rgb * vec3(1.0/4.5),                       \n"
2502
0
              "                pow((color.rgb + vec3(0.0993))*vec3(1.0/1.0993), \n"
2503
0
              "                    vec3(1.0/0.45)),                             \n"
2504
0
              "                %s(lessThanEqual(vec3(0.08145), color.rgb)));    \n",
2505
0
              gl_sc_bvec(p->sc, 3));
2506
        // Calculate the green channel from the expanded RYcB
2507
        // The BT.2020 specification says Yc = 0.2627*R + 0.6780*G + 0.0593*B
2508
0
        GLSL(color.g = (color.g - 0.2627*color.r - 0.0593*color.b)*1.0/0.6780;)
2509
        // Recompress to receive the R'G'B' result, same as other systems
2510
0
        GLSLF("color.rgb = mix(color.rgb * vec3(4.5),                       \n"
2511
0
              "                vec3(1.0993) * pow(color.rgb, vec3(0.45)) - vec3(0.0993), \n"
2512
0
              "                %s(lessThanEqual(vec3(0.0181), color.rgb))); \n",
2513
0
              gl_sc_bvec(p->sc, 3));
2514
0
    }
2515
2516
0
    p->components = 3;
2517
0
    if (!p->has_alpha) {
2518
0
        GLSL(color.a = 1.0;)
2519
0
    } else if (p->image_params.repr.alpha == PL_ALPHA_PREMULTIPLIED) {
2520
0
        p->components = 4;
2521
0
    } else {
2522
0
        p->components = 4;
2523
0
        GLSL(color = vec4(color.rgb * color.a, color.a);) // straight -> premul
2524
0
    }
2525
0
}
2526
2527
static void get_scale_factors(struct gl_video *p, bool transpose_rot, double xy[2])
2528
0
{
2529
0
    double target_w = p->src_rect.x1 - p->src_rect.x0;
2530
0
    double target_h = p->src_rect.y1 - p->src_rect.y0;
2531
0
    if (transpose_rot && p->image_params.rotate % 180 == 90)
2532
0
        MPSWAP(double, target_w, target_h);
2533
0
    xy[0] = (p->dst_rect.x1 - p->dst_rect.x0) / target_w;
2534
0
    xy[1] = (p->dst_rect.y1 - p->dst_rect.y0) / target_h;
2535
0
}
2536
2537
// Cropping.
2538
static void compute_src_transform(struct gl_video *p, struct gl_transform *tr)
2539
0
{
2540
0
    float sx = (p->src_rect.x1 - p->src_rect.x0) / (float)p->texture_w,
2541
0
          sy = (p->src_rect.y1 - p->src_rect.y0) / (float)p->texture_h,
2542
0
          ox = p->src_rect.x0,
2543
0
          oy = p->src_rect.y0;
2544
0
    struct gl_transform transform = {{{sx, 0}, {0, sy}}, {ox, oy}};
2545
2546
0
    gl_transform_trans(p->texture_offset, &transform);
2547
2548
0
    *tr = transform;
2549
0
}
2550
2551
// Takes care of the main scaling and pre/post-conversions
2552
static void pass_scale_main(struct gl_video *p)
2553
0
{
2554
    // Figure out the main scaler.
2555
0
    double xy[2];
2556
0
    get_scale_factors(p, true, xy);
2557
2558
    // actual scale factor should be divided by the scale factor of prescaling.
2559
0
    xy[0] /= p->texture_offset.m[0][0];
2560
0
    xy[1] /= p->texture_offset.m[1][1];
2561
2562
    // The calculation of scale factor involves 32-bit float(from gl_transform),
2563
    // use non-strict equality test to tolerate precision loss.
2564
0
    bool downscaling = xy[0] < 1.0 - FLT_EPSILON || xy[1] < 1.0 - FLT_EPSILON;
2565
0
    bool upscaling = !downscaling && (xy[0] > 1.0 + FLT_EPSILON ||
2566
0
                                      xy[1] > 1.0 + FLT_EPSILON);
2567
0
    double scale_factor = 1.0;
2568
2569
0
    struct scaler *scaler = &p->scaler[SCALER_SCALE];
2570
0
    struct scaler_config scaler_conf = p->opts.scaler[SCALER_SCALE];
2571
0
    if (p->opts.scaler_resizes_only && !downscaling && !upscaling) {
2572
0
        scaler_conf.kernel.function = SCALER_BILINEAR;
2573
        // For scaler-resizes-only, we round the texture offset to
2574
        // the nearest round value in order to prevent ugly blurriness
2575
        // (in exchange for slightly shifting the image by up to half a
2576
        // subpixel)
2577
0
        p->texture_offset.t[0] = roundf(p->texture_offset.t[0]);
2578
0
        p->texture_offset.t[1] = roundf(p->texture_offset.t[1]);
2579
0
    }
2580
0
    if (downscaling) {
2581
0
        scaler_conf = p->opts.scaler[SCALER_DSCALE];
2582
0
        scaler = &p->scaler[SCALER_DSCALE];
2583
0
        scaler_conf_merge(&scaler_conf, &p->opts.scaler[SCALER_SCALE], SCALER_DSCALE);
2584
0
    }
2585
2586
    // When requesting correct-downscaling and the clip is anamorphic, and
2587
    // because only a single scale factor is used for both axes, enable it only
2588
    // when both axes are downscaled, and use the milder of the factors to not
2589
    // end up with too much blur on one axis (even if we end up with sub-optimal
2590
    // scale factor on the other axis). This is better than not respecting
2591
    // correct scaling at all for anamorphic clips.
2592
0
    double f = MPMAX(xy[0], xy[1]);
2593
0
    if (p->opts.correct_downscaling && f < 1.0)
2594
0
        scale_factor = 1.0 / f;
2595
2596
    // Pre-conversion, like linear light/sigmoidization
2597
0
    GLSLF("// scaler pre-conversion\n");
2598
0
    bool use_linear = false;
2599
0
    if (downscaling) {
2600
0
        use_linear = p->opts.linear_downscaling;
2601
2602
        // Linear light downscaling results in nasty artifacts for HDR curves
2603
        // due to the potentially extreme brightness differences severely
2604
        // compounding any ringing. So just scale in gamma light instead.
2605
0
        if (pl_color_space_is_hdr(&p->image_params.color))
2606
0
            use_linear = false;
2607
0
    } else if (upscaling) {
2608
0
        use_linear = p->opts.linear_upscaling || p->opts.sigmoid_upscaling;
2609
0
    }
2610
2611
0
    if (use_linear) {
2612
0
        p->use_linear = true;
2613
0
        pass_linearize(p->sc, p->image_params.color.transfer);
2614
0
        pass_opt_hook_point(p, "LINEAR", NULL);
2615
0
    }
2616
2617
0
    bool use_sigmoid = use_linear && p->opts.sigmoid_upscaling && upscaling;
2618
0
    float sig_center, sig_slope, sig_offset, sig_scale;
2619
0
    if (use_sigmoid) {
2620
        // Coefficients for the sigmoidal transform are taken from the
2621
        // formula here: http://www.imagemagick.org/Usage/color_mods/#sigmoidal
2622
0
        sig_center = p->opts.sigmoid_center;
2623
0
        sig_slope  = p->opts.sigmoid_slope;
2624
        // This function needs to go through (0,0) and (1,1) so we compute the
2625
        // values at 1 and 0, and then scale/shift them, respectively.
2626
0
        sig_offset = 1.0/(1+expf(sig_slope * sig_center));
2627
0
        sig_scale  = 1.0/(1+expf(sig_slope * (sig_center-1))) - sig_offset;
2628
0
        GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
2629
0
        GLSLF("color.rgb = %f - log(1.0/(color.rgb * %f + %f) - 1.0) * 1.0/%f;\n",
2630
0
                sig_center, sig_scale, sig_offset, sig_slope);
2631
0
        pass_opt_hook_point(p, "SIGMOID", NULL);
2632
0
    }
2633
2634
0
    pass_opt_hook_point(p, "PREKERNEL", NULL);
2635
2636
0
    int vp_w = p->dst_rect.x1 - p->dst_rect.x0;
2637
0
    int vp_h = p->dst_rect.y1 - p->dst_rect.y0;
2638
0
    struct gl_transform transform;
2639
0
    compute_src_transform(p, &transform);
2640
2641
0
    GLSLF("// main scaling\n");
2642
0
    finish_pass_tex(p, &p->indirect_tex, p->texture_w, p->texture_h);
2643
0
    struct image src = image_wrap(p->indirect_tex, PLANE_RGB, p->components);
2644
0
    gl_transform_trans(transform, &src.transform);
2645
0
    pass_sample(p, src, scaler, &scaler_conf, scale_factor, vp_w, vp_h);
2646
2647
    // Changes the texture size to display size after main scaler.
2648
0
    p->texture_w = vp_w;
2649
0
    p->texture_h = vp_h;
2650
2651
0
    pass_opt_hook_point(p, "POSTKERNEL", NULL);
2652
2653
0
    GLSLF("// scaler post-conversion\n");
2654
0
    if (use_sigmoid) {
2655
        // Inverse of the transformation above
2656
0
        GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
2657
0
        GLSLF("color.rgb = (1.0/(1.0 + exp(%f * (%f - color.rgb))) - %f) * 1.0/%f;\n",
2658
0
                sig_slope, sig_center, sig_offset, sig_scale);
2659
0
    }
2660
0
}
2661
2662
// Adapts the colors to the right output color space. (Final pass during
2663
// rendering)
2664
// If OSD is true, ignore any changes that may have been made to the video
2665
// by previous passes (i.e. linear scaling)
2666
static void pass_colormanage(struct gl_video *p, struct pl_color_space src,
2667
                             enum mp_csp_light src_light,
2668
                             const struct pl_color_space *fbo_csp, int flags, bool osd)
2669
0
{
2670
0
    struct ra *ra = p->ra;
2671
2672
    // Configure the destination according to the FBO color space,
2673
    // unless specific transfer function, primaries or target peak
2674
    // is set. If values are set to _AUTO, the most likely intended
2675
    // values are guesstimated later in this function.
2676
0
    struct pl_color_space dst = {
2677
0
        .transfer = p->opts.target_trc == PL_COLOR_TRC_UNKNOWN ?
2678
0
                        fbo_csp->transfer : p->opts.target_trc,
2679
0
        .primaries = p->opts.target_prim == PL_COLOR_PRIM_UNKNOWN ?
2680
0
                     fbo_csp->primaries : p->opts.target_prim,
2681
0
        .hdr.max_luma = !p->opts.target_peak ?
2682
0
                        fbo_csp->hdr.max_luma : p->opts.target_peak,
2683
0
    };
2684
2685
0
    if (!p->colorspace_override_warned &&
2686
0
        ((fbo_csp->transfer && dst.transfer != fbo_csp->transfer) ||
2687
0
         (fbo_csp->primaries && dst.primaries != fbo_csp->primaries)))
2688
0
    {
2689
0
        MP_WARN(p, "One or more colorspace value is being overridden "
2690
0
                   "by user while the FBO provides colorspace information: "
2691
0
                   "transfer function: (dst: %s, fbo: %s), "
2692
0
                   "primaries: (dst: %s, fbo: %s). "
2693
0
                   "Rendering can lead to incorrect results!\n",
2694
0
                m_opt_choice_str(pl_csp_trc_names,  dst.transfer),
2695
0
                m_opt_choice_str(pl_csp_trc_names,  fbo_csp->transfer),
2696
0
                m_opt_choice_str(pl_csp_prim_names, dst.primaries),
2697
0
                m_opt_choice_str(pl_csp_prim_names, fbo_csp->primaries));
2698
0
        p->colorspace_override_warned = true;
2699
0
    }
2700
2701
0
    enum mp_csp_light dst_light = dst.transfer == PL_COLOR_TRC_HLG ?
2702
0
                                    MP_CSP_LIGHT_SCENE_HLG : MP_CSP_LIGHT_DISPLAY;
2703
2704
0
    if (p->use_lut_3d && (flags & RENDER_SCREEN_COLOR)) {
2705
        // The 3DLUT is always generated against the video's original source
2706
        // space, *not* the reference space. (To avoid having to regenerate
2707
        // the 3DLUT for the OSD on every frame)
2708
0
        enum pl_color_primaries prim_orig = p->image_params.color.primaries;
2709
0
        enum pl_color_transfer trc_orig = p->image_params.color.transfer;
2710
2711
        // One exception: HDR is not implemented by LittleCMS for technical
2712
        // limitation reasons, so we use a gamma 2.2 input curve here instead.
2713
        // We could pick any value we want here, the difference is just coding
2714
        // efficiency.
2715
0
        if (pl_color_space_is_hdr(&p->image_params.color))
2716
0
            trc_orig = PL_COLOR_TRC_GAMMA22;
2717
2718
0
        if (gl_video_get_lut3d(p, prim_orig, trc_orig)) {
2719
0
            dst.primaries = prim_orig;
2720
0
            dst.transfer = trc_orig;
2721
0
            mp_assert(dst.primaries && dst.transfer);
2722
0
        }
2723
0
    }
2724
2725
0
    if (dst.primaries == PL_COLOR_PRIM_UNKNOWN) {
2726
        // The vast majority of people are on sRGB or BT.709 displays, so pick
2727
        // this as the default output color space.
2728
0
        dst.primaries = PL_COLOR_PRIM_BT_709;
2729
2730
0
        if (src.primaries == PL_COLOR_PRIM_BT_601_525 ||
2731
0
            src.primaries == PL_COLOR_PRIM_BT_601_625)
2732
0
        {
2733
            // Since we auto-pick BT.601 and BT.709 based on the dimensions,
2734
            // combined with the fact that they're very similar to begin with,
2735
            // and to avoid confusing the average user, just don't adapt BT.601
2736
            // content automatically at all.
2737
0
            dst.primaries = src.primaries;
2738
0
        }
2739
0
    }
2740
2741
0
    if (dst.transfer == PL_COLOR_TRC_UNKNOWN) {
2742
        // Most people seem to complain when the image is darker or brighter
2743
        // than what they're "used to", so just avoid changing the gamma
2744
        // altogether by default. The only exceptions to this rule apply to
2745
        // very unusual TRCs, which even hardcode technoluddites would probably
2746
        // not enjoy viewing unaltered.
2747
0
        dst.transfer = src.transfer;
2748
2749
        // Avoid outputting linear light or HDR content "by default". For these
2750
        // just pick gamma 2.2 as a default, since it's a good estimate for
2751
        // the response of typical displays
2752
0
        if (dst.transfer == PL_COLOR_TRC_LINEAR || pl_color_space_is_hdr(&dst))
2753
0
            dst.transfer = PL_COLOR_TRC_GAMMA22;
2754
0
    }
2755
2756
    // If there's no specific signal peak known for the output display, infer
2757
    // it from the chosen transfer function. Also normalize the src peak, in
2758
    // case it was unknown
2759
0
    if (!dst.hdr.max_luma)
2760
0
        dst.hdr.max_luma = pl_color_transfer_nominal_peak(dst.transfer) * MP_REF_WHITE;
2761
0
    if (!src.hdr.max_luma)
2762
0
        src.hdr.max_luma = pl_color_transfer_nominal_peak(src.transfer) * MP_REF_WHITE;
2763
2764
    // Whitelist supported modes
2765
0
    switch (p->opts.tone_map.curve) {
2766
0
    case TONE_MAPPING_AUTO:
2767
0
    case TONE_MAPPING_CLIP:
2768
0
    case TONE_MAPPING_MOBIUS:
2769
0
    case TONE_MAPPING_REINHARD:
2770
0
    case TONE_MAPPING_HABLE:
2771
0
    case TONE_MAPPING_GAMMA:
2772
0
    case TONE_MAPPING_LINEAR:
2773
0
    case TONE_MAPPING_BT_2390:
2774
0
        break;
2775
0
    default:
2776
0
        MP_WARN(p, "Tone mapping curve unsupported by vo_gpu, falling back.\n");
2777
0
        p->opts.tone_map.curve = TONE_MAPPING_AUTO;
2778
0
        break;
2779
0
    }
2780
2781
0
    switch (p->opts.tone_map.gamut_mode) {
2782
0
    case GAMUT_AUTO:
2783
0
    case GAMUT_WARN:
2784
0
    case GAMUT_CLIP:
2785
0
    case GAMUT_DESATURATE:
2786
0
        break;
2787
0
    default:
2788
0
        MP_WARN(p, "Gamut mapping mode unsupported by vo_gpu, falling back.\n");
2789
0
        p->opts.tone_map.gamut_mode = GAMUT_AUTO;
2790
0
        break;
2791
0
    }
2792
2793
0
    struct gl_tone_map_opts tone_map = p->opts.tone_map;
2794
0
    bool detect_peak = tone_map.compute_peak >= 0 && pl_color_space_is_hdr(&src)
2795
0
                       && src.hdr.max_luma > dst.hdr.max_luma;
2796
2797
0
    if (detect_peak && !p->hdr_peak_ssbo) {
2798
0
        struct {
2799
0
            float average[2];
2800
0
            int32_t frame_sum;
2801
0
            uint32_t frame_max;
2802
0
            uint32_t counter;
2803
0
        } peak_ssbo = {0};
2804
2805
0
        struct ra_buf_params params = {
2806
0
            .type = RA_BUF_TYPE_SHADER_STORAGE,
2807
0
            .size = sizeof(peak_ssbo),
2808
0
            .initial_data = &peak_ssbo,
2809
0
        };
2810
2811
0
        p->hdr_peak_ssbo = ra_buf_create(ra, &params);
2812
0
        if (!p->hdr_peak_ssbo) {
2813
0
            MP_WARN(p, "Failed to create HDR peak detection SSBO, disabling.\n");
2814
0
            tone_map.compute_peak = p->opts.tone_map.compute_peak = -1;
2815
0
            detect_peak = false;
2816
0
        }
2817
0
    }
2818
2819
0
    if (detect_peak) {
2820
0
        pass_describe(p, "detect HDR peak");
2821
0
        pass_is_compute(p, 8, 8, true); // 8x8 is good for performance
2822
0
        gl_sc_ssbo(p->sc, "PeakDetect", p->hdr_peak_ssbo,
2823
0
            "vec2 average;"
2824
0
            "int frame_sum;"
2825
0
            "uint frame_max;"
2826
0
            "uint counter;"
2827
0
        );
2828
0
    } else {
2829
0
        tone_map.compute_peak = -1;
2830
0
    }
2831
2832
    // Adapt from src to dst as necessary
2833
0
    pass_color_map(p->sc, p->use_linear && !osd, &src, &dst, src_light, dst_light, &tone_map);
2834
2835
0
    if (!osd) {
2836
0
        struct mp_csp_params cparams = MP_CSP_PARAMS_DEFAULTS;
2837
0
        mp_csp_equalizer_state_get(p->video_eq, &cparams);
2838
0
        if (cparams.levels_out == PL_COLOR_LEVELS_UNKNOWN)
2839
0
            cparams.levels_out = PL_COLOR_LEVELS_FULL;
2840
0
        p->target_params = (struct mp_image_params){
2841
0
            .imgfmt_name = p->fbo_format ? p->fbo_format->name : "unknown",
2842
0
            .w = mp_rect_w(p->dst_rect),
2843
0
            .h = mp_rect_h(p->dst_rect),
2844
0
            .color = dst,
2845
0
            .repr = {.sys = PL_COLOR_SYSTEM_RGB, .levels = cparams.levels_out},
2846
0
            .rotate = p->image_params.rotate,
2847
0
        };
2848
0
    }
2849
2850
0
    if (p->use_lut_3d && (flags & RENDER_SCREEN_COLOR)) {
2851
0
        gl_sc_uniform_texture(p->sc, "lut_3d", p->lut_3d_texture);
2852
0
        GLSL(vec3 cpos;)
2853
0
        for (int i = 0; i < 3; i++)
2854
0
            GLSLF("cpos[%d] = LUT_POS(color[%d], %d.0);\n", i, i, p->lut_3d_size[i]);
2855
0
        GLSL(color.rgb = tex3D(lut_3d, cpos).rgb;)
2856
0
    }
2857
0
}
2858
2859
void gl_video_set_fb_depth(struct gl_video *p, int fb_depth)
2860
0
{
2861
0
    p->fb_depth = fb_depth;
2862
0
}
2863
2864
static void pass_dither(struct gl_video *p, const struct ra_fbo *fbo)
2865
0
{
2866
    // Assume 8 bits per component if unknown.
2867
0
    int dst_depth = p->fb_depth > 0 ? p->fb_depth : 8;
2868
0
    if (p->opts.dither_depth > 0)
2869
0
        dst_depth = p->opts.dither_depth;
2870
2871
0
    if (p->opts.dither_depth < 0 || p->opts.dither_algo == DITHER_NONE)
2872
0
        return;
2873
2874
0
    if (p->opts.dither_algo == DITHER_ERROR_DIFFUSION) {
2875
0
        const struct error_diffusion_kernel *kernel =
2876
0
            mp_find_error_diffusion_kernel(p->opts.error_diffusion);
2877
0
        int o_w = p->dst_rect.x1 - p->dst_rect.x0,
2878
0
            o_h = p->dst_rect.y1 - p->dst_rect.y0;
2879
2880
0
        int shmem_req = mp_ef_compute_shared_memory_size(kernel, o_h);
2881
0
        if (shmem_req > p->ra->max_shmem) {
2882
0
            MP_WARN(p, "Fallback to dither=fruit because there is no enough "
2883
0
                       "shared memory (%d/%d).\n",
2884
0
                       shmem_req, (int)p->ra->max_shmem);
2885
0
            p->opts.dither_algo = DITHER_FRUIT;
2886
0
        } else {
2887
0
            finish_pass_tex(p, &p->error_diffusion_tex[0], o_w, o_h);
2888
2889
0
            struct image img = image_wrap(p->error_diffusion_tex[0], PLANE_RGB, p->components);
2890
2891
            // Ensure the block size doesn't exceed the maximum of the
2892
            // implementation.
2893
0
            int block_size = MPMIN(p->ra->max_compute_group_threads, o_h);
2894
2895
0
            pass_describe(p, "dither=error-diffusion (kernel=%s, depth=%d)",
2896
0
                             kernel->name, dst_depth);
2897
2898
0
            p->pass_compute = (struct compute_info) {
2899
0
                .active = true,
2900
0
                .threads_w = block_size,
2901
0
                .threads_h = 1,
2902
0
                .directly_writes = true
2903
0
            };
2904
2905
0
            int tex_id = pass_bind(p, img);
2906
2907
0
            pass_error_diffusion(p->sc, kernel, tex_id, o_w, o_h,
2908
0
                                 dst_depth, block_size);
2909
2910
0
            finish_pass_tex(p, &p->error_diffusion_tex[1], o_w, o_h);
2911
2912
0
            img = image_wrap(p->error_diffusion_tex[1], PLANE_RGB, p->components);
2913
0
            copy_image(p, &(int){0}, img);
2914
2915
0
            return;
2916
0
        }
2917
0
    }
2918
2919
0
    if (!p->dither_texture) {
2920
0
        MP_VERBOSE(p, "Dither to %d.\n", dst_depth);
2921
2922
0
        int tex_size = 0;
2923
0
        void *tex_data = NULL;
2924
0
        const struct ra_format *fmt = NULL;
2925
0
        void *temp = NULL;
2926
2927
0
        if (p->opts.dither_algo == DITHER_FRUIT) {
2928
0
            int sizeb = p->opts.dither_size;
2929
0
            int size = 1 << sizeb;
2930
2931
0
            if (p->last_dither_matrix_size != size) {
2932
0
                p->last_dither_matrix = talloc_realloc(p, p->last_dither_matrix,
2933
0
                                                       float, size * size);
2934
0
                mp_make_fruit_dither_matrix(p->last_dither_matrix, sizeb);
2935
0
                p->last_dither_matrix_size = size;
2936
0
            }
2937
2938
            // Prefer R16 texture since they provide higher precision.
2939
0
            fmt = ra_find_unorm_format(p->ra, 2, 1);
2940
0
            if (!fmt)
2941
0
                fmt = ra_find_float16_format(p->ra, 1);
2942
0
            if (fmt) {
2943
0
                tex_size = size;
2944
0
                tex_data = p->last_dither_matrix;
2945
0
                if (fmt->ctype == RA_CTYPE_UNORM) {
2946
0
                    uint16_t *t = temp = talloc_array(NULL, uint16_t, size * size);
2947
0
                    for (int n = 0; n < size * size; n++)
2948
0
                        t[n] = p->last_dither_matrix[n] * UINT16_MAX;
2949
0
                    tex_data = t;
2950
0
                }
2951
0
            } else {
2952
0
                MP_VERBOSE(p, "GL too old. Falling back to ordered dither.\n");
2953
0
                p->opts.dither_algo = DITHER_ORDERED;
2954
0
            }
2955
0
        }
2956
2957
0
        if (p->opts.dither_algo == DITHER_ORDERED) {
2958
0
            temp = talloc_array(NULL, char, 8 * 8);
2959
0
            mp_make_ordered_dither_matrix(temp, 8);
2960
2961
0
            fmt = ra_find_unorm_format(p->ra, 1, 1);
2962
0
            tex_size = 8;
2963
0
            tex_data = temp;
2964
0
        }
2965
2966
0
        struct ra_tex_params params = {
2967
0
            .dimensions = 2,
2968
0
            .w = tex_size,
2969
0
            .h = tex_size,
2970
0
            .d = 1,
2971
0
            .format = fmt,
2972
0
            .render_src = true,
2973
0
            .src_repeat = true,
2974
0
            .initial_data = tex_data,
2975
0
        };
2976
0
        p->dither_texture = ra_tex_create(p->ra, &params);
2977
2978
0
        debug_check_gl(p, "dither setup");
2979
2980
0
        talloc_free(temp);
2981
2982
0
        if (!p->dither_texture)
2983
0
            return;
2984
0
    }
2985
2986
0
    GLSLF("// dithering\n");
2987
2988
    // This defines how many bits are considered significant for output on
2989
    // screen. The superfluous bits will be used for rounding according to the
2990
    // dither matrix. The precision of the source implicitly decides how many
2991
    // dither patterns can be visible.
2992
0
    int dither_quantization = (1 << dst_depth) - 1;
2993
0
    int dither_size = p->dither_texture->params.w;
2994
2995
0
    gl_sc_uniform_texture(p->sc, "dither", p->dither_texture);
2996
2997
0
    GLSLF("vec2 dither_coord = vec2(gl_FragCoord.x, %d.0 + %f * gl_FragCoord.y);",
2998
0
          fbo->flip ? fbo->tex->params.h : 0, fbo->flip ? -1.0 : 1.0);
2999
0
    GLSLF("vec2 dither_pos = dither_coord * 1.0/%d.0;\n", dither_size);
3000
3001
0
    if (p->opts.temporal_dither) {
3002
0
        int phase = (p->frames_rendered / p->opts.temporal_dither_period) % 8u;
3003
0
        float r = phase * (M_PI / 2); // rotate
3004
0
        float m = phase < 4 ? 1 : -1; // mirror
3005
3006
0
        float matrix[2][2] = {{cos(r),     -sin(r)    },
3007
0
                              {sin(r) * m,  cos(r) * m}};
3008
0
        gl_sc_uniform_dynamic(p->sc);
3009
0
        gl_sc_uniform_mat2(p->sc, "dither_trafo", true, &matrix[0][0]);
3010
3011
0
        GLSL(dither_pos = dither_trafo * dither_pos;)
3012
0
    }
3013
3014
0
    GLSL(float dither_value = texture(dither, dither_pos).r;)
3015
0
    GLSLF("color = floor(color * %d.0 + dither_value + 0.5 / %d.0) * 1.0/%d.0;\n",
3016
0
          dither_quantization, dither_size * dither_size, dither_quantization);
3017
0
}
3018
3019
// Draws the OSD, in scene-referred colors.. If cms is true, subtitles are
3020
// instead adapted to the display's gamut.
3021
static void pass_draw_osd(struct gl_video *p, int osd_flags, int frame_flags,
3022
                          double pts, struct mp_osd_res rect, const struct ra_fbo *fbo,
3023
                          bool cms)
3024
0
{
3025
0
    if (frame_flags & RENDER_FRAME_VF_SUBS)
3026
0
        osd_flags |= OSD_DRAW_SUB_FILTER;
3027
3028
0
    if ((osd_flags & OSD_DRAW_SUB_ONLY) && (osd_flags & OSD_DRAW_OSD_ONLY))
3029
0
        return;
3030
3031
0
    mpgl_osd_generate(p->osd, rect, pts, p->image_params.stereo3d, osd_flags);
3032
3033
0
    timer_pool_start(p->osd_timer);
3034
0
    for (int n = 0; n < MAX_OSD_PARTS; n++) {
3035
        // (This returns false if this part is empty with nothing to draw.)
3036
0
        if (!mpgl_osd_draw_prepare(p->osd, n, p->sc))
3037
0
            continue;
3038
        // When subtitles need to be color managed, assume they're in sRGB
3039
        // (for lack of anything saner to do)
3040
0
        if (cms) {
3041
0
            static const struct pl_color_space csp_srgb = {
3042
0
                .primaries = PL_COLOR_PRIM_BT_709,
3043
0
                .transfer = PL_COLOR_TRC_SRGB,
3044
0
            };
3045
3046
0
            pass_colormanage(p, csp_srgb, MP_CSP_LIGHT_DISPLAY, &fbo->color_space,
3047
0
                             frame_flags, true);
3048
0
        }
3049
0
        mpgl_osd_draw_finish(p->osd, n, p->sc, fbo);
3050
0
    }
3051
3052
0
    timer_pool_stop(p->osd_timer);
3053
0
    pass_describe(p, "drawing osd");
3054
0
    struct mp_pass_perf perf = timer_pool_measure(p->osd_timer);
3055
0
    pass_record(p, &perf);
3056
0
}
3057
3058
static float chroma_realign(int size, int pixel)
3059
0
{
3060
0
    return size / (float)chroma_upsize(size, pixel);
3061
0
}
3062
3063
// Minimal rendering code path, for GLES or OpenGL 2.1 without proper FBOs.
3064
static void pass_render_frame_dumb(struct gl_video *p)
3065
0
{
3066
0
    struct image img[4];
3067
0
    struct gl_transform off[4];
3068
0
    pass_get_images(p, &p->image, img, off);
3069
3070
0
    struct gl_transform transform;
3071
0
    compute_src_transform(p, &transform);
3072
3073
0
    int index = 0;
3074
0
    for (int i = 0; i < p->plane_count; i++) {
3075
0
        int cw = img[i].type == PLANE_CHROMA ? p->ra_format.chroma_w : 1;
3076
0
        int ch = img[i].type == PLANE_CHROMA ? p->ra_format.chroma_h : 1;
3077
0
        if (p->image_params.rotate % 180 == 90)
3078
0
            MPSWAP(int, cw, ch);
3079
3080
0
        struct gl_transform t = transform;
3081
0
        t.m[0][0] *= chroma_realign(p->texture_w, cw);
3082
0
        t.m[1][1] *= chroma_realign(p->texture_h, ch);
3083
3084
0
        t.t[0] /= cw;
3085
0
        t.t[1] /= ch;
3086
3087
0
        t.t[0] += off[i].t[0];
3088
0
        t.t[1] += off[i].t[1];
3089
3090
0
        gl_transform_trans(img[i].transform, &t);
3091
0
        img[i].transform = t;
3092
3093
0
        copy_image(p, &index, img[i]);
3094
0
    }
3095
3096
0
    pass_convert_yuv(p);
3097
0
}
3098
3099
// The main rendering function, takes care of everything up to and including
3100
// upscaling. p->image is rendered.
3101
// flags: bit set of RENDER_FRAME_* flags
3102
static bool pass_render_frame(struct gl_video *p, struct mp_image *mpi,
3103
                              uint64_t id, int flags)
3104
0
{
3105
    // initialize the texture parameters and temporary variables
3106
0
    p->texture_w = p->image_params.w;
3107
0
    p->texture_h = p->image_params.h;
3108
0
    p->texture_offset = identity_trans;
3109
0
    p->components = 0;
3110
0
    p->num_saved_imgs = 0;
3111
0
    p->idx_hook_textures = 0;
3112
0
    p->use_linear = false;
3113
3114
    // try uploading the frame
3115
0
    if (!pass_upload_image(p, mpi, id))
3116
0
        return false;
3117
3118
0
    if (p->image_params.rotate % 180 == 90)
3119
0
        MPSWAP(int, p->texture_w, p->texture_h);
3120
3121
0
    if (p->dumb_mode)
3122
0
        return true;
3123
3124
0
    pass_read_video(p);
3125
0
    pass_opt_hook_point(p, "NATIVE", &p->texture_offset);
3126
0
    pass_convert_yuv(p);
3127
0
    pass_opt_hook_point(p, "MAINPRESUB", &p->texture_offset);
3128
3129
    // For subtitles
3130
0
    double vpts = p->image.mpi->pts;
3131
0
    if (vpts == MP_NOPTS_VALUE)
3132
0
        vpts = p->osd_pts;
3133
3134
0
    if (p->osd && p->opts.blend_subs == BLEND_SUBS_VIDEO &&
3135
0
        (flags & RENDER_FRAME_SUBS))
3136
0
    {
3137
0
        double scale[2];
3138
0
        get_scale_factors(p, false, scale);
3139
0
        struct mp_osd_res rect = {
3140
0
            .w = p->texture_w, .h = p->texture_h,
3141
0
            .display_par = scale[1] / scale[0], // counter compensate scaling
3142
0
        };
3143
0
        finish_pass_tex(p, &p->blend_subs_tex, rect.w, rect.h);
3144
0
        struct ra_fbo fbo = { p->blend_subs_tex };
3145
0
        pass_draw_osd(p, OSD_DRAW_SUB_ONLY, flags, vpts, rect, &fbo, false);
3146
0
        pass_read_tex(p, p->blend_subs_tex);
3147
0
        pass_describe(p, "blend subs video");
3148
0
    }
3149
0
    pass_opt_hook_point(p, "MAIN", &p->texture_offset);
3150
3151
0
    pass_scale_main(p);
3152
3153
0
    int vp_w = p->dst_rect.x1 - p->dst_rect.x0,
3154
0
        vp_h = p->dst_rect.y1 - p->dst_rect.y0;
3155
0
    if (p->osd && p->opts.blend_subs == BLEND_SUBS_YES &&
3156
0
        (flags & RENDER_FRAME_SUBS))
3157
0
    {
3158
        // Recreate the real video size from the src/dst rects
3159
0
        struct mp_osd_res rect = {
3160
0
            .w = vp_w, .h = vp_h,
3161
0
            .ml = -p->src_rect.x0, .mr = p->src_rect.x1 - p->image_params.w,
3162
0
            .mt = -p->src_rect.y0, .mb = p->src_rect.y1 - p->image_params.h,
3163
0
            .display_par = 1.0,
3164
0
        };
3165
        // Adjust margins for scale
3166
0
        double scale[2];
3167
0
        get_scale_factors(p, true, scale);
3168
0
        rect.ml *= scale[0]; rect.mr *= scale[0];
3169
0
        rect.mt *= scale[1]; rect.mb *= scale[1];
3170
        // We should always blend subtitles in non-linear light
3171
0
        if (p->use_linear) {
3172
0
            pass_delinearize(p->sc, p->image_params.color.transfer);
3173
0
            p->use_linear = false;
3174
0
        }
3175
0
        finish_pass_tex(p, &p->blend_subs_tex, p->texture_w, p->texture_h);
3176
0
        struct ra_fbo fbo = { p->blend_subs_tex };
3177
0
        pass_draw_osd(p, OSD_DRAW_SUB_ONLY, flags, vpts, rect, &fbo, false);
3178
0
        pass_read_tex(p, p->blend_subs_tex);
3179
0
        pass_describe(p, "blend subs");
3180
0
    }
3181
3182
0
    pass_opt_hook_point(p, "SCALED", NULL);
3183
3184
0
    return true;
3185
0
}
3186
3187
static void pass_draw_to_screen(struct gl_video *p, const struct ra_fbo *fbo, int flags)
3188
0
{
3189
0
    if (p->dumb_mode)
3190
0
        pass_render_frame_dumb(p);
3191
3192
    // Adjust the overall gamma before drawing to screen
3193
0
    if (p->user_gamma != 1) {
3194
0
        gl_sc_uniform_f(p->sc, "user_gamma", p->user_gamma);
3195
0
        GLSL(color.rgb = clamp(color.rgb, 0.0, 1.0);)
3196
0
        GLSL(color.rgb = pow(color.rgb, vec3(user_gamma));)
3197
0
    }
3198
3199
0
    pass_colormanage(p, p->image_params.color, p->image_params.light,
3200
0
                     &fbo->color_space, flags, false);
3201
3202
    // Since finish_pass_fbo doesn't work with compute shaders, and neither
3203
    // does the checkerboard/dither code, we may need an indirection via
3204
    // p->screen_tex here.
3205
0
    if (p->pass_compute.active) {
3206
0
        int o_w = p->dst_rect.x1 - p->dst_rect.x0,
3207
0
            o_h = p->dst_rect.y1 - p->dst_rect.y0;
3208
0
        finish_pass_tex(p, &p->screen_tex, o_w, o_h);
3209
0
        struct image tmp = image_wrap(p->screen_tex, PLANE_RGB, p->components);
3210
0
        copy_image(p, &(int){0}, tmp);
3211
0
    }
3212
3213
0
    if (p->has_alpha) {
3214
0
        if (p->opts.background == BACKGROUND_TILES) {
3215
            // Draw checkerboard pattern to indicate transparency
3216
0
            struct m_color *c = p->opts.background_tile_color;
3217
0
            GLSLF("// transparency checkerboard\n");
3218
0
            GLSLF("vec2 tile_coord = vec2(gl_FragCoord.x, %d.0 + %f * gl_FragCoord.y);\n",
3219
0
                  fbo->flip ? fbo->tex->params.h : 0, fbo->flip ? -1.0 : 1.0);
3220
0
            GLSLF("bvec2 tile = lessThan(fract(tile_coord * 1.0 / %d.0), vec2(0.5));\n",
3221
0
                  p->opts.background_tile_size * 2);
3222
0
            GLSLF("vec3 background = tile.x == tile.y ? vec3(%f, %f, %f) : vec3(%f, %f, %f);\n",
3223
0
                  c[0].r / 255.0, c[0].g / 255.0, c[0].b / 255.0,
3224
0
                  c[1].r / 255.0, c[1].g / 255.0, c[1].b / 255.0);
3225
0
            GLSL(color.rgb += background.rgb * (1.0 - color.a);)
3226
0
            GLSL(color.a = 1.0;)
3227
0
        } else if (p->opts.background == BACKGROUND_COLOR) {
3228
            // Blend into background color (usually black)
3229
0
            struct m_color c = p->opts.background_color;
3230
0
            GLSLF("vec4 background = vec4(%f, %f, %f, %f);\n",
3231
0
                  c.r / 255.0, c.g / 255.0, c.b / 255.0, c.a / 255.0);
3232
0
            GLSL(color += background * (1.0 - color.a);)
3233
0
            GLSL(color.rgb *= vec3(color.a););
3234
0
        }
3235
0
    }
3236
3237
0
    pass_opt_hook_point(p, "OUTPUT", NULL);
3238
3239
0
    if (flags & RENDER_SCREEN_COLOR)
3240
0
        pass_dither(p, fbo);
3241
0
    pass_describe(p, "output to screen");
3242
0
    finish_pass_fbo(p, fbo, false, &p->dst_rect);
3243
0
}
3244
3245
// flags: bit set of RENDER_FRAME_* flags
3246
static bool update_surface(struct gl_video *p, struct mp_image *mpi,
3247
                           uint64_t id, struct surface *surf, int flags)
3248
0
{
3249
0
    int vp_w = p->dst_rect.x1 - p->dst_rect.x0,
3250
0
        vp_h = p->dst_rect.y1 - p->dst_rect.y0;
3251
3252
0
    pass_info_reset(p, false);
3253
0
    if (!pass_render_frame(p, mpi, id, flags))
3254
0
        return false;
3255
3256
    // Frame blending should always be done in linear light to preserve the
3257
    // overall brightness, otherwise this will result in flashing dark frames
3258
    // because mixing in compressed light artificially darkens the results
3259
0
    if (!p->use_linear) {
3260
0
        p->use_linear = true;
3261
0
        pass_linearize(p->sc, p->image_params.color.transfer);
3262
0
    }
3263
3264
0
    finish_pass_tex(p, &surf->tex, vp_w, vp_h);
3265
0
    surf->id  = id;
3266
0
    surf->pts = mpi->pts;
3267
0
    return true;
3268
0
}
3269
3270
// Draws an interpolate frame to fbo, based on the frame timing in t
3271
// flags: bit set of RENDER_FRAME_* flags
3272
static void gl_video_interpolate_frame(struct gl_video *p, struct vo_frame *t,
3273
                                       const struct ra_fbo *fbo, int flags)
3274
0
{
3275
0
    bool is_new = false;
3276
3277
    // Reset the queue completely if this is a still image, to avoid any
3278
    // interpolation artifacts from surrounding frames when unpausing or
3279
    // framestepping
3280
0
    if (t->still)
3281
0
        gl_video_reset_surfaces(p);
3282
3283
    // First of all, figure out if we have a frame available at all, and draw
3284
    // it manually + reset the queue if not
3285
0
    if (p->surfaces[p->surface_now].id == 0) {
3286
0
        struct surface *now = &p->surfaces[p->surface_now];
3287
0
        if (!update_surface(p, t->current, t->frame_id, now, flags))
3288
0
            return;
3289
0
        p->surface_idx = p->surface_now;
3290
0
        is_new = true;
3291
0
    }
3292
3293
    // Find the right frame for this instant
3294
0
    if (t->current) {
3295
0
        int next = surface_wrap(p->surface_now + 1);
3296
0
        while (p->surfaces[next].id &&
3297
0
               p->surfaces[next].id > p->surfaces[p->surface_now].id &&
3298
0
               p->surfaces[p->surface_now].id < t->frame_id)
3299
0
        {
3300
0
            p->surface_now = next;
3301
0
            next = surface_wrap(next + 1);
3302
0
        }
3303
0
    }
3304
3305
    // Figure out the queue size. For illustration, a filter radius of 2 would
3306
    // look like this: _ A [B] C D _
3307
    // A is surface_bse, B is surface_now, C is surface_now+1 and D is
3308
    // surface_end.
3309
0
    struct scaler *tscale = &p->scaler[SCALER_TSCALE];
3310
0
    reinit_scaler(p, tscale, &p->opts.scaler[SCALER_TSCALE], 1, tscale_sizes);
3311
0
    bool oversample = tscale->conf.kernel.function == SCALER_OVERSAMPLE;
3312
0
    bool linear = tscale->conf.kernel.function == SCALER_LINEAR;
3313
0
    int size;
3314
3315
0
    if (oversample || linear) {
3316
0
        size = 2;
3317
0
    } else {
3318
0
        mp_assert(tscale->kernel && !tscale->kernel->polar);
3319
0
        size = ceil(tscale->kernel->size);
3320
0
    }
3321
3322
0
    int radius = size/2;
3323
0
    int surface_now = p->surface_now;
3324
0
    int surface_bse = surface_wrap(surface_now - (radius-1));
3325
0
    int surface_end = surface_wrap(surface_now + radius);
3326
0
    mp_assert(surface_wrap(surface_bse + size-1) == surface_end);
3327
3328
    // Render new frames while there's room in the queue. Note that technically,
3329
    // this should be done before the step where we find the right frame, but
3330
    // it only barely matters at the very beginning of playback, and this way
3331
    // makes the code much more linear.
3332
0
    int surface_dst = surface_wrap(p->surface_idx + 1);
3333
0
    for (int i = 0; i < t->num_frames; i++) {
3334
        // Avoid overwriting data we might still need
3335
0
        if (surface_dst == surface_bse - 1)
3336
0
            break;
3337
3338
0
        struct mp_image *f = t->frames[i];
3339
0
        uint64_t f_id = t->frame_id + i;
3340
0
        if (!mp_image_params_static_equal(&f->params, &p->real_image_params))
3341
0
            continue;
3342
3343
0
        if (f_id > p->surfaces[p->surface_idx].id) {
3344
0
            struct surface *dst = &p->surfaces[surface_dst];
3345
0
            if (!update_surface(p, f, f_id, dst, flags))
3346
0
                return;
3347
0
            p->surface_idx = surface_dst;
3348
0
            surface_dst = surface_wrap(surface_dst + 1);
3349
0
            is_new = true;
3350
0
        }
3351
0
    }
3352
3353
    // Figure out whether the queue is "valid". A queue is invalid if the
3354
    // frames' PTS is not monotonically increasing. Anything else is invalid,
3355
    // so avoid blending incorrect data and just draw the latest frame as-is.
3356
    // Possible causes for failure of this condition include seeks, pausing,
3357
    // end of playback or start of playback.
3358
0
    bool valid = true;
3359
0
    for (int i = surface_bse, ii; valid && i != surface_end; i = ii) {
3360
0
        ii = surface_wrap(i + 1);
3361
0
        if (p->surfaces[i].id == 0 || p->surfaces[ii].id == 0) {
3362
0
            valid = false;
3363
0
        } else if (p->surfaces[ii].id < p->surfaces[i].id) {
3364
0
            valid = false;
3365
0
            MP_DBG(p, "interpolation queue underrun\n");
3366
0
        }
3367
0
    }
3368
3369
    // Update OSD PTS to synchronize subtitles with the displayed frame
3370
0
    p->osd_pts = p->surfaces[surface_now].pts;
3371
3372
    // Finally, draw the right mix of frames to the screen.
3373
0
    if (!is_new)
3374
0
        pass_info_reset(p, true);
3375
0
    pass_describe(p, "interpolation");
3376
0
    if (!valid || t->still) {
3377
        // surface_now is guaranteed to be valid, so we can safely use it.
3378
0
        pass_read_tex(p, p->surfaces[surface_now].tex);
3379
0
        p->is_interpolated = false;
3380
0
    } else {
3381
0
        double mix = t->vsync_offset / t->ideal_frame_duration;
3382
        // The scaler code always wants the fcoord to be between 0 and 1,
3383
        // so we try to adjust by using the previous set of N frames instead
3384
        // (which requires some extra checking to make sure it's valid)
3385
0
        if (mix < 0.0) {
3386
0
            int prev = surface_wrap(surface_bse - 1);
3387
0
            if (p->surfaces[prev].id != 0 &&
3388
0
                p->surfaces[prev].id < p->surfaces[surface_bse].id)
3389
0
            {
3390
0
                mix += 1.0;
3391
0
                surface_bse = prev;
3392
0
            } else {
3393
0
                mix = 0.0; // at least don't blow up, this should only
3394
                           // ever happen at the start of playback
3395
0
            }
3396
0
        }
3397
3398
0
        if (oversample) {
3399
            // Oversample uses the frame area as mix ratio, not the vsync
3400
            // position itself
3401
0
            double vsync_dist = t->vsync_interval / t->ideal_frame_duration,
3402
0
                   threshold = tscale->conf.kernel.params[0];
3403
0
            threshold = isnan(threshold) ? 0.0 : threshold;
3404
0
            mix = (1 - mix) / vsync_dist;
3405
0
            mix = mix <= 0 + threshold ? 0 : mix;
3406
0
            mix = mix >= 1 - threshold ? 1 : mix;
3407
0
            mix = 1 - mix;
3408
0
        }
3409
3410
        // Blend the frames together
3411
0
        if (oversample || linear) {
3412
0
            gl_sc_uniform_dynamic(p->sc);
3413
0
            gl_sc_uniform_f(p->sc, "inter_coeff", mix);
3414
0
            GLSL(color = mix(texture(texture0, texcoord0),
3415
0
                             texture(texture1, texcoord1),
3416
0
                             inter_coeff);)
3417
0
        } else {
3418
0
            gl_sc_uniform_dynamic(p->sc);
3419
0
            gl_sc_uniform_f(p->sc, "fcoord", mix);
3420
0
            pass_sample_separated_gen(p->sc, tscale, 0, 0);
3421
0
        }
3422
3423
        // Load all the required frames
3424
0
        for (int i = 0; i < size; i++) {
3425
0
            struct image img =
3426
0
                image_wrap(p->surfaces[surface_wrap(surface_bse+i)].tex,
3427
0
                           PLANE_RGB, p->components);
3428
            // Since the code in pass_sample_separated currently assumes
3429
            // the textures are bound in-order and starting at 0, we just
3430
            // assert to make sure this is the case (which it should always be)
3431
0
            int id = pass_bind(p, img);
3432
0
            mp_assert(id == i);
3433
0
        }
3434
3435
0
        MP_TRACE(p, "inter frame dur: %f vsync: %f, mix: %f\n",
3436
0
                 t->ideal_frame_duration, t->vsync_interval, mix);
3437
0
        p->is_interpolated = true;
3438
0
    }
3439
0
    pass_draw_to_screen(p, fbo, flags);
3440
3441
0
    p->frames_drawn += 1;
3442
0
}
3443
3444
void gl_video_render_frame(struct gl_video *p, struct vo_frame *frame,
3445
                           const struct ra_fbo *fbo, int flags)
3446
0
{
3447
0
    gl_video_update_options(p);
3448
3449
0
    struct mp_rect target_rc = {0, 0, fbo->tex->params.w, fbo->tex->params.h};
3450
3451
0
    p->broken_frame = false;
3452
3453
0
    bool has_frame = !!frame->current;
3454
3455
0
    struct m_color c = p->clear_color;
3456
0
    float clear_color[4] = {c.r / 255.0, c.g / 255.0, c.b / 255.0, c.a / 255.0};
3457
0
    clear_color[0] *= clear_color[3];
3458
0
    clear_color[1] *= clear_color[3];
3459
0
    clear_color[2] *= clear_color[3];
3460
0
    p->ra->fns->clear(p->ra, fbo->tex, clear_color, &target_rc);
3461
3462
0
    if (p->hwdec_overlay) {
3463
0
        if (has_frame) {
3464
0
            float *color = p->hwdec_overlay->overlay_colorkey;
3465
0
            p->ra->fns->clear(p->ra, fbo->tex, color, &p->dst_rect);
3466
0
        }
3467
3468
0
        p->hwdec_overlay->driver->overlay_frame(p->hwdec_overlay, frame->current,
3469
0
                                                &p->src_rect, &p->dst_rect,
3470
0
                                                frame->frame_id != p->image.id);
3471
3472
0
        if (frame->current)
3473
0
            p->osd_pts = frame->current->pts;
3474
3475
        // Disable GL rendering
3476
0
        has_frame = false;
3477
0
    }
3478
3479
0
    if (has_frame) {
3480
0
        bool interpolate = p->opts.interpolation && frame->display_synced &&
3481
0
                           (p->frames_drawn || !frame->still);
3482
0
        if (interpolate) {
3483
0
            double ratio = frame->ideal_frame_duration / frame->vsync_interval;
3484
0
            if (fabs(ratio - 1.0) < p->opts.interpolation_threshold)
3485
0
                interpolate = false;
3486
0
        }
3487
3488
0
        if (interpolate) {
3489
0
            gl_video_interpolate_frame(p, frame, fbo, flags);
3490
0
        } else {
3491
0
            bool is_new = frame->frame_id != p->image.id;
3492
3493
            // Redrawing a frame might update subtitles.
3494
0
            if (frame->still && p->opts.blend_subs)
3495
0
                is_new = true;
3496
3497
0
            if (is_new || !p->output_tex_valid) {
3498
0
                p->output_tex_valid = false;
3499
3500
0
                pass_info_reset(p, !is_new);
3501
0
                if (!pass_render_frame(p, frame->current, frame->frame_id, flags))
3502
0
                    goto done;
3503
3504
                // For the non-interpolation case, we draw to a single "cache"
3505
                // texture to speed up subsequent re-draws (if any exist)
3506
0
                bool repeats = frame->num_vsyncs > 1 && frame->display_synced;
3507
0
                bool r = false;
3508
0
                if ((repeats || frame->still) && !p->dumb_mode &&
3509
0
                    (p->ra->caps & RA_CAP_BLIT) && fbo->tex->params.blit_dst)
3510
0
                {
3511
                    // Attempt to use the same format as the destination FBO
3512
                    // if possible. Some RAs use a wrapped dummy format here,
3513
                    // so fall back to the fbo_format in that case.
3514
0
                    const struct ra_format *fmt = fbo->tex->params.format;
3515
0
                    if (fmt->dummy_format)
3516
0
                        fmt = p->fbo_format;
3517
0
                    r = ra_tex_resize(p->ra, p->log, &p->output_tex,
3518
0
                                      fbo->tex->params.w, fbo->tex->params.h,
3519
0
                                      fmt);
3520
0
                }
3521
0
                const struct ra_fbo *dest_fbo =
3522
0
                    r ? &(struct ra_fbo) { .tex = p->output_tex, .color_space = fbo->color_space } : fbo;
3523
0
                p->output_tex_valid = r;
3524
0
                pass_draw_to_screen(p, dest_fbo, flags);
3525
0
            }
3526
3527
            // "output tex valid" and "output tex needed" are equivalent
3528
0
            if (p->output_tex_valid && fbo->tex->params.blit_dst) {
3529
0
                pass_info_reset(p, true);
3530
0
                pass_describe(p, "redraw cached frame");
3531
0
                struct mp_rect src = p->dst_rect;
3532
0
                struct mp_rect dst = src;
3533
0
                if (fbo->flip) {
3534
0
                    dst.y0 = fbo->tex->params.h - src.y0;
3535
0
                    dst.y1 = fbo->tex->params.h - src.y1;
3536
0
                }
3537
0
                timer_pool_start(p->blit_timer);
3538
0
                p->ra->fns->blit(p->ra, fbo->tex, p->output_tex, &dst, &src);
3539
0
                timer_pool_stop(p->blit_timer);
3540
0
                struct mp_pass_perf perf = timer_pool_measure(p->blit_timer);
3541
0
                pass_record(p, &perf);
3542
0
            }
3543
0
        }
3544
0
    }
3545
3546
0
done:
3547
3548
0
    debug_check_gl(p, "after video rendering");
3549
3550
0
    if (p->osd && (flags & (RENDER_FRAME_SUBS | RENDER_FRAME_OSD))) {
3551
        // If we haven't actually drawn anything so far, then we technically
3552
        // need to consider this the start of a new pass. Let's call it a
3553
        // redraw just because, since it's basically a blank frame anyway
3554
0
        if (!has_frame)
3555
0
            pass_info_reset(p, true);
3556
3557
0
        int osd_flags = p->opts.blend_subs ? OSD_DRAW_OSD_ONLY : 0;
3558
0
        if (!(flags & RENDER_FRAME_SUBS))
3559
0
            osd_flags |= OSD_DRAW_OSD_ONLY;
3560
0
        if (!(flags & RENDER_FRAME_OSD))
3561
0
            osd_flags |= OSD_DRAW_SUB_ONLY;
3562
3563
0
        pass_draw_osd(p, osd_flags, flags, p->osd_pts, p->osd_rect, fbo, true);
3564
0
        debug_check_gl(p, "after OSD rendering");
3565
0
    }
3566
3567
0
    p->broken_frame |= gl_sc_error_state(p->sc);
3568
0
    if (p->broken_frame) {
3569
        // Make the screen solid blue to make it visually clear that an
3570
        // error has occurred
3571
0
        float color[4] = {0.0, 0.05, 0.5, 1.0};
3572
0
        p->ra->fns->clear(p->ra, fbo->tex, color, &target_rc);
3573
0
    }
3574
3575
0
    p->frames_rendered++;
3576
0
    pass_report_performance(p);
3577
0
}
3578
3579
void gl_video_screenshot(struct gl_video *p, struct vo_frame *frame,
3580
                         struct voctrl_screenshot *args)
3581
0
{
3582
0
    if (!p->ra->fns->tex_download)
3583
0
        return;
3584
3585
0
    bool ok = false;
3586
0
    struct mp_image *res = NULL;
3587
0
    struct ra_tex *target = NULL;
3588
0
    struct mp_rect old_src = p->src_rect;
3589
0
    struct mp_rect old_dst = p->dst_rect;
3590
0
    struct mp_osd_res old_osd = p->osd_rect;
3591
0
    struct vo_frame *nframe = vo_frame_ref(frame);
3592
3593
    // Disable interpolation and such.
3594
0
    nframe->redraw = true;
3595
0
    nframe->repeat = false;
3596
0
    nframe->still = true;
3597
0
    nframe->pts = 0;
3598
0
    nframe->duration = -1;
3599
3600
0
    if (!args->scaled) {
3601
0
        int w, h;
3602
0
        mp_image_params_get_dsize(&p->image_params, &w, &h);
3603
0
        if (w < 1 || h < 1)
3604
0
            return;
3605
3606
0
        int src_w = p->image_params.w;
3607
0
        int src_h = p->image_params.h;
3608
0
        struct mp_rect src = {0, 0, src_w, src_h};
3609
0
        struct mp_rect dst = {0, 0, w, h};
3610
3611
0
        if (mp_image_crop_valid(&p->image_params))
3612
0
            src = p->image_params.crop;
3613
3614
0
        if (p->image_params.rotate % 180 == 90) {
3615
0
            MPSWAP(int, w, h);
3616
0
            MPSWAP(int, src_w, src_h);
3617
0
        }
3618
0
        mp_rect_rotate(&src, src_w, src_h, p->image_params.rotate);
3619
0
        mp_rect_rotate(&dst, w, h, p->image_params.rotate);
3620
3621
0
        struct mp_osd_res osd = {
3622
0
            .display_par = 1.0,
3623
0
            .w = mp_rect_w(dst),
3624
0
            .h = mp_rect_h(dst),
3625
0
        };
3626
0
        gl_video_resize(p, &src, &dst, &osd);
3627
0
    }
3628
3629
0
    gl_video_reset_surfaces(p);
3630
3631
0
    struct ra_tex_params params = {
3632
0
        .dimensions = 2,
3633
0
        .downloadable = true,
3634
0
        .w = p->osd_rect.w,
3635
0
        .h = p->osd_rect.h,
3636
0
        .d = 1,
3637
0
        .render_dst = true,
3638
0
    };
3639
3640
0
    params.format = ra_find_unorm_format(p->ra, 1, 4);
3641
0
    int mpfmt = p->has_alpha ? IMGFMT_RGBA : IMGFMT_RGB0;
3642
0
    if (args->high_bit_depth && p->ra_format.component_bits > 8) {
3643
0
        const struct ra_format *fmt = ra_find_unorm_format(p->ra, 2, 4);
3644
0
        if (fmt && fmt->renderable) {
3645
0
            params.format = fmt;
3646
0
            mpfmt = IMGFMT_RGBA64;
3647
0
        }
3648
0
    }
3649
3650
0
    if (!params.format || !params.format->renderable)
3651
0
        goto done;
3652
0
    target = ra_tex_create(p->ra, &params);
3653
0
    if (!target)
3654
0
        goto done;
3655
3656
0
    int flags = 0;
3657
0
    if (args->subs)
3658
0
        flags |= RENDER_FRAME_SUBS;
3659
0
    if (args->osd)
3660
0
        flags |= RENDER_FRAME_OSD;
3661
0
    if (args->scaled)
3662
0
        flags |= RENDER_SCREEN_COLOR;
3663
0
    gl_video_render_frame(p, nframe, &(struct ra_fbo){target}, flags);
3664
3665
0
    res = mp_image_alloc(mpfmt, params.w, params.h);
3666
0
    if (!res)
3667
0
        goto done;
3668
3669
0
    struct ra_tex_download_params download_params = {
3670
0
        .tex = target,
3671
0
        .dst = res->planes[0],
3672
0
        .stride = res->stride[0],
3673
0
    };
3674
0
    if (!p->ra->fns->tex_download(p->ra, &download_params))
3675
0
        goto done;
3676
3677
0
    if (p->broken_frame)
3678
0
        goto done;
3679
3680
0
    ok = true;
3681
0
done:
3682
0
    talloc_free(nframe);
3683
0
    ra_tex_free(p->ra, &target);
3684
0
    gl_video_resize(p, &old_src, &old_dst, &old_osd);
3685
0
    gl_video_reset_surfaces(p);
3686
0
    if (!ok)
3687
0
        TA_FREEP(&res);
3688
0
    args->res = res;
3689
0
}
3690
3691
// Use this color instead of the global option.
3692
void gl_video_set_clear_color(struct gl_video *p, struct m_color c)
3693
0
{
3694
0
    p->force_clear_color = true;
3695
0
    p->clear_color = c;
3696
0
}
3697
3698
void gl_video_set_osd_pts(struct gl_video *p, double pts)
3699
0
{
3700
0
    p->osd_pts = pts;
3701
0
}
3702
3703
bool gl_video_check_osd_change(struct gl_video *p, struct mp_osd_res *res,
3704
                               double pts)
3705
0
{
3706
0
    return p->osd ? mpgl_osd_check_change(p->osd, res, pts) : false;
3707
0
}
3708
3709
void gl_video_resize(struct gl_video *p,
3710
                     struct mp_rect *src, struct mp_rect *dst,
3711
                     struct mp_osd_res *osd)
3712
0
{
3713
0
    if (mp_rect_equals(&p->src_rect, src) &&
3714
0
        mp_rect_equals(&p->dst_rect, dst) &&
3715
0
        osd_res_equals(p->osd_rect, *osd))
3716
0
        return;
3717
3718
0
    p->src_rect = *src;
3719
0
    p->dst_rect = *dst;
3720
0
    p->osd_rect = *osd;
3721
3722
0
    gl_video_reset_surfaces(p);
3723
3724
0
    if (p->osd)
3725
0
        mpgl_osd_resize(p->osd, p->osd_rect, p->image_params.stereo3d);
3726
0
}
3727
3728
static void frame_perf_data(struct pass_info pass[], struct mp_frame_perf *out)
3729
0
{
3730
0
    for (int i = 0; i < VO_PASS_PERF_MAX; i++) {
3731
0
        if (!pass[i].desc.len)
3732
0
            break;
3733
0
        out->perf[out->count] = pass[i].perf;
3734
0
        strncpy(out->desc[out->count], pass[i].desc.start,
3735
0
                sizeof(out->desc[out->count]) - 1);
3736
0
        out->desc[out->count][sizeof(out->desc[out->count]) - 1] = '\0';
3737
0
        out->count++;
3738
0
    }
3739
0
}
3740
3741
void gl_video_perfdata(struct gl_video *p, struct voctrl_performance_data *out)
3742
0
{
3743
0
    *out = (struct voctrl_performance_data){0};
3744
0
    frame_perf_data(p->pass_fresh,  &out->fresh);
3745
0
    frame_perf_data(p->pass_redraw, &out->redraw);
3746
0
}
3747
3748
// Returns false on failure.
3749
static bool pass_upload_image(struct gl_video *p, struct mp_image *mpi, uint64_t id)
3750
0
{
3751
0
    struct video_image *vimg = &p->image;
3752
3753
0
    if (vimg->id == id)
3754
0
        return true;
3755
3756
0
    unref_current_image(p);
3757
3758
0
    mpi = mp_image_new_ref(mpi);
3759
0
    if (!mpi)
3760
0
        goto error;
3761
3762
0
    vimg->mpi = mpi;
3763
0
    vimg->id = id;
3764
0
    p->osd_pts = mpi->pts;
3765
0
    p->frames_uploaded++;
3766
3767
0
    if (p->hwdec_active) {
3768
        // Hardware decoding
3769
3770
0
        if (!p->hwdec_mapper)
3771
0
            goto error;
3772
3773
0
        pass_describe(p, "map frame (hwdec)");
3774
0
        timer_pool_start(p->upload_timer);
3775
0
        bool ok = ra_hwdec_mapper_map(p->hwdec_mapper, vimg->mpi) >= 0;
3776
0
        timer_pool_stop(p->upload_timer);
3777
0
        struct mp_pass_perf perf = timer_pool_measure(p->upload_timer);
3778
0
        pass_record(p, &perf);
3779
3780
0
        vimg->hwdec_mapped = true;
3781
0
        if (ok) {
3782
0
            struct mp_image layout = {0};
3783
0
            mp_image_set_params(&layout, &p->image_params);
3784
0
            struct ra_tex **tex = p->hwdec_mapper->tex;
3785
0
            for (int n = 0; n < p->plane_count; n++) {
3786
0
                vimg->planes[n] = (struct texplane){
3787
0
                    .w = mp_image_plane_w(&layout, n),
3788
0
                    .h = mp_image_plane_h(&layout, n),
3789
0
                    .tex = tex[n],
3790
0
                    .flipped = layout.params.vflip,
3791
0
                };
3792
0
            }
3793
0
        } else {
3794
0
            MP_FATAL(p, "Mapping hardware decoded surface failed.\n");
3795
0
            goto error;
3796
0
        }
3797
0
        return true;
3798
0
    }
3799
3800
    // Software decoding
3801
0
    mp_assert(mpi->num_planes == p->plane_count);
3802
3803
0
    timer_pool_start(p->upload_timer);
3804
3805
0
    if (mpi->params.vflip)
3806
0
        mp_image_vflip(mpi);
3807
3808
0
    for (int n = 0; n < p->plane_count; n++) {
3809
0
        struct texplane *plane = &vimg->planes[n];
3810
0
        if (!plane->tex) {
3811
0
            timer_pool_stop(p->upload_timer);
3812
0
            goto error;
3813
0
        }
3814
3815
0
        struct ra_tex_upload_params params = {
3816
0
            .tex = plane->tex,
3817
0
            .src = mpi->planes[n],
3818
0
            .invalidate = true,
3819
0
            .stride = mpi->stride[n],
3820
0
        };
3821
3822
0
        plane->flipped = params.stride < 0;
3823
0
        if (plane->flipped) {
3824
0
            int h = mp_image_plane_h(mpi, n);
3825
0
            params.src = (char *)params.src + (h - 1) * params.stride;
3826
0
            params.stride = -params.stride;
3827
0
        }
3828
3829
0
        struct dr_buffer *mapped = gl_find_dr_buffer(p, mpi->planes[n]);
3830
0
        if (mapped) {
3831
0
            params.buf = mapped->buf;
3832
0
            params.buf_offset = (uintptr_t)params.src -
3833
0
                                (uintptr_t)mapped->buf->data;
3834
0
            params.src = NULL;
3835
0
        }
3836
3837
0
        if (p->using_dr_path != !!mapped) {
3838
0
            p->using_dr_path = !!mapped;
3839
0
            MP_VERBOSE(p, "DR enabled: %s\n", p->using_dr_path ? "yes" : "no");
3840
0
        }
3841
3842
0
        if (!p->ra->fns->tex_upload(p->ra, &params)) {
3843
0
            timer_pool_stop(p->upload_timer);
3844
0
            goto error;
3845
0
        }
3846
3847
0
        if (mapped && !mapped->mpi)
3848
0
            mapped->mpi = mp_image_new_ref(mpi);
3849
0
    }
3850
0
    timer_pool_stop(p->upload_timer);
3851
3852
0
    bool using_pbo = p->ra->use_pbo || !(p->ra->caps & RA_CAP_DIRECT_UPLOAD);
3853
0
    const char *mode = p->using_dr_path ? "DR" : using_pbo ? "PBO" : "naive";
3854
0
    pass_describe(p, "upload frame (%s)", mode);
3855
0
    struct mp_pass_perf perf = timer_pool_measure(p->upload_timer);
3856
0
    pass_record(p, &perf);
3857
3858
0
    return true;
3859
3860
0
error:
3861
0
    unref_current_image(p);
3862
0
    p->broken_frame = true;
3863
0
    return false;
3864
0
}
3865
3866
static bool test_fbo(struct gl_video *p, const struct ra_format *fmt)
3867
0
{
3868
0
    MP_VERBOSE(p, "Testing FBO format %s\n", fmt->name);
3869
0
    struct ra_tex *tex = NULL;
3870
0
    bool success = ra_tex_resize(p->ra, p->log, &tex, 16, 16, fmt);
3871
0
    ra_tex_free(p->ra, &tex);
3872
0
    return success;
3873
0
}
3874
3875
// Return whether dumb-mode can be used without disabling any features.
3876
// Essentially, vo_gpu with --profile=fast will return true.
3877
static bool check_dumb_mode(struct gl_video *p)
3878
0
{
3879
0
    struct gl_video_opts *o = &p->opts;
3880
0
    if (p->use_integer_conversion)
3881
0
        return false;
3882
0
    if (o->dumb_mode > 0) // requested by user
3883
0
        return true;
3884
0
    if (o->dumb_mode < 0) // disabled by user
3885
0
        return false;
3886
3887
    // otherwise, use auto-detection
3888
0
    if (o->correct_downscaling || o->linear_downscaling ||
3889
0
        o->linear_upscaling || o->sigmoid_upscaling || o->interpolation ||
3890
0
        o->blend_subs || o->deband || o->unsharp)
3891
0
        return false;
3892
    // check remaining scalers (tscale is already implicitly excluded above)
3893
0
    for (int i = 0; i < SCALER_COUNT; i++) {
3894
0
        if (i != SCALER_TSCALE) {
3895
0
            if (o->scaler[i].kernel.function != SCALER_BILINEAR &&
3896
0
                o->scaler[i].kernel.function != SCALER_INHERIT)
3897
0
                return false;
3898
0
        }
3899
0
    }
3900
0
    if (o->user_shaders && o->user_shaders[0])
3901
0
        return false;
3902
0
    return true;
3903
0
}
3904
3905
// Disable features that are not supported with the current OpenGL version.
3906
static void check_gl_features(struct gl_video *p)
3907
0
{
3908
0
    struct ra *ra = p->ra;
3909
0
    bool have_float_tex = !!ra_find_float16_format(ra, 1);
3910
0
    bool have_mglsl = ra->glsl_version >= 130; // modern GLSL
3911
0
    const struct ra_format *rg_tex = ra_find_unorm_format(p->ra, 1, 2);
3912
0
    bool have_texrg = rg_tex && !rg_tex->luminance_alpha;
3913
0
    bool have_compute = ra->caps & RA_CAP_COMPUTE;
3914
0
    bool have_ssbo = ra->caps & RA_CAP_BUF_RW;
3915
0
    bool have_fragcoord = ra->caps & RA_CAP_FRAGCOORD;
3916
3917
0
    const char *auto_fbo_fmts[] = {"rgba16f", "rgba16hf", "rgba16",
3918
0
                                   "rgb10_a2", "rgba8", 0};
3919
0
    const char *user_fbo_fmts[] = {p->opts.fbo_format, 0};
3920
0
    const char **fbo_fmts = user_fbo_fmts[0] && strcmp(user_fbo_fmts[0], "auto")
3921
0
                          ? user_fbo_fmts : auto_fbo_fmts;
3922
0
    bool user_specified_fbo_fmt = fbo_fmts == user_fbo_fmts;
3923
0
    bool fbo_test_result = false;
3924
0
    bool have_fbo = false;
3925
0
    p->fbo_format = NULL;
3926
0
    for (int n = 0; fbo_fmts[n]; n++) {
3927
0
        const char *fmt = fbo_fmts[n];
3928
0
        const struct ra_format *f = ra_find_named_format(p->ra, fmt);
3929
0
        if (!f && user_specified_fbo_fmt)
3930
0
            MP_WARN(p, "FBO format '%s' not found!\n", fmt);
3931
0
        if (f && f->renderable && f->linear_filter &&
3932
0
            (fbo_test_result = test_fbo(p, f))) {
3933
0
            MP_VERBOSE(p, "Using FBO format %s.\n", f->name);
3934
0
            have_fbo = true;
3935
0
            p->fbo_format = f;
3936
0
            break;
3937
0
        }
3938
3939
0
        if (user_specified_fbo_fmt) {
3940
0
            MP_WARN(p, "User-specified FBO format '%s' failed to initialize! "
3941
0
                       "(exists=%d, renderable=%d, linear_filter=%d, "
3942
0
                       "fbo_test_result=%d)\n",
3943
0
                    fmt, !!f, f ? f->renderable : 0,  f ? f->linear_filter : 0,
3944
0
                    fbo_test_result);
3945
0
        }
3946
0
    }
3947
3948
0
    if (!have_fragcoord && p->opts.dither_depth >= 0 &&
3949
0
        p->opts.dither_algo != DITHER_NONE)
3950
0
    {
3951
0
        p->opts.dither_algo = DITHER_NONE;
3952
0
        MP_WARN(p, "Disabling dithering (no gl_FragCoord).\n");
3953
0
    }
3954
0
    if (!have_fragcoord && p->opts.background == BACKGROUND_TILES) {
3955
0
        p->opts.background = BACKGROUND_COLOR;
3956
0
        MP_VERBOSE(p, "Disabling alpha checkerboard (no gl_FragCoord).\n");
3957
0
    }
3958
0
    if (!have_fbo && have_compute) {
3959
0
        have_compute = false;
3960
0
        MP_WARN(p, "Force-disabling compute shaders as an FBO format was not "
3961
0
                   "available! See your FBO format configuration!\n");
3962
0
    }
3963
3964
0
    if (have_compute && have_fbo && !p->fbo_format->storable) {
3965
0
        have_compute = false;
3966
0
        MP_WARN(p, "Force-disabling compute shaders as the chosen FBO format "
3967
0
                "is not storable! See your FBO format configuration!\n");
3968
0
    }
3969
3970
0
    if (!have_compute && p->opts.dither_algo == DITHER_ERROR_DIFFUSION) {
3971
0
        MP_WARN(p, "Disabling error diffusion dithering because compute shader "
3972
0
                   "was not supported. Fallback to dither=fruit instead.\n");
3973
0
        p->opts.dither_algo = DITHER_FRUIT;
3974
0
    }
3975
3976
0
    bool have_compute_peak = have_compute && have_ssbo;
3977
0
    if (!have_compute_peak && p->opts.tone_map.compute_peak >= 0) {
3978
0
        int msgl = p->opts.tone_map.compute_peak == 1 ? MSGL_WARN : MSGL_V;
3979
0
        MP_MSG(p, msgl, "Disabling HDR peak computation (one or more of the "
3980
0
                        "following is not supported: compute shaders=%d, "
3981
0
                        "SSBO=%d).\n", have_compute, have_ssbo);
3982
0
        p->opts.tone_map.compute_peak = -1;
3983
0
    }
3984
3985
0
    p->forced_dumb_mode = p->opts.dumb_mode > 0 || !have_fbo || !have_texrg;
3986
0
    bool voluntarily_dumb = check_dumb_mode(p);
3987
0
    if (p->forced_dumb_mode || voluntarily_dumb) {
3988
0
        if (voluntarily_dumb) {
3989
0
            MP_VERBOSE(p, "No advanced processing required. Enabling dumb mode.\n");
3990
0
        } else if (p->opts.dumb_mode <= 0) {
3991
0
            MP_WARN(p, "High bit depth FBOs unsupported. Enabling dumb mode.\n"
3992
0
                       "Most extended features will be disabled.\n");
3993
0
        }
3994
0
        p->dumb_mode = true;
3995
        // Most things don't work, so whitelist all options that still work.
3996
0
        p->opts = (struct gl_video_opts){
3997
0
            .scaler = {
3998
0
                [SCALER_SCALE] = {
3999
0
                    {SCALER_BILINEAR, .params = {NAN, NAN}, .functions = scale_filters},
4000
0
                    {WINDOW_PREFERRED, .params = {NAN, NAN}, .functions = filter_windows},
4001
0
                },
4002
0
                [SCALER_DSCALE] = {
4003
0
                    {SCALER_BILINEAR, .params = {NAN, NAN}, .functions = cdscale_filters},
4004
0
                    {WINDOW_PREFERRED, .params = {NAN, NAN}, .functions = filter_windows},
4005
0
                },
4006
0
                [SCALER_CSCALE] = {
4007
0
                    {SCALER_BILINEAR, .params = {NAN, NAN}, .functions = cdscale_filters},
4008
0
                    {WINDOW_PREFERRED, .params = {NAN, NAN}, .functions = filter_windows},
4009
0
                },
4010
0
                [SCALER_TSCALE] = {
4011
0
                    {SCALER_BILINEAR, .params = {NAN, NAN}, .functions = tscale_filters},
4012
0
                    {WINDOW_PREFERRED, .params = {NAN, NAN}, .functions = filter_windows},
4013
0
                },
4014
0
            },
4015
0
            .gamma = p->opts.gamma,
4016
0
            .gamma_auto = p->opts.gamma_auto,
4017
0
            .pbo = p->opts.pbo,
4018
0
            .fbo_format = p->opts.fbo_format,
4019
0
            .background = p->opts.background,
4020
0
            .use_rectangle = p->opts.use_rectangle,
4021
0
            .background_color = p->opts.background_color,
4022
0
            .background_tile_color[0] = p->opts.background_tile_color[0],
4023
0
            .background_tile_color[1] = p->opts.background_tile_color[1],
4024
0
            .background_tile_size = p->opts.background_tile_size,
4025
0
            .dither_algo = p->opts.dither_algo,
4026
0
            .dither_depth = p->opts.dither_depth,
4027
0
            .dither_size = p->opts.dither_size,
4028
0
            .error_diffusion = p->opts.error_diffusion,
4029
0
            .temporal_dither = p->opts.temporal_dither,
4030
0
            .temporal_dither_period = p->opts.temporal_dither_period,
4031
0
            .tex_pad_x = p->opts.tex_pad_x,
4032
0
            .tex_pad_y = p->opts.tex_pad_y,
4033
0
            .tone_map = p->opts.tone_map,
4034
0
            .early_flush = p->opts.early_flush,
4035
0
            .icc_opts = p->opts.icc_opts,
4036
0
            .hwdec_interop = p->opts.hwdec_interop,
4037
0
            .target_trc = p->opts.target_trc,
4038
0
            .target_prim = p->opts.target_prim,
4039
0
            .target_peak = p->opts.target_peak,
4040
0
        };
4041
0
        if (!have_fbo)
4042
0
            p->use_lut_3d = false;
4043
0
        return;
4044
0
    }
4045
0
    p->dumb_mode = false;
4046
4047
    // Normally, we want to disable them by default if FBOs are unavailable,
4048
    // because they will be slow (not critically slow, but still slower).
4049
    // Without FP textures, we must always disable them.
4050
    // I don't know if luminance alpha float textures exist, so disregard them.
4051
0
    for (int n = 0; n < SCALER_COUNT; n++) {
4052
0
        const struct filter_kernel *kernel =
4053
0
            mp_find_filter_kernel(p->opts.scaler[n].kernel.function);
4054
0
        if (kernel) {
4055
0
            char *reason = NULL;
4056
0
            if (!have_float_tex)
4057
0
                reason = "(float tex. missing)";
4058
0
            if (!have_mglsl)
4059
0
                reason = "(GLSL version too old)";
4060
0
            if (reason) {
4061
0
                MP_WARN(p, "Disabling scaler #%d %s %s.\n", n,
4062
0
                        m_opt_choice_str(p->opts.scaler[n].kernel.functions,
4063
0
                                         p->opts.scaler[n].kernel.function),
4064
0
                        reason);
4065
4066
                // p->opts is a copy => we can just mess with it.
4067
0
                p->opts.scaler[n].kernel.function = SCALER_BILINEAR;
4068
0
                if (n == SCALER_TSCALE)
4069
0
                    p->opts.interpolation = false;
4070
0
            }
4071
0
        }
4072
0
    }
4073
4074
0
    int use_cms = p->opts.target_prim != PL_COLOR_PRIM_UNKNOWN ||
4075
0
                  p->opts.target_trc != PL_COLOR_TRC_UNKNOWN || p->use_lut_3d;
4076
4077
    // mix() is needed for some gamma functions
4078
0
    if (!have_mglsl && (p->opts.linear_downscaling ||
4079
0
                        p->opts.linear_upscaling || p->opts.sigmoid_upscaling))
4080
0
    {
4081
0
        p->opts.linear_downscaling = false;
4082
0
        p->opts.linear_upscaling = false;
4083
0
        p->opts.sigmoid_upscaling = false;
4084
0
        MP_WARN(p, "Disabling linear/sigmoid scaling (GLSL version too old).\n");
4085
0
    }
4086
0
    if (!have_mglsl && use_cms) {
4087
0
        p->opts.target_prim = PL_COLOR_PRIM_UNKNOWN;
4088
0
        p->opts.target_trc = PL_COLOR_TRC_UNKNOWN;
4089
0
        p->use_lut_3d = false;
4090
0
        MP_WARN(p, "Disabling color management (GLSL version too old).\n");
4091
0
    }
4092
0
    if (!have_mglsl && p->opts.deband) {
4093
0
        p->opts.deband = false;
4094
0
        MP_WARN(p, "Disabling debanding (GLSL version too old).\n");
4095
0
    }
4096
0
}
4097
4098
static void init_gl(struct gl_video *p)
4099
0
{
4100
0
    debug_check_gl(p, "before init_gl");
4101
4102
0
    p->upload_timer = timer_pool_create(p->ra);
4103
0
    p->blit_timer = timer_pool_create(p->ra);
4104
0
    p->osd_timer = timer_pool_create(p->ra);
4105
4106
0
    debug_check_gl(p, "after init_gl");
4107
4108
0
    ra_dump_tex_formats(p->ra, MSGL_DEBUG);
4109
0
    ra_dump_img_formats(p->ra, MSGL_DEBUG);
4110
0
}
4111
4112
void gl_video_uninit(struct gl_video *p)
4113
10.5k
{
4114
10.5k
    if (!p)
4115
10.5k
        return;
4116
4117
0
    uninit_video(p);
4118
0
    ra_hwdec_ctx_uninit(&p->hwdec_ctx);
4119
0
    gl_sc_destroy(p->sc);
4120
4121
0
    ra_tex_free(p->ra, &p->lut_3d_texture);
4122
0
    ra_buf_free(p->ra, &p->hdr_peak_ssbo);
4123
4124
0
    timer_pool_destroy(p->upload_timer);
4125
0
    timer_pool_destroy(p->blit_timer);
4126
0
    timer_pool_destroy(p->osd_timer);
4127
4128
0
    for (int i = 0; i < VO_PASS_PERF_MAX; i++) {
4129
0
        talloc_free(p->pass_fresh[i].desc.start);
4130
0
        talloc_free(p->pass_redraw[i].desc.start);
4131
0
    }
4132
4133
0
    mpgl_osd_destroy(p->osd);
4134
4135
    // Forcibly destroy possibly remaining image references. This should also
4136
    // cause gl_video_dr_free_buffer() to be called for the remaining buffers.
4137
0
    gc_pending_dr_fences(p, true);
4138
4139
    // Should all have been unreffed already.
4140
0
    mp_assert(!p->num_dr_buffers);
4141
4142
0
    talloc_free(p);
4143
0
}
4144
4145
void gl_video_reset(struct gl_video *p)
4146
0
{
4147
0
    gl_video_reset_surfaces(p);
4148
0
}
4149
4150
bool gl_video_showing_interpolated_frame(struct gl_video *p)
4151
0
{
4152
0
    return p->is_interpolated;
4153
0
}
4154
4155
static bool is_imgfmt_desc_supported(struct gl_video *p,
4156
                                     const struct ra_imgfmt_desc *desc)
4157
0
{
4158
0
    if (!desc->num_planes)
4159
0
        return false;
4160
4161
0
    if (desc->planes[0]->ctype == RA_CTYPE_UINT && p->forced_dumb_mode)
4162
0
        return false;
4163
4164
0
    return true;
4165
0
}
4166
4167
bool gl_video_check_format(struct gl_video *p, int mp_format)
4168
0
{
4169
0
    struct ra_imgfmt_desc desc;
4170
0
    if (ra_get_imgfmt_desc(p->ra, mp_format, &desc) &&
4171
0
        is_imgfmt_desc_supported(p, &desc))
4172
0
        return true;
4173
0
    if (ra_hwdec_get(&p->hwdec_ctx, mp_format))
4174
0
        return true;
4175
0
    return false;
4176
0
}
4177
4178
void gl_video_config(struct gl_video *p, struct mp_image_params *params)
4179
0
{
4180
0
    unmap_overlay(p);
4181
0
    unref_current_image(p);
4182
4183
0
    if (!mp_image_params_static_equal(&p->real_image_params, params)) {
4184
0
        uninit_video(p);
4185
0
        p->real_image_params = *params;
4186
0
        p->image_params = *params;
4187
0
        if (params->imgfmt)
4188
0
            init_video(p);
4189
0
    }
4190
4191
0
    gl_video_reset_surfaces(p);
4192
0
}
4193
4194
void gl_video_set_osd_source(struct gl_video *p, struct osd_state *osd)
4195
0
{
4196
0
    mpgl_osd_destroy(p->osd);
4197
0
    p->osd = NULL;
4198
0
    p->osd_state = osd;
4199
0
    reinit_osd(p);
4200
0
}
4201
4202
struct gl_video *gl_video_init(struct ra *ra, struct mp_log *log,
4203
                               struct mpv_global *g)
4204
0
{
4205
0
    struct gl_video *p = talloc_ptrtype(NULL, p);
4206
0
    *p = (struct gl_video) {
4207
0
        .ra = ra,
4208
0
        .global = g,
4209
0
        .log = log,
4210
0
        .sc = gl_sc_create(ra, g, log),
4211
0
        .video_eq = mp_csp_equalizer_create(p, g),
4212
0
        .opts_cache = m_config_cache_alloc(p, g, &gl_video_conf),
4213
0
    };
4214
    // make sure this variable is initialized to *something*
4215
0
    p->pass = p->pass_fresh;
4216
0
    struct gl_video_opts *opts = p->opts_cache->opts;
4217
0
    p->cms = gl_lcms_init(p, log, g, opts->icc_opts),
4218
0
    p->opts = *opts;
4219
0
    for (int n = 0; n < SCALER_COUNT; n++)
4220
0
        p->scaler[n] = (struct scaler){.index = n};
4221
    // our VAO always has the vec2 position as the first element
4222
0
    MP_TARRAY_APPEND(p, p->vao, p->vao_len, (struct ra_renderpass_input) {
4223
0
        .name = "position",
4224
0
        .type = RA_VARTYPE_FLOAT,
4225
0
        .dim_v = 2,
4226
0
        .dim_m = 1,
4227
0
        .offset = 0,
4228
0
    });
4229
0
    init_gl(p);
4230
0
    reinit_from_options(p);
4231
0
    return p;
4232
0
}
4233
4234
static void gl_video_update_options(struct gl_video *p)
4235
0
{
4236
0
    if (m_config_cache_update(p->opts_cache)) {
4237
0
        gl_lcms_update_options(p->cms);
4238
0
        reinit_from_options(p);
4239
0
    }
4240
4241
0
    if (mp_csp_equalizer_state_changed(p->video_eq))
4242
0
        p->output_tex_valid = false;
4243
0
}
4244
4245
static void reinit_from_options(struct gl_video *p)
4246
0
{
4247
0
    p->use_lut_3d = gl_lcms_has_profile(p->cms);
4248
4249
    // Copy the option fields, so that check_gl_features() can mutate them.
4250
    // This works only for the fields themselves of course, not for any memory
4251
    // referenced by them.
4252
0
    p->opts = *(struct gl_video_opts *)p->opts_cache->opts;
4253
4254
0
    if (!p->force_clear_color)
4255
0
        p->clear_color = p->opts.background_color;
4256
4257
0
    check_gl_features(p);
4258
0
    uninit_rendering(p);
4259
0
    if (p->opts.shader_cache)
4260
0
        gl_sc_set_cache_dir(p->sc, p->opts.shader_cache_dir);
4261
0
    p->ra->use_pbo = p->opts.pbo;
4262
0
    gl_video_setup_hooks(p);
4263
0
    reinit_osd(p);
4264
4265
0
    struct mp_vo_opts *vo_opts = mp_get_config_group(p, p->global, &vo_sub_opts);
4266
0
    if (p->opts.interpolation && !vo_opts->video_sync && !p->dsi_warned) {
4267
0
        MP_WARN(p, "Interpolation now requires enabling display-sync mode.\n"
4268
0
                   "E.g.: --video-sync=display-resample\n");
4269
0
        p->dsi_warned = true;
4270
0
    }
4271
0
    talloc_free(vo_opts);
4272
4273
0
    if (p->opts.correct_downscaling && !p->correct_downscaling_warned) {
4274
0
        if (p->opts.scaler[SCALER_DSCALE].kernel.function == SCALER_BILINEAR ||
4275
0
            (p->opts.scaler[SCALER_DSCALE].kernel.function == SCALER_INHERIT &&
4276
0
             p->opts.scaler[SCALER_SCALE].kernel.function == SCALER_BILINEAR)) {
4277
0
            MP_WARN(p, "correct-downscaling requires non-bilinear scaler.\n");
4278
0
            p->correct_downscaling_warned = true;
4279
0
        }
4280
0
    }
4281
0
}
4282
4283
void gl_video_configure_queue(struct gl_video *p, struct vo *vo)
4284
0
{
4285
0
    gl_video_update_options(p);
4286
4287
0
    int queue_size = 1;
4288
4289
    // Figure out an adequate size for the interpolation queue. The larger
4290
    // the radius, the earlier we need to queue frames.
4291
0
    if (p->opts.interpolation) {
4292
0
        const struct filter_kernel *kernel =
4293
0
            mp_find_filter_kernel(p->opts.scaler[SCALER_TSCALE].kernel.function);
4294
0
        if (kernel) {
4295
            // filter_scale wouldn't be correctly initialized were we to use it here.
4296
            // This is fine since we're always upsampling, but beware if downsampling
4297
            // is added!
4298
0
            double radius = kernel->f.radius;
4299
0
            radius = radius > 0 ? radius : p->opts.scaler[SCALER_TSCALE].radius;
4300
0
            queue_size += 1 + ceil(radius);
4301
0
        } else {
4302
            // Oversample/linear case
4303
0
            queue_size += 2;
4304
0
        }
4305
0
    }
4306
4307
0
    vo_set_queue_params(vo, 0, queue_size);
4308
0
}
4309
4310
static int validate_error_diffusion_opt(struct mp_log *log, const m_option_t *opt,
4311
                                        struct bstr name, const char **value)
4312
1.43k
{
4313
1.43k
    struct bstr param = bstr0(*value);
4314
1.43k
    char s[32] = {0};
4315
1.43k
    int r = 1;
4316
1.43k
    if (bstr_equals0(param, "help")) {
4317
52
        r = M_OPT_EXIT;
4318
1.38k
    } else {
4319
1.38k
        snprintf(s, sizeof(s), "%.*s", BSTR_P(param));
4320
1.38k
        const struct error_diffusion_kernel *k = mp_find_error_diffusion_kernel(s);
4321
1.38k
        if (!k)
4322
321
            r = M_OPT_INVALID;
4323
1.38k
    }
4324
1.43k
    if (r < 1) {
4325
373
        mp_info(log, "Available error diffusion kernels:\n");
4326
4.10k
        for (int n = 0; mp_error_diffusion_kernels[n].name; n++)
4327
3.73k
            mp_info(log, "    %s\n", mp_error_diffusion_kernels[n].name);
4328
373
        if (s[0])
4329
254
            mp_fatal(log, "No error diffusion kernel named '%s' found!\n", s);
4330
373
    }
4331
1.43k
    return r;
4332
1.43k
}
4333
4334
void gl_video_set_ambient_lux(struct gl_video *p, double lux)
4335
0
{
4336
0
    if (p->opts.gamma_auto) {
4337
0
        p->opts.gamma = gl_video_scale_ambient_lux(16.0, 256.0, 1.0, 1.2, lux);
4338
0
        MP_TRACE(p, "ambient light changed: %f lux (gamma: %f)\n", lux,
4339
0
                 p->opts.gamma);
4340
0
    }
4341
0
}
4342
4343
static void *gl_video_dr_alloc_buffer(struct gl_video *p, size_t size)
4344
0
{
4345
0
    struct ra_buf_params params = {
4346
0
        .type = RA_BUF_TYPE_TEX_UPLOAD,
4347
0
        .host_mapped = true,
4348
0
        .size = size,
4349
0
    };
4350
4351
0
    struct ra_buf *buf = ra_buf_create(p->ra, &params);
4352
0
    if (!buf)
4353
0
        return NULL;
4354
4355
0
    MP_TARRAY_GROW(p, p->dr_buffers, p->num_dr_buffers);
4356
0
    p->dr_buffers[p->num_dr_buffers++] = (struct dr_buffer){ .buf = buf };
4357
4358
0
    return buf->data;
4359
0
}
4360
4361
static void gl_video_dr_free_buffer(void *opaque, uint8_t *data)
4362
0
{
4363
0
    struct gl_video *p = opaque;
4364
4365
0
    for (int n = 0; n < p->num_dr_buffers; n++) {
4366
0
        struct dr_buffer *buffer = &p->dr_buffers[n];
4367
0
        if (buffer->buf->data == data) {
4368
0
            mp_assert(!buffer->mpi); // can't be freed while it has a ref
4369
0
            ra_buf_free(p->ra, &buffer->buf);
4370
0
            MP_TARRAY_REMOVE_AT(p->dr_buffers, p->num_dr_buffers, n);
4371
0
            return;
4372
0
        }
4373
0
    }
4374
    // not found - must not happen
4375
0
    MP_ASSERT_UNREACHABLE();
4376
0
}
4377
4378
struct mp_image *gl_video_get_image(struct gl_video *p, int imgfmt, int w, int h,
4379
                                    int stride_align, int flags)
4380
0
{
4381
0
    if (flags & VO_DR_FLAG_HOST_CACHED) {
4382
0
        if (p->ra->caps & RA_CAP_SLOW_DR) {
4383
0
            MP_VERBOSE(p, "DR path suspected slow/uncached, disabling.\n");
4384
0
            return NULL;
4385
0
        }
4386
0
    }
4387
4388
0
    if (!gl_video_check_format(p, imgfmt))
4389
0
        return NULL;
4390
4391
0
    int size = mp_image_get_alloc_size(imgfmt, w, h, stride_align);
4392
0
    if (size < 0)
4393
0
        return NULL;
4394
4395
0
    int alloc_size = size + stride_align;
4396
0
    void *ptr = gl_video_dr_alloc_buffer(p, alloc_size);
4397
0
    if (!ptr)
4398
0
        return NULL;
4399
4400
    // (we expect vo.c to proxy the free callback, so it happens in the same
4401
    // thread it was allocated in, removing the need for synchronization)
4402
0
    struct mp_image *res = mp_image_from_buffer(imgfmt, w, h, stride_align,
4403
0
                                                ptr, alloc_size, p,
4404
0
                                                gl_video_dr_free_buffer);
4405
0
    if (!res)
4406
0
        gl_video_dr_free_buffer(p, ptr);
4407
0
    return res;
4408
0
}
4409
4410
void gl_video_init_hwdecs(struct gl_video *p, struct ra_ctx *ra_ctx,
4411
                          struct mp_hwdec_devices *devs,
4412
                          bool load_all_by_default)
4413
0
{
4414
0
    mp_assert(!p->hwdec_ctx.ra_ctx);
4415
0
    p->hwdec_ctx = (struct ra_hwdec_ctx) {
4416
0
        .log = p->log,
4417
0
        .global = p->global,
4418
0
        .ra_ctx = ra_ctx,
4419
0
    };
4420
4421
0
    ra_hwdec_ctx_init(&p->hwdec_ctx, devs, p->opts.hwdec_interop, load_all_by_default);
4422
0
}
4423
4424
void gl_video_load_hwdecs_for_img_fmt(struct gl_video *p, struct mp_hwdec_devices *devs,
4425
                                      struct hwdec_imgfmt_request *params)
4426
0
{
4427
0
    mp_assert(p->hwdec_ctx.ra_ctx);
4428
0
    ra_hwdec_ctx_load_fmt(&p->hwdec_ctx, devs, params);
4429
0
}
4430
4431
struct mp_image_params *gl_video_get_target_params_ptr(struct gl_video *p)
4432
0
{
4433
0
    return &p->target_params;
4434
0
}