Coverage Report

Created: 2026-01-25 07:18

/src/ffmpeg/libswscale/x86/swscale.c
Line | Count | Source
1
/*
2
 * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>
3
 *
4
 * This file is part of FFmpeg.
5
 *
6
 * FFmpeg is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2.1 of the License, or (at your option) any later version.
10
 *
11
 * FFmpeg is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with FFmpeg; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
 */
20
21
#include <inttypes.h>
22
#include "config.h"
23
#include "libswscale/swscale.h"
24
#include "libswscale/swscale_internal.h"
25
#include "libavutil/attributes.h"
26
#include "libavutil/avassert.h"
27
#include "libavutil/intreadwrite.h"
28
#include "libavutil/x86/cpu.h"
29
#include "libavutil/cpu.h"
30
#include "libavutil/mem_internal.h"
31
#include "libavutil/pixdesc.h"
32
33
const DECLARE_ALIGNED(8, uint64_t, ff_dither4)[2] = {
34
    0x0103010301030103LL,
35
    0x0200020002000200LL,};
36
37
const DECLARE_ALIGNED(8, uint64_t, ff_dither8)[2] = {
38
    0x0602060206020602LL,
39
    0x0004000400040004LL,};
40
41
#if HAVE_INLINE_ASM
42
43
DECLARE_ASM_CONST(8, uint64_t, bF8)=       0xF8F8F8F8F8F8F8F8LL;
44
DECLARE_ASM_CONST(8, uint64_t, bFC)=       0xFCFCFCFCFCFCFCFCLL;
45
46
DECLARE_ASM_CONST(8, uint64_t, M24A) = 0x00FF0000FF0000FFLL;
47
DECLARE_ASM_CONST(8, uint64_t, M24B) = 0xFF0000FF0000FF00LL;
48
DECLARE_ASM_CONST(8, uint64_t, M24C) = 0x0000FF0000FF0000LL;
49
50
// MMXEXT versions
51
#if HAVE_MMXEXT_INLINE
52
#undef RENAME
53
#undef COMPILE_TEMPLATE_MMXEXT
54
#define COMPILE_TEMPLATE_MMXEXT 1
55
0
#define RENAME(a) a ## _mmxext
56
#include "swscale_template.c"
57
#endif
58
59
void ff_updateMMXDitherTables(SwsInternal *c, int dstY)
60
0
{
61
0
    const int dstH= c->opts.dst_h;
62
0
    const int flags= c->opts.flags;
63
64
0
    SwsPlane *lumPlane = &c->slice[c->numSlice-2].plane[0];
65
0
    SwsPlane *chrUPlane = &c->slice[c->numSlice-2].plane[1];
66
0
    SwsPlane *alpPlane = &c->slice[c->numSlice-2].plane[3];
67
68
0
    int hasAlpha = c->needAlpha;
69
0
    int32_t *vLumFilterPos= c->vLumFilterPos;
70
0
    int32_t *vChrFilterPos= c->vChrFilterPos;
71
0
    int16_t *vLumFilter= c->vLumFilter;
72
0
    int16_t *vChrFilter= c->vChrFilter;
73
0
    int32_t *lumMmxFilter= c->lumMmxFilter;
74
0
    int32_t *chrMmxFilter= c->chrMmxFilter;
75
0
    av_unused int32_t *alpMmxFilter= c->alpMmxFilter;
76
0
    const int vLumFilterSize= c->vLumFilterSize;
77
0
    const int vChrFilterSize= c->vChrFilterSize;
78
0
    const int chrDstY= dstY>>c->chrDstVSubSample;
79
0
    const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
80
0
    const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
81
82
0
    c->blueDither= ff_dither8[dstY&1];
83
0
    if (c->opts.dst_format == AV_PIX_FMT_RGB555 || c->opts.dst_format == AV_PIX_FMT_BGR555)
84
0
        c->greenDither= ff_dither8[dstY&1];
85
0
    else
86
0
        c->greenDither= ff_dither4[dstY&1];
87
0
    c->redDither= ff_dither8[(dstY+1)&1];
88
0
    if (dstY < dstH - 2) {
89
0
        const int16_t **lumSrcPtr  = (const int16_t **)(void*) lumPlane->line + firstLumSrcY - lumPlane->sliceY;
90
0
        const int16_t **chrUSrcPtr = (const int16_t **)(void*) chrUPlane->line + firstChrSrcY - chrUPlane->sliceY;
91
0
        const int16_t **alpSrcPtr  = (CONFIG_SWSCALE_ALPHA && hasAlpha) ? (const int16_t **)(void*) alpPlane->line + firstLumSrcY - alpPlane->sliceY : NULL;
92
93
0
        int i;
94
0
        if (firstLumSrcY < 0 || firstLumSrcY + vLumFilterSize > c->opts.src_h) {
95
0
            const int16_t **tmpY = (const int16_t **) lumPlane->tmp;
96
97
0
            int neg = -firstLumSrcY, i, end = FFMIN(c->opts.src_h - firstLumSrcY, vLumFilterSize);
98
0
            for (i = 0; i < neg;            i++)
99
0
                tmpY[i] = lumSrcPtr[neg];
100
0
            for (     ; i < end;            i++)
101
0
                tmpY[i] = lumSrcPtr[i];
102
0
            for (     ; i < vLumFilterSize; i++)
103
0
                tmpY[i] = tmpY[i-1];
104
0
            lumSrcPtr = tmpY;
105
106
0
            if (alpSrcPtr) {
107
0
                const int16_t **tmpA = (const int16_t **) alpPlane->tmp;
108
0
                for (i = 0; i < neg;            i++)
109
0
                    tmpA[i] = alpSrcPtr[neg];
110
0
                for (     ; i < end;            i++)
111
0
                    tmpA[i] = alpSrcPtr[i];
112
0
                for (     ; i < vLumFilterSize; i++)
113
0
                    tmpA[i] = tmpA[i - 1];
114
0
                alpSrcPtr = tmpA;
115
0
            }
116
0
        }
117
0
        if (firstChrSrcY < 0 || firstChrSrcY + vChrFilterSize > c->chrSrcH) {
118
0
            const int16_t **tmpU = (const int16_t **) chrUPlane->tmp;
119
0
            int neg = -firstChrSrcY, i, end = FFMIN(c->chrSrcH - firstChrSrcY, vChrFilterSize);
120
0
            for (i = 0; i < neg;            i++) {
121
0
                tmpU[i] = chrUSrcPtr[neg];
122
0
            }
123
0
            for (     ; i < end;            i++) {
124
0
                tmpU[i] = chrUSrcPtr[i];
125
0
            }
126
0
            for (     ; i < vChrFilterSize; i++) {
127
0
                tmpU[i] = tmpU[i - 1];
128
0
            }
129
0
            chrUSrcPtr = tmpU;
130
0
        }
131
132
0
        if (flags & SWS_ACCURATE_RND) {
133
0
            int s= APCK_SIZE / 8;
134
0
            for (i=0; i<vLumFilterSize; i+=2) {
135
0
                *(const void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
136
0
                *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
137
0
                lumMmxFilter[s*i+APCK_COEF/4  ]=
138
0
                lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
139
0
                    + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1] * (1 << 16) : 0);
140
0
                if (CONFIG_SWSCALE_ALPHA && hasAlpha) {
141
0
                    *(const void**)&alpMmxFilter[s*i              ]= alpSrcPtr[i  ];
142
0
                    *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4  ]= alpSrcPtr[i+(vLumFilterSize>1)];
143
0
                    alpMmxFilter[s*i+APCK_COEF/4  ]=
144
0
                    alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4  ];
145
0
                }
146
0
            }
147
0
            for (i=0; i<vChrFilterSize; i+=2) {
148
0
                *(const void**)&chrMmxFilter[s*i              ]= chrUSrcPtr[i  ];
149
0
                *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrUSrcPtr[i+(vChrFilterSize>1)];
150
0
                chrMmxFilter[s*i+APCK_COEF/4  ]=
151
0
                chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
152
0
                    + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1] * (1 << 16) : 0);
153
0
            }
154
0
        } else {
155
0
            for (i=0; i<vLumFilterSize; i++) {
156
0
                *(const void**)&lumMmxFilter[4*i+0]= lumSrcPtr[i];
157
0
                lumMmxFilter[4*i+2]=
158
0
                lumMmxFilter[4*i+3]=
159
0
                ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001U;
160
0
                if (CONFIG_SWSCALE_ALPHA && hasAlpha) {
161
0
                    *(const void**)&alpMmxFilter[4*i+0]= alpSrcPtr[i];
162
0
                    alpMmxFilter[4*i+2]=
163
0
                    alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
164
0
                }
165
0
            }
166
0
            for (i=0; i<vChrFilterSize; i++) {
167
0
                *(const void**)&chrMmxFilter[4*i+0]= chrUSrcPtr[i];
168
0
                chrMmxFilter[4*i+2]=
169
0
                chrMmxFilter[4*i+3]=
170
0
                ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001U;
171
0
            }
172
0
        }
173
0
    }
174
0
}
175
#endif /* HAVE_INLINE_ASM */
176
177
#define YUV2YUVX_FUNC_MMX(opt, step)  \
178
void ff_yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, int srcOffset, \
179
                           uint8_t *dest, int dstW,  \
180
                           const uint8_t *dither, int offset); \
181
static void yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, \
182
                           const int16_t **src, uint8_t *dest, int dstW, \
183
0
                           const uint8_t *dither, int offset) \
184
0
{ \
185
0
    if(dstW > 0) \
186
0
        ff_yuv2yuvX_ ##opt(filter, filterSize - 1, 0, dest - offset, dstW + offset, dither, offset); \
187
0
    return; \
188
0
}
189
190
#define YUV2YUVX_FUNC(opt, step)  \
191
void ff_yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, int srcOffset, \
192
                           uint8_t *dest, int dstW,  \
193
                           const uint8_t *dither, int offset); \
194
static void yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, \
195
                           const int16_t **src, uint8_t *dest, int dstW, \
196
0
                           const uint8_t *dither, int offset) \
197
0
{ \
198
0
    int remainder = (dstW % step); \
199
0
    int pixelsProcessed = dstW - remainder; \
200
0
    if(((uintptr_t)dest) & 15){ \
201
0
        yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset); \
202
0
        return; \
203
0
    } \
204
0
    if(pixelsProcessed > 0) \
205
0
        ff_yuv2yuvX_ ##opt(filter, filterSize - 1, 0, dest - offset, pixelsProcessed + offset, dither, offset); \
206
0
    if(remainder > 0){ \
207
0
      ff_yuv2yuvX_mmxext(filter, filterSize - 1, pixelsProcessed, dest - offset, pixelsProcessed + remainder + offset, dither, offset); \
208
0
    } \
209
0
    return; \
210
0
}
Unexecuted instantiation: swscale.c:yuv2yuvX_sse3
Unexecuted instantiation: swscale.c:yuv2yuvX_avx2
211
212
#if HAVE_MMXEXT_EXTERNAL
213
YUV2YUVX_FUNC_MMX(mmxext, 16)
214
#endif
215
#if HAVE_SSE3_EXTERNAL
216
YUV2YUVX_FUNC(sse3, 32)
217
#endif
218
#if HAVE_AVX2_EXTERNAL
219
YUV2YUVX_FUNC(avx2, 64)
220
#endif
221
222
#define SCALE_FUNC(filter_n, from_bpc, to_bpc, opt) \
223
void ff_hscale ## from_bpc ## to ## to_bpc ## _ ## filter_n ## _ ## opt( \
224
                                                SwsInternal *c, int16_t *data, \
225
                                                int dstW, const uint8_t *src, \
226
                                                const int16_t *filter, \
227
                                                const int32_t *filterPos, int filterSize)
228
229
#define SCALE_FUNCS(filter_n, opt) \
230
    SCALE_FUNC(filter_n,  8, 15, opt); \
231
    SCALE_FUNC(filter_n,  9, 15, opt); \
232
    SCALE_FUNC(filter_n, 10, 15, opt); \
233
    SCALE_FUNC(filter_n, 12, 15, opt); \
234
    SCALE_FUNC(filter_n, 14, 15, opt); \
235
    SCALE_FUNC(filter_n, 16, 15, opt); \
236
    SCALE_FUNC(filter_n,  8, 19, opt); \
237
    SCALE_FUNC(filter_n,  9, 19, opt); \
238
    SCALE_FUNC(filter_n, 10, 19, opt); \
239
    SCALE_FUNC(filter_n, 12, 19, opt); \
240
    SCALE_FUNC(filter_n, 14, 19, opt); \
241
    SCALE_FUNC(filter_n, 16, 19, opt)
242
243
#define SCALE_FUNCS_MMX(opt) \
244
    SCALE_FUNCS(4, opt); \
245
    SCALE_FUNCS(8, opt); \
246
    SCALE_FUNCS(X, opt)
247
248
#define SCALE_FUNCS_SSE(opt) \
249
    SCALE_FUNCS(4, opt); \
250
    SCALE_FUNCS(8, opt); \
251
    SCALE_FUNCS(X4, opt); \
252
    SCALE_FUNCS(X8, opt)
253
254
SCALE_FUNCS_SSE(sse2);
255
SCALE_FUNCS_SSE(ssse3);
256
SCALE_FUNCS_SSE(sse4);
257
258
SCALE_FUNC(4, 8, 15, avx2);
259
SCALE_FUNC(X4, 8, 15, avx2);
260
261
#define VSCALEX_FUNC(size, opt) \
262
void ff_yuv2planeX_ ## size ## _ ## opt(const int16_t *filter, int filterSize, \
263
                                        const int16_t **src, uint8_t *dest, int dstW, \
264
                                        const uint8_t *dither, int offset)
265
#define VSCALEX_FUNCS(opt) \
266
    VSCALEX_FUNC(8,  opt); \
267
    VSCALEX_FUNC(9,  opt); \
268
    VSCALEX_FUNC(10, opt)
269
270
VSCALEX_FUNC(8, mmxext);
271
VSCALEX_FUNCS(sse2);
272
VSCALEX_FUNCS(sse4);
273
VSCALEX_FUNC(16, sse4);
274
VSCALEX_FUNCS(avx);
275
276
#define VSCALE_FUNC(size, opt) \
277
void ff_yuv2plane1_ ## size ## _ ## opt(const int16_t *src, uint8_t *dst, int dstW, \
278
                                        const uint8_t *dither, int offset)
279
#define VSCALE_FUNCS(opt1, opt2) \
280
    VSCALE_FUNC(8,  opt1); \
281
    VSCALE_FUNC(9,  opt2); \
282
    VSCALE_FUNC(10, opt2); \
283
    VSCALE_FUNC(16, opt1)
284
285
VSCALE_FUNCS(sse2, sse2);
286
VSCALE_FUNC(16, sse4);
287
VSCALE_FUNCS(avx, avx);
288
289
#define INPUT_Y_FUNC(fmt, opt) \
290
void ff_ ## fmt ## ToY_  ## opt(uint8_t *dst, const uint8_t *src, \
291
                                const uint8_t *unused1, const uint8_t *unused2, \
292
                                int w, uint32_t *unused, void *opq)
293
#define INPUT_UV_FUNC(fmt, opt) \
294
void ff_ ## fmt ## ToUV_ ## opt(uint8_t *dstU, uint8_t *dstV, \
295
                                const uint8_t *unused0, \
296
                                const uint8_t *src1, \
297
                                const uint8_t *src2, \
298
                                int w, uint32_t *unused, void *opq)
299
#define INPUT_FUNC(fmt, opt) \
300
    INPUT_Y_FUNC(fmt, opt); \
301
    INPUT_UV_FUNC(fmt, opt)
302
#define INPUT_FUNCS(opt) \
303
    INPUT_FUNC(uyvy, opt); \
304
    INPUT_FUNC(yuyv, opt); \
305
    INPUT_UV_FUNC(nv12, opt); \
306
    INPUT_UV_FUNC(nv21, opt); \
307
    INPUT_FUNC(rgba, opt); \
308
    INPUT_FUNC(bgra, opt); \
309
    INPUT_FUNC(argb, opt); \
310
    INPUT_FUNC(abgr, opt); \
311
    INPUT_FUNC(rgb24, opt); \
312
    INPUT_FUNC(bgr24, opt)
313
314
INPUT_FUNCS(sse2);
315
INPUT_FUNCS(ssse3);
316
INPUT_FUNCS(avx);
317
INPUT_FUNC(rgba, avx2);
318
INPUT_FUNC(bgra, avx2);
319
INPUT_FUNC(argb, avx2);
320
INPUT_FUNC(abgr, avx2);
321
INPUT_FUNC(rgb24, avx2);
322
INPUT_FUNC(bgr24, avx2);
323
324
#if ARCH_X86_64
325
#define YUV2NV_DECL(fmt, opt) \
326
void ff_yuv2 ## fmt ## cX_ ## opt(enum AVPixelFormat format, const uint8_t *dither, \
327
                                  const int16_t *filter, int filterSize, \
328
                                  const int16_t **u, const int16_t **v, \
329
                                  uint8_t *dst, int dstWidth)
330
331
YUV2NV_DECL(nv12, avx2);
332
YUV2NV_DECL(nv21, avx2);
333
334
#define YUV2GBRP_FN_DECL(fmt, opt)                                                      \
335
void ff_yuv2##fmt##_full_X_ ##opt(SwsInternal *c, const int16_t *lumFilter,           \
336
                                 const int16_t **lumSrcx, int lumFilterSize,         \
337
                                 const int16_t *chrFilter, const int16_t **chrUSrcx, \
338
                                 const int16_t **chrVSrcx, int chrFilterSize,        \
339
                                 const int16_t **alpSrcx, uint8_t **dest,            \
340
                                 int dstW, int y)
341
342
#define YUV2GBRP_DECL(opt)     \
343
YUV2GBRP_FN_DECL(gbrp,       opt); \
344
YUV2GBRP_FN_DECL(gbrap,      opt); \
345
YUV2GBRP_FN_DECL(gbrp9le,    opt); \
346
YUV2GBRP_FN_DECL(gbrp10le,   opt); \
347
YUV2GBRP_FN_DECL(gbrap10le,  opt); \
348
YUV2GBRP_FN_DECL(gbrp12le,   opt); \
349
YUV2GBRP_FN_DECL(gbrap12le,  opt); \
350
YUV2GBRP_FN_DECL(gbrp14le,   opt); \
351
YUV2GBRP_FN_DECL(gbrp16le,   opt); \
352
YUV2GBRP_FN_DECL(gbrap16le,  opt); \
353
YUV2GBRP_FN_DECL(gbrpf32le,  opt); \
354
YUV2GBRP_FN_DECL(gbrapf32le, opt); \
355
YUV2GBRP_FN_DECL(gbrp9be,    opt); \
356
YUV2GBRP_FN_DECL(gbrp10be,   opt); \
357
YUV2GBRP_FN_DECL(gbrap10be,  opt); \
358
YUV2GBRP_FN_DECL(gbrp12be,   opt); \
359
YUV2GBRP_FN_DECL(gbrap12be,  opt); \
360
YUV2GBRP_FN_DECL(gbrp14be,   opt); \
361
YUV2GBRP_FN_DECL(gbrp16be,   opt); \
362
YUV2GBRP_FN_DECL(gbrap16be,  opt); \
363
YUV2GBRP_FN_DECL(gbrpf32be,  opt); \
364
YUV2GBRP_FN_DECL(gbrapf32be, opt)
365
366
YUV2GBRP_DECL(sse2);
367
YUV2GBRP_DECL(sse4);
368
YUV2GBRP_DECL(avx2);
369
370
#define INPUT_PLANAR_RGB_Y_FN_DECL(fmt, opt)                               \
371
void ff_planar_##fmt##_to_y_##opt(uint8_t *dst,                            \
372
                           const uint8_t *src[4], int w, int32_t *rgb2yuv, \
373
                           void *opq)
374
375
#define INPUT_PLANAR_RGB_UV_FN_DECL(fmt, opt)                              \
376
void ff_planar_##fmt##_to_uv_##opt(uint8_t *dstU, uint8_t *dstV,           \
377
                           const uint8_t *src[4], int w, int32_t *rgb2yuv, \
378
                           void *opq)
379
380
#define INPUT_PLANAR_RGB_A_FN_DECL(fmt, opt)                               \
381
void ff_planar_##fmt##_to_a_##opt(uint8_t *dst,                            \
382
                           const uint8_t *src[4], int w, int32_t *rgb2yuv, \
383
                           void *opq)
384
385
386
#define INPUT_PLANAR_RGBXX_A_DECL(fmt, opt) \
387
INPUT_PLANAR_RGB_A_FN_DECL(fmt##le,  opt);  \
388
INPUT_PLANAR_RGB_A_FN_DECL(fmt##be,  opt)
389
390
#define INPUT_PLANAR_RGBXX_Y_DECL(fmt, opt) \
391
INPUT_PLANAR_RGB_Y_FN_DECL(fmt##le,  opt);  \
392
INPUT_PLANAR_RGB_Y_FN_DECL(fmt##be,  opt)
393
394
#define INPUT_PLANAR_RGBXX_UV_DECL(fmt, opt) \
395
INPUT_PLANAR_RGB_UV_FN_DECL(fmt##le,  opt);  \
396
INPUT_PLANAR_RGB_UV_FN_DECL(fmt##be,  opt)
397
398
#define INPUT_PLANAR_RGBXX_YUVA_DECL(fmt, opt) \
399
INPUT_PLANAR_RGBXX_Y_DECL(fmt,  opt);          \
400
INPUT_PLANAR_RGBXX_UV_DECL(fmt, opt);          \
401
INPUT_PLANAR_RGBXX_A_DECL(fmt,  opt)
402
403
#define INPUT_PLANAR_RGBXX_YUV_DECL(fmt, opt) \
404
INPUT_PLANAR_RGBXX_Y_DECL(fmt,  opt);         \
405
INPUT_PLANAR_RGBXX_UV_DECL(fmt, opt)
406
407
#define INPUT_PLANAR_RGBXX_UVA_DECL(fmt, opt) \
408
INPUT_PLANAR_RGBXX_UV_DECL(fmt, opt);         \
409
INPUT_PLANAR_RGBXX_A_DECL(fmt,  opt)
410
411
#define INPUT_PLANAR_RGB_A_ALL_DECL(opt) \
412
INPUT_PLANAR_RGB_A_FN_DECL(rgb,   opt);  \
413
INPUT_PLANAR_RGBXX_A_DECL(rgb10,  opt);  \
414
INPUT_PLANAR_RGBXX_A_DECL(rgb12,  opt);  \
415
INPUT_PLANAR_RGBXX_A_DECL(rgb16,  opt);  \
416
INPUT_PLANAR_RGBXX_A_DECL(rgbf32, opt)
417
418
#define INPUT_PLANAR_RGB_Y_ALL_DECL(opt) \
419
INPUT_PLANAR_RGB_Y_FN_DECL(rgb,   opt);  \
420
INPUT_PLANAR_RGBXX_Y_DECL(rgb9,   opt);  \
421
INPUT_PLANAR_RGBXX_Y_DECL(rgb10,  opt);  \
422
INPUT_PLANAR_RGBXX_Y_DECL(rgb12,  opt);  \
423
INPUT_PLANAR_RGBXX_Y_DECL(rgb14,  opt);  \
424
INPUT_PLANAR_RGBXX_Y_DECL(rgb16,  opt);  \
425
INPUT_PLANAR_RGBXX_Y_DECL(rgbf32, opt)
426
427
#define INPUT_PLANAR_RGB_UV_ALL_DECL(opt) \
428
INPUT_PLANAR_RGB_UV_FN_DECL(rgb,    opt); \
429
INPUT_PLANAR_RGBXX_UV_DECL(rgb9,   opt);  \
430
INPUT_PLANAR_RGBXX_UV_DECL(rgb10,  opt);  \
431
INPUT_PLANAR_RGBXX_UV_DECL(rgb12,  opt);  \
432
INPUT_PLANAR_RGBXX_UV_DECL(rgb14,  opt);  \
433
INPUT_PLANAR_RGBXX_UV_DECL(rgb16,  opt);  \
434
INPUT_PLANAR_RGBXX_UV_DECL(rgbf32, opt)
435
436
INPUT_PLANAR_RGBXX_Y_DECL(rgbf32, sse2);
437
INPUT_PLANAR_RGB_UV_ALL_DECL(sse2);
438
INPUT_PLANAR_RGB_A_ALL_DECL(sse2);
439
440
INPUT_PLANAR_RGB_Y_ALL_DECL(sse4);
441
INPUT_PLANAR_RGB_UV_ALL_DECL(sse4);
442
INPUT_PLANAR_RGBXX_A_DECL(rgbf32, sse4);
443
444
INPUT_PLANAR_RGB_Y_ALL_DECL(avx2);
445
INPUT_PLANAR_RGB_UV_ALL_DECL(avx2);
446
INPUT_PLANAR_RGB_A_ALL_DECL(avx2);
447
#endif
448
449
0
#define RANGE_CONVERT_FUNCS(opt, bpc) do {                                  \
450
0
    if (c->opts.src_range) {                                                \
451
0
        c->lumConvertRange = ff_lumRangeFromJpeg##bpc##_##opt;              \
452
0
        c->chrConvertRange = ff_chrRangeFromJpeg##bpc##_##opt;              \
453
0
    } else {                                                                \
454
0
        c->lumConvertRange = ff_lumRangeToJpeg##bpc##_##opt;                \
455
0
        c->chrConvertRange = ff_chrRangeToJpeg##bpc##_##opt;                \
456
0
    }                                                                       \
457
0
} while (0)
458
459
#define RANGE_CONVERT_FUNCS_DECL(opt, bpc)                                  \
460
void ff_lumRangeFromJpeg##bpc##_##opt(int16_t *dst, int width,              \
461
                                      uint32_t coeff, int64_t offset);      \
462
void ff_chrRangeFromJpeg##bpc##_##opt(int16_t *dstU, int16_t *dstV, int width, \
463
                                      uint32_t coeff, int64_t offset);      \
464
void ff_lumRangeToJpeg##bpc##_##opt(int16_t *dst, int width,                \
465
                                    uint32_t coeff, int64_t offset);        \
466
void ff_chrRangeToJpeg##bpc##_##opt(int16_t *dstU, int16_t *dstV, int width, \
467
                                    uint32_t coeff, int64_t offset);        \
468
469
RANGE_CONVERT_FUNCS_DECL(sse2, 8)
470
RANGE_CONVERT_FUNCS_DECL(sse4, 16)
471
RANGE_CONVERT_FUNCS_DECL(avx2, 8)
472
RANGE_CONVERT_FUNCS_DECL(avx2, 16)
473
474
av_cold void ff_sws_init_range_convert_x86(SwsInternal *c)
475
0
{
476
0
    int cpu_flags = av_get_cpu_flags();
477
0
    if (EXTERNAL_AVX2_FAST(cpu_flags)) {
478
0
        if (c->dstBpc <= 14) {
479
0
            RANGE_CONVERT_FUNCS(avx2, 8);
480
0
        } else {
481
0
            RANGE_CONVERT_FUNCS(avx2, 16);
482
0
        }
483
0
    } else if (EXTERNAL_SSE2(cpu_flags) && c->dstBpc <= 14) {
484
0
        RANGE_CONVERT_FUNCS(sse2, 8);
485
0
    } else if (EXTERNAL_SSE4(cpu_flags) && c->dstBpc > 14) {
486
0
        RANGE_CONVERT_FUNCS(sse4, 16);
487
0
    }
488
0
}
489
490
av_cold void ff_sws_init_swscale_x86(SwsInternal *c)
491
0
{
492
0
    int cpu_flags = av_get_cpu_flags();
493
494
0
#if HAVE_MMXEXT_INLINE
495
0
    if (INLINE_MMXEXT(cpu_flags))
496
0
        sws_init_swscale_mmxext(c);
497
0
#endif
498
0
    if(c->use_mmx_vfilter && !(c->opts.flags & SWS_ACCURATE_RND)) {
499
0
#if HAVE_MMXEXT_EXTERNAL
500
0
        if (EXTERNAL_MMXEXT(cpu_flags))
501
0
            c->yuv2planeX = yuv2yuvX_mmxext;
502
0
#endif
503
0
#if HAVE_SSE3_EXTERNAL
504
0
        if (EXTERNAL_SSE3(cpu_flags))
505
0
            c->yuv2planeX = yuv2yuvX_sse3;
506
0
#endif
507
0
#if HAVE_AVX2_EXTERNAL
508
0
        if (EXTERNAL_AVX2_FAST(cpu_flags))
509
0
            c->yuv2planeX = yuv2yuvX_avx2;
510
0
#endif
511
0
    }
512
#if ARCH_X86_32 && !HAVE_ALIGNED_STACK
513
    // The better yuv2planeX_8 functions need aligned stack on x86-32,
514
    // so we use MMXEXT in this case if they are not available.
515
    if (EXTERNAL_MMXEXT(cpu_flags)) {
516
        if (c->dstBpc == 8 && !c->use_mmx_vfilter)
517
            c->yuv2planeX = ff_yuv2planeX_8_mmxext;
518
    }
519
#endif /* ARCH_X86_32 && !HAVE_ALIGNED_STACK */
520
521
0
#define ASSIGN_SCALE_FUNC2(hscalefn, filtersize, opt1, opt2) do { \
522
0
    if (c->srcBpc == 8) { \
523
0
        hscalefn = c->dstBpc <= 14 ? ff_hscale8to15_ ## filtersize ## _ ## opt2 : \
524
0
                                     ff_hscale8to19_ ## filtersize ## _ ## opt1; \
525
0
    } else if (c->srcBpc == 9) { \
526
0
        hscalefn = c->dstBpc <= 14 ? ff_hscale9to15_ ## filtersize ## _ ## opt2 : \
527
0
                                     ff_hscale9to19_ ## filtersize ## _ ## opt1; \
528
0
    } else if (c->srcBpc == 10) { \
529
0
        hscalefn = c->dstBpc <= 14 ? ff_hscale10to15_ ## filtersize ## _ ## opt2 : \
530
0
                                     ff_hscale10to19_ ## filtersize ## _ ## opt1; \
531
0
    } else if (c->srcBpc == 12) { \
532
0
        hscalefn = c->dstBpc <= 14 ? ff_hscale12to15_ ## filtersize ## _ ## opt2 : \
533
0
                                     ff_hscale12to19_ ## filtersize ## _ ## opt1; \
534
0
    } else if (c->srcBpc == 14 || ((c->opts.src_format==AV_PIX_FMT_PAL8||isAnyRGB(c->opts.src_format)) && av_pix_fmt_desc_get(c->opts.src_format)->comp[0].depth<16)) { \
535
0
        hscalefn = c->dstBpc <= 14 ? ff_hscale14to15_ ## filtersize ## _ ## opt2 : \
536
0
                                     ff_hscale14to19_ ## filtersize ## _ ## opt1; \
537
0
    } else { /* c->srcBpc == 16 */ \
538
0
        av_assert0(c->srcBpc == 16);\
539
0
        hscalefn = c->dstBpc <= 14 ? ff_hscale16to15_ ## filtersize ## _ ## opt2 : \
540
0
                                     ff_hscale16to19_ ## filtersize ## _ ## opt1; \
541
0
    } \
542
0
} while (0)
543
0
#define ASSIGN_VSCALEX_FUNC(vscalefn, opt, do_16_case, condition_8bit) \
544
0
switch(c->dstBpc){ \
545
0
    case 16:                          do_16_case;                          break; \
546
0
    case 10: if (!isBE(c->opts.dst_format) && !isSemiPlanarYUV(c->opts.dst_format) && !isDataInHighBits(c->opts.dst_format)) vscalefn = ff_yuv2planeX_10_ ## opt; break; \
547
0
    case 9:  if (!isBE(c->opts.dst_format)) vscalefn = ff_yuv2planeX_9_  ## opt; break; \
548
0
    case 8: if ((condition_8bit) && !c->use_mmx_vfilter) vscalefn = ff_yuv2planeX_8_  ## opt; break; \
549
0
    }
550
0
#define ASSIGN_VSCALE_FUNC(vscalefn, opt) \
551
0
    switch(c->dstBpc){ \
552
0
    case 16: if (!isBE(c->opts.dst_format)) vscalefn = ff_yuv2plane1_16_ ## opt; break; \
553
0
    case 10: if (!isBE(c->opts.dst_format) && !isSemiPlanarYUV(c->opts.dst_format) && !isDataInHighBits(c->opts.dst_format)) vscalefn = ff_yuv2plane1_10_ ## opt; break; \
554
0
    case 9:  if (!isBE(c->opts.dst_format)) vscalefn = ff_yuv2plane1_9_  ## opt;  break; \
555
0
    case 8:                           vscalefn = ff_yuv2plane1_8_  ## opt;  break; \
556
0
    default: av_assert0(c->dstBpc>8); \
557
0
    }
558
0
#define case_rgb(x, X, opt) \
559
0
        case AV_PIX_FMT_ ## X: \
560
0
            c->lumToYV12 = ff_ ## x ## ToY_ ## opt; \
561
0
            if (!c->chrSrcHSubSample) \
562
0
                c->chrToYV12 = ff_ ## x ## ToUV_ ## opt; \
563
0
            break
564
0
#define ASSIGN_SSE_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
565
0
    switch (filtersize) { \
566
0
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
567
0
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
568
0
    default: if (filtersize & 4) ASSIGN_SCALE_FUNC2(hscalefn, X4, opt1, opt2); \
569
0
             else                ASSIGN_SCALE_FUNC2(hscalefn, X8, opt1, opt2); \
570
0
             break; \
571
0
    }
572
0
    if (EXTERNAL_SSE2(cpu_flags)) {
573
0
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, sse2, sse2);
574
0
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, sse2, sse2);
575
0
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, sse2, ,
576
0
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
577
0
        if (!(c->opts.flags & SWS_ACCURATE_RND))
578
0
            ASSIGN_VSCALE_FUNC(c->yuv2plane1, sse2);
579
580
0
        switch (c->opts.src_format) {
581
0
        case AV_PIX_FMT_YA8:
582
0
            c->lumToYV12 = ff_yuyvToY_sse2;
583
0
            if (c->needAlpha)
584
0
                c->alpToYV12 = ff_uyvyToY_sse2;
585
0
            break;
586
0
        case AV_PIX_FMT_YUYV422:
587
0
            c->lumToYV12 = ff_yuyvToY_sse2;
588
0
            c->chrToYV12 = ff_yuyvToUV_sse2;
589
0
            break;
590
0
        case AV_PIX_FMT_UYVY422:
591
0
            c->lumToYV12 = ff_uyvyToY_sse2;
592
0
            c->chrToYV12 = ff_uyvyToUV_sse2;
593
0
            break;
594
0
        case AV_PIX_FMT_NV12:
595
0
            c->chrToYV12 = ff_nv12ToUV_sse2;
596
0
            break;
597
0
        case AV_PIX_FMT_NV21:
598
0
            c->chrToYV12 = ff_nv21ToUV_sse2;
599
0
            break;
600
0
        case_rgb(rgb24, RGB24, sse2);
601
0
        case_rgb(bgr24, BGR24, sse2);
602
0
        case_rgb(bgra,  BGRA,  sse2);
603
0
        case_rgb(rgba,  RGBA,  sse2);
604
0
        case_rgb(abgr,  ABGR,  sse2);
605
0
        case_rgb(argb,  ARGB,  sse2);
606
0
        default:
607
0
            break;
608
0
        }
609
0
    }
610
0
    if (EXTERNAL_SSSE3(cpu_flags)) {
611
0
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, ssse3, ssse3);
612
0
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, ssse3, ssse3);
613
0
        switch (c->opts.src_format) {
614
0
        case_rgb(rgb24, RGB24, ssse3);
615
0
        case_rgb(bgr24, BGR24, ssse3);
616
0
        default:
617
0
            break;
618
0
        }
619
0
    }
620
0
    if (EXTERNAL_SSE4(cpu_flags)) {
621
        /* Xto15 don't need special sse4 functions */
622
0
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, sse4, ssse3);
623
0
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, sse4, ssse3);
624
0
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, sse4,
625
0
                            if (!isBE(c->opts.dst_format)) c->yuv2planeX = ff_yuv2planeX_16_sse4,
626
0
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
627
0
        if (c->dstBpc == 16 && !isBE(c->opts.dst_format) && !(c->opts.flags & SWS_ACCURATE_RND))
628
0
            c->yuv2plane1 = ff_yuv2plane1_16_sse4;
629
0
    }
630
631
0
    if (EXTERNAL_AVX(cpu_flags)) {
632
0
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, avx, ,
633
0
                            HAVE_ALIGNED_STACK || ARCH_X86_64);
634
0
        if (!(c->opts.flags & SWS_ACCURATE_RND))
635
0
            ASSIGN_VSCALE_FUNC(c->yuv2plane1, avx);
636
637
0
        switch (c->opts.src_format) {
638
0
        case AV_PIX_FMT_YUYV422:
639
0
            c->chrToYV12 = ff_yuyvToUV_avx;
640
0
            break;
641
0
        case AV_PIX_FMT_UYVY422:
642
0
            c->chrToYV12 = ff_uyvyToUV_avx;
643
0
            break;
644
0
        case AV_PIX_FMT_NV12:
645
0
            c->chrToYV12 = ff_nv12ToUV_avx;
646
0
            break;
647
0
        case AV_PIX_FMT_NV21:
648
0
            c->chrToYV12 = ff_nv21ToUV_avx;
649
0
            break;
650
0
        case_rgb(rgb24, RGB24, avx);
651
0
        case_rgb(bgr24, BGR24, avx);
652
0
        case_rgb(bgra,  BGRA,  avx);
653
0
        case_rgb(rgba,  RGBA,  avx);
654
0
        case_rgb(abgr,  ABGR,  avx);
655
0
        case_rgb(argb,  ARGB,  avx);
656
0
        default:
657
0
            break;
658
0
        }
659
0
    }
660
661
0
#if ARCH_X86_64
662
0
#define ASSIGN_AVX2_SCALE_FUNC(hscalefn, filtersize) \
663
0
    switch (filtersize) { \
664
0
    case 4:  hscalefn = ff_hscale8to15_4_avx2; break; \
665
0
    default:  hscalefn = ff_hscale8to15_X4_avx2; break; \
666
0
             break; \
667
0
    }
668
669
0
    if (EXTERNAL_AVX2_FAST(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_SLOW_GATHER)) {
670
0
        if ((c->srcBpc == 8) && (c->dstBpc <= 14)) {
671
0
            ASSIGN_AVX2_SCALE_FUNC(c->hcScale, c->hChrFilterSize);
672
0
            ASSIGN_AVX2_SCALE_FUNC(c->hyScale, c->hLumFilterSize);
673
0
        }
674
0
    }
675
676
0
    if (EXTERNAL_AVX2_FAST(cpu_flags)) {
677
0
        if (ARCH_X86_64)
678
0
            switch (c->opts.src_format) {
679
0
            case_rgb(rgb24, RGB24, avx2);
680
0
            case_rgb(bgr24, BGR24, avx2);
681
0
            case_rgb(bgra,  BGRA,  avx2);
682
0
            case_rgb(rgba,  RGBA,  avx2);
683
0
            case_rgb(abgr,  ABGR,  avx2);
684
0
            case_rgb(argb,  ARGB,  avx2);
685
0
            }
686
0
        if (!(c->opts.flags & SWS_ACCURATE_RND)) // FIXME
687
0
        switch (c->opts.dst_format) {
688
0
        case AV_PIX_FMT_NV12:
689
0
        case AV_PIX_FMT_NV24:
690
0
            c->yuv2nv12cX = ff_yuv2nv12cX_avx2;
691
0
            break;
692
0
        case AV_PIX_FMT_NV21:
693
0
        case AV_PIX_FMT_NV42:
694
0
            c->yuv2nv12cX = ff_yuv2nv21cX_avx2;
695
0
            break;
696
0
        default:
697
0
            break;
698
0
        }
699
0
    }
700
701
702
0
#define INPUT_PLANER_RGB_A_FUNC_CASE_NOBREAK(fmt, name, opt)          \
703
0
        case fmt:                                                     \
704
0
            c->readAlpPlanar = ff_planar_##name##_to_a_##opt;
705
706
0
#define INPUT_PLANER_RGBA_YUV_FUNC_CASE(rgb_fmt, rgba_fmt, name, opt) \
707
0
        case rgba_fmt:                                                \
708
0
        case rgb_fmt:                                                 \
709
0
            c->readLumPlanar = ff_planar_##name##_to_y_##opt;         \
710
0
            c->readChrPlanar = ff_planar_##name##_to_uv_##opt;        \
711
0
            break;
712
713
0
#define INPUT_PLANER_RGB_YUV_FUNC_CASE(fmt, name, opt)                \
714
0
        case fmt:                                                     \
715
0
            c->readLumPlanar = ff_planar_##name##_to_y_##opt;         \
716
0
            c->readChrPlanar = ff_planar_##name##_to_uv_##opt;        \
717
0
            break;
718
719
0
#define INPUT_PLANER_RGB_UV_FUNC_CASE(fmt, name, opt)                 \
720
0
        case fmt:                                                     \
721
0
            c->readChrPlanar = ff_planar_##name##_to_uv_##opt;        \
722
0
            break;
723
724
0
#define INPUT_PLANER_RGBAXX_YUVA_FUNC_CASE(rgb_fmt, rgba_fmt, name, opt) \
725
0
        INPUT_PLANER_RGB_A_FUNC_CASE_NOBREAK(rgba_fmt##LE,  name##le, opt)       \
726
0
        INPUT_PLANER_RGB_YUV_FUNC_CASE(rgb_fmt##LE, name##le, opt)       \
727
0
        INPUT_PLANER_RGB_A_FUNC_CASE_NOBREAK(rgba_fmt##BE,  name##be, opt)       \
728
0
        INPUT_PLANER_RGB_YUV_FUNC_CASE(rgb_fmt##BE, name##be, opt)
729
730
0
#define INPUT_PLANER_RGBAXX_UVA_FUNC_CASE(rgb_fmt, rgba_fmt, name, opt) \
731
0
        INPUT_PLANER_RGB_A_FUNC_CASE_NOBREAK(rgba_fmt##LE, name##le, opt)       \
732
0
        INPUT_PLANER_RGB_UV_FUNC_CASE(rgb_fmt##LE, name##le, opt)       \
733
0
        INPUT_PLANER_RGB_A_FUNC_CASE_NOBREAK(rgba_fmt##BE, name##be, opt)       \
734
0
        INPUT_PLANER_RGB_UV_FUNC_CASE(rgb_fmt##BE, name##be, opt)
735
736
0
#define INPUT_PLANER_RGBAXX_YUV_FUNC_CASE(rgb_fmt, rgba_fmt, name, opt)           \
737
0
        INPUT_PLANER_RGBA_YUV_FUNC_CASE(rgb_fmt##LE, rgba_fmt##LE, name##le, opt) \
738
0
        INPUT_PLANER_RGBA_YUV_FUNC_CASE(rgb_fmt##BE, rgba_fmt##BE, name##be, opt)
739
740
0
#define INPUT_PLANER_RGBXX_YUV_FUNC_CASE(rgb_fmt, name, opt)       \
741
0
        INPUT_PLANER_RGB_YUV_FUNC_CASE(rgb_fmt##LE, name##le, opt) \
742
0
        INPUT_PLANER_RGB_YUV_FUNC_CASE(rgb_fmt##BE, name##be, opt)
743
744
0
#define INPUT_PLANER_RGBXX_UV_FUNC_CASE(rgb_fmt, name, opt)        \
745
0
        INPUT_PLANER_RGB_UV_FUNC_CASE(rgb_fmt##LE, name##le, opt)  \
746
0
        INPUT_PLANER_RGB_UV_FUNC_CASE(rgb_fmt##BE, name##be, opt)
747
748
0
#define INPUT_PLANER_RGB_YUVA_ALL_CASES(opt)                                                     \
749
0
        INPUT_PLANER_RGB_A_FUNC_CASE_NOBREAK(AV_PIX_FMT_GBRAP,                         rgb, opt) \
750
0
        INPUT_PLANER_RGB_YUV_FUNC_CASE(    AV_PIX_FMT_GBRP,                            rgb, opt) \
751
0
        INPUT_PLANER_RGBXX_YUV_FUNC_CASE(  AV_PIX_FMT_GBRP9,                          rgb9, opt) \
752
0
        INPUT_PLANER_RGBAXX_YUVA_FUNC_CASE(AV_PIX_FMT_GBRP10,  AV_PIX_FMT_GBRAP10,   rgb10, opt) \
753
0
        INPUT_PLANER_RGBAXX_YUVA_FUNC_CASE(AV_PIX_FMT_GBRP12,  AV_PIX_FMT_GBRAP12,   rgb12, opt) \
754
0
        INPUT_PLANER_RGBXX_YUV_FUNC_CASE(  AV_PIX_FMT_GBRP14,                        rgb14, opt) \
755
0
        INPUT_PLANER_RGBAXX_YUVA_FUNC_CASE(AV_PIX_FMT_GBRP16,  AV_PIX_FMT_GBRAP16,   rgb16, opt) \
756
0
        INPUT_PLANER_RGBAXX_YUVA_FUNC_CASE(AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, rgbf32, opt)
757
758
759
0
    if (EXTERNAL_SSE2(cpu_flags)) {
760
0
        switch (c->opts.src_format) {
761
0
        INPUT_PLANER_RGB_A_FUNC_CASE_NOBREAK(AV_PIX_FMT_GBRAP,                         rgb, sse2);
762
0
        INPUT_PLANER_RGB_UV_FUNC_CASE(     AV_PIX_FMT_GBRP,                            rgb, sse2);
763
0
        INPUT_PLANER_RGBXX_UV_FUNC_CASE(   AV_PIX_FMT_GBRP9,                          rgb9, sse2);
764
0
        INPUT_PLANER_RGBAXX_UVA_FUNC_CASE( AV_PIX_FMT_GBRP10,  AV_PIX_FMT_GBRAP10,   rgb10, sse2);
765
0
        INPUT_PLANER_RGBAXX_UVA_FUNC_CASE( AV_PIX_FMT_GBRP12,  AV_PIX_FMT_GBRAP12,   rgb12, sse2);
766
0
        INPUT_PLANER_RGBXX_UV_FUNC_CASE(   AV_PIX_FMT_GBRP14,                        rgb14, sse2);
767
0
        INPUT_PLANER_RGBAXX_UVA_FUNC_CASE( AV_PIX_FMT_GBRP16,  AV_PIX_FMT_GBRAP16,   rgb16, sse2);
768
0
        INPUT_PLANER_RGBAXX_YUVA_FUNC_CASE(AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, rgbf32, sse2);
769
0
        default:
770
0
            break;
771
0
        }
772
0
    }
773
774
0
    if (EXTERNAL_SSE4(cpu_flags)) {
775
0
        switch (c->opts.src_format) {
776
0
        case AV_PIX_FMT_GBRAP:
777
0
        INPUT_PLANER_RGB_YUV_FUNC_CASE(    AV_PIX_FMT_GBRP,                            rgb, sse4);
778
0
        INPUT_PLANER_RGBXX_YUV_FUNC_CASE(  AV_PIX_FMT_GBRP9,                          rgb9, sse4);
779
0
        INPUT_PLANER_RGBAXX_YUV_FUNC_CASE( AV_PIX_FMT_GBRP10,  AV_PIX_FMT_GBRAP10,   rgb10, sse4);
780
0
        INPUT_PLANER_RGBAXX_YUV_FUNC_CASE( AV_PIX_FMT_GBRP12,  AV_PIX_FMT_GBRAP12,   rgb12, sse4);
781
0
        INPUT_PLANER_RGBXX_YUV_FUNC_CASE(  AV_PIX_FMT_GBRP14,                        rgb14, sse4);
782
0
        INPUT_PLANER_RGBAXX_YUV_FUNC_CASE( AV_PIX_FMT_GBRP16,  AV_PIX_FMT_GBRAP16,   rgb16, sse4);
783
0
        INPUT_PLANER_RGBAXX_YUVA_FUNC_CASE(AV_PIX_FMT_GBRPF32, AV_PIX_FMT_GBRAPF32, rgbf32, sse4);
784
0
        default:
785
0
            break;
786
0
        }
787
0
    }
788
789
0
    if (EXTERNAL_AVX2_FAST(cpu_flags)) {
790
0
        switch (c->opts.src_format) {
791
0
        INPUT_PLANER_RGB_YUVA_ALL_CASES(avx2)
792
0
        default:
793
0
            break;
794
0
        }
795
0
    }
796
797
0
    if(c->opts.flags & SWS_FULL_CHR_H_INT) {
798
799
0
#define YUV2ANYX_FUNC_CASE(fmt, name, opt)              \
800
0
        case fmt:                                       \
801
0
            c->yuv2anyX = ff_yuv2##name##_full_X_##opt; \
802
0
            break;
803
804
0
#define YUV2ANYX_GBRAP_CASES(opt)                                  \
805
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP,       gbrp,       opt) \
806
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAP,      gbrap,      opt) \
807
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP9LE,    gbrp9le,    opt) \
808
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP10LE,   gbrp10le,   opt) \
809
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAP10LE,  gbrap10le,  opt) \
810
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP12LE,   gbrp12le,   opt) \
811
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAP12LE,  gbrap12le,  opt) \
812
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP14LE,   gbrp14le,   opt) \
813
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP16LE,   gbrp16le,   opt) \
814
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAP16LE,  gbrap16le,  opt) \
815
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRPF32LE,  gbrpf32le,  opt) \
816
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAPF32LE, gbrapf32le, opt) \
817
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP9BE,    gbrp9be,    opt) \
818
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP10BE,   gbrp10be,   opt) \
819
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAP10BE,  gbrap10be,  opt) \
820
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP12BE,   gbrp12be,   opt) \
821
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAP12BE,  gbrap12be,  opt) \
822
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP14BE,   gbrp14be,   opt) \
823
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRP16BE,   gbrp16be,   opt) \
824
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAP16BE,  gbrap16be,  opt) \
825
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRPF32BE,  gbrpf32be,  opt) \
826
0
        YUV2ANYX_FUNC_CASE(AV_PIX_FMT_GBRAPF32BE, gbrapf32be, opt)
827
828
0
        if (EXTERNAL_SSE2(cpu_flags)) {
829
0
            switch (c->opts.dst_format) {
830
0
            YUV2ANYX_GBRAP_CASES(sse2)
831
0
            default:
832
0
                break;
833
0
            }
834
0
        }
835
836
0
        if (EXTERNAL_SSE4(cpu_flags)) {
837
0
            switch (c->opts.dst_format) {
838
0
            YUV2ANYX_GBRAP_CASES(sse4)
839
0
            default:
840
0
                break;
841
0
            }
842
0
        }
843
844
0
        if (EXTERNAL_AVX2_FAST(cpu_flags)) {
845
0
            switch (c->opts.dst_format) {
846
0
            YUV2ANYX_GBRAP_CASES(avx2)
847
0
            default:
848
0
                break;
849
0
            }
850
0
        }
851
0
    }
852
853
0
#endif
854
0
}
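
Note: every line in this report shows an execution count of 0, i.e. none of the x86 scaling paths initialized in this file were exercised by the run that produced it. The sketch below is not part of swscale.c; it is a hypothetical standalone harness, written against FFmpeg's public libswscale/libavutil APIs, that would route through ff_sws_init_swscale_x86() (and, depending on flags and formats, the range-convert and yuv2nv12cX assignments above) when executed on an SSE2/AVX2-capable x86 machine. The pixel formats, dimensions, and the SWS_BICUBIC flag are arbitrary illustrative choices, not values taken from this report.

/* Hypothetical coverage harness (not part of the file above). */
#include <string.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libswscale/swscale.h>

int main(void)
{
    const int src_w = 320, src_h = 240, dst_w = 640, dst_h = 480;
    uint8_t *src_data[4], *dst_data[4];
    int src_linesize[4], dst_linesize[4];

    int src_size = av_image_alloc(src_data, src_linesize, src_w, src_h,
                                  AV_PIX_FMT_YUV420P, 16);
    if (src_size < 0)
        return 1;
    if (av_image_alloc(dst_data, dst_linesize, dst_w, dst_h,
                       AV_PIX_FMT_RGB24, 16) < 0)
        return 1;
    memset(src_data[0], 128, src_size); /* flat grey input; pixel content is irrelevant for coverage */

    /* sws_getContext() performs the initialization covered by this report when
     * running on x86; SWS_BICUBIC selects a multi-tap filter so the hscale and
     * vscale function pointers assigned above are actually exercised. */
    struct SwsContext *ctx = sws_getContext(src_w, src_h, AV_PIX_FMT_YUV420P,
                                            dst_w, dst_h, AV_PIX_FMT_RGB24,
                                            SWS_BICUBIC, NULL, NULL, NULL);
    if (!ctx)
        return 1;

    sws_scale(ctx, (const uint8_t * const *)src_data, src_linesize,
              0, src_h, dst_data, dst_linesize);

    sws_freeContext(ctx);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    return 0;
}

Building this against the instrumented libraries (linking with -lswscale -lavutil) and rerunning it with other destination formats (e.g. AV_PIX_FMT_NV12 or AV_PIX_FMT_GBRP16LE) and flags (SWS_ACCURATE_RND, SWS_FULL_CHR_H_INT) would be one way to reach the remaining branches assigned in ff_sws_init_swscale_x86().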