Coverage Report

Created: 2025-07-16 07:53

/src/libavif/src/reformat.c
Line
Count
Source (jump to first uncovered line)
1
// Copyright 2019 Joe Drago. All rights reserved.
2
// SPDX-License-Identifier: BSD-2-Clause
3
4
#include "avif/internal.h"
5
6
#include <assert.h>
7
#include <stdint.h>
8
#include <string.h>
9
10
#if defined(_WIN32)
11
#include <process.h>
12
#include <windows.h>
13
#else
14
#include <pthread.h>
15
#endif
16
17
static void * avifMemset16(void * dest, int val, size_t count)
18
0
{
19
0
    uint16_t * dest16 = (uint16_t *)dest;
20
0
    for (size_t i = 0; i < count; i++)
21
0
        *dest16++ = (uint16_t)val;
22
0
    return dest;
23
0
}
24
25
struct YUVBlock
26
{
27
    float y;
28
    float u;
29
    float v;
30
};
31
32
avifBool avifGetRGBColorSpaceInfo(const avifRGBImage * rgb, avifRGBColorSpaceInfo * info)
33
2.13k
{
34
2.13k
    AVIF_CHECK(rgb->depth == 8 || rgb->depth == 10 || rgb->depth == 12 || rgb->depth == 16);
35
2.13k
    if (rgb->isFloat) {
36
0
        AVIF_CHECK(rgb->depth == 16);
37
0
    }
38
2.13k
    if (rgb->format == AVIF_RGB_FORMAT_RGB_565) {
39
0
        AVIF_CHECK(rgb->depth == 8);
40
0
    }
41
    // Cast to silence "comparison of unsigned expression is always true" warning.
42
2.13k
    AVIF_CHECK((int)rgb->format >= AVIF_RGB_FORMAT_RGB && rgb->format < AVIF_RGB_FORMAT_COUNT);
43
44
2.13k
    info->channelBytes = (rgb->depth > 8) ? 2 : 1;
45
2.13k
    info->pixelBytes = avifRGBImagePixelSize(rgb);
46
47
2.13k
    switch (rgb->format) {
48
0
        case AVIF_RGB_FORMAT_RGB:
49
0
            info->offsetBytesR = info->channelBytes * 0;
50
0
            info->offsetBytesG = info->channelBytes * 1;
51
0
            info->offsetBytesB = info->channelBytes * 2;
52
0
            info->offsetBytesA = 0;
53
0
            break;
54
723
        case AVIF_RGB_FORMAT_RGBA:
55
723
            info->offsetBytesR = info->channelBytes * 0;
56
723
            info->offsetBytesG = info->channelBytes * 1;
57
723
            info->offsetBytesB = info->channelBytes * 2;
58
723
            info->offsetBytesA = info->channelBytes * 3;
59
723
            break;
60
0
        case AVIF_RGB_FORMAT_ARGB:
61
0
            info->offsetBytesA = info->channelBytes * 0;
62
0
            info->offsetBytesR = info->channelBytes * 1;
63
0
            info->offsetBytesG = info->channelBytes * 2;
64
0
            info->offsetBytesB = info->channelBytes * 3;
65
0
            break;
66
0
        case AVIF_RGB_FORMAT_BGR:
67
0
            info->offsetBytesB = info->channelBytes * 0;
68
0
            info->offsetBytesG = info->channelBytes * 1;
69
0
            info->offsetBytesR = info->channelBytes * 2;
70
0
            info->offsetBytesA = 0;
71
0
            break;
72
1.40k
        case AVIF_RGB_FORMAT_BGRA:
73
1.40k
            info->offsetBytesB = info->channelBytes * 0;
74
1.40k
            info->offsetBytesG = info->channelBytes * 1;
75
1.40k
            info->offsetBytesR = info->channelBytes * 2;
76
1.40k
            info->offsetBytesA = info->channelBytes * 3;
77
1.40k
            break;
78
0
        case AVIF_RGB_FORMAT_ABGR:
79
0
            info->offsetBytesA = info->channelBytes * 0;
80
0
            info->offsetBytesB = info->channelBytes * 1;
81
0
            info->offsetBytesG = info->channelBytes * 2;
82
0
            info->offsetBytesR = info->channelBytes * 3;
83
0
            break;
84
0
        case AVIF_RGB_FORMAT_RGB_565:
85
            // Since RGB_565 consists of two bytes per RGB pixel, we simply use
86
            // the pointer to the red channel to populate the entire pixel value
87
            // as a uint16_t. As a result only offsetBytesR is used and the
88
            // other offsets are unused.
89
0
            info->offsetBytesR = 0;
90
0
            info->offsetBytesG = 0;
91
0
            info->offsetBytesB = 0;
92
0
            info->offsetBytesA = 0;
93
0
            break;
94
0
        case AVIF_RGB_FORMAT_GRAY:
95
0
            info->offsetBytesGray = info->channelBytes * 0;
96
0
            break;
97
0
        case AVIF_RGB_FORMAT_GRAYA:
98
0
            info->offsetBytesGray = info->channelBytes * 0;
99
0
            info->offsetBytesA = info->channelBytes * 1;
100
0
            break;
101
0
        case AVIF_RGB_FORMAT_AGRAY:
102
0
            info->offsetBytesA = info->channelBytes * 0;
103
0
            info->offsetBytesGray = info->channelBytes * 1;
104
0
            break;
105
106
0
        case AVIF_RGB_FORMAT_COUNT:
107
0
            return AVIF_FALSE;
108
2.13k
    }
109
110
2.13k
    info->maxChannel = (1 << rgb->depth) - 1;
111
2.13k
    info->maxChannelF = (float)info->maxChannel;
112
113
2.13k
    return AVIF_TRUE;
114
2.13k
}
115
116
avifBool avifGetYUVColorSpaceInfo(const avifImage * image, avifYUVColorSpaceInfo * info)
117
2.13k
{
118
2.13k
    AVIF_CHECK(image->depth == 8 || image->depth == 10 || image->depth == 12 || image->depth == 16);
119
2.13k
    AVIF_CHECK(image->yuvFormat >= AVIF_PIXEL_FORMAT_YUV444 && image->yuvFormat < AVIF_PIXEL_FORMAT_COUNT);
120
2.13k
    AVIF_CHECK(image->yuvRange == AVIF_RANGE_LIMITED || image->yuvRange == AVIF_RANGE_FULL);
121
122
    // These matrix coefficients values are currently unsupported. Revise this list as more support is added.
123
    //
124
    // YCgCo performs limited-full range adjustment on R,G,B but the current implementation performs range adjustment
125
    // on Y,U,V. So YCgCo with limited range is unsupported.
126
2.13k
    if ((image->matrixCoefficients == 3 /* CICP reserved */) ||
127
2.13k
        ((image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_YCGCO || image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_YCGCO_RE ||
128
2.12k
          image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_YCGCO_RO) &&
129
2.12k
         (image->yuvRange == AVIF_RANGE_LIMITED)) ||
130
2.13k
        (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_BT2020_CL) ||
131
2.13k
        (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_SMPTE2085) ||
132
2.13k
        (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_CHROMA_DERIVED_CL) ||
133
2.13k
        (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_ICTCP) || (image->matrixCoefficients >= AVIF_MATRIX_COEFFICIENTS_LAST)) {
134
207
        return AVIF_FALSE;
135
207
    }
136
137
    // Removing 400 here would break backward behavior but would respect the spec.
138
1.92k
    if ((image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_IDENTITY) && (image->yuvFormat != AVIF_PIXEL_FORMAT_YUV444) &&
139
1.92k
        (image->yuvFormat != AVIF_PIXEL_FORMAT_YUV400)) {
140
0
        return AVIF_FALSE;
141
0
    }
142
1.92k
    avifGetPixelFormatInfo(image->yuvFormat, &info->formatInfo);
143
1.92k
    avifCalcYUVCoefficients(image, &info->kr, &info->kg, &info->kb);
144
145
1.92k
    info->channelBytes = (image->depth > 8) ? 2 : 1;
146
147
1.92k
    info->depth = image->depth;
148
1.92k
    info->range = image->yuvRange;
149
1.92k
    info->maxChannel = (1 << image->depth) - 1;
150
1.92k
    info->biasY = (info->range == AVIF_RANGE_LIMITED) ? (float)(16 << (info->depth - 8)) : 0.0f;
151
1.92k
    info->biasUV = (float)(1 << (info->depth - 1));
152
1.92k
    info->rangeY = (float)((info->range == AVIF_RANGE_LIMITED) ? (219 << (info->depth - 8)) : info->maxChannel);
153
1.92k
    info->rangeUV = (float)((info->range == AVIF_RANGE_LIMITED) ? (224 << (info->depth - 8)) : info->maxChannel);
154
155
1.92k
    return AVIF_TRUE;
156
1.92k
}
157
158
static avifBool avifPrepareReformatState(const avifImage * image, const avifRGBImage * rgb, avifReformatState * state)
159
2.14k
{
160
2.14k
    const avifBool useYCgCoRe = (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_YCGCO_RE);
161
2.14k
    const avifBool useYCgCoRo = (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_YCGCO_RO);
162
2.14k
    if (useYCgCoRe || useYCgCoRo) {
163
12
        const int bitOffset = (useYCgCoRe) ? 2 : 1;
164
12
        if (image->depth - bitOffset != rgb->depth) {
165
12
            return AVIF_FALSE;
166
12
        }
167
12
    }
168
169
2.13k
    AVIF_CHECK(avifGetRGBColorSpaceInfo(rgb, &state->rgb));
170
2.13k
    AVIF_CHECK(avifGetYUVColorSpaceInfo(image, &state->yuv));
171
172
1.92k
    state->yuv.mode = AVIF_REFORMAT_MODE_YUV_COEFFICIENTS;
173
174
1.92k
    if (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_IDENTITY) {
175
668
        state->yuv.mode = AVIF_REFORMAT_MODE_IDENTITY;
176
1.25k
    } else if (image->matrixCoefficients == AVIF_MATRIX_COEFFICIENTS_YCGCO) {
177
112
        state->yuv.mode = AVIF_REFORMAT_MODE_YCGCO;
178
1.14k
    } else if (useYCgCoRe) {
179
0
        state->yuv.mode = AVIF_REFORMAT_MODE_YCGCO_RE;
180
1.14k
    } else if (useYCgCoRo) {
181
0
        state->yuv.mode = AVIF_REFORMAT_MODE_YCGCO_RO;
182
0
    }
183
184
1.92k
    if (state->yuv.mode != AVIF_REFORMAT_MODE_YUV_COEFFICIENTS) {
185
780
        state->yuv.kr = 0.0f;
186
780
        state->yuv.kg = 0.0f;
187
780
        state->yuv.kb = 0.0f;
188
780
    }
189
190
1.92k
    return AVIF_TRUE;
191
2.13k
}
192
193
// Formulas 20-31 from https://www.itu.int/rec/T-REC-H.273-201612-S
194
static int avifYUVColorSpaceInfoYToUNorm(avifYUVColorSpaceInfo * info, float v)
195
0
{
196
0
    int unorm = (int)avifRoundf(v * info->rangeY + info->biasY);
197
0
    return AVIF_CLAMP(unorm, 0, info->maxChannel);
198
0
}
199
200
static int avifYUVColorSpaceInfoUVToUNorm(avifYUVColorSpaceInfo * info, float v)
201
0
{
202
0
    int unorm;
203
204
    // YCgCo performs limited-full range adjustment on R,G,B but the current implementation performs range adjustment
205
    // on Y,U,V. So YCgCo with limited range is unsupported.
206
0
    assert((info->mode != AVIF_REFORMAT_MODE_YCGCO && info->mode != AVIF_REFORMAT_MODE_YCGCO_RE && info->mode != AVIF_REFORMAT_MODE_YCGCO_RO) ||
207
0
           (info->range == AVIF_RANGE_FULL));
208
209
0
    if (info->mode == AVIF_REFORMAT_MODE_IDENTITY) {
210
0
        unorm = (int)avifRoundf(v * info->rangeY + info->biasY);
211
0
    } else {
212
0
        unorm = (int)avifRoundf(v * info->rangeUV + info->biasUV);
213
0
    }
214
215
0
    return AVIF_CLAMP(unorm, 0, info->maxChannel);
216
0
}
217
218
avifResult avifImageRGBToYUV(avifImage * image, const avifRGBImage * rgb)
219
0
{
220
0
    if (!rgb->pixels || rgb->format == AVIF_RGB_FORMAT_RGB_565) {
221
0
        return AVIF_RESULT_REFORMAT_FAILED;
222
0
    }
223
224
0
    avifReformatState state;
225
0
    if (!avifPrepareReformatState(image, rgb, &state)) {
226
0
        return AVIF_RESULT_REFORMAT_FAILED;
227
0
    }
228
229
0
    if (rgb->isFloat) {
230
0
        return AVIF_RESULT_NOT_IMPLEMENTED;
231
0
    }
232
233
0
    const avifBool hasAlpha = avifRGBFormatHasAlpha(rgb->format) && !rgb->ignoreAlpha;
234
0
    avifResult allocationResult = avifImageAllocatePlanes(image, hasAlpha ? AVIF_PLANES_ALL : AVIF_PLANES_YUV);
235
0
    if (allocationResult != AVIF_RESULT_OK) {
236
0
        return allocationResult;
237
0
    }
238
239
0
    avifAlphaMultiplyMode alphaMode = AVIF_ALPHA_MULTIPLY_MODE_NO_OP;
240
0
    if (hasAlpha) {
241
0
        if (!rgb->alphaPremultiplied && image->alphaPremultiplied) {
242
0
            alphaMode = AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY;
243
0
        } else if (rgb->alphaPremultiplied && !image->alphaPremultiplied) {
244
0
            alphaMode = AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY;
245
0
        }
246
0
    }
247
248
0
    const avifBool isGray = avifRGBFormatIsGray(rgb->format);
249
0
    avifBool converted = AVIF_FALSE;
250
251
    // Try converting with libsharpyuv.
252
0
    if (!isGray) {
253
0
        if ((rgb->chromaDownsampling == AVIF_CHROMA_DOWNSAMPLING_SHARP_YUV) && (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV420)) {
254
0
            const avifResult libSharpYUVResult = avifImageRGBToYUVLibSharpYUV(image, rgb, &state);
255
0
            if (libSharpYUVResult != AVIF_RESULT_OK) {
256
                // Return the error if sharpyuv was requested but failed for any reason, including libsharpyuv not being available.
257
0
                return libSharpYUVResult;
258
0
            }
259
0
            converted = AVIF_TRUE;
260
0
        }
261
262
0
        if (!converted && !rgb->avoidLibYUV && (alphaMode == AVIF_ALPHA_MULTIPLY_MODE_NO_OP)) {
263
0
            avifResult libyuvResult = avifImageRGBToYUVLibYUV(image, rgb);
264
0
            if (libyuvResult == AVIF_RESULT_OK) {
265
0
                converted = AVIF_TRUE;
266
0
            } else if (libyuvResult != AVIF_RESULT_NOT_IMPLEMENTED) {
267
0
                return libyuvResult;
268
0
            }
269
0
        }
270
0
    }
271
272
0
    if (!converted && !isGray) {
273
0
        const float kr = state.yuv.kr;
274
0
        const float kg = state.yuv.kg;
275
0
        const float kb = state.yuv.kb;
276
277
0
        struct YUVBlock yuvBlock[2][2];
278
0
        float rgbPixel[3];
279
0
        const uint32_t rgbPixelBytes = state.rgb.pixelBytes;
280
0
        const uint32_t offsetBytesR = state.rgb.offsetBytesR;
281
0
        const uint32_t offsetBytesG = state.rgb.offsetBytesG;
282
0
        const uint32_t offsetBytesB = state.rgb.offsetBytesB;
283
0
        const uint32_t offsetBytesA = state.rgb.offsetBytesA;
284
0
        const size_t rgbRowBytes = rgb->rowBytes;
285
0
        const float rgbMaxChannelF = state.rgb.maxChannelF;
286
0
        uint8_t * yPlane = image->yuvPlanes[AVIF_CHAN_Y];
287
0
        uint8_t * uPlane = image->yuvPlanes[AVIF_CHAN_U];
288
0
        uint8_t * vPlane = image->yuvPlanes[AVIF_CHAN_V];
289
0
        const size_t yRowBytes = image->yuvRowBytes[AVIF_CHAN_Y];
290
0
        const size_t uRowBytes = image->yuvRowBytes[AVIF_CHAN_U];
291
0
        const size_t vRowBytes = image->yuvRowBytes[AVIF_CHAN_V];
292
0
        for (uint32_t outerJ = 0; outerJ < image->height; outerJ += 2) {
293
0
            for (uint32_t outerI = 0; outerI < image->width; outerI += 2) {
294
0
                uint32_t blockW = 2, blockH = 2;
295
0
                if ((outerI + 1) >= image->width) {
296
0
                    blockW = 1;
297
0
                }
298
0
                if ((outerJ + 1) >= image->height) {
299
0
                    blockH = 1;
300
0
                }
301
302
                // Convert an entire 2x2 block to YUV, and populate any fully sampled channels as we go
303
0
                for (uint32_t bJ = 0; bJ < blockH; ++bJ) {
304
0
                    for (uint32_t bI = 0; bI < blockW; ++bI) {
305
0
                        const uint32_t i = outerI + bI;
306
0
                        const uint32_t j = outerJ + bJ;
307
308
                        // Unpack RGB into normalized float
309
0
                        if (state.rgb.channelBytes > 1) {
310
0
                            rgbPixel[0] = *((uint16_t *)(&rgb->pixels[offsetBytesR + (i * rgbPixelBytes) + (j * rgbRowBytes)])) /
311
0
                                          rgbMaxChannelF;
312
0
                            rgbPixel[1] = *((uint16_t *)(&rgb->pixels[offsetBytesG + (i * rgbPixelBytes) + (j * rgbRowBytes)])) /
313
0
                                          rgbMaxChannelF;
314
0
                            rgbPixel[2] = *((uint16_t *)(&rgb->pixels[offsetBytesB + (i * rgbPixelBytes) + (j * rgbRowBytes)])) /
315
0
                                          rgbMaxChannelF;
316
0
                        } else {
317
0
                            rgbPixel[0] = rgb->pixels[offsetBytesR + (i * rgbPixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
318
0
                            rgbPixel[1] = rgb->pixels[offsetBytesG + (i * rgbPixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
319
0
                            rgbPixel[2] = rgb->pixels[offsetBytesB + (i * rgbPixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
320
0
                        }
321
322
0
                        if (alphaMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP) {
323
0
                            float a;
324
0
                            if (state.rgb.channelBytes > 1) {
325
0
                                a = *((uint16_t *)(&rgb->pixels[offsetBytesA + (i * rgbPixelBytes) + (j * rgbRowBytes)])) / rgbMaxChannelF;
326
0
                            } else {
327
0
                                a = rgb->pixels[offsetBytesA + (i * rgbPixelBytes) + (j * rgbRowBytes)] / rgbMaxChannelF;
328
0
                            }
329
330
0
                            if (alphaMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
331
0
                                if (a == 0) {
332
0
                                    rgbPixel[0] = 0;
333
0
                                    rgbPixel[1] = 0;
334
0
                                    rgbPixel[2] = 0;
335
0
                                } else if (a < 1.0f) {
336
0
                                    rgbPixel[0] *= a;
337
0
                                    rgbPixel[1] *= a;
338
0
                                    rgbPixel[2] *= a;
339
0
                                }
340
0
                            } else {
341
                                // alphaMode == AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY
342
0
                                if (a == 0) {
343
0
                                    rgbPixel[0] = 0;
344
0
                                    rgbPixel[1] = 0;
345
0
                                    rgbPixel[2] = 0;
346
0
                                } else if (a < 1.0f) {
347
0
                                    rgbPixel[0] /= a;
348
0
                                    rgbPixel[1] /= a;
349
0
                                    rgbPixel[2] /= a;
350
0
                                    rgbPixel[0] = AVIF_MIN(rgbPixel[0], 1.0f);
351
0
                                    rgbPixel[1] = AVIF_MIN(rgbPixel[1], 1.0f);
352
0
                                    rgbPixel[2] = AVIF_MIN(rgbPixel[2], 1.0f);
353
0
                                }
354
0
                            }
355
0
                        }
356
357
                        // RGB -> YUV conversion
358
0
                        if (state.yuv.mode == AVIF_REFORMAT_MODE_IDENTITY) {
359
                            // Formulas 41,42,43 from https://www.itu.int/rec/T-REC-H.273-201612-S
360
0
                            yuvBlock[bI][bJ].y = rgbPixel[1]; // G
361
0
                            yuvBlock[bI][bJ].u = rgbPixel[2]; // B
362
0
                            yuvBlock[bI][bJ].v = rgbPixel[0]; // R
363
0
                        } else if (state.yuv.mode == AVIF_REFORMAT_MODE_YCGCO) {
364
                            // Formulas 44,45,46 from https://www.itu.int/rec/T-REC-H.273-201612-S
365
0
                            yuvBlock[bI][bJ].y = 0.5f * rgbPixel[1] + 0.25f * (rgbPixel[0] + rgbPixel[2]);
366
0
                            yuvBlock[bI][bJ].u = 0.5f * rgbPixel[1] - 0.25f * (rgbPixel[0] + rgbPixel[2]);
367
0
                            yuvBlock[bI][bJ].v = 0.5f * (rgbPixel[0] - rgbPixel[2]);
368
0
                        } else if (state.yuv.mode == AVIF_REFORMAT_MODE_YCGCO_RE || state.yuv.mode == AVIF_REFORMAT_MODE_YCGCO_RO) {
369
                            // Formulas 58,59,60,61 from https://www.itu.int/rec/T-REC-H.273-202407-P
370
0
                            const int R = (int)avifRoundf(AVIF_CLAMP(rgbPixel[0] * rgbMaxChannelF, 0.0f, rgbMaxChannelF));
371
0
                            const int G = (int)avifRoundf(AVIF_CLAMP(rgbPixel[1] * rgbMaxChannelF, 0.0f, rgbMaxChannelF));
372
0
                            const int B = (int)avifRoundf(AVIF_CLAMP(rgbPixel[2] * rgbMaxChannelF, 0.0f, rgbMaxChannelF));
373
0
                            const int Co = R - B;
374
0
                            const int t = B + (Co >> 1);
375
0
                            const int Cg = G - t;
376
0
                            yuvBlock[bI][bJ].y = (t + (Cg >> 1)) / state.yuv.rangeY;
377
0
                            yuvBlock[bI][bJ].u = Cg / state.yuv.rangeUV;
378
0
                            yuvBlock[bI][bJ].v = Co / state.yuv.rangeUV;
379
0
                        } else {
380
0
                            float Y = (kr * rgbPixel[0]) + (kg * rgbPixel[1]) + (kb * rgbPixel[2]);
381
0
                            yuvBlock[bI][bJ].y = Y;
382
0
                            yuvBlock[bI][bJ].u = (rgbPixel[2] - Y) / (2 * (1 - kb));
383
0
                            yuvBlock[bI][bJ].v = (rgbPixel[0] - Y) / (2 * (1 - kr));
384
0
                        }
385
386
0
                        if (state.yuv.channelBytes > 1) {
387
0
                            uint16_t * pY = (uint16_t *)&yPlane[(i * 2) + (j * yRowBytes)];
388
0
                            *pY = (uint16_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
389
0
                            if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
390
                                // YUV444, full chroma
391
0
                                uint16_t * pU = (uint16_t *)&uPlane[(i * 2) + (j * uRowBytes)];
392
0
                                *pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
393
0
                                uint16_t * pV = (uint16_t *)&vPlane[(i * 2) + (j * vRowBytes)];
394
0
                                *pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
395
0
                            }
396
0
                        } else {
397
0
                            yPlane[i + (j * yRowBytes)] = (uint8_t)avifYUVColorSpaceInfoYToUNorm(&state.yuv, yuvBlock[bI][bJ].y);
398
0
                            if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
399
                                // YUV444, full chroma
400
0
                                uPlane[i + (j * uRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].u);
401
0
                                vPlane[i + (j * vRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, yuvBlock[bI][bJ].v);
402
0
                            }
403
0
                        }
404
0
                    }
405
0
                }
406
407
                // Populate any subsampled channels with averages from the 2x2 block
408
0
                if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV400) {
409
                    // Do nothing on chroma planes.
410
0
                } else if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV420) {
411
                    // YUV420, average 4 samples (2x2)
412
413
0
                    float sumU = 0.0f;
414
0
                    float sumV = 0.0f;
415
0
                    for (uint32_t bJ = 0; bJ < blockH; ++bJ) {
416
0
                        for (uint32_t bI = 0; bI < blockW; ++bI) {
417
0
                            sumU += yuvBlock[bI][bJ].u;
418
0
                            sumV += yuvBlock[bI][bJ].v;
419
0
                        }
420
0
                    }
421
0
                    float totalSamples = (float)(blockW * blockH);
422
0
                    float avgU = sumU / totalSamples;
423
0
                    float avgV = sumV / totalSamples;
424
425
0
                    const int chromaShiftX = 1;
426
0
                    const int chromaShiftY = 1;
427
0
                    int uvI = outerI >> chromaShiftX;
428
0
                    int uvJ = outerJ >> chromaShiftY;
429
0
                    if (state.yuv.channelBytes > 1) {
430
0
                        uint16_t * pU = (uint16_t *)&uPlane[(uvI * 2) + (uvJ * uRowBytes)];
431
0
                        *pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
432
0
                        uint16_t * pV = (uint16_t *)&vPlane[(uvI * 2) + (uvJ * vRowBytes)];
433
0
                        *pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
434
0
                    } else {
435
0
                        uPlane[uvI + (uvJ * uRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
436
0
                        vPlane[uvI + (uvJ * vRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
437
0
                    }
438
0
                } else if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV422) {
439
                    // YUV422, average 2 samples (1x2), twice
440
441
0
                    for (uint32_t bJ = 0; bJ < blockH; ++bJ) {
442
0
                        float sumU = 0.0f;
443
0
                        float sumV = 0.0f;
444
0
                        for (uint32_t bI = 0; bI < blockW; ++bI) {
445
0
                            sumU += yuvBlock[bI][bJ].u;
446
0
                            sumV += yuvBlock[bI][bJ].v;
447
0
                        }
448
0
                        float totalSamples = (float)blockW;
449
0
                        float avgU = sumU / totalSamples;
450
0
                        float avgV = sumV / totalSamples;
451
452
0
                        const int chromaShiftX = 1;
453
0
                        int uvI = outerI >> chromaShiftX;
454
0
                        int uvJ = outerJ + bJ;
455
0
                        if (state.yuv.channelBytes > 1) {
456
0
                            uint16_t * pU = (uint16_t *)&uPlane[(uvI * 2) + (uvJ * uRowBytes)];
457
0
                            *pU = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
458
0
                            uint16_t * pV = (uint16_t *)&vPlane[(uvI * 2) + (uvJ * vRowBytes)];
459
0
                            *pV = (uint16_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
460
0
                        } else {
461
0
                            uPlane[uvI + (uvJ * uRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgU);
462
0
                            vPlane[uvI + (uvJ * vRowBytes)] = (uint8_t)avifYUVColorSpaceInfoUVToUNorm(&state.yuv, avgV);
463
0
                        }
464
0
                    }
465
0
                }
466
0
            }
467
0
        }
468
0
    } else if (!converted && isGray) {
469
0
        const uint32_t grayPixelBytes = state.rgb.pixelBytes;
470
0
        const uint32_t offsetBytesGray = state.rgb.offsetBytesGray;
471
0
        const uint32_t offsetBytesA = state.rgb.offsetBytesA;
472
0
        const uint32_t grayRowBytes = rgb->rowBytes;
473
0
        const float grayMaxChannelF = state.rgb.maxChannelF;
474
0
        uint8_t * yPlane = image->yuvPlanes[AVIF_CHAN_Y];
475
0
        const uint32_t yRowBytes = image->yuvRowBytes[AVIF_CHAN_Y];
476
0
        for (uint32_t j = 0; j < image->height; ++j) {
477
0
            for (uint32_t i = 0; i < image->width; ++i) {
478
0
                float g;
479
0
                if (state.rgb.channelBytes > 1) {
480
0
                    g = *(uint16_t *)&rgb->pixels[offsetBytesGray + i * grayPixelBytes + (j * grayRowBytes)];
481
0
                } else {
482
0
                    g = rgb->pixels[offsetBytesGray + i * grayPixelBytes + (j * grayRowBytes)];
483
0
                }
484
0
                if (alphaMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP) {
485
0
                    float a;
486
0
                    if (state.rgb.channelBytes > 1) {
487
0
                        a = *((uint16_t *)(&rgb->pixels[offsetBytesA + (i * grayPixelBytes) + (j * grayRowBytes)])) / grayMaxChannelF;
488
0
                    } else {
489
0
                        a = rgb->pixels[offsetBytesA + (i * grayPixelBytes) + (j * grayRowBytes)] / grayMaxChannelF;
490
0
                    }
491
492
0
                    if (alphaMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
493
0
                        if (a == 0) {
494
0
                            g = 0;
495
0
                        } else if (a < 1.0f) {
496
0
                            g *= a;
497
0
                        }
498
0
                    } else {
499
                        // alphaMode == AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY
500
0
                        if (a == 0) {
501
0
                            g = 0;
502
0
                        } else if (a < 1.0f) {
503
0
                            g /= a;
504
0
                            g = AVIF_MIN(g, 1.0f);
505
0
                        }
506
0
                    }
507
0
                }
508
0
                g = avifRoundf(AVIF_CLAMP(g, 0.0f, state.yuv.maxChannel));
509
0
                if (state.yuv.channelBytes > 1) {
510
0
                    uint16_t * pY = (uint16_t *)&yPlane[(i * 2) + j * yRowBytes];
511
0
                    *pY = (uint16_t)g;
512
0
                } else {
513
0
                    yPlane[i + (j * yRowBytes)] = (uint8_t)g;
514
0
                }
515
0
            }
516
0
        }
517
        // Set the chroma planes, if any, to the half value.
518
0
        avifPixelFormatInfo info;
519
0
        avifGetPixelFormatInfo(image->yuvFormat, &info);
520
0
        const uint32_t shiftedH = (uint32_t)(((uint64_t)image->height + info.chromaShiftY) >> info.chromaShiftY);
521
0
        const int half = 1 << (image->depth - 1);
522
0
        if (image->yuvPlanes[AVIF_CHAN_U]) {
523
0
            uint8_t * uPlane = image->yuvPlanes[AVIF_CHAN_U];
524
0
            const uint32_t uRowBytes = image->yuvRowBytes[AVIF_CHAN_U];
525
0
            if (state.yuv.channelBytes > 1) {
526
0
                avifMemset16(uPlane, half, shiftedH * uRowBytes / 2);
527
0
            } else {
528
0
                memset(uPlane, half, shiftedH * uRowBytes);
529
0
            }
530
0
        }
531
0
        if (image->yuvPlanes[AVIF_CHAN_V]) {
532
0
            uint8_t * vPlane = image->yuvPlanes[AVIF_CHAN_V];
533
0
            const uint32_t vRowBytes = image->yuvRowBytes[AVIF_CHAN_V];
534
0
            if (state.yuv.channelBytes > 1) {
535
0
                avifMemset16(vPlane, half, shiftedH * vRowBytes / 2);
536
0
            } else {
537
0
                memset(vPlane, half, shiftedH * vRowBytes);
538
0
            }
539
0
        }
540
0
    }
541
542
0
    if (image->alphaPlane && image->alphaRowBytes) {
543
0
        avifAlphaParams params;
544
545
0
        params.width = image->width;
546
0
        params.height = image->height;
547
0
        params.dstDepth = image->depth;
548
0
        params.dstPlane = image->alphaPlane;
549
0
        params.dstRowBytes = image->alphaRowBytes;
550
0
        params.dstOffsetBytes = 0;
551
0
        params.dstPixelBytes = state.yuv.channelBytes;
552
553
0
        if (avifRGBFormatHasAlpha(rgb->format) && !rgb->ignoreAlpha) {
554
0
            params.srcDepth = rgb->depth;
555
0
            params.srcPlane = rgb->pixels;
556
0
            params.srcRowBytes = rgb->rowBytes;
557
0
            params.srcOffsetBytes = state.rgb.offsetBytesA;
558
0
            params.srcPixelBytes = state.rgb.pixelBytes;
559
560
0
            avifReformatAlpha(&params);
561
0
        } else {
562
            // libyuv does not fill alpha when converting from RGB to YUV so
563
            // fill it regardless of the value of convertedWithLibYUV.
564
0
            avifFillAlpha(&params);
565
0
        }
566
0
    }
567
0
    return AVIF_RESULT_OK;
568
0
}
569
570
// Allocates and fills look-up tables for going from YUV limited/full unorm -> full range RGB FP32.
571
// Review this when implementing YCgCo limited range support.
572
static avifBool avifCreateYUVToRGBLookUpTables(float ** unormFloatTableY, float ** unormFloatTableUV, uint32_t depth, const avifReformatState * state)
573
6.22k
{
574
6.22k
    const size_t cpCount = (size_t)1 << depth;
575
576
6.22k
    assert(unormFloatTableY);
577
6.22k
    *unormFloatTableY = (float *)avifAlloc(cpCount * sizeof(float));
578
6.22k
    AVIF_CHECK(*unormFloatTableY);
579
7.71M
    for (uint32_t cp = 0; cp < cpCount; ++cp) {
580
7.70M
        (*unormFloatTableY)[cp] = ((float)cp - state->yuv.biasY) / state->yuv.rangeY;
581
7.70M
    }
582
583
6.22k
    if (unormFloatTableUV) {
584
4.73k
        if (state->yuv.mode == AVIF_REFORMAT_MODE_IDENTITY) {
585
            // Just reuse the luma table since the chroma values are the same.
586
346
            *unormFloatTableUV = *unormFloatTableY;
587
4.39k
        } else {
588
4.39k
            *unormFloatTableUV = (float *)avifAlloc(cpCount * sizeof(float));
589
4.39k
            if (!*unormFloatTableUV) {
590
0
                avifFree(*unormFloatTableY);
591
0
                *unormFloatTableY = NULL;
592
0
                return AVIF_FALSE;
593
0
            }
594
6.56M
            for (uint32_t cp = 0; cp < cpCount; ++cp) {
595
6.55M
                (*unormFloatTableUV)[cp] = ((float)cp - state->yuv.biasUV) / state->yuv.rangeUV;
596
6.55M
            }
597
4.39k
        }
598
4.73k
    }
599
6.22k
    return AVIF_TRUE;
600
6.22k
}
601
602
// Frees look-up tables allocated with avifCreateYUVToRGBLookUpTables().
603
static void avifFreeYUVToRGBLookUpTables(float ** unormFloatTableY, float ** unormFloatTableUV)
604
6.22k
{
605
6.22k
    if (unormFloatTableUV) {
606
4.73k
        if (*unormFloatTableUV != *unormFloatTableY) {
607
4.39k
            avifFree(*unormFloatTableUV);
608
4.39k
        }
609
4.73k
        *unormFloatTableUV = NULL;
610
4.73k
    }
611
612
6.22k
    avifFree(*unormFloatTableY);
613
6.22k
    *unormFloatTableY = NULL;
614
6.22k
}
615
616
0
#define RGB565(R, G, B) ((uint16_t)(((B) >> 3) | (((G) >> 2) << 5) | (((R) >> 3) << 11)))
617
618
static void avifStoreRGB8Pixel(avifRGBFormat format, uint8_t R, uint8_t G, uint8_t B, uint8_t * ptrR, uint8_t * ptrG, uint8_t * ptrB)
619
538M
{
620
538M
    if (format == AVIF_RGB_FORMAT_RGB_565) {
621
        // References for RGB565 color conversion:
622
        // * https://docs.microsoft.com/en-us/windows/win32/directshow/working-with-16-bit-rgb
623
        // * https://chromium.googlesource.com/libyuv/libyuv/+/9892d70c965678381d2a70a1c9002d1cf136ee78/source/row_common.cc#2362
624
0
        *(uint16_t *)ptrR = RGB565(R, G, B);
625
0
        return;
626
0
    }
627
538M
    *ptrR = R;
628
538M
    *ptrG = G;
629
538M
    *ptrB = B;
630
538M
}
631
632
static void avifGetRGB565(const uint8_t * ptrR, uint8_t * R, uint8_t * G, uint8_t * B)
633
0
{
634
    // References for RGB565 color conversion:
635
    // * https://docs.microsoft.com/en-us/windows/win32/directshow/working-with-16-bit-rgb
636
    // * https://chromium.googlesource.com/libyuv/libyuv/+/331c361581896292fb46c8c6905e41262b7ca95f/source/row_common.cc#185
637
0
    const uint16_t rgb656 = ((const uint16_t *)ptrR)[0];
638
0
    const uint16_t r5 = (rgb656 & 0xF800) >> 11;
639
0
    const uint16_t g6 = (rgb656 & 0x07E0) >> 5;
640
0
    const uint16_t b5 = (rgb656 & 0x001F);
641
0
    *R = (uint8_t)((r5 << 3) | (r5 >> 2));
642
0
    *G = (uint8_t)((g6 << 2) | (g6 >> 4));
643
0
    *B = (uint8_t)((b5 << 3) | (b5 >> 2));
644
0
}
645
646
// Note: This function handles alpha (un)multiply.
647
static avifResult avifImageYUVAnyToRGBAnySlow(const avifImage * image,
648
                                              avifRGBImage * rgb,
649
                                              const avifReformatState * state,
650
                                              avifAlphaMultiplyMode alphaMultiplyMode)
651
2.07k
{
652
    // Aliases for some state
653
2.07k
    const float kr = state->yuv.kr;
654
2.07k
    const float kg = state->yuv.kg;
655
2.07k
    const float kb = state->yuv.kb;
656
2.07k
    float * unormFloatTableY = NULL;
657
2.07k
    float * unormFloatTableUV = NULL;
658
2.07k
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
659
2.07k
    const uint32_t yuvChannelBytes = state->yuv.channelBytes;
660
2.07k
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
661
662
    // Aliases for plane data
663
2.07k
    const uint8_t * yPlane = image->yuvPlanes[AVIF_CHAN_Y];
664
2.07k
    const uint8_t * uPlane = image->yuvPlanes[AVIF_CHAN_U];
665
2.07k
    const uint8_t * vPlane = image->yuvPlanes[AVIF_CHAN_V];
666
2.07k
    const uint8_t * aPlane = image->alphaPlane;
667
2.07k
    const uint32_t yRowBytes = image->yuvRowBytes[AVIF_CHAN_Y];
668
2.07k
    const uint32_t uRowBytes = image->yuvRowBytes[AVIF_CHAN_U];
669
2.07k
    const uint32_t vRowBytes = image->yuvRowBytes[AVIF_CHAN_V];
670
2.07k
    const uint32_t aRowBytes = image->alphaRowBytes;
671
672
    // Various observations and limits
673
2.07k
    const avifBool yuvHasColor = (uPlane && vPlane && (image->yuvFormat != AVIF_PIXEL_FORMAT_YUV400));
674
2.07k
    const avifBool rgbHasColor = !avifRGBFormatIsGray(rgb->format);
675
2.07k
    const uint16_t yuvMaxChannel = (uint16_t)state->yuv.maxChannel;
676
2.07k
    const float rgbMaxChannelF = state->rgb.maxChannelF;
677
678
    // If toRGBAlphaMode is active (not no-op), assert that the alpha plane is present. The end of
679
    // the avifPrepareReformatState() function should ensure this, but this assert makes it clear
680
    // to clang's analyzer.
681
2.07k
    assert((alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_NO_OP) || aPlane);
682
683
2.45M
    for (uint32_t j = 0; j < image->height; ++j) {
684
        // uvJ is used only when yuvHasColor is true.
685
2.45M
        const uint32_t uvJ = yuvHasColor ? (j >> state->yuv.formatInfo.chromaShiftY) : 0;
686
2.45M
        const uint8_t * ptrY8 = &yPlane[j * yRowBytes];
687
2.45M
        const uint8_t * ptrU8 = uPlane ? &uPlane[(uvJ * uRowBytes)] : NULL;
688
2.45M
        const uint8_t * ptrV8 = vPlane ? &vPlane[(uvJ * vRowBytes)] : NULL;
689
2.45M
        const uint8_t * ptrA8 = aPlane ? &aPlane[j * aRowBytes] : NULL;
690
2.45M
        const uint16_t * ptrY16 = (const uint16_t *)ptrY8;
691
2.45M
        const uint16_t * ptrU16 = (const uint16_t *)ptrU8;
692
2.45M
        const uint16_t * ptrV16 = (const uint16_t *)ptrV8;
693
2.45M
        const uint16_t * ptrA16 = (const uint16_t *)ptrA8;
694
695
2.45M
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
696
2.45M
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
697
2.45M
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
698
2.45M
        uint8_t * ptrGray = &rgb->pixels[state->rgb.offsetBytesGray + (j * rgb->rowBytes)];
699
700
290M
        for (uint32_t i = 0; i < image->width; ++i) {
701
287M
            float Y, Cb = 0.5f, Cr = 0.5f;
702
703
            // Calculate Y
704
287M
            uint16_t unormY;
705
287M
            if (image->depth == 8) {
706
74.9M
                unormY = ptrY8[i];
707
212M
            } else {
708
                // clamp incoming data to protect against bad LUT lookups
709
212M
                unormY = AVIF_MIN(ptrY16[i], yuvMaxChannel);
710
212M
            }
711
287M
            Y = unormFloatTableY[unormY];
712
713
            // Calculate Cb and Cr
714
287M
            if (yuvHasColor) {
715
280M
                const uint32_t uvI = i >> state->yuv.formatInfo.chromaShiftX;
716
280M
                if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) {
717
31.8M
                    uint16_t unormU, unormV;
718
719
31.8M
                    if (image->depth == 8) {
720
11.3M
                        unormU = ptrU8[uvI];
721
11.3M
                        unormV = ptrV8[uvI];
722
20.4M
                    } else {
723
                        // clamp incoming data to protect against bad LUT lookups
724
20.4M
                        unormU = AVIF_MIN(ptrU16[uvI], yuvMaxChannel);
725
20.4M
                        unormV = AVIF_MIN(ptrV16[uvI], yuvMaxChannel);
726
20.4M
                    }
727
728
31.8M
                    Cb = unormFloatTableUV[unormU];
729
31.8M
                    Cr = unormFloatTableUV[unormV];
730
248M
                } else {
731
                    // Upsample to 444:
732
                    //
733
                    // *   *   *   *
734
                    //   A       B
735
                    // *   1   2   *
736
                    //
737
                    // *   3   4   *
738
                    //   C       D
739
                    // *   *   *   *
740
                    //
741
                    // When converting from YUV420 to RGB, for any given "high-resolution" RGB
742
                    // coordinate (1,2,3,4,*), there are up to four "low-resolution" UV samples
743
                    // (A,B,C,D) that are "nearest" to the pixel. For RGB pixel #1, A is the closest
744
                    // UV sample, B and C are "adjacent" to it on the same row and column, and D is
745
                    // the diagonal. For RGB pixel 3, C is the closest UV sample, A and D are
746
                    // adjacent, and B is the diagonal. Sometimes the adjacent pixel on the same row
747
                    // is to the left or right, and sometimes the adjacent pixel on the same column
748
                    // is up or down. For any edge or corner, there might only be only one or two
749
                    // samples nearby, so they'll be duplicated.
750
                    //
751
                    // The following code attempts to find all four nearest UV samples and put them
752
                    // in the following unormU and unormV grid as follows:
753
                    //
754
                    // unorm[0][0] = closest         ( weights: bilinear: 9/16, nearest: 1 )
755
                    // unorm[1][0] = adjacent col    ( weights: bilinear: 3/16, nearest: 0 )
756
                    // unorm[0][1] = adjacent row    ( weights: bilinear: 3/16, nearest: 0 )
757
                    // unorm[1][1] = diagonal        ( weights: bilinear: 1/16, nearest: 0 )
758
                    //
759
                    // It then weights them according to the requested upsampling set in avifRGBImage.
760
761
248M
                    uint16_t unormU[2][2], unormV[2][2];
762
763
                    // How many bytes to add to a uint8_t pointer index to get to the adjacent (lesser) sample in a given direction
764
248M
                    int uAdjCol, vAdjCol, uAdjRow, vAdjRow;
765
248M
                    if ((i == 0) || ((i == (image->width - 1)) && ((i % 2) != 0))) {
766
2.74M
                        uAdjCol = 0;
767
2.74M
                        vAdjCol = 0;
768
245M
                    } else {
769
245M
                        if ((i % 2) != 0) {
770
125M
                            uAdjCol = yuvChannelBytes;
771
125M
                            vAdjCol = yuvChannelBytes;
772
125M
                        } else {
773
120M
                            uAdjCol = -1 * yuvChannelBytes;
774
120M
                            vAdjCol = -1 * yuvChannelBytes;
775
120M
                        }
776
245M
                    }
777
778
                    // For YUV422, uvJ will always be a fresh value (always corresponds to j), so
779
                    // we'll simply duplicate the sample as if we were on the top or bottom row and
780
                    // it'll behave as plain old linear (1D) upsampling, which is all we want.
781
248M
                    if ((j == 0) || ((j == (image->height - 1)) && ((j % 2) != 0)) || (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV422)) {
782
19.5M
                        uAdjRow = 0;
783
19.5M
                        vAdjRow = 0;
784
229M
                    } else {
785
229M
                        if ((j % 2) != 0) {
786
114M
                            uAdjRow = (int)uRowBytes;
787
114M
                            vAdjRow = (int)vRowBytes;
788
114M
                        } else {
789
114M
                            uAdjRow = -1 * (int)uRowBytes;
790
114M
                            vAdjRow = -1 * (int)vRowBytes;
791
114M
                        }
792
229M
                    }
793
794
248M
                    if (image->depth == 8) {
795
57.9M
                        unormU[0][0] = uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes)];
796
57.9M
                        unormV[0][0] = vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes)];
797
57.9M
                        unormU[1][0] = uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjCol];
798
57.9M
                        unormV[1][0] = vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjCol];
799
57.9M
                        unormU[0][1] = uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjRow];
800
57.9M
                        unormV[0][1] = vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjRow];
801
57.9M
                        unormU[1][1] = uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjCol + uAdjRow];
802
57.9M
                        unormV[1][1] = vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjCol + vAdjRow];
803
190M
                    } else {
804
190M
                        unormU[0][0] = *((const uint16_t *)&uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes)]);
805
190M
                        unormV[0][0] = *((const uint16_t *)&vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes)]);
806
190M
                        unormU[1][0] = *((const uint16_t *)&uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjCol]);
807
190M
                        unormV[1][0] = *((const uint16_t *)&vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjCol]);
808
190M
                        unormU[0][1] = *((const uint16_t *)&uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjRow]);
809
190M
                        unormV[0][1] = *((const uint16_t *)&vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjRow]);
810
190M
                        unormU[1][1] = *((const uint16_t *)&uPlane[(uvJ * uRowBytes) + (uvI * yuvChannelBytes) + uAdjCol + uAdjRow]);
811
190M
                        unormV[1][1] = *((const uint16_t *)&vPlane[(uvJ * vRowBytes) + (uvI * yuvChannelBytes) + vAdjCol + vAdjRow]);
812
813
                        // clamp incoming data to protect against bad LUT lookups
814
569M
                        for (int bJ = 0; bJ < 2; ++bJ) {
815
1.13G
                            for (int bI = 0; bI < 2; ++bI) {
816
757M
                                unormU[bI][bJ] = AVIF_MIN(unormU[bI][bJ], yuvMaxChannel);
817
757M
                                unormV[bI][bJ] = AVIF_MIN(unormV[bI][bJ], yuvMaxChannel);
818
757M
                            }
819
378M
                        }
820
190M
                    }
821
822
248M
                    if ((rgb->chromaUpsampling == AVIF_CHROMA_UPSAMPLING_FASTEST) ||
823
248M
                        (rgb->chromaUpsampling == AVIF_CHROMA_UPSAMPLING_NEAREST)) {
824
                        // Nearest neighbor; ignore all UVs but the closest one
825
0
                        Cb = unormFloatTableUV[unormU[0][0]];
826
0
                        Cr = unormFloatTableUV[unormV[0][0]];
827
248M
                    } else {
828
                        // Bilinear filtering with weights
829
248M
                        Cb = (unormFloatTableUV[unormU[0][0]] * (9.0f / 16.0f)) + (unormFloatTableUV[unormU[1][0]] * (3.0f / 16.0f)) +
830
248M
                             (unormFloatTableUV[unormU[0][1]] * (3.0f / 16.0f)) + (unormFloatTableUV[unormU[1][1]] * (1.0f / 16.0f));
831
248M
                        Cr = (unormFloatTableUV[unormV[0][0]] * (9.0f / 16.0f)) + (unormFloatTableUV[unormV[1][0]] * (3.0f / 16.0f)) +
832
248M
                             (unormFloatTableUV[unormV[0][1]] * (3.0f / 16.0f)) + (unormFloatTableUV[unormV[1][1]] * (1.0f / 16.0f));
833
248M
                    }
834
248M
                }
835
280M
            }
836
837
287M
            float Rc = 0.0f, Gc = 0.0f, Bc = 0.0f, grayc = 0.0f;
838
287M
            if (rgbHasColor) {
839
287M
                float R, G, B;
840
287M
                if (yuvHasColor) {
841
276M
                    if (state->yuv.mode == AVIF_REFORMAT_MODE_IDENTITY) {
842
                        // Identity (GBR): Formulas 41,42,43 from
843
                        // https://www.itu.int/rec/T-REC-H.273-201612-S
844
3.56M
                        G = Y;
845
3.56M
                        B = Cb;
846
3.56M
                        R = Cr;
847
273M
                    } else if (state->yuv.mode == AVIF_REFORMAT_MODE_YCGCO) {
848
                        // YCgCo: Formulas 47,48,49,50 from
849
                        // https://www.itu.int/rec/T-REC-H.273-201612-S
850
28.0M
                        const float t = Y - Cb;
851
28.0M
                        G = Y + Cb;
852
28.0M
                        B = t - Cr;
853
28.0M
                        R = t + Cr;
854
247M
                    } else if ((state->yuv.mode == AVIF_REFORMAT_MODE_YCGCO_RE) || (state->yuv.mode == AVIF_REFORMAT_MODE_YCGCO_RO)) {
855
                        // YCgCoRe/YCgCoRo: Formulas 62,63,64,65 from
856
                        // https://www.itu.int/rec/T-REC-H.273-202407-P
857
0
                        const int YY = unormY;
858
0
                        const int Cg = (int)avifRoundf(Cb * yuvMaxChannel);
859
0
                        const int Co = (int)avifRoundf(Cr * yuvMaxChannel);
860
0
                        const int t = YY - (Cg >> 1);
861
0
                        G = (float)AVIF_CLAMP(t + Cg, 0, state->rgb.maxChannel);
862
0
                        B = (float)AVIF_CLAMP(t - (Co >> 1), 0, state->rgb.maxChannel);
863
0
                        R = (float)AVIF_CLAMP(B + Co, 0, state->rgb.maxChannel);
864
0
                        G /= rgbMaxChannelF;
865
0
                        B /= rgbMaxChannelF;
866
0
                        R /= rgbMaxChannelF;
867
245M
                    } else {
868
                        // Normal YUV
869
245M
                        R = Y + (2 * (1 - kr)) * Cr;
870
245M
                        B = Y + (2 * (1 - kb)) * Cb;
871
245M
                        G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
872
245M
                    }
873
276M
                } else {
874
                    // Monochrome: just populate all channels with luma (state->yuv.mode
875
                    // is irrelevant)
876
10.8M
                    R = Y;
877
10.8M
                    G = Y;
878
10.8M
                    B = Y;
879
10.8M
                }
880
287M
                Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
881
287M
                Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
882
287M
                Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
883
18.4E
            } else {
884
                // Monochrome: gray is luma
885
18.4E
                float gray = Y;
886
18.4E
                grayc = AVIF_CLAMP(gray, 0.0f, 1.0f);
887
18.4E
            }
888
889
287M
            if (alphaMultiplyMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP) {
890
                // Calculate A
891
0
                uint16_t unormA;
892
0
                if (image->depth == 8) {
893
0
                    unormA = ptrA8[i];
894
0
                } else {
895
0
                    unormA = AVIF_MIN(ptrA16[i], yuvMaxChannel);
896
0
                }
897
0
                const float A = unormA / ((float)state->yuv.maxChannel);
898
0
                const float Ac = AVIF_CLAMP(A, 0.0f, 1.0f);
899
900
0
                if (alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
901
0
                    if (rgbHasColor) {
902
0
                        if (Ac == 0.0f) {
903
0
                            Rc = 0.0f;
904
0
                            Gc = 0.0f;
905
0
                            Bc = 0.0f;
906
0
                        } else if (Ac < 1.0f) {
907
0
                            Rc *= Ac;
908
0
                            Gc *= Ac;
909
0
                            Bc *= Ac;
910
0
                        }
911
0
                    } else {
912
0
                        if (Ac == 0.0f) {
913
0
                            grayc = 0.0f;
914
0
                        } else if (Ac < 1.0f) {
915
0
                            grayc *= Ac;
916
0
                        }
917
0
                    }
918
0
                } else {
919
                    // alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY
920
0
                    if (rgbHasColor) {
921
0
                        if (Ac == 0.0f) {
922
0
                            Rc = 0.0f;
923
0
                            Gc = 0.0f;
924
0
                            Bc = 0.0f;
925
0
                        } else if (Ac < 1.0f) {
926
0
                            Rc /= Ac;
927
0
                            Gc /= Ac;
928
0
                            Bc /= Ac;
929
0
                            Rc = AVIF_MIN(Rc, 1.0f);
930
0
                            Gc = AVIF_MIN(Gc, 1.0f);
931
0
                            Bc = AVIF_MIN(Bc, 1.0f);
932
0
                        }
933
0
                    } else {
934
0
                        if (Ac == 0.0f) {
935
0
                            grayc = 0.0f;
936
0
                        } else if (Ac < 1.0f) {
937
0
                            grayc /= Ac;
938
0
                            grayc = AVIF_MIN(grayc, 1.0f);
939
0
                        }
940
0
                    }
941
0
                }
942
0
            }
943
944
287M
            if (rgbHasColor) {
945
287M
                if (rgb->depth == 8) {
946
74.6M
                    avifStoreRGB8Pixel(rgb->format,
947
74.6M
                                       (uint8_t)(0.5f + (Rc * rgbMaxChannelF)),
948
74.6M
                                       (uint8_t)(0.5f + (Gc * rgbMaxChannelF)),
949
74.6M
                                       (uint8_t)(0.5f + (Bc * rgbMaxChannelF)),
950
74.6M
                                       ptrR,
951
74.6M
                                       ptrG,
952
74.6M
                                       ptrB);
953
212M
                } else {
954
212M
                    *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
955
212M
                    *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
956
212M
                    *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));
957
212M
                }
958
287M
                ptrR += rgbPixelBytes;
959
287M
                ptrG += rgbPixelBytes;
960
287M
                ptrB += rgbPixelBytes;
961
287M
            } else {
962
120k
                if (rgb->depth == 8) {
963
0
                    *ptrGray = (uint8_t)(0.5f + (grayc * rgbMaxChannelF));
964
120k
                } else {
965
120k
                    *((uint16_t *)ptrGray) = (uint16_t)(0.5f + (grayc * rgbMaxChannelF));
966
120k
                }
967
120k
                ptrGray += rgbPixelBytes;
968
120k
            }
969
287M
        }
970
2.45M
    }
971
2.07k
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV);
972
2.07k
    return AVIF_RESULT_OK;
973
2.07k
}
974
975
static avifResult avifImageYUV16ToRGB16Color(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
976
1.41k
{
977
1.41k
    const float kr = state->yuv.kr;
978
1.41k
    const float kg = state->yuv.kg;
979
1.41k
    const float kb = state->yuv.kb;
980
1.41k
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
981
1.41k
    float * unormFloatTableY = NULL;
982
1.41k
    float * unormFloatTableUV = NULL;
983
1.41k
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
984
985
1.41k
    const uint16_t yuvMaxChannel = (uint16_t)state->yuv.maxChannel;
986
1.41k
    const float rgbMaxChannelF = state->rgb.maxChannelF;
987
583k
    for (uint32_t j = 0; j < image->height; ++j) {
988
582k
        const uint32_t uvJ = j >> state->yuv.formatInfo.chromaShiftY;
989
582k
        const uint16_t * const ptrY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
990
582k
        const uint16_t * const ptrU = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
991
582k
        const uint16_t * const ptrV = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
992
582k
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
993
582k
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
994
582k
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
995
996
100M
        for (uint32_t i = 0; i < image->width; ++i) {
997
99.9M
            uint32_t uvI = i >> state->yuv.formatInfo.chromaShiftX;
998
999
            // clamp incoming data to protect against bad LUT lookups
1000
99.9M
            const uint16_t unormY = AVIF_MIN(ptrY[i], yuvMaxChannel);
1001
99.9M
            const uint16_t unormU = AVIF_MIN(ptrU[uvI], yuvMaxChannel);
1002
99.9M
            const uint16_t unormV = AVIF_MIN(ptrV[uvI], yuvMaxChannel);
1003
1004
            // Convert unorm to float
1005
99.9M
            const float Y = unormFloatTableY[unormY];
1006
99.9M
            const float Cb = unormFloatTableUV[unormU];
1007
99.9M
            const float Cr = unormFloatTableUV[unormV];
1008
1009
99.9M
            const float R = Y + (2 * (1 - kr)) * Cr;
1010
99.9M
            const float B = Y + (2 * (1 - kb)) * Cb;
1011
99.9M
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1012
99.9M
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1013
99.9M
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1014
99.9M
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1015
1016
99.9M
            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
1017
99.9M
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
1018
99.9M
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));
1019
1020
99.9M
            ptrR += rgbPixelBytes;
1021
99.9M
            ptrG += rgbPixelBytes;
1022
99.9M
            ptrB += rgbPixelBytes;
1023
99.9M
        }
1024
582k
    }
1025
1.41k
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV);
1026
1.41k
    return AVIF_RESULT_OK;
1027
1.41k
}
1028
1029
static avifResult avifImageYUV16ToRGB16Mono(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
1030
666
{
1031
666
    const float kr = state->yuv.kr;
1032
666
    const float kg = state->yuv.kg;
1033
666
    const float kb = state->yuv.kb;
1034
666
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
1035
666
    float * unormFloatTableY = NULL;
1036
666
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, NULL, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
1037
1038
666
    const uint16_t maxChannel = (uint16_t)state->yuv.maxChannel;
1039
666
    const float maxChannelF = state->rgb.maxChannelF;
1040
234k
    for (uint32_t j = 0; j < image->height; ++j) {
1041
234k
        const uint16_t * const ptrY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1042
234k
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
1043
234k
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
1044
234k
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
1045
1046
381M
        for (uint32_t i = 0; i < image->width; ++i) {
1047
            // clamp incoming data to protect against bad LUT lookups
1048
381M
            const uint16_t unormY = AVIF_MIN(ptrY[i], maxChannel);
1049
1050
            // Convert unorm to float
1051
381M
            const float Y = unormFloatTableY[unormY];
1052
381M
            const float Cb = 0.0f;
1053
381M
            const float Cr = 0.0f;
1054
1055
381M
            const float R = Y + (2 * (1 - kr)) * Cr;
1056
381M
            const float B = Y + (2 * (1 - kb)) * Cb;
1057
381M
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1058
381M
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1059
381M
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1060
381M
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1061
1062
381M
            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * maxChannelF));
1063
381M
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * maxChannelF));
1064
381M
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * maxChannelF));
1065
1066
381M
            ptrR += rgbPixelBytes;
1067
381M
            ptrG += rgbPixelBytes;
1068
381M
            ptrB += rgbPixelBytes;
1069
381M
        }
1070
234k
    }
1071
666
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, NULL);
1072
666
    return AVIF_RESULT_OK;
1073
666
}
1074
1075
static avifResult avifImageYUV16ToRGB8Color(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
1076
0
{
1077
0
    const float kr = state->yuv.kr;
1078
0
    const float kg = state->yuv.kg;
1079
0
    const float kb = state->yuv.kb;
1080
0
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
1081
0
    float * unormFloatTableY = NULL;
1082
0
    float * unormFloatTableUV = NULL;
1083
0
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
1084
1085
0
    const uint16_t yuvMaxChannel = (uint16_t)state->yuv.maxChannel;
1086
0
    const float rgbMaxChannelF = state->rgb.maxChannelF;
1087
0
    for (uint32_t j = 0; j < image->height; ++j) {
1088
0
        const uint32_t uvJ = j >> state->yuv.formatInfo.chromaShiftY;
1089
0
        const uint16_t * const ptrY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1090
0
        const uint16_t * const ptrU = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_U][(uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
1091
0
        const uint16_t * const ptrV = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_V][(uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
1092
0
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
1093
0
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
1094
0
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
1095
1096
0
        for (uint32_t i = 0; i < image->width; ++i) {
1097
0
            uint32_t uvI = i >> state->yuv.formatInfo.chromaShiftX;
1098
1099
            // clamp incoming data to protect against bad LUT lookups
1100
0
            const uint16_t unormY = AVIF_MIN(ptrY[i], yuvMaxChannel);
1101
0
            const uint16_t unormU = AVIF_MIN(ptrU[uvI], yuvMaxChannel);
1102
0
            const uint16_t unormV = AVIF_MIN(ptrV[uvI], yuvMaxChannel);
1103
1104
            // Convert unorm to float
1105
0
            const float Y = unormFloatTableY[unormY];
1106
0
            const float Cb = unormFloatTableUV[unormU];
1107
0
            const float Cr = unormFloatTableUV[unormV];
1108
1109
0
            const float R = Y + (2 * (1 - kr)) * Cr;
1110
0
            const float B = Y + (2 * (1 - kb)) * Cb;
1111
0
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1112
0
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1113
0
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1114
0
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1115
1116
0
            avifStoreRGB8Pixel(rgb->format,
1117
0
                               (uint8_t)(0.5f + (Rc * rgbMaxChannelF)),
1118
0
                               (uint8_t)(0.5f + (Gc * rgbMaxChannelF)),
1119
0
                               (uint8_t)(0.5f + (Bc * rgbMaxChannelF)),
1120
0
                               ptrR,
1121
0
                               ptrG,
1122
0
                               ptrB);
1123
1124
0
            ptrR += rgbPixelBytes;
1125
0
            ptrG += rgbPixelBytes;
1126
0
            ptrB += rgbPixelBytes;
1127
0
        }
1128
0
    }
1129
0
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV);
1130
0
    return AVIF_RESULT_OK;
1131
0
}
1132
1133
static avifResult avifImageYUV16ToRGB8Mono(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
1134
0
{
1135
0
    const float kr = state->yuv.kr;
1136
0
    const float kg = state->yuv.kg;
1137
0
    const float kb = state->yuv.kb;
1138
0
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
1139
0
    float * unormFloatTableY = NULL;
1140
0
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, NULL, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
1141
1142
0
    const uint16_t yuvMaxChannel = (uint16_t)state->yuv.maxChannel;
1143
0
    const float rgbMaxChannelF = state->rgb.maxChannelF;
1144
0
    for (uint32_t j = 0; j < image->height; ++j) {
1145
0
        const uint16_t * const ptrY = (uint16_t *)&image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1146
0
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
1147
0
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
1148
0
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
1149
1150
0
        for (uint32_t i = 0; i < image->width; ++i) {
1151
            // clamp incoming data to protect against bad LUT lookups
1152
0
            const uint16_t unormY = AVIF_MIN(ptrY[i], yuvMaxChannel);
1153
1154
            // Convert unorm to float
1155
0
            const float Y = unormFloatTableY[unormY];
1156
0
            const float Cb = 0.0f;
1157
0
            const float Cr = 0.0f;
1158
1159
0
            const float R = Y + (2 * (1 - kr)) * Cr;
1160
0
            const float B = Y + (2 * (1 - kb)) * Cb;
1161
0
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1162
0
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1163
0
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1164
0
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1165
1166
0
            avifStoreRGB8Pixel(rgb->format,
1167
0
                               (uint8_t)(0.5f + (Rc * rgbMaxChannelF)),
1168
0
                               (uint8_t)(0.5f + (Gc * rgbMaxChannelF)),
1169
0
                               (uint8_t)(0.5f + (Bc * rgbMaxChannelF)),
1170
0
                               ptrR,
1171
0
                               ptrG,
1172
0
                               ptrB);
1173
1174
0
            ptrR += rgbPixelBytes;
1175
0
            ptrG += rgbPixelBytes;
1176
0
            ptrB += rgbPixelBytes;
1177
0
        }
1178
0
    }
1179
0
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, NULL);
1180
0
    return AVIF_RESULT_OK;
1181
0
}
1182
1183
static avifResult avifImageYUV8ToRGB16Color(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
1184
0
{
1185
0
    const float kr = state->yuv.kr;
1186
0
    const float kg = state->yuv.kg;
1187
0
    const float kb = state->yuv.kb;
1188
0
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
1189
0
    float * unormFloatTableY = NULL;
1190
0
    float * unormFloatTableUV = NULL;
1191
0
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
1192
1193
0
    const float rgbMaxChannelF = state->rgb.maxChannelF;
1194
0
    for (uint32_t j = 0; j < image->height; ++j) {
1195
0
        const uint32_t uvJ = j >> state->yuv.formatInfo.chromaShiftY;
1196
0
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1197
0
        const uint8_t * const ptrU = &image->yuvPlanes[AVIF_CHAN_U][(uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
1198
0
        const uint8_t * const ptrV = &image->yuvPlanes[AVIF_CHAN_V][(uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
1199
0
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
1200
0
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
1201
0
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
1202
1203
0
        for (uint32_t i = 0; i < image->width; ++i) {
1204
0
            uint32_t uvI = i >> state->yuv.formatInfo.chromaShiftX;
1205
1206
            // Convert unorm to float (no clamp necessary, the full uint8_t range is a legal lookup)
1207
0
            const float Y = unormFloatTableY[ptrY[i]];
1208
0
            const float Cb = unormFloatTableUV[ptrU[uvI]];
1209
0
            const float Cr = unormFloatTableUV[ptrV[uvI]];
1210
1211
0
            const float R = Y + (2 * (1 - kr)) * Cr;
1212
0
            const float B = Y + (2 * (1 - kb)) * Cb;
1213
0
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1214
0
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1215
0
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1216
0
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1217
1218
0
            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
1219
0
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
1220
0
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));
1221
1222
0
            ptrR += rgbPixelBytes;
1223
0
            ptrG += rgbPixelBytes;
1224
0
            ptrB += rgbPixelBytes;
1225
0
        }
1226
0
    }
1227
0
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV);
1228
0
    return AVIF_RESULT_OK;
1229
0
}
1230
1231
static avifResult avifImageYUV8ToRGB16Mono(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
1232
0
{
1233
0
    const float kr = state->yuv.kr;
1234
0
    const float kg = state->yuv.kg;
1235
0
    const float kb = state->yuv.kb;
1236
0
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
1237
0
    float * unormFloatTableY = NULL;
1238
0
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, NULL, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
1239
1240
0
    const float rgbMaxChannelF = state->rgb.maxChannelF;
1241
0
    for (uint32_t j = 0; j < image->height; ++j) {
1242
0
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1243
0
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
1244
0
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
1245
0
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
1246
1247
0
        for (uint32_t i = 0; i < image->width; ++i) {
1248
            // Convert unorm to float (no clamp necessary, the full uint8_t range is a legal lookup)
1249
0
            const float Y = unormFloatTableY[ptrY[i]];
1250
0
            const float Cb = 0.0f;
1251
0
            const float Cr = 0.0f;
1252
1253
0
            const float R = Y + (2 * (1 - kr)) * Cr;
1254
0
            const float B = Y + (2 * (1 - kb)) * Cb;
1255
0
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1256
0
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1257
0
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1258
0
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1259
1260
0
            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (Rc * rgbMaxChannelF));
1261
0
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (Gc * rgbMaxChannelF));
1262
0
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (Bc * rgbMaxChannelF));
1263
1264
0
            ptrR += rgbPixelBytes;
1265
0
            ptrG += rgbPixelBytes;
1266
0
            ptrB += rgbPixelBytes;
1267
0
        }
1268
0
    }
1269
0
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, NULL);
1270
0
    return AVIF_RESULT_OK;
1271
0
}
1272
1273
static avifResult avifImageIdentity8ToRGB8ColorFullRange(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
1274
4.58k
{
1275
4.58k
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
1276
191k
    for (uint32_t j = 0; j < image->height; ++j) {
1277
186k
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1278
186k
        const uint8_t * const ptrU = &image->yuvPlanes[AVIF_CHAN_U][(j * image->yuvRowBytes[AVIF_CHAN_U])];
1279
186k
        const uint8_t * const ptrV = &image->yuvPlanes[AVIF_CHAN_V][(j * image->yuvRowBytes[AVIF_CHAN_V])];
1280
186k
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
1281
186k
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
1282
186k
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
1283
1284
        // This is intentionally a per-row conditional instead of a per-pixel
1285
        // conditional. This makes the "else" path (much more common than the
1286
        // "if" path) much faster than having a per-pixel branch.
1287
186k
        if (rgb->format == AVIF_RGB_FORMAT_RGB_565) {
1288
0
            for (uint32_t i = 0; i < image->width; ++i) {
1289
0
                *(uint16_t *)ptrR = RGB565(ptrV[i], ptrY[i], ptrU[i]);
1290
0
                ptrR += rgbPixelBytes;
1291
0
            }
1292
186k
        } else {
1293
36.0M
            for (uint32_t i = 0; i < image->width; ++i) {
1294
35.8M
                *ptrR = ptrV[i];
1295
35.8M
                *ptrG = ptrY[i];
1296
35.8M
                *ptrB = ptrU[i];
1297
35.8M
                ptrR += rgbPixelBytes;
1298
35.8M
                ptrG += rgbPixelBytes;
1299
35.8M
                ptrB += rgbPixelBytes;
1300
35.8M
            }
1301
186k
        }
1302
186k
    }
1303
4.58k
    return AVIF_RESULT_OK;
1304
4.58k
}
1305
1306
static avifResult avifImageYUV8ToRGB8Color(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
1307
1.25k
{
1308
1.25k
    const float kr = state->yuv.kr;
1309
1.25k
    const float kg = state->yuv.kg;
1310
1.25k
    const float kb = state->yuv.kb;
1311
1.25k
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
1312
1.25k
    float * unormFloatTableY = NULL;
1313
1.25k
    float * unormFloatTableUV = NULL;
1314
1.25k
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
1315
1316
1.25k
    const float rgbMaxChannelF = state->rgb.maxChannelF;
1317
909k
    for (uint32_t j = 0; j < image->height; ++j) {
1318
908k
        const uint32_t uvJ = j >> state->yuv.formatInfo.chromaShiftY;
1319
908k
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1320
908k
        const uint8_t * const ptrU = &image->yuvPlanes[AVIF_CHAN_U][(uvJ * image->yuvRowBytes[AVIF_CHAN_U])];
1321
908k
        const uint8_t * const ptrV = &image->yuvPlanes[AVIF_CHAN_V][(uvJ * image->yuvRowBytes[AVIF_CHAN_V])];
1322
908k
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
1323
908k
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
1324
908k
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
1325
1326
100M
        for (uint32_t i = 0; i < image->width; ++i) {
1327
99.1M
            uint32_t uvI = i >> state->yuv.formatInfo.chromaShiftX;
1328
1329
            // Convert unorm to float (no clamp necessary, the full uint8_t range is a legal lookup)
1330
99.1M
            const float Y = unormFloatTableY[ptrY[i]];
1331
99.1M
            const float Cb = unormFloatTableUV[ptrU[uvI]];
1332
99.1M
            const float Cr = unormFloatTableUV[ptrV[uvI]];
1333
1334
99.1M
            const float R = Y + (2 * (1 - kr)) * Cr;
1335
99.1M
            const float B = Y + (2 * (1 - kb)) * Cb;
1336
99.1M
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1337
99.1M
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1338
99.1M
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1339
99.1M
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1340
1341
99.1M
            avifStoreRGB8Pixel(rgb->format,
1342
99.1M
                               (uint8_t)(0.5f + (Rc * rgbMaxChannelF)),
1343
99.1M
                               (uint8_t)(0.5f + (Gc * rgbMaxChannelF)),
1344
99.1M
                               (uint8_t)(0.5f + (Bc * rgbMaxChannelF)),
1345
99.1M
                               ptrR,
1346
99.1M
                               ptrG,
1347
99.1M
                               ptrB);
1348
1349
99.1M
            ptrR += rgbPixelBytes;
1350
99.1M
            ptrG += rgbPixelBytes;
1351
99.1M
            ptrB += rgbPixelBytes;
1352
99.1M
        }
1353
908k
    }
1354
1.25k
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, &unormFloatTableUV);
1355
1.25k
    return AVIF_RESULT_OK;
1356
1.25k
}
1357
1358
static avifResult avifImageYUV8ToRGB8Mono(const avifImage * image, avifRGBImage * rgb, avifReformatState * state)
1359
819
{
1360
819
    const float kr = state->yuv.kr;
1361
819
    const float kg = state->yuv.kg;
1362
819
    const float kb = state->yuv.kb;
1363
819
    const uint32_t rgbPixelBytes = state->rgb.pixelBytes;
1364
819
    float * unormFloatTableY = NULL;
1365
819
    AVIF_CHECKERR(avifCreateYUVToRGBLookUpTables(&unormFloatTableY, NULL, image->depth, state), AVIF_RESULT_OUT_OF_MEMORY);
1366
1367
819
    const float rgbMaxChannelF = state->rgb.maxChannelF;
1368
391k
    for (uint32_t j = 0; j < image->height; ++j) {
1369
390k
        const uint8_t * const ptrY = &image->yuvPlanes[AVIF_CHAN_Y][(j * image->yuvRowBytes[AVIF_CHAN_Y])];
1370
390k
        uint8_t * ptrR = &rgb->pixels[state->rgb.offsetBytesR + (j * rgb->rowBytes)];
1371
390k
        uint8_t * ptrG = &rgb->pixels[state->rgb.offsetBytesG + (j * rgb->rowBytes)];
1372
390k
        uint8_t * ptrB = &rgb->pixels[state->rgb.offsetBytesB + (j * rgb->rowBytes)];
1373
1374
364M
        for (uint32_t i = 0; i < image->width; ++i) {
1375
            // Convert unorm to float (no clamp necessary, the full uint8_t range is a legal lookup)
1376
364M
            const float Y = unormFloatTableY[ptrY[i]];
1377
364M
            const float Cb = 0.0f;
1378
364M
            const float Cr = 0.0f;
1379
1380
364M
            const float R = Y + (2 * (1 - kr)) * Cr;
1381
364M
            const float B = Y + (2 * (1 - kb)) * Cb;
1382
364M
            const float G = Y - ((2 * ((kr * (1 - kr) * Cr) + (kb * (1 - kb) * Cb))) / kg);
1383
364M
            const float Rc = AVIF_CLAMP(R, 0.0f, 1.0f);
1384
364M
            const float Gc = AVIF_CLAMP(G, 0.0f, 1.0f);
1385
364M
            const float Bc = AVIF_CLAMP(B, 0.0f, 1.0f);
1386
1387
364M
            avifStoreRGB8Pixel(rgb->format,
1388
364M
                               (uint8_t)(0.5f + (Rc * rgbMaxChannelF)),
1389
364M
                               (uint8_t)(0.5f + (Gc * rgbMaxChannelF)),
1390
364M
                               (uint8_t)(0.5f + (Bc * rgbMaxChannelF)),
1391
364M
                               ptrR,
1392
364M
                               ptrG,
1393
364M
                               ptrB);
1394
1395
364M
            ptrR += rgbPixelBytes;
1396
364M
            ptrG += rgbPixelBytes;
1397
364M
            ptrB += rgbPixelBytes;
1398
364M
        }
1399
390k
    }
1400
819
    avifFreeYUVToRGBLookUpTables(&unormFloatTableY, NULL);
1401
819
    return AVIF_RESULT_OK;
1402
819
}
1403
1404
// This constant comes from libyuv. For details, see here:
1405
// https://chromium.googlesource.com/libyuv/libyuv/+/2f87e9a7/source/row_common.cc#3537
1406
0
#define F16_MULTIPLIER 1.9259299444e-34f
1407
1408
typedef union avifF16
1409
{
1410
    float f;
1411
    uint32_t u32;
1412
} avifF16;
1413
1414
static avifResult avifRGBImageToF16(avifRGBImage * rgb)
1415
0
{
1416
0
    avifResult libyuvResult = AVIF_RESULT_NOT_IMPLEMENTED;
1417
0
    if (!rgb->avoidLibYUV) {
1418
0
        libyuvResult = avifRGBImageToF16LibYUV(rgb);
1419
0
    }
1420
0
    if (libyuvResult != AVIF_RESULT_NOT_IMPLEMENTED) {
1421
0
        return libyuvResult;
1422
0
    }
1423
0
    const uint32_t channelCount = avifRGBFormatChannelCount(rgb->format);
1424
0
    const float scale = 1.0f / ((1 << rgb->depth) - 1);
1425
0
    const float multiplier = F16_MULTIPLIER * scale;
1426
0
    uint16_t * pixelRowBase = (uint16_t *)rgb->pixels;
1427
0
    const uint32_t stride = rgb->rowBytes >> 1;
1428
0
    for (uint32_t j = 0; j < rgb->height; ++j) {
1429
0
        uint16_t * pixel = pixelRowBase;
1430
0
        for (uint32_t i = 0; i < rgb->width * channelCount; ++i, ++pixel) {
1431
0
            avifF16 f16;
1432
0
            f16.f = *pixel * multiplier;
1433
0
            *pixel = (uint16_t)(f16.u32 >> 13);
1434
0
        }
1435
0
        pixelRowBase += stride;
1436
0
    }
1437
0
    return AVIF_RESULT_OK;
1438
0
}
1439
1440
static avifResult avifImageYUVToRGBImpl(const avifImage * image, avifRGBImage * rgb, avifReformatState * state, avifAlphaMultiplyMode alphaMultiplyMode)
1441
10.8k
{
1442
10.8k
    avifBool convertedWithLibYUV = AVIF_FALSE;
1443
    // Reformat alpha, if user asks for it, or (un)multiply processing needs it.
1444
10.8k
    avifBool reformatAlpha = avifRGBFormatHasAlpha(rgb->format) &&
1445
10.8k
                             (!rgb->ignoreAlpha || (alphaMultiplyMode != AVIF_ALPHA_MULTIPLY_MODE_NO_OP));
1446
    // This value is used only when reformatAlpha is true.
1447
10.8k
    avifBool alphaReformattedWithLibYUV = AVIF_FALSE;
1448
10.8k
    if (!rgb->avoidLibYUV && ((alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_NO_OP) || avifRGBFormatHasAlpha(rgb->format))) {
1449
10.8k
        avifResult libyuvResult = avifImageYUVToRGBLibYUV(image, rgb, reformatAlpha, &alphaReformattedWithLibYUV);
1450
10.8k
        if (libyuvResult == AVIF_RESULT_OK) {
1451
0
            convertedWithLibYUV = AVIF_TRUE;
1452
10.8k
        } else {
1453
10.8k
            if (libyuvResult != AVIF_RESULT_NOT_IMPLEMENTED) {
1454
0
                return libyuvResult;
1455
0
            }
1456
10.8k
        }
1457
10.8k
    }
1458
1459
10.8k
    if (reformatAlpha && !alphaReformattedWithLibYUV) {
1460
10.8k
        avifAlphaParams params;
1461
1462
10.8k
        params.width = rgb->width;
1463
10.8k
        params.height = rgb->height;
1464
10.8k
        params.dstDepth = rgb->depth;
1465
10.8k
        params.dstPlane = rgb->pixels;
1466
10.8k
        params.dstRowBytes = rgb->rowBytes;
1467
10.8k
        params.dstOffsetBytes = state->rgb.offsetBytesA;
1468
10.8k
        params.dstPixelBytes = state->rgb.pixelBytes;
1469
1470
10.8k
        if (image->alphaPlane && image->alphaRowBytes) {
1471
188
            params.srcDepth = image->depth;
1472
188
            params.srcPlane = image->alphaPlane;
1473
188
            params.srcRowBytes = image->alphaRowBytes;
1474
188
            params.srcOffsetBytes = 0;
1475
188
            params.srcPixelBytes = state->yuv.channelBytes;
1476
1477
188
            avifReformatAlpha(&params);
1478
10.6k
        } else {
1479
10.6k
            avifFillAlpha(&params);
1480
10.6k
        }
1481
10.8k
    }
1482
1483
10.8k
    if (!convertedWithLibYUV) {
1484
        // libyuv is either unavailable or unable to perform the specific conversion required here.
1485
        // Look over the available built-in "fast" routines for YUV->RGB conversion and see if one
1486
        // fits the current combination, or as a last resort, call avifImageYUVAnyToRGBAnySlow(),
1487
        // which handles every possibly YUV->RGB combination, but very slowly (in comparison).
1488
1489
10.8k
        avifResult convertResult = AVIF_RESULT_NOT_IMPLEMENTED;
1490
1491
10.8k
        const avifBool hasColor =
1492
10.8k
            (image->yuvRowBytes[AVIF_CHAN_U] && image->yuvRowBytes[AVIF_CHAN_V] && (image->yuvFormat != AVIF_PIXEL_FORMAT_YUV400));
1493
1494
10.8k
        if (!avifRGBFormatIsGray(rgb->format) &&
1495
10.8k
            (!hasColor || (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) ||
1496
10.8k
             ((rgb->chromaUpsampling == AVIF_CHROMA_UPSAMPLING_FASTEST) || (rgb->chromaUpsampling == AVIF_CHROMA_UPSAMPLING_NEAREST))) &&
1497
10.8k
            (alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_NO_OP || avifRGBFormatHasAlpha(rgb->format))) {
1498
            // Explanations on the above conditional:
1499
            // * None of these fast paths currently support bilinear upsampling, so avoid all of them
1500
            //   unless the YUV data isn't subsampled or they explicitly requested AVIF_CHROMA_UPSAMPLING_NEAREST.
1501
            // * None of these fast paths currently handle alpha (un)multiply, so avoid all of them
1502
            //   if we can't do alpha (un)multiply as a separated post step (destination format doesn't have alpha).
1503
1504
9.67k
            if (state->yuv.mode == AVIF_REFORMAT_MODE_IDENTITY) {
1505
4.92k
                if ((image->depth == 8) && (rgb->depth == 8) && (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV444) &&
1506
4.92k
                    (image->yuvRange == AVIF_RANGE_FULL)) {
1507
4.58k
                    convertResult = avifImageIdentity8ToRGB8ColorFullRange(image, rgb, state);
1508
4.58k
                }
1509
1510
                // TODO: Add more fast paths for identity
1511
4.92k
            } else if (state->yuv.mode == AVIF_REFORMAT_MODE_YUV_COEFFICIENTS) {
1512
4.14k
                if (image->depth > 8) {
1513
                    // yuv:u16
1514
1515
2.07k
                    if (rgb->depth > 8) {
1516
                        // yuv:u16, rgb:u16
1517
1518
2.07k
                        if (hasColor) {
1519
1.41k
                            convertResult = avifImageYUV16ToRGB16Color(image, rgb, state);
1520
1.41k
                        } else {
1521
666
                            convertResult = avifImageYUV16ToRGB16Mono(image, rgb, state);
1522
666
                        }
1523
2.07k
                    } else {
1524
                        // yuv:u16, rgb:u8
1525
1526
0
                        if (hasColor) {
1527
0
                            convertResult = avifImageYUV16ToRGB8Color(image, rgb, state);
1528
0
                        } else {
1529
0
                            convertResult = avifImageYUV16ToRGB8Mono(image, rgb, state);
1530
0
                        }
1531
0
                    }
1532
2.07k
                } else {
1533
                    // yuv:u8
1534
1535
2.06k
                    if (rgb->depth > 8) {
1536
                        // yuv:u8, rgb:u16
1537
1538
0
                        if (hasColor) {
1539
0
                            convertResult = avifImageYUV8ToRGB16Color(image, rgb, state);
1540
0
                        } else {
1541
0
                            convertResult = avifImageYUV8ToRGB16Mono(image, rgb, state);
1542
0
                        }
1543
2.06k
                    } else {
1544
                        // yuv:u8, rgb:u8
1545
1546
2.06k
                        if (hasColor) {
1547
1.25k
                            convertResult = avifImageYUV8ToRGB8Color(image, rgb, state);
1548
1.25k
                        } else {
1549
819
                            convertResult = avifImageYUV8ToRGB8Mono(image, rgb, state);
1550
819
                        }
1551
2.06k
                    }
1552
2.06k
                }
1553
4.14k
            }
1554
9.67k
        }
1555
1556
10.8k
        if (convertResult == AVIF_RESULT_NOT_IMPLEMENTED) {
1557
            // If we get here, there is no fast path for this combination. Time to be slow!
1558
2.07k
            convertResult = avifImageYUVAnyToRGBAnySlow(image, rgb, state, alphaMultiplyMode);
1559
1560
            // The slow path also handles alpha (un)multiply, so forget the operation here.
1561
2.07k
            alphaMultiplyMode = AVIF_ALPHA_MULTIPLY_MODE_NO_OP;
1562
2.07k
        }
1563
1564
10.8k
        if (convertResult != AVIF_RESULT_OK) {
1565
0
            return convertResult;
1566
0
        }
1567
10.8k
    }
1568
1569
    // Process alpha premultiplication, if necessary
1570
10.8k
    if (alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY) {
1571
0
        avifResult result = avifRGBImagePremultiplyAlpha(rgb);
1572
0
        if (result != AVIF_RESULT_OK) {
1573
0
            return result;
1574
0
        }
1575
10.8k
    } else if (alphaMultiplyMode == AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY) {
1576
0
        avifResult result = avifRGBImageUnpremultiplyAlpha(rgb);
1577
0
        if (result != AVIF_RESULT_OK) {
1578
0
            return result;
1579
0
        }
1580
0
    }
1581
1582
    // Convert pixels to half floats (F16), if necessary.
1583
10.8k
    if (rgb->isFloat) {
1584
0
        return avifRGBImageToF16(rgb);
1585
0
    }
1586
1587
10.8k
    return AVIF_RESULT_OK;
1588
10.8k
}
1589
1590
typedef struct
1591
{
1592
#if defined(_WIN32)
1593
    HANDLE thread;
1594
#else
1595
    pthread_t thread;
1596
#endif
1597
    avifImage image;
1598
    avifRGBImage rgb;
1599
    avifReformatState * state;
1600
    avifAlphaMultiplyMode alphaMultiplyMode;
1601
    avifResult result;
1602
    avifBool threadCreated;
1603
} YUVToRGBThreadData;
1604
1605
#if defined(_WIN32)
1606
static unsigned int __stdcall avifImageYUVToRGBThreadWorker(void * arg)
1607
#else
1608
static void * avifImageYUVToRGBThreadWorker(void * arg)
1609
#endif
1610
10.1k
{
1611
10.1k
    YUVToRGBThreadData * data = (YUVToRGBThreadData *)arg;
1612
10.1k
    data->result = avifImageYUVToRGBImpl(&data->image, &data->rgb, data->state, data->alphaMultiplyMode);
1613
#if defined(_WIN32)
1614
    return 0;
1615
#else
1616
10.1k
    return NULL;
1617
10.1k
#endif
1618
10.1k
}
1619
1620
static avifBool avifCreateYUVToRGBThread(YUVToRGBThreadData * tdata)
1621
8.88k
{
1622
#if defined(_WIN32)
1623
    tdata->thread = (HANDLE)_beginthreadex(/*security=*/NULL,
1624
                                           /*stack_size=*/0,
1625
                                           &avifImageYUVToRGBThreadWorker,
1626
                                           tdata,
1627
                                           /*initflag=*/0,
1628
                                           /*thrdaddr=*/NULL);
1629
    return tdata->thread != NULL;
1630
#else
1631
    // TODO: Set the thread name for ease of debugging.
1632
8.88k
    return pthread_create(&tdata->thread, NULL, &avifImageYUVToRGBThreadWorker, tdata) == 0;
1633
8.88k
#endif
1634
8.88k
}
1635
1636
static avifBool avifJoinYUVToRGBThread(YUVToRGBThreadData * tdata)
1637
8.88k
{
1638
#if defined(_WIN32)
1639
    return WaitForSingleObject(tdata->thread, INFINITE) == WAIT_OBJECT_0 && CloseHandle(tdata->thread) != 0;
1640
#else
1641
8.88k
    return pthread_join(tdata->thread, NULL) == 0;
1642
8.88k
#endif
1643
8.88k
}
1644
1645
avifResult avifImageYUVToRGB(const avifImage * image, avifRGBImage * rgb)
1646
2.14k
{
1647
    // It is okay for rgb->maxThreads to be equal to zero in order to allow clients to zero initialize the avifRGBImage struct
1648
    // with memset.
1649
2.14k
    if (!image->yuvPlanes[AVIF_CHAN_Y] || rgb->maxThreads < 0) {
1650
0
        return AVIF_RESULT_REFORMAT_FAILED;
1651
0
    }
1652
1653
2.14k
    avifReformatState state;
1654
2.14k
    if (!avifPrepareReformatState(image, rgb, &state)) {
1655
219
        return AVIF_RESULT_REFORMAT_FAILED;
1656
219
    }
1657
1658
1.92k
    avifAlphaMultiplyMode alphaMultiplyMode = AVIF_ALPHA_MULTIPLY_MODE_NO_OP;
1659
1.92k
    if (image->alphaPlane) {
1660
34
        if (!avifRGBFormatHasAlpha(rgb->format) || rgb->ignoreAlpha) {
1661
            // if we are converting some image with alpha into a format without alpha, we should do 'premultiply alpha' before
1662
            // discarding alpha plane. This has the same effect of rendering this image on a black background, which makes sense.
1663
0
            if (!image->alphaPremultiplied) {
1664
0
                alphaMultiplyMode = AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY;
1665
0
            }
1666
34
        } else {
1667
34
            if (!image->alphaPremultiplied && rgb->alphaPremultiplied) {
1668
0
                alphaMultiplyMode = AVIF_ALPHA_MULTIPLY_MODE_MULTIPLY;
1669
34
            } else if (image->alphaPremultiplied && !rgb->alphaPremultiplied) {
1670
0
                alphaMultiplyMode = AVIF_ALPHA_MULTIPLY_MODE_UNMULTIPLY;
1671
0
            }
1672
34
        }
1673
34
    }
1674
1675
    // In practice, we rarely need more than 8 threads for YUV to RGB conversion.
1676
1.92k
    uint32_t jobs = AVIF_CLAMP(rgb->maxThreads, 1, 8);
1677
1678
    // When yuv format is 420 and chromaUpsampling could be BILINEAR, there is a dependency across the horizontal borders of each
1679
    // job. So we disallow multithreading in that case.
1680
1.92k
    if (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV420 && (rgb->chromaUpsampling == AVIF_CHROMA_UPSAMPLING_AUTOMATIC ||
1681
246
                                                         rgb->chromaUpsampling == AVIF_CHROMA_UPSAMPLING_BEST_QUALITY ||
1682
246
                                                         rgb->chromaUpsampling == AVIF_CHROMA_UPSAMPLING_BILINEAR)) {
1683
246
        jobs = 1;
1684
246
    }
1685
1686
    // Each thread worker needs at least 2 Y rows (to account for potential U/V subsampling).
1687
1.92k
    if (jobs == 1 || (image->height / 2) < jobs) {
1688
653
        return avifImageYUVToRGBImpl(image, rgb, &state, alphaMultiplyMode);
1689
653
    }
1690
1691
1.27k
    const size_t byteCount = sizeof(YUVToRGBThreadData) * jobs;
1692
1.27k
    YUVToRGBThreadData * threadData = (YUVToRGBThreadData *)avifAlloc(byteCount);
1693
1.27k
    if (!threadData) {
1694
0
        return AVIF_RESULT_OUT_OF_MEMORY;
1695
0
    }
1696
1.27k
    memset(threadData, 0, byteCount);
1697
1.27k
    uint32_t rowsPerJob = image->height / jobs;
1698
1.27k
    if (rowsPerJob % 2) {
1699
134
        ++rowsPerJob;
1700
134
        jobs = (image->height + rowsPerJob - 1) / rowsPerJob; // ceil
1701
134
    }
1702
1.27k
    const uint32_t rowsForLastJob = image->height - rowsPerJob * (jobs - 1);
1703
1.27k
    uint32_t startRow = 0;
1704
1.27k
    uint32_t i;
1705
11.4k
    for (i = 0; i < jobs; ++i, startRow += rowsPerJob) {
1706
10.1k
        YUVToRGBThreadData * tdata = &threadData[i];
1707
10.1k
        const avifCropRect rect = { .x = 0, .y = startRow, .width = image->width, .height = (i == jobs - 1) ? rowsForLastJob : rowsPerJob };
1708
10.1k
        if (avifImageSetViewRect(&tdata->image, image, &rect) != AVIF_RESULT_OK) {
1709
0
            tdata->result = AVIF_RESULT_REFORMAT_FAILED;
1710
0
            break;
1711
0
        }
1712
1713
10.1k
        tdata->rgb = *rgb;
1714
10.1k
        tdata->rgb.pixels += startRow * (size_t)rgb->rowBytes;
1715
10.1k
        tdata->rgb.height = tdata->image.height;
1716
1717
10.1k
        tdata->state = &state;
1718
10.1k
        tdata->alphaMultiplyMode = alphaMultiplyMode;
1719
1720
10.1k
        if (i > 0) {
1721
8.88k
            tdata->threadCreated = avifCreateYUVToRGBThread(tdata);
1722
8.88k
            if (!tdata->threadCreated) {
1723
0
                tdata->result = AVIF_RESULT_REFORMAT_FAILED;
1724
0
                break;
1725
0
            }
1726
8.88k
        }
1727
10.1k
    }
1728
    // If above loop ran successfully, run the first job in the current thread.
1729
1.27k
    if (i == jobs) {
1730
1.27k
        avifImageYUVToRGBThreadWorker(&threadData[0]);
1731
1.27k
    }
1732
1.27k
    avifResult result = AVIF_RESULT_OK;
1733
11.4k
    for (i = 0; i < jobs; ++i) {
1734
10.1k
        YUVToRGBThreadData * tdata = &threadData[i];
1735
10.1k
        if (tdata->threadCreated && !avifJoinYUVToRGBThread(tdata)) {
1736
0
            result = AVIF_RESULT_REFORMAT_FAILED;
1737
0
        }
1738
10.1k
        if (tdata->result != AVIF_RESULT_OK) {
1739
0
            result = tdata->result;
1740
0
        }
1741
10.1k
    }
1742
1.27k
    avifFree(threadData);
1743
1.27k
    return result;
1744
1.27k
}
1745
1746
// Limited -> Full
1747
// Plan: subtract limited offset, then multiply by ratio of FULLSIZE/LIMITEDSIZE (rounding), then clamp.
1748
// RATIO = (FULLY - 0) / (MAXLIMITEDY - MINLIMITEDY)
1749
// -----------------------------------------
1750
// ( ( (v - MINLIMITEDY)                    | subtract limited offset
1751
//     * FULLY                              | multiply numerator of ratio
1752
//   ) + ((MAXLIMITEDY - MINLIMITEDY) / 2)  | add 0.5 (half of denominator) to round
1753
// ) / (MAXLIMITEDY - MINLIMITEDY)          | divide by denominator of ratio
1754
// AVIF_CLAMP(v, 0, FULLY)                  | clamp to full range
1755
// -----------------------------------------
1756
#define LIMITED_TO_FULL(MINLIMITEDY, MAXLIMITEDY, FULLY)                                                 \
1757
31.4k
    v = (((v - MINLIMITEDY) * FULLY) + ((MAXLIMITEDY - MINLIMITEDY) / 2)) / (MAXLIMITEDY - MINLIMITEDY); \
1758
31.4k
    v = AVIF_CLAMP(v, 0, FULLY)
1759
1760
// Full -> Limited
1761
// Plan: multiply by ratio of LIMITEDSIZE/FULLSIZE (rounding), then add limited offset, then clamp.
1762
// RATIO = (MAXLIMITEDY - MINLIMITEDY) / (FULLY - 0)
1763
// -----------------------------------------
1764
// ( ( (v * (MAXLIMITEDY - MINLIMITEDY))    | multiply numerator of ratio
1765
//     + (FULLY / 2)                        | add 0.5 (half of denominator) to round
1766
//   ) / FULLY                              | divide by denominator of ratio
1767
// ) + MINLIMITEDY                          | add limited offset
1768
//  AVIF_CLAMP(v, MINLIMITEDY, MAXLIMITEDY) | clamp to limited range
1769
// -----------------------------------------
1770
#define FULL_TO_LIMITED(MINLIMITEDY, MAXLIMITEDY, FULLY)                           \
1771
0
    v = (((v * (MAXLIMITEDY - MINLIMITEDY)) + (FULLY / 2)) / FULLY) + MINLIMITEDY; \
1772
0
    v = AVIF_CLAMP(v, MINLIMITEDY, MAXLIMITEDY)
1773
1774
int avifLimitedToFullY(uint32_t depth, int v)
1775
31.4k
{
1776
31.4k
    switch (depth) {
1777
22.5k
        case 8:
1778
22.5k
            LIMITED_TO_FULL(16, 235, 255);
1779
22.5k
            break;
1780
8.86k
        case 10:
1781
8.86k
            LIMITED_TO_FULL(64, 940, 1023);
1782
8.86k
            break;
1783
0
        case 12:
1784
0
            LIMITED_TO_FULL(256, 3760, 4095);
1785
0
            break;
1786
31.4k
    }
1787
31.4k
    return v;
1788
31.4k
}
1789
1790
int avifLimitedToFullUV(uint32_t depth, int v)
1791
0
{
1792
0
    switch (depth) {
1793
0
        case 8:
1794
0
            LIMITED_TO_FULL(16, 240, 255);
1795
0
            break;
1796
0
        case 10:
1797
0
            LIMITED_TO_FULL(64, 960, 1023);
1798
0
            break;
1799
0
        case 12:
1800
0
            LIMITED_TO_FULL(256, 3840, 4095);
1801
0
            break;
1802
0
    }
1803
0
    return v;
1804
0
}
1805
1806
int avifFullToLimitedY(uint32_t depth, int v)
1807
0
{
1808
0
    switch (depth) {
1809
0
        case 8:
1810
0
            FULL_TO_LIMITED(16, 235, 255);
1811
0
            break;
1812
0
        case 10:
1813
0
            FULL_TO_LIMITED(64, 940, 1023);
1814
0
            break;
1815
0
        case 12:
1816
0
            FULL_TO_LIMITED(256, 3760, 4095);
1817
0
            break;
1818
0
    }
1819
0
    return v;
1820
0
}
1821
1822
int avifFullToLimitedUV(uint32_t depth, int v)
1823
0
{
1824
0
    switch (depth) {
1825
0
        case 8:
1826
0
            FULL_TO_LIMITED(16, 240, 255);
1827
0
            break;
1828
0
        case 10:
1829
0
            FULL_TO_LIMITED(64, 960, 1023);
1830
0
            break;
1831
0
        case 12:
1832
0
            FULL_TO_LIMITED(256, 3840, 4095);
1833
0
            break;
1834
0
    }
1835
0
    return v;
1836
0
}
1837
1838
static inline uint16_t avifFloatToF16(float v)
1839
0
{
1840
0
    avifF16 f16;
1841
0
    f16.f = v * F16_MULTIPLIER;
1842
0
    return (uint16_t)(f16.u32 >> 13);
1843
0
}
1844
1845
static inline float avifF16ToFloat(uint16_t v)
1846
0
{
1847
0
    avifF16 f16;
1848
0
    f16.u32 = v << 13;
1849
0
    return f16.f / F16_MULTIPLIER;
1850
0
}
1851
1852
void avifGetRGBAPixel(const avifRGBImage * src, uint32_t x, uint32_t y, const avifRGBColorSpaceInfo * info, float rgbaPixel[4])
1853
0
{
1854
0
    assert(src != NULL);
1855
0
    assert(!src->isFloat || src->depth == 16);
1856
0
    assert(src->format != AVIF_RGB_FORMAT_RGB_565 || src->depth == 8);
1857
1858
0
    const uint8_t * const srcPixel = &src->pixels[y * src->rowBytes + x * info->pixelBytes];
1859
0
    if (info->channelBytes > 1) {
1860
0
        uint16_t r = *((const uint16_t *)(&srcPixel[info->offsetBytesR]));
1861
0
        uint16_t g = *((const uint16_t *)(&srcPixel[info->offsetBytesG]));
1862
0
        uint16_t b = *((const uint16_t *)(&srcPixel[info->offsetBytesB]));
1863
0
        uint16_t a = avifRGBFormatHasAlpha(src->format) ? *((const uint16_t *)(&srcPixel[info->offsetBytesA])) : (uint16_t)info->maxChannel;
1864
0
        if (src->isFloat) {
1865
0
            rgbaPixel[0] = avifF16ToFloat(r);
1866
0
            rgbaPixel[1] = avifF16ToFloat(g);
1867
0
            rgbaPixel[2] = avifF16ToFloat(b);
1868
0
            rgbaPixel[3] = avifRGBFormatHasAlpha(src->format) ? avifF16ToFloat(a) : 1.0f;
1869
0
        } else {
1870
0
            rgbaPixel[0] = r / info->maxChannelF;
1871
0
            rgbaPixel[1] = g / info->maxChannelF;
1872
0
            rgbaPixel[2] = b / info->maxChannelF;
1873
0
            rgbaPixel[3] = a / info->maxChannelF;
1874
0
        }
1875
0
    } else {
1876
0
        if (src->format == AVIF_RGB_FORMAT_RGB_565) {
1877
0
            uint8_t r, g, b;
1878
0
            avifGetRGB565(&srcPixel[info->offsetBytesR], &r, &g, &b);
1879
0
            rgbaPixel[0] = r / info->maxChannelF;
1880
0
            rgbaPixel[1] = g / info->maxChannelF;
1881
0
            rgbaPixel[2] = b / info->maxChannelF;
1882
0
            rgbaPixel[3] = 1.0f;
1883
0
        } else {
1884
0
            rgbaPixel[0] = srcPixel[info->offsetBytesR] / info->maxChannelF;
1885
0
            rgbaPixel[1] = srcPixel[info->offsetBytesG] / info->maxChannelF;
1886
0
            rgbaPixel[2] = srcPixel[info->offsetBytesB] / info->maxChannelF;
1887
0
            rgbaPixel[3] = avifRGBFormatHasAlpha(src->format) ? (srcPixel[info->offsetBytesA] / info->maxChannelF) : 1.0f;
1888
0
        }
1889
0
    }
1890
0
}
1891
1892
void avifSetRGBAPixel(const avifRGBImage * dst, uint32_t x, uint32_t y, const avifRGBColorSpaceInfo * info, const float rgbaPixel[4])
1893
0
{
1894
0
    assert(dst != NULL);
1895
0
    assert(!dst->isFloat || dst->depth == 16);
1896
0
    assert(dst->format != AVIF_RGB_FORMAT_RGB_565 || dst->depth == 8);
1897
0
    assert(rgbaPixel[0] >= 0.0f && rgbaPixel[0] <= 1.0f);
1898
0
    assert(rgbaPixel[1] >= 0.0f && rgbaPixel[1] <= 1.0f);
1899
0
    assert(rgbaPixel[2] >= 0.0f && rgbaPixel[2] <= 1.0f);
1900
1901
0
    uint8_t * const dstPixel = &dst->pixels[y * dst->rowBytes + x * info->pixelBytes];
1902
1903
0
    uint8_t * const ptrR = &dstPixel[info->offsetBytesR];
1904
0
    uint8_t * const ptrG = &dstPixel[info->offsetBytesG];
1905
0
    uint8_t * const ptrB = &dstPixel[info->offsetBytesB];
1906
0
    uint8_t * const ptrA = avifRGBFormatHasAlpha(dst->format) ? &dstPixel[info->offsetBytesA] : NULL;
1907
0
    if (dst->depth > 8) {
1908
0
        if (dst->isFloat) {
1909
0
            *((uint16_t *)ptrR) = avifFloatToF16(rgbaPixel[0]);
1910
0
            *((uint16_t *)ptrG) = avifFloatToF16(rgbaPixel[1]);
1911
0
            *((uint16_t *)ptrB) = avifFloatToF16(rgbaPixel[2]);
1912
0
            if (ptrA) {
1913
0
                *((uint16_t *)ptrA) = avifFloatToF16(rgbaPixel[3]);
1914
0
            }
1915
0
        } else {
1916
0
            *((uint16_t *)ptrR) = (uint16_t)(0.5f + (rgbaPixel[0] * info->maxChannelF));
1917
0
            *((uint16_t *)ptrG) = (uint16_t)(0.5f + (rgbaPixel[1] * info->maxChannelF));
1918
0
            *((uint16_t *)ptrB) = (uint16_t)(0.5f + (rgbaPixel[2] * info->maxChannelF));
1919
0
            if (ptrA) {
1920
0
                *((uint16_t *)ptrA) = (uint16_t)(0.5f + (rgbaPixel[3] * info->maxChannelF));
1921
0
            }
1922
0
        }
1923
0
    } else {
1924
0
        avifStoreRGB8Pixel(dst->format,
1925
0
                           (uint8_t)(0.5f + (rgbaPixel[0] * info->maxChannelF)),
1926
0
                           (uint8_t)(0.5f + (rgbaPixel[1] * info->maxChannelF)),
1927
0
                           (uint8_t)(0.5f + (rgbaPixel[2] * info->maxChannelF)),
1928
0
                           ptrR,
1929
0
                           ptrG,
1930
0
                           ptrB);
1931
0
        if (ptrA) {
1932
0
            *ptrA = (uint8_t)(0.5f + (rgbaPixel[3] * info->maxChannelF));
1933
0
        }
1934
0
    }
1935
0
}