Coverage Report

Created: 2022-08-24 06:17

/src/x265/source/common/primitives.h
/*****************************************************************************
 * Copyright (C) 2013-2020 MulticoreWare, Inc
 *
 * Authors: Steve Borho <steve@borho.org>
 *          Mandar Gurav <mandar@multicorewareinc.com>
 *          Deepthi Devaki Akkoorath <deepthidevaki@multicorewareinc.com>
 *          Mahesh Pittala <mahesh@multicorewareinc.com>
 *          Rajesh Paulraj <rajesh@multicorewareinc.com>
 *          Praveen Kumar Tiwari <praveen@multicorewareinc.com>
 *          Min Chen <chenm003@163.com>
 *          Hongbin Liu <liuhongbin1@huawei.com>
 *          Yimeng Su <yimeng.su@huawei.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at license @ x265.com.
 *****************************************************************************/

#ifndef X265_PRIMITIVES_H
#define X265_PRIMITIVES_H

#include "common.h"
#include "cpu.h"

namespace X265_NS {
// x265 private namespace

enum LumaPU
{
    // Square (the first 5 PUs match the block sizes)
    LUMA_4x4,   LUMA_8x8,   LUMA_16x16, LUMA_32x32, LUMA_64x64,
    // Rectangular
    LUMA_8x4,   LUMA_4x8,
    LUMA_16x8,  LUMA_8x16,
    LUMA_32x16, LUMA_16x32,
    LUMA_64x32, LUMA_32x64,
    // Asymmetrical (0.75, 0.25)
    LUMA_16x12, LUMA_12x16, LUMA_16x4,  LUMA_4x16,
    LUMA_32x24, LUMA_24x32, LUMA_32x8,  LUMA_8x32,
    LUMA_64x48, LUMA_48x64, LUMA_64x16, LUMA_16x64,
    NUM_PU_SIZES
};
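
/* Editor's example (not part of the original header): the enum covers 5 square,
 * 8 rectangular, and 12 asymmetrical (AMP) shapes, so a sanity check such as
 * the following would hold:
 *
 *     static_assert(NUM_PU_SIZES == 25, "5 square + 8 rectangular + 12 AMP");
 */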

enum LumaCU // can be indexed using log2n(width)-2
{
    BLOCK_4x4,
    BLOCK_8x8,
    BLOCK_16x16,
    BLOCK_32x32,
    BLOCK_64x64,
    NUM_CU_SIZES
};
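
/* Editor's example (not part of the original header): the log2n(width)-2
 * indexing noted above maps each square size onto this enum; using the
 * partitionFromLog2Size() helper defined later in this header:
 *
 *     int sizeIdx = partitionFromLog2Size(5); // log2(32) = 5, 5 - 2 = 3
 *     // sizeIdx == BLOCK_32x32
 */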

enum AlignPrimitive
{
    NONALIGNED,
    ALIGNED,
    NUM_ALIGNMENT_TYPES
};

enum { NUM_TR_SIZE = 4 }; // TUs are 4x4, 8x8, 16x16, and 32x32

/* Chroma partition sizes. These enums are only a convenience for indexing into
 * the chroma primitive arrays when instantiating macros or templates. The
 * chroma function tables should always be indexed by a LumaPU enum when used. */
enum ChromaPU420
{
    CHROMA_420_2x2,   CHROMA_420_4x4,   CHROMA_420_8x8,  CHROMA_420_16x16, CHROMA_420_32x32,
    CHROMA_420_4x2,   CHROMA_420_2x4,
    CHROMA_420_8x4,   CHROMA_420_4x8,
    CHROMA_420_16x8,  CHROMA_420_8x16,
    CHROMA_420_32x16, CHROMA_420_16x32,
    CHROMA_420_8x6,   CHROMA_420_6x8,   CHROMA_420_8x2,  CHROMA_420_2x8,
    CHROMA_420_16x12, CHROMA_420_12x16, CHROMA_420_16x4, CHROMA_420_4x16,
    CHROMA_420_32x24, CHROMA_420_24x32, CHROMA_420_32x8, CHROMA_420_8x32,
};

enum ChromaCU420
{
    BLOCK_420_2x2,
    BLOCK_420_4x4,
    BLOCK_420_8x8,
    BLOCK_420_16x16,
    BLOCK_420_32x32
};

enum ChromaPU422
{
    CHROMA_422_2x4,   CHROMA_422_4x8,   CHROMA_422_8x16,  CHROMA_422_16x32, CHROMA_422_32x64,
    CHROMA_422_4x4,   CHROMA_422_2x8,
    CHROMA_422_8x8,   CHROMA_422_4x16,
    CHROMA_422_16x16, CHROMA_422_8x32,
    CHROMA_422_32x32, CHROMA_422_16x64,
    CHROMA_422_8x12,  CHROMA_422_6x16,  CHROMA_422_8x4,   CHROMA_422_2x16,
    CHROMA_422_16x24, CHROMA_422_12x32, CHROMA_422_16x8,  CHROMA_422_4x32,
    CHROMA_422_32x48, CHROMA_422_24x64, CHROMA_422_32x16, CHROMA_422_8x64,
};

enum ChromaCU422
{
    BLOCK_422_2x4,
    BLOCK_422_4x8,
    BLOCK_422_8x16,
    BLOCK_422_16x32,
    BLOCK_422_32x64
};

enum IntegralSize
{
    INTEGRAL_4,
    INTEGRAL_8,
    INTEGRAL_12,
    INTEGRAL_16,
    INTEGRAL_24,
    INTEGRAL_32,
    NUM_INTEGRAL_SIZE
};

typedef int  (*pixelcmp_t)(const pixel* fenc, intptr_t fencstride, const pixel* fref, intptr_t frefstride); // fenc is aligned
typedef int  (*pixelcmp_ss_t)(const int16_t* fenc, intptr_t fencstride, const int16_t* fref, intptr_t frefstride);
typedef sse_t (*pixel_sse_t)(const pixel* fenc, intptr_t fencstride, const pixel* fref, intptr_t frefstride); // fenc is aligned
typedef sse_t (*pixel_sse_ss_t)(const int16_t* fenc, intptr_t fencstride, const int16_t* fref, intptr_t frefstride);
typedef sse_t (*pixel_ssd_s_t)(const int16_t* fenc, intptr_t fencstride);
typedef int  (*pixelcmp_ads_t)(int encDC[], uint32_t *sums, int delta, uint16_t *costMvX, int16_t *mvs, int width, int thresh);
typedef void (*pixelcmp_x4_t)(const pixel* fenc, const pixel* fref0, const pixel* fref1, const pixel* fref2, const pixel* fref3, intptr_t frefstride, int32_t* res);
typedef void (*pixelcmp_x3_t)(const pixel* fenc, const pixel* fref0, const pixel* fref1, const pixel* fref2, intptr_t frefstride, int32_t* res);
typedef void (*blockfill_s_t)(int16_t* dst, intptr_t dstride, int16_t val);

typedef void (*intra_pred_t)(pixel* dst, intptr_t dstStride, const pixel *srcPix, int dirMode, int bFilter);
typedef void (*intra_allangs_t)(pixel *dst, pixel *refPix, pixel *filtPix, int bLuma);
typedef void (*intra_filter_t)(const pixel* references, pixel* filtered);

typedef void (*cpy2Dto1D_shl_t)(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
typedef void (*cpy2Dto1D_shr_t)(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift);
typedef void (*cpy1Dto2D_shl_t)(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
typedef void (*cpy1Dto2D_shr_t)(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift);
typedef uint32_t (*copy_cnt_t)(int16_t* coeff, const int16_t* residual, intptr_t resiStride);

typedef void (*dct_t)(const int16_t* src, int16_t* dst, intptr_t srcStride);
typedef void (*idct_t)(const int16_t* src, int16_t* dst, intptr_t dstStride);
typedef void (*denoiseDct_t)(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff);

typedef void (*calcresidual_t)(const pixel* fenc, const pixel* pred, int16_t* residual, intptr_t stride);
typedef void (*transpose_t)(pixel* dst, const pixel* src, intptr_t stride);
typedef uint32_t (*quant_t)(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff);
typedef uint32_t (*nquant_t)(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff);
typedef void (*dequant_scaling_t)(const int16_t* src, const int32_t* dequantCoef, int16_t* dst, int num, int mcqp_miper, int shift);
typedef void (*dequant_normal_t)(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift);
typedef int  (*count_nonzero_t)(const int16_t* quantCoeff);
typedef void (*weightp_pp_t)(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset);
typedef void (*weightp_sp_t)(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset);
typedef void (*scale1D_t)(pixel* dst, const pixel* src);
typedef void (*scale2D_t)(pixel* dst, const pixel* src, intptr_t stride);
typedef void (*downscale_t)(const pixel* src0, pixel* dstf, pixel* dsth, pixel* dstv, pixel* dstc,
                            intptr_t src_stride, intptr_t dst_stride, int width, int height);
typedef void (*extendCURowBorder_t)(pixel* txt, intptr_t stride, int width, int height, int marginX);
typedef void (*ssim_4x4x2_core_t)(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4]);
typedef float (*ssim_end4_t)(int sum0[5][4], int sum1[5][4], int width);
typedef uint64_t (*var_t)(const pixel* pix, intptr_t stride);
typedef void (*plane_copy_deinterleave_t)(pixel* dstu, intptr_t dstuStride, pixel* dstv, intptr_t dstvStride, const pixel* src, intptr_t srcStride, int w, int h);

typedef void (*filter_pp_t) (const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
typedef void (*filter_hps_t) (const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx, int isRowExt);
typedef void (*filter_ps_t) (const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx);
typedef void (*filter_sp_t) (const int16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int coeffIdx);
typedef void (*filter_ss_t) (const int16_t* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride, int coeffIdx);
typedef void (*filter_hv_pp_t) (const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int idxX, int idxY);
typedef void (*filter_p2s_t)(const pixel* src, intptr_t srcStride, int16_t* dst, intptr_t dstStride);

typedef void (*copy_pp_t)(pixel* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride); // dst is aligned
typedef void (*copy_sp_t)(pixel* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);
typedef void (*copy_ps_t)(int16_t* dst, intptr_t dstStride, const pixel* src, intptr_t srcStride);
typedef void (*copy_ss_t)(int16_t* dst, intptr_t dstStride, const int16_t* src, intptr_t srcStride);

typedef void (*pixel_sub_ps_t)(int16_t* dst, intptr_t dstride, const pixel* src0, const pixel* src1, intptr_t sstride0, intptr_t sstride1);
typedef void (*pixel_add_ps_t)(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
typedef void (*pixelavg_pp_t)(pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int weight);
typedef void (*addAvg_t)(const int16_t* src0, const int16_t* src1, pixel* dst, intptr_t src0Stride, intptr_t src1Stride, intptr_t dstStride);

typedef void (*saoCuOrgE0_t)(pixel* rec, int8_t* offsetEo, int width, int8_t* signLeft, intptr_t stride);
typedef void (*saoCuOrgE1_t)(pixel* rec, int8_t* upBuff1, int8_t* offsetEo, intptr_t stride, int width);
typedef void (*saoCuOrgE2_t)(pixel* rec, int8_t* pBufft, int8_t* pBuff1, int8_t* offsetEo, int lcuWidth, intptr_t stride);
typedef void (*saoCuOrgE3_t)(pixel* rec, int8_t* upBuff1, int8_t* m_offsetEo, intptr_t stride, int startX, int endX);
typedef void (*saoCuOrgB0_t)(pixel* rec, const int8_t* offsetBo, int ctuWidth, int ctuHeight, intptr_t stride);

typedef void (*saoCuStatsBO_t)(const int16_t *diff, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count);
typedef void (*saoCuStatsE0_t)(const int16_t *diff, const pixel *rec, intptr_t stride, int endX, int endY, int32_t *stats, int32_t *count);
typedef void (*saoCuStatsE1_t)(const int16_t *diff, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count);
typedef void (*saoCuStatsE2_t)(const int16_t *diff, const pixel *rec, intptr_t stride, int8_t *upBuff1, int8_t *upBuff, int endX, int endY, int32_t *stats, int32_t *count);
typedef void (*saoCuStatsE3_t)(const int16_t *diff, const pixel *rec, intptr_t stride, int8_t *upBuff1, int endX, int endY, int32_t *stats, int32_t *count);

typedef void (*sign_t)(int8_t *dst, const pixel *src1, const pixel *src2, const int endX);
typedef void (*planecopy_cp_t) (const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift);
typedef void (*planecopy_sp_t) (const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask);
typedef void (*planecopy_pp_t) (const pixel* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift);
typedef pixel (*planeClipAndMax_t)(pixel *src, intptr_t stride, int width, int height, uint64_t *outsum, const pixel minPix, const pixel maxPix);

typedef void (*cutree_propagate_cost) (int* dst, const uint16_t* propagateIn, const int32_t* intraCosts, const uint16_t* interCosts, const int32_t* invQscales, const double* fpsFactor, int len);

typedef void (*cutree_fix8_unpack)(double *dst, uint16_t *src, int count);
typedef void (*cutree_fix8_pack)(uint16_t *dst, double *src, int count);

typedef int (*scanPosLast_t)(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* scanCG4x4, const int trSize);
typedef uint32_t (*findPosFirstLast_t)(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16]);

typedef uint32_t (*costCoeffNxN_t)(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase);
typedef uint32_t (*costCoeffRemain_t)(uint16_t *absCoeff, int numNonZero, int idx);
typedef uint32_t (*costC1C2Flag_t)(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset);

typedef void (*pelFilterLumaStrong_t)(pixel* src, intptr_t srcStep, intptr_t offset, int32_t tcP, int32_t tcQ);
typedef void (*pelFilterChroma_t)(pixel* src, intptr_t srcStep, intptr_t offset, int32_t tc, int32_t maskP, int32_t maskQ);

typedef void (*integralv_t)(uint32_t *sum, intptr_t stride);
typedef void (*integralh_t)(uint32_t *sum, pixel *pix, intptr_t stride);
typedef void (*nonPsyRdoQuant_t)(int16_t *m_resiDctCoeff, int64_t *costUncoded, int64_t *totalUncodedCost, int64_t *totalRdCost, uint32_t blkPos);
typedef void (*psyRdoQuant_t)(int16_t *m_resiDctCoeff, int16_t *m_fencDctCoeff, int64_t *costUncoded, int64_t *totalUncodedCost, int64_t *totalRdCost, int64_t *psyScale, uint32_t blkPos);
typedef void (*psyRdoQuant_t1)(int16_t *m_resiDctCoeff, int64_t *costUncoded, int64_t *totalUncodedCost, int64_t *totalRdCost, uint32_t blkPos);
typedef void (*psyRdoQuant_t2)(int16_t *m_resiDctCoeff, int16_t *m_fencDctCoeff, int64_t *costUncoded, int64_t *totalUncodedCost, int64_t *totalRdCost, int64_t *psyScale, uint32_t blkPos);
typedef void (*ssimDistortion_t)(const pixel *fenc, uint32_t fStride, const pixel *recon, intptr_t rstride, uint64_t *ssBlock, int shift, uint64_t *ac_k);
typedef void (*normFactor_t)(const pixel *src, uint32_t blockSize, int shift, uint64_t *z_k);
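
/* Editor's example (sketch, not part of the original source): each typedef
 * above is a calling convention shared by a C reference routine and its SIMD
 * replacements. A hypothetical C SAD for an 8x8 block could be stored through
 * pixelcmp_t exactly as an assembly version would be:
 *
 *     static int sad8x8_c(const pixel* fenc, intptr_t fencstride,
 *                         const pixel* fref, intptr_t frefstride)
 *     {
 *         int sum = 0;
 *         for (int y = 0; y < 8; y++, fenc += fencstride, fref += frefstride)
 *             for (int x = 0; x < 8; x++)
 *                 sum += abs(fenc[x] - fref[x]);
 *         return sum;
 *     }
 *     pixelcmp_t sad = sad8x8_c; // same slot an optimized version would fill
 */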
/* Function pointers to optimized encoder primitives. Each pointer can reference
 * either an assembly routine, a SIMD intrinsic primitive, or a C function */
struct EncoderPrimitives
{
    /* These primitives can be used for any sized prediction unit (from 4x4 to
     * 64x64, square, rectangular - 50/50 or asymmetrical - 25/75) and are
     * generally restricted to motion estimation and motion compensation (inter
     * prediction). Note that the 4x4 PU can only be used for intra, which is
     * really a 4x4 TU, so at most copy_pp and satd will use 4x4. This array is
     * indexed by LumaPU values, which can be retrieved by partitionFromSizes() */
    struct PU
    {
        pixelcmp_t     sad;         // Sum of Absolute Differences
        pixelcmp_x3_t  sad_x3;      // Sum of Absolute Differences, 3 mv offsets at once
        pixelcmp_x4_t  sad_x4;      // Sum of Absolute Differences, 4 mv offsets at once
        pixelcmp_ads_t ads;         // Absolute Differences sum
        pixelcmp_t     satd;        // Sum of Absolute Transformed Differences (4x4 Hadamard)

        filter_pp_t    luma_hpp;    // 8-tap luma motion compensation interpolation filters
        filter_hps_t   luma_hps;
        filter_pp_t    luma_vpp;
        filter_ps_t    luma_vps;
        filter_sp_t    luma_vsp;
        filter_ss_t    luma_vss;
        filter_hv_pp_t luma_hvpp;   // combines hps + vsp
        pixelavg_pp_t  pixelavg_pp[NUM_ALIGNMENT_TYPES]; // quick bidir using pixels (borrowed from x264)
        addAvg_t       addAvg[NUM_ALIGNMENT_TYPES];      // bidir motion compensation, uses 16bit values
        copy_pp_t      copy_pp;
        filter_p2s_t   convert_p2s[NUM_ALIGNMENT_TYPES];
    }
    pu[NUM_PU_SIZES];
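
    /* Editor's example (sketch, not part of the original source): typical
     * dispatch through the PU table; the buffer and stride names below are
     * illustrative only.
     *
     *     int part = partitionFromSizes(16, 8); // expected to yield LUMA_16x8
     *     int cost = primitives.pu[part].sad(fenc, fencStride, fref, frefStride);
     */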

    /* These primitives can be used for square TU blocks (4x4 to 32x32) or
     * possibly square CU blocks (8x8 to 64x64). Some primitives are used for
     * both CU and TU so we merge them into one array that is indexed uniformly.
     * This keeps the index logic uniform and simple and improves cache
     * coherency. CU only primitives will leave 4x4 pointers NULL while TU only
     * primitives will leave 64x64 pointers NULL. Indexed by LumaCU */
    struct CU
    {
        dct_t           dct;    // active dct transformation
        idct_t          idct;   // active idct transformation

        dct_t           standard_dct;   // original dct function, used by lowpass_dct
        dct_t           lowpass_dct;    // lowpass dct approximation

        calcresidual_t  calcresidual[NUM_ALIGNMENT_TYPES];
        pixel_sub_ps_t  sub_ps;
        pixel_add_ps_t  add_ps[NUM_ALIGNMENT_TYPES];
        blockfill_s_t   blockfill_s[NUM_ALIGNMENT_TYPES];   // block fill, for DC transforms
        copy_cnt_t      copy_cnt;      // copy coeff while counting non-zero
        count_nonzero_t count_nonzero;
        cpy2Dto1D_shl_t cpy2Dto1D_shl;
        cpy2Dto1D_shr_t cpy2Dto1D_shr;
        cpy1Dto2D_shl_t cpy1Dto2D_shl[NUM_ALIGNMENT_TYPES];
        cpy1Dto2D_shr_t cpy1Dto2D_shr;
        copy_sp_t       copy_sp;
        copy_ps_t       copy_ps;
        copy_ss_t       copy_ss;
        copy_pp_t       copy_pp;       // alias to pu[].copy_pp

        var_t           var;           // block internal variance

        pixel_sse_t     sse_pp;        // Sum of Square Error (pixel, pixel) fenc alignment not assumed
        pixel_sse_ss_t  sse_ss;        // Sum of Square Error (short, short) fenc alignment not assumed
        pixelcmp_t      psy_cost_pp;   // difference in AC energy between two pixel blocks
        pixel_ssd_s_t   ssd_s[NUM_ALIGNMENT_TYPES];         // Sum of Square Error (residual coeff to self)
        pixelcmp_t      sa8d;          // Sum of Transformed Differences (8x8 Hadamard), uses satd for 4x4 intra TU
        transpose_t     transpose;     // transpose pixel block; for use with intra all-angs
        intra_allangs_t intra_pred_allangs;
        intra_filter_t  intra_filter;
        intra_pred_t    intra_pred[NUM_INTRA_MODE];
        nonPsyRdoQuant_t nonPsyRdoQuant;
        psyRdoQuant_t    psyRdoQuant;
        psyRdoQuant_t1   psyRdoQuant_1p;
        psyRdoQuant_t2   psyRdoQuant_2p;
        ssimDistortion_t ssimDist;
        normFactor_t     normFact;
    }
    cu[NUM_CU_SIZES];
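
    /* Editor's example (sketch, not part of the original source): because
     * CU-only entries leave the 4x4 slots NULL and TU-only entries leave the
     * 64x64 slots NULL, a defensive caller might look like:
     *
     *     int sizeIdx = partitionFromLog2Size(4); // 16x16 -> BLOCK_16x16
     *     if (primitives.cu[sizeIdx].dct)
     *         primitives.cu[sizeIdx].dct(residual, coeff, trStride);
     */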
    /* These remaining primitives work on either fixed block sizes or take
     * block dimensions as arguments and thus do not belong in either the PU or
     * the CU arrays */
    dct_t                 dst4x4;
    idct_t                idst4x4;

    quant_t               quant;
    nquant_t              nquant;
    dequant_scaling_t     dequant_scaling;
    dequant_normal_t      dequant_normal;
    denoiseDct_t          denoiseDct;
    scale1D_t             scale1D_128to64[NUM_ALIGNMENT_TYPES];
    scale2D_t             scale2D_64to32;

    ssim_4x4x2_core_t     ssim_4x4x2_core;
    ssim_end4_t           ssim_end_4;

    sign_t                sign;
    saoCuOrgE0_t          saoCuOrgE0;

    /* To avoid the overhead of handling width=16 in the AVX2 optimization,
     * SAO_E0_1 is split into two parts: saoCuOrgE1 and saoCuOrgE1_2Rows */
    saoCuOrgE1_t          saoCuOrgE1, saoCuOrgE1_2Rows;

    // saoCuOrgE2[0] is used for width <= 16 and saoCuOrgE2[1] is used for width > 16.
    saoCuOrgE2_t          saoCuOrgE2[2];

    /* In the AVX2 optimization, two rows cannot be handled simultaneously
     * because a pixel from the previous row is required. So saoCuOrgE3[0] is
     * used for width <= 16 and saoCuOrgE3[1] is used for width > 16. */
    saoCuOrgE3_t          saoCuOrgE3[2];
    saoCuOrgB0_t          saoCuOrgB0;

    saoCuStatsBO_t        saoCuStatsBO;
    saoCuStatsE0_t        saoCuStatsE0;
    saoCuStatsE1_t        saoCuStatsE1;
    saoCuStatsE2_t        saoCuStatsE2;
    saoCuStatsE3_t        saoCuStatsE3;

    downscale_t           frameInitLowres;
    downscale_t           frameInitLowerRes;
    cutree_propagate_cost propagateCost;
    cutree_fix8_unpack    fix8Unpack;
    cutree_fix8_pack      fix8Pack;

    extendCURowBorder_t   extendRowBorder;
    planecopy_cp_t        planecopy_cp;
    planecopy_sp_t        planecopy_sp;
    planecopy_sp_t        planecopy_sp_shl;
    planecopy_pp_t        planecopy_pp_shr;
    planeClipAndMax_t     planeClipAndMax;

    weightp_sp_t          weight_sp;
    weightp_pp_t          weight_pp;

    scanPosLast_t         scanPosLast;
    findPosFirstLast_t    findPosFirstLast;

    costCoeffNxN_t        costCoeffNxN;
    costCoeffRemain_t     costCoeffRemain;
    costC1C2Flag_t        costC1C2Flag;

    pelFilterLumaStrong_t pelFilterLumaStrong[2]; // EDGE_VER = 0, EDGE_HOR = 1
    pelFilterChroma_t     pelFilterChroma[2];     // EDGE_VER = 0, EDGE_HOR = 1

    integralv_t           integral_initv[NUM_INTEGRAL_SIZE];
    integralh_t           integral_inith[NUM_INTEGRAL_SIZE];

    /* There is one set of chroma primitives per color space. An encoder will
     * have just a single color space and thus it will only ever use one entry
     * in this array. However we always fill all entries in the array in case
     * multiple encoders with different color spaces share the primitive table
     * in a single process. Note that 4:2:0 PU and CU are 1/2 width and 1/2
     * height of their luma counterparts. 4:2:2 PU and CU are 1/2 width and full
     * height, while 4:4:4 directly uses the luma block sizes and shares luma
     * primitives for all cases except for the interpolation filters. 4:4:4
     * interpolation filters have luma partition sizes but are only 4-tap. */
    struct Chroma
    {
        /* Chroma prediction unit primitives. Indexed by LumaPU */
        struct PUChroma
        {
            pixelcmp_t   satd;      // if the chroma PU is not a multiple of 4x4, this will be NULL
            filter_pp_t  filter_vpp;
            filter_ps_t  filter_vps;
            filter_sp_t  filter_vsp;
            filter_ss_t  filter_vss;
            filter_pp_t  filter_hpp;
            filter_hps_t filter_hps;
            addAvg_t     addAvg[NUM_ALIGNMENT_TYPES];
            copy_pp_t    copy_pp;
            filter_p2s_t p2s[NUM_ALIGNMENT_TYPES];
        }
        pu[NUM_PU_SIZES];

        /* Chroma transform and coding unit primitives. Indexed by LumaCU */
        struct CUChroma
        {
            pixelcmp_t     sa8d;    // if the chroma CU is not a multiple of 8x8, this will use satd
            pixel_sse_t    sse_pp;
            pixel_sub_ps_t sub_ps;
            pixel_add_ps_t add_ps[NUM_ALIGNMENT_TYPES];

            copy_ps_t      copy_ps;
            copy_sp_t      copy_sp;
            copy_ss_t      copy_ss;
            copy_pp_t      copy_pp;
        }
        cu[NUM_CU_SIZES];
    }
    chroma[X265_CSP_COUNT];
};
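
/* Editor's example (sketch, not part of the original source): per the comment
 * above, the chroma tables are indexed by color space and then by the *luma*
 * partition; for 4:2:0 the LUMA_16x16 entry operates on the co-located 8x8
 * chroma block:
 *
 *     copy_pp_t f = primitives.chroma[X265_CSP_I420].pu[LUMA_16x16].copy_pp;
 */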

/* This copy of the table is what gets used by the encoder */
extern EncoderPrimitives primitives;

/* Returns a LumaPU enum for the given size, always expected to return a valid enum */
/* [coverage: 2.67M executions per line] */
inline int partitionFromSizes(int width, int height)
{
    X265_CHECK(((width | height) & ~(4 | 8 | 16 | 32 | 64)) == 0, "Invalid block width/height\n");
    extern const uint8_t lumaPartitionMapTable[];
    int w = (width >> 2) - 1;
    int h = (height >> 2) - 1;
    int part = (int)lumaPartitionMapTable[(w << 4) + h];
    X265_CHECK(part != 255, "Invalid block width %d height %d\n", width, height);
    return part;
}
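
/* Editor's worked example (not part of the original source): for a 16x8 PU,
 * w = (16 >> 2) - 1 = 3 and h = (8 >> 2) - 1 = 1, so the map is read at index
 * (3 << 4) + 1 = 49, which is expected to hold LUMA_16x8:
 *
 *     int part = partitionFromSizes(16, 8); // lumaPartitionMapTable[49]
 */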

/* Computes the size of the LumaPU for a given LumaPU enum */
/* [coverage: 0 executions -- uncovered in this run] */
inline void sizesFromPartition(int part, int *width, int *height)
{
    X265_CHECK(part >= 0 && part <= 24, "Invalid part %d \n", part);
    extern const uint8_t lumaPartitionMapTable[];
    int index = 0;
    for (int i = 0; i < 256; i++)
        if (part == lumaPartitionMapTable[i])
        {
            index = i;
            break;
        }
    *width = 4 * ((index >> 4) + 1);
    *height = 4 * ((index % 16) + 1);
}
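
/* Editor's example (not part of the original source): this is the inverse of
 * partitionFromSizes(), recovered by a linear scan of the same 256-entry map;
 * continuing the example above, index 49 gives width 4*(3+1) and height
 * 4*(1+1):
 *
 *     int w, h;
 *     sizesFromPartition(LUMA_16x8, &w, &h); // w == 16, h == 8
 */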

/* [coverage: 775k executions per line] */
inline int partitionFromLog2Size(int log2Size)
{
    X265_CHECK(2 <= log2Size && log2Size <= 6, "Invalid block size\n");
    return log2Size - 2;
}
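
/* Editor's example (not part of the original source):
 *
 *     int sizeIdx = partitionFromLog2Size(4); // 16x16 block -> BLOCK_16x16 (2)
 */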

void setupCPrimitives(EncoderPrimitives &p);
void setupInstrinsicPrimitives(EncoderPrimitives &p, int cpuMask);
void setupAssemblyPrimitives(EncoderPrimitives &p, int cpuMask);
void setupAliasPrimitives(EncoderPrimitives &p);
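
/* Editor's sketch (an assumption, not confirmed by this header): a plausible
 * initialization order for the global table, with each later pass overriding
 * the generic C pointers it can accelerate for the given cpuMask:
 *
 *     setupCPrimitives(primitives);                   // portable C baseline
 *     setupInstrinsicPrimitives(primitives, cpuMask); // SIMD intrinsics
 *     setupAssemblyPrimitives(primitives, cpuMask);   // hand-written assembly
 *     setupAliasPrimitives(primitives);               // fill aliased entries
 */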
#if X265_ARCH_ARM64
void setupAliasCPrimitives(EncoderPrimitives &cp, EncoderPrimitives &asmp, int cpuMask);
#endif
#if HAVE_ALTIVEC
void setupPixelPrimitives_altivec(EncoderPrimitives &p);
void setupDCTPrimitives_altivec(EncoderPrimitives &p);
void setupFilterPrimitives_altivec(EncoderPrimitives &p);
void setupIntraPrimitives_altivec(EncoderPrimitives &p);
#endif
}

#if !EXPORT_C_API
extern const int   PFX(max_bit_depth);
extern const char* PFX(version_str);
extern const char* PFX(build_info_str);
#endif

#if ENABLE_ASSEMBLY && X265_ARCH_ARM64
extern "C" {
#include "aarch64/pixel-util.h"
}
#endif

#endif // ifndef X265_PRIMITIVES_H