/work/aom/av1/common/cfl.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2016, Alliance for Open Media. All rights reserved |
3 | | * |
4 | | * This source code is subject to the terms of the BSD 2 Clause License and |
5 | | * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License |
6 | | * was not distributed with this source code in the LICENSE file, you can |
7 | | * obtain it at www.aomedia.org/license/software. If the Alliance for Open |
8 | | * Media Patent License 1.0 was not distributed with this source code in the |
9 | | * PATENTS file, you can obtain it at www.aomedia.org/license/patent. |
10 | | */ |
11 | | |
12 | | #ifndef AOM_AV1_COMMON_CFL_H_ |
13 | | #define AOM_AV1_COMMON_CFL_H_ |
14 | | |
15 | | #include "av1/common/av1_common_int.h" |
16 | | #include "av1/common/blockd.h" |
17 | | |
18 | | // Can we use CfL for the current block? |
19 | 318k | static INLINE CFL_ALLOWED_TYPE is_cfl_allowed(const MACROBLOCKD *xd) { |
20 | 318k | const MB_MODE_INFO *mbmi = xd->mi[0]; |
21 | 318k | const BLOCK_SIZE bsize = mbmi->bsize; |
22 | 318k | assert(bsize < BLOCK_SIZES_ALL); |
23 | 318k | if (xd->lossless[mbmi->segment_id]) { |
24 | | // In lossless, CfL is available when the partition size is equal to the |
25 | | // transform size. |
26 | 116k | const int ssx = xd->plane[AOM_PLANE_U].subsampling_x; |
27 | 116k | const int ssy = xd->plane[AOM_PLANE_U].subsampling_y; |
28 | 116k | const int plane_bsize = get_plane_block_size(bsize, ssx, ssy); |
29 | 116k | return (CFL_ALLOWED_TYPE)(plane_bsize == BLOCK_4X4); |
30 | 116k | } |
31 | | // Spec: CfL is available to luma partitions lesser than or equal to 32x32 |
32 | 202k | return (CFL_ALLOWED_TYPE)(block_size_wide[bsize] <= 32 && |
33 | 202k | block_size_high[bsize] <= 32); |
34 | 318k | } Unexecuted instantiation: decodeframe.c:is_cfl_allowed Unexecuted instantiation: decodemv.c:is_cfl_allowed bitstream.c:is_cfl_allowed Line | Count | Source | 19 | 27.6k | static INLINE CFL_ALLOWED_TYPE is_cfl_allowed(const MACROBLOCKD *xd) { | 20 | 27.6k | const MB_MODE_INFO *mbmi = xd->mi[0]; | 21 | 27.6k | const BLOCK_SIZE bsize = mbmi->bsize; | 22 | 27.6k | assert(bsize < BLOCK_SIZES_ALL); | 23 | 27.6k | if (xd->lossless[mbmi->segment_id]) { | 24 | | // In lossless, CfL is available when the partition size is equal to the | 25 | | // transform size. | 26 | 10.1k | const int ssx = xd->plane[AOM_PLANE_U].subsampling_x; | 27 | 10.1k | const int ssy = xd->plane[AOM_PLANE_U].subsampling_y; | 28 | 10.1k | const int plane_bsize = get_plane_block_size(bsize, ssx, ssy); | 29 | 10.1k | return (CFL_ALLOWED_TYPE)(plane_bsize == BLOCK_4X4); | 30 | 10.1k | } | 31 | | // Spec: CfL is available to luma partitions lesser than or equal to 32x32 | 32 | 17.5k | return (CFL_ALLOWED_TYPE)(block_size_wide[bsize] <= 32 && | 33 | 17.5k | block_size_high[bsize] <= 32); | 34 | 27.6k | } |
Unexecuted instantiation: encoder.c:is_cfl_allowed Unexecuted instantiation: encoder_utils.c:is_cfl_allowed Unexecuted instantiation: encodetxb.c:is_cfl_allowed Unexecuted instantiation: ethread.c:is_cfl_allowed Unexecuted instantiation: global_motion_facade.c:is_cfl_allowed Unexecuted instantiation: mcomp.c:is_cfl_allowed Unexecuted instantiation: palette.c:is_cfl_allowed Line | Count | Source | 19 | 53.0k | static INLINE CFL_ALLOWED_TYPE is_cfl_allowed(const MACROBLOCKD *xd) { | 20 | 53.0k | const MB_MODE_INFO *mbmi = xd->mi[0]; | 21 | 53.0k | const BLOCK_SIZE bsize = mbmi->bsize; | 22 | 53.0k | assert(bsize < BLOCK_SIZES_ALL); | 23 | 53.0k | if (xd->lossless[mbmi->segment_id]) { | 24 | | // In lossless, CfL is available when the partition size is equal to the | 25 | | // transform size. | 26 | 19.4k | const int ssx = xd->plane[AOM_PLANE_U].subsampling_x; | 27 | 19.4k | const int ssy = xd->plane[AOM_PLANE_U].subsampling_y; | 28 | 19.4k | const int plane_bsize = get_plane_block_size(bsize, ssx, ssy); | 29 | 19.4k | return (CFL_ALLOWED_TYPE)(plane_bsize == BLOCK_4X4); | 30 | 19.4k | } | 31 | | // Spec: CfL is available to luma partitions lesser than or equal to 32x32 | 32 | 33.6k | return (CFL_ALLOWED_TYPE)(block_size_wide[bsize] <= 32 && | 33 | 33.6k | block_size_high[bsize] <= 32); | 34 | 53.0k | } |
Unexecuted instantiation: speed_features.c:is_cfl_allowed Unexecuted instantiation: superres_scale.c:is_cfl_allowed Unexecuted instantiation: svc_layercontext.c:is_cfl_allowed Unexecuted instantiation: tokenize.c:is_cfl_allowed Unexecuted instantiation: tpl_model.c:is_cfl_allowed Unexecuted instantiation: tx_search.c:is_cfl_allowed intra_mode_search.c:is_cfl_allowed Line | Count | Source | 19 | 210k | static INLINE CFL_ALLOWED_TYPE is_cfl_allowed(const MACROBLOCKD *xd) { | 20 | 210k | const MB_MODE_INFO *mbmi = xd->mi[0]; | 21 | 210k | const BLOCK_SIZE bsize = mbmi->bsize; | 22 | 210k | assert(bsize < BLOCK_SIZES_ALL); | 23 | 210k | if (xd->lossless[mbmi->segment_id]) { | 24 | | // In lossless, CfL is available when the partition size is equal to the | 25 | | // transform size. | 26 | 76.9k | const int ssx = xd->plane[AOM_PLANE_U].subsampling_x; | 27 | 76.9k | const int ssy = xd->plane[AOM_PLANE_U].subsampling_y; | 28 | 76.9k | const int plane_bsize = get_plane_block_size(bsize, ssx, ssy); | 29 | 76.9k | return (CFL_ALLOWED_TYPE)(plane_bsize == BLOCK_4X4); | 30 | 76.9k | } | 31 | | // Spec: CfL is available to luma partitions lesser than or equal to 32x32 | 32 | 133k | return (CFL_ALLOWED_TYPE)(block_size_wide[bsize] <= 32 && | 33 | 133k | block_size_high[bsize] <= 32); | 34 | 210k | } |
Unexecuted instantiation: cfl.c:is_cfl_allowed Unexecuted instantiation: reconintra.c:is_cfl_allowed Unexecuted instantiation: allintra_vis.c:is_cfl_allowed Unexecuted instantiation: compound_type.c:is_cfl_allowed Unexecuted instantiation: encodeframe.c:is_cfl_allowed encodeframe_utils.c:is_cfl_allowed Line | Count | Source | 19 | 27.6k | static INLINE CFL_ALLOWED_TYPE is_cfl_allowed(const MACROBLOCKD *xd) { | 20 | 27.6k | const MB_MODE_INFO *mbmi = xd->mi[0]; | 21 | 27.6k | const BLOCK_SIZE bsize = mbmi->bsize; | 22 | 27.6k | assert(bsize < BLOCK_SIZES_ALL); | 23 | 27.6k | if (xd->lossless[mbmi->segment_id]) { | 24 | | // In lossless, CfL is available when the partition size is equal to the | 25 | | // transform size. | 26 | 10.1k | const int ssx = xd->plane[AOM_PLANE_U].subsampling_x; | 27 | 10.1k | const int ssy = xd->plane[AOM_PLANE_U].subsampling_y; | 28 | 10.1k | const int plane_bsize = get_plane_block_size(bsize, ssx, ssy); | 29 | 10.1k | return (CFL_ALLOWED_TYPE)(plane_bsize == BLOCK_4X4); | 30 | 10.1k | } | 31 | | // Spec: CfL is available to luma partitions lesser than or equal to 32x32 | 32 | 17.5k | return (CFL_ALLOWED_TYPE)(block_size_wide[bsize] <= 32 && | 33 | 17.5k | block_size_high[bsize] <= 32); | 34 | 27.6k | } |
Unexecuted instantiation: encodemb.c:is_cfl_allowed Unexecuted instantiation: encode_strategy.c:is_cfl_allowed Unexecuted instantiation: interp_search.c:is_cfl_allowed Unexecuted instantiation: motion_search_facade.c:is_cfl_allowed Unexecuted instantiation: partition_search.c:is_cfl_allowed Unexecuted instantiation: partition_strategy.c:is_cfl_allowed Unexecuted instantiation: nonrd_pickmode.c:is_cfl_allowed |
35 | | |
36 | | // Do we need to save the luma pixels from the current block, |
37 | | // for a possible future CfL prediction? |
38 | | static INLINE CFL_ALLOWED_TYPE store_cfl_required(const AV1_COMMON *cm, |
39 | 78.4k | const MACROBLOCKD *xd) { |
40 | 78.4k | const MB_MODE_INFO *mbmi = xd->mi[0]; |
41 | | |
42 | 78.4k | if (cm->seq_params->monochrome) return CFL_DISALLOWED; |
43 | | |
44 | 78.4k | if (!xd->is_chroma_ref) { |
45 | | // For non-chroma-reference blocks, we should always store the luma pixels, |
46 | | // in case the corresponding chroma-reference block uses CfL. |
47 | | // Note that this can only happen for block sizes which are <8 on |
48 | | // their shortest side, as otherwise they would be chroma reference |
49 | | // blocks. |
50 | 0 | return CFL_ALLOWED; |
51 | 0 | } |
52 | | |
53 | | // If this block has chroma information, we know whether we're |
54 | | // actually going to perform a CfL prediction |
55 | 78.4k | return (CFL_ALLOWED_TYPE)(!is_inter_block(mbmi) && |
56 | 78.4k | mbmi->uv_mode == UV_CFL_PRED); |
57 | 78.4k | } Unexecuted instantiation: decodeframe.c:store_cfl_required Unexecuted instantiation: decodemv.c:store_cfl_required Unexecuted instantiation: bitstream.c:store_cfl_required Unexecuted instantiation: encoder.c:store_cfl_required Unexecuted instantiation: encoder_utils.c:store_cfl_required Unexecuted instantiation: encodetxb.c:store_cfl_required Unexecuted instantiation: ethread.c:store_cfl_required Unexecuted instantiation: global_motion_facade.c:store_cfl_required Unexecuted instantiation: mcomp.c:store_cfl_required Unexecuted instantiation: palette.c:store_cfl_required Unexecuted instantiation: rdopt.c:store_cfl_required Unexecuted instantiation: speed_features.c:store_cfl_required Unexecuted instantiation: superres_scale.c:store_cfl_required Unexecuted instantiation: svc_layercontext.c:store_cfl_required Unexecuted instantiation: tokenize.c:store_cfl_required Unexecuted instantiation: tpl_model.c:store_cfl_required Unexecuted instantiation: tx_search.c:store_cfl_required Unexecuted instantiation: intra_mode_search.c:store_cfl_required Unexecuted instantiation: cfl.c:store_cfl_required Unexecuted instantiation: reconintra.c:store_cfl_required Unexecuted instantiation: allintra_vis.c:store_cfl_required Unexecuted instantiation: compound_type.c:store_cfl_required Unexecuted instantiation: encodeframe.c:store_cfl_required Unexecuted instantiation: encodeframe_utils.c:store_cfl_required Unexecuted instantiation: encodemb.c:store_cfl_required Unexecuted instantiation: encode_strategy.c:store_cfl_required Unexecuted instantiation: interp_search.c:store_cfl_required Unexecuted instantiation: motion_search_facade.c:store_cfl_required partition_search.c:store_cfl_required Line | Count | Source | 39 | 78.4k | const MACROBLOCKD *xd) { | 40 | 78.4k | const MB_MODE_INFO *mbmi = xd->mi[0]; | 41 | | | 42 | 78.4k | if (cm->seq_params->monochrome) return CFL_DISALLOWED; | 43 | | | 44 | 78.4k | if (!xd->is_chroma_ref) { | 45 | | // For non-chroma-reference blocks, we 
should always store the luma pixels, | 46 | | // in case the corresponding chroma-reference block uses CfL. | 47 | | // Note that this can only happen for block sizes which are <8 on | 48 | | // their shortest side, as otherwise they would be chroma reference | 49 | | // blocks. | 50 | 0 | return CFL_ALLOWED; | 51 | 0 | } | 52 | | | 53 | | // If this block has chroma information, we know whether we're | 54 | | // actually going to perform a CfL prediction | 55 | 78.4k | return (CFL_ALLOWED_TYPE)(!is_inter_block(mbmi) && | 56 | 78.4k | mbmi->uv_mode == UV_CFL_PRED); | 57 | 78.4k | } |
Unexecuted instantiation: partition_strategy.c:store_cfl_required Unexecuted instantiation: nonrd_pickmode.c:store_cfl_required |
58 | | |
// Scales the zero-averaged Q3 luma sample by the Q3 CfL alpha and rounds the
// Q6 product back to an integer (Q0) pixel offset.
static INLINE int get_scaled_luma_q0(int alpha_q3, int16_t pred_buf_q3) {
  int scaled_luma_q6 = alpha_q3 * pred_buf_q3;  // Q3 * Q3 == Q6
  return ROUND_POWER_OF_TWO_SIGNED(scaled_luma_q6, 6);
}
63 | | |
// Maps a chroma plane index to its CfL prediction type (plane 1 -> 0,
// plane 2 -> 1).
static INLINE CFL_PRED_TYPE get_cfl_pred_type(PLANE_TYPE plane) {
  // Plane 0 is luma; only chroma planes have a CfL prediction type.
  assert(plane > 0);
  return (CFL_PRED_TYPE)(plane - 1);
}
68 | | |
// Performs the CfL prediction for one transform block of the given chroma
// plane, writing into dst. (Defined in av1/common/cfl.c.)
void cfl_predict_block(MACROBLOCKD *const xd, uint8_t *dst, int dst_stride,
                       TX_SIZE tx_size, int plane);

// Stores luma samples for the whole block for later CfL use.
// (Defined in av1/common/cfl.c.)
void cfl_store_block(MACROBLOCKD *const xd, BLOCK_SIZE bsize, TX_SIZE tx_size);

// Stores luma samples for a single transform block at (row, col).
// (Defined in av1/common/cfl.c.)
void cfl_store_tx(MACROBLOCKD *const xd, int row, int col, TX_SIZE tx_size,
                  BLOCK_SIZE bsize);

// Saves / restores a DC prediction buffer for pred_plane; the pair is
// presumably used to cache encoder-side DC predictions — see cfl.c callers.
void cfl_store_dc_pred(MACROBLOCKD *const xd, const uint8_t *input,
                       CFL_PRED_TYPE pred_plane, int width);

void cfl_load_dc_pred(MACROBLOCKD *const xd, uint8_t *dst, int dst_stride,
                      TX_SIZE tx_size, CFL_PRED_TYPE pred_plane);
// Allows the CFL_SUBSAMPLE function to switch types depending on the bitdepth.
// Note both macros declare a parameter that is always named `cfl_type`; the
// CFL_SUBSAMPLE body below relies on that fixed name.
#define CFL_lbd_TYPE uint8_t *cfl_type
#define CFL_hbd_TYPE uint16_t *cfl_type

// Declare a size-specific wrapper for the size-generic function. The compiler
// will inline the size generic function in here, the advantage is that the size
// will be constant allowing for loop unrolling and other constant propagated
// goodness.
#define CFL_SUBSAMPLE(arch, sub, bd, width, height)                       \
  void cfl_subsample_##bd##_##sub##_##width##x##height##_##arch(          \
      const CFL_##bd##_TYPE, int input_stride, uint16_t *output_q3) {     \
    cfl_luma_subsampling_##sub##_##bd##_##arch(cfl_type, input_stride,    \
                                               output_q3, width, height); \
  }
97 | | |
// Declare size-specific wrappers for all valid CfL sizes, plus a getter that
// maps a TX_SIZE to the matching size-specific subsampling wrapper.
#define CFL_SUBSAMPLE_FUNCTIONS(arch, sub, bd)                            \
  CFL_SUBSAMPLE(arch, sub, bd, 4, 4)                                      \
  CFL_SUBSAMPLE(arch, sub, bd, 8, 8)                                      \
  CFL_SUBSAMPLE(arch, sub, bd, 16, 16)                                    \
  CFL_SUBSAMPLE(arch, sub, bd, 32, 32)                                    \
  CFL_SUBSAMPLE(arch, sub, bd, 4, 8)                                      \
  CFL_SUBSAMPLE(arch, sub, bd, 8, 4)                                      \
  CFL_SUBSAMPLE(arch, sub, bd, 8, 16)                                     \
  CFL_SUBSAMPLE(arch, sub, bd, 16, 8)                                     \
  CFL_SUBSAMPLE(arch, sub, bd, 16, 32)                                    \
  CFL_SUBSAMPLE(arch, sub, bd, 32, 16)                                    \
  CFL_SUBSAMPLE(arch, sub, bd, 4, 16)                                     \
  CFL_SUBSAMPLE(arch, sub, bd, 16, 4)                                     \
  CFL_SUBSAMPLE(arch, sub, bd, 8, 32)                                     \
  CFL_SUBSAMPLE(arch, sub, bd, 32, 8)                                     \
  cfl_subsample_##bd##_fn cfl_get_luma_subsampling_##sub##_##bd##_##arch( \
      TX_SIZE tx_size) {                                                  \
    CFL_SUBSAMPLE_FUNCTION_ARRAY(arch, sub, bd)                           \
    /* Modulo TX_SIZES_ALL so an out-of-range tx_size cannot index the */ \
    /* function pointer array out of bounds; this mirrors the guard in */ \
    /* cfl_get_subtract_average_fn below. */                              \
    return subfn_##sub[tx_size % TX_SIZES_ALL];                           \
  }
119 | | |
// Declare an architecture-specific array of function pointers for size-specific
// wrappers. The array is indexed by TX_SIZE; entries for transform sizes with
// a 64-sample dimension are NULL because those are not valid CfL sizes.
#define CFL_SUBSAMPLE_FUNCTION_ARRAY(arch, sub, bd)                       \
  static const cfl_subsample_##bd##_fn subfn_##sub[TX_SIZES_ALL] = {      \
    cfl_subsample_##bd##_##sub##_4x4_##arch,   /* 4x4 */                  \
    cfl_subsample_##bd##_##sub##_8x8_##arch,   /* 8x8 */                  \
    cfl_subsample_##bd##_##sub##_16x16_##arch, /* 16x16 */                \
    cfl_subsample_##bd##_##sub##_32x32_##arch, /* 32x32 */                \
    NULL,                                      /* 64x64 (invalid CFL size) */ \
    cfl_subsample_##bd##_##sub##_4x8_##arch,   /* 4x8 */                  \
    cfl_subsample_##bd##_##sub##_8x4_##arch,   /* 8x4 */                  \
    cfl_subsample_##bd##_##sub##_8x16_##arch,  /* 8x16 */                 \
    cfl_subsample_##bd##_##sub##_16x8_##arch,  /* 16x8 */                 \
    cfl_subsample_##bd##_##sub##_16x32_##arch, /* 16x32 */                \
    cfl_subsample_##bd##_##sub##_32x16_##arch, /* 32x16 */                \
    NULL,                                      /* 32x64 (invalid CFL size) */ \
    NULL,                                      /* 64x32 (invalid CFL size) */ \
    cfl_subsample_##bd##_##sub##_4x16_##arch,  /* 4x16 */                 \
    cfl_subsample_##bd##_##sub##_16x4_##arch,  /* 16x4 */                 \
    cfl_subsample_##bd##_##sub##_8x32_##arch,  /* 8x32 */                 \
    cfl_subsample_##bd##_##sub##_32x8_##arch,  /* 32x8 */                 \
    NULL,                                      /* 16x64 (invalid CFL size) */ \
    NULL,                                      /* 64x16 (invalid CFL size) */ \
  };
144 | | |
// The RTCD script does not support passing in an array, so we wrap it in this
// function.
#if CONFIG_AV1_HIGHBITDEPTH
// High-bitdepth builds instantiate both the 8-bit (lbd) and 16-bit (hbd)
// wrappers for each chroma subsampling mode (4:2:0, 4:2:2, 4:4:4).
#define CFL_GET_SUBSAMPLE_FUNCTION(arch)  \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 420, lbd) \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 422, lbd) \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 444, lbd) \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 420, hbd) \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 422, hbd) \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 444, hbd)
#else
// Low-bitdepth-only builds need just the lbd variants.
#define CFL_GET_SUBSAMPLE_FUNCTION(arch)  \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 420, lbd) \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 422, lbd) \
  CFL_SUBSAMPLE_FUNCTIONS(arch, 444, lbd)
#endif
161 | | |
// Declare a size-specific wrapper for the size-generic function. The compiler
// will inline the size generic function in here, the advantage is that the size
// will be constant allowing for loop unrolling and other constant propagated
// goodness.
// Per the instantiations in CFL_SUB_AVG_FN, round_offset is (width * height)/2
// and num_pel_log2 is log2(width * height), i.e. the rounding constant and
// shift used to compute the block average.
#define CFL_SUB_AVG_X(arch, width, height, round_offset, num_pel_log2)       \
  void cfl_subtract_average_##width##x##height##_##arch(const uint16_t *src, \
                                                        int16_t *dst) {      \
    subtract_average_##arch(src, dst, width, height, round_offset,           \
                            num_pel_log2);                                   \
  }
172 | | |
// Declare size-specific wrappers for all valid CfL sizes.
// Arguments: (arch, width, height, round_offset = w*h/2, num_pel_log2 =
// log2(w*h)).
#define CFL_SUB_AVG_FN(arch)                                              \
  CFL_SUB_AVG_X(arch, 4, 4, 8, 4)                                         \
  CFL_SUB_AVG_X(arch, 4, 8, 16, 5)                                        \
  CFL_SUB_AVG_X(arch, 4, 16, 32, 6)                                       \
  CFL_SUB_AVG_X(arch, 8, 4, 16, 5)                                        \
  CFL_SUB_AVG_X(arch, 8, 8, 32, 6)                                        \
  CFL_SUB_AVG_X(arch, 8, 16, 64, 7)                                       \
  CFL_SUB_AVG_X(arch, 8, 32, 128, 8)                                      \
  CFL_SUB_AVG_X(arch, 16, 4, 32, 6)                                       \
  CFL_SUB_AVG_X(arch, 16, 8, 64, 7)                                       \
  CFL_SUB_AVG_X(arch, 16, 16, 128, 8)                                     \
  CFL_SUB_AVG_X(arch, 16, 32, 256, 9)                                     \
  CFL_SUB_AVG_X(arch, 32, 8, 128, 8)                                      \
  CFL_SUB_AVG_X(arch, 32, 16, 256, 9)                                     \
  CFL_SUB_AVG_X(arch, 32, 32, 512, 10)                                    \
  cfl_subtract_average_fn cfl_get_subtract_average_fn_##arch(             \
      TX_SIZE tx_size) {                                                  \
    static const cfl_subtract_average_fn sub_avg[TX_SIZES_ALL] = {        \
      cfl_subtract_average_4x4_##arch,   /* 4x4 */                        \
      cfl_subtract_average_8x8_##arch,   /* 8x8 */                        \
      cfl_subtract_average_16x16_##arch, /* 16x16 */                      \
      cfl_subtract_average_32x32_##arch, /* 32x32 */                      \
      NULL,                              /* 64x64 (invalid CFL size) */   \
      cfl_subtract_average_4x8_##arch,   /* 4x8 */                        \
      cfl_subtract_average_8x4_##arch,   /* 8x4 */                        \
      cfl_subtract_average_8x16_##arch,  /* 8x16 */                       \
      cfl_subtract_average_16x8_##arch,  /* 16x8 */                       \
      cfl_subtract_average_16x32_##arch, /* 16x32 */                      \
      cfl_subtract_average_32x16_##arch, /* 32x16 */                      \
      NULL,                              /* 32x64 (invalid CFL size) */   \
      NULL,                              /* 64x32 (invalid CFL size) */   \
      cfl_subtract_average_4x16_##arch,  /* 4x16 */                       \
      cfl_subtract_average_16x4_##arch,  /* 16x4 */                       \
      cfl_subtract_average_8x32_##arch,  /* 8x32 */                       \
      cfl_subtract_average_32x8_##arch,  /* 32x8 */                       \
      NULL,                              /* 16x64 (invalid CFL size) */   \
      NULL,                              /* 64x16 (invalid CFL size) */   \
    };                                                                    \
    /* Modulo TX_SIZES_ALL to ensure that an attacker won't be able to */ \
    /* index the function pointer array out of bounds. */                 \
    return sub_avg[tx_size % TX_SIZES_ALL];                              \
  }
216 | | |
// For VSX SIMD optimization, the C versions of width == 4 subtract are
// faster than the VSX. As such, the VSX code calls the C versions.
// (These prototypes expose the C wrappers generated by CFL_SUB_AVG_FN(c).)
void cfl_subtract_average_4x4_c(const uint16_t *src, int16_t *dst);
void cfl_subtract_average_4x8_c(const uint16_t *src, int16_t *dst);
void cfl_subtract_average_4x16_c(const uint16_t *src, int16_t *dst);
222 | | |
// Defines one low-bitdepth CfL predict wrapper,
// cfl_predict_lbd_<width>x<height>_<arch>(), for a fixed block size.
// The wrapper forwards to the size-generic cfl_predict_lbd_<arch>()
// implementation, passing the width/height as runtime arguments.
#define CFL_PREDICT_lbd(arch, width, height)                              \
  void cfl_predict_lbd_##width##x##height##_##arch(                       \
      const int16_t *pred_buf_q3, uint8_t *dst, int dst_stride,           \
      int alpha_q3) {                                                     \
    cfl_predict_lbd_##arch(pred_buf_q3, dst, dst_stride, alpha_q3, width, \
                           height);                                       \
  }
Line | Count | Source | 226 | 13.5k | int alpha_q3) { \ | 227 | 13.5k | cfl_predict_lbd_##arch(pred_buf_q3, dst, dst_stride, alpha_q3, width, \ | 228 | 13.5k | height); \ | 229 | 13.5k | } |
Unexecuted instantiation: cfl_predict_lbd_4x16_c Line | Count | Source | 226 | 15.9k | int alpha_q3) { \ | 227 | 15.9k | cfl_predict_lbd_##arch(pred_buf_q3, dst, dst_stride, alpha_q3, width, \ | 228 | 15.9k | height); \ | 229 | 15.9k | } |
Line | Count | Source | 226 | 41.2k | int alpha_q3) { \ | 227 | 41.2k | cfl_predict_lbd_##arch(pred_buf_q3, dst, dst_stride, alpha_q3, width, \ | 228 | 41.2k | height); \ | 229 | 41.2k | } |
Line | Count | Source | 226 | 15.2k | int alpha_q3) { \ | 227 | 15.2k | cfl_predict_lbd_##arch(pred_buf_q3, dst, dst_stride, alpha_q3, width, \ | 228 | 15.2k | height); \ | 229 | 15.2k | } |
Unexecuted instantiation: cfl_predict_lbd_8x32_c Unexecuted instantiation: cfl_predict_lbd_16x4_c Line | Count | Source | 226 | 16.4k | int alpha_q3) { \ | 227 | 16.4k | cfl_predict_lbd_##arch(pred_buf_q3, dst, dst_stride, alpha_q3, width, \ | 228 | 16.4k | height); \ | 229 | 16.4k | } |
Line | Count | Source | 226 | 105k | int alpha_q3) { \ | 227 | 105k | cfl_predict_lbd_##arch(pred_buf_q3, dst, dst_stride, alpha_q3, width, \ | 228 | 105k | height); \ | 229 | 105k | } |
Unexecuted instantiation: cfl_predict_lbd_16x32_c Unexecuted instantiation: cfl_predict_lbd_32x8_c Unexecuted instantiation: cfl_predict_lbd_32x16_c Unexecuted instantiation: cfl_predict_lbd_32x32_c |
230 | | |
#if CONFIG_AV1_HIGHBITDEPTH
// High-bitdepth counterpart of CFL_PREDICT_lbd: dst samples are uint16_t
// and the wrapper additionally forwards the bit depth (bd) to the
// size-generic cfl_predict_hbd_<arch>() implementation.
#define CFL_PREDICT_hbd(arch, width, height)                                   \
  void cfl_predict_hbd_##width##x##height##_##arch(                            \
      const int16_t *pred_buf_q3, uint16_t *dst, int dst_stride, int alpha_q3, \
      int bd) {                                                                \
    cfl_predict_hbd_##arch(pred_buf_q3, dst, dst_stride, alpha_q3, bd, width,  \
                           height);                                            \
  }
#endif
240 | | |
// This wrapper exists because clang format does not like calling macros with
// lowercase letters. It pastes `bd` (lbd or hbd) onto CFL_PREDICT_ to select
// CFL_PREDICT_lbd or CFL_PREDICT_hbd above.
#define CFL_PREDICT_X(arch, width, height, bd) \
  CFL_PREDICT_##bd(arch, width, height)
245 | | |
// Instantiates every CfL predict wrapper for the given arch/bit depth, then
// defines cfl_get_predict_<bd>_fn_<arch>(), which maps a TX_SIZE to the
// matching wrapper via a lookup table.
// NOTE(review): the table entries must stay in exact TX_SIZE enum order;
// sizes with a 64-sample dimension are not valid for CfL and map to NULL
// (callers are expected never to request them).
#define CFL_PREDICT_FN(arch, bd)                                            \
  CFL_PREDICT_X(arch, 4, 4, bd)                                             \
  CFL_PREDICT_X(arch, 4, 8, bd)                                             \
  CFL_PREDICT_X(arch, 4, 16, bd)                                            \
  CFL_PREDICT_X(arch, 8, 4, bd)                                             \
  CFL_PREDICT_X(arch, 8, 8, bd)                                             \
  CFL_PREDICT_X(arch, 8, 16, bd)                                            \
  CFL_PREDICT_X(arch, 8, 32, bd)                                            \
  CFL_PREDICT_X(arch, 16, 4, bd)                                            \
  CFL_PREDICT_X(arch, 16, 8, bd)                                            \
  CFL_PREDICT_X(arch, 16, 16, bd)                                           \
  CFL_PREDICT_X(arch, 16, 32, bd)                                           \
  CFL_PREDICT_X(arch, 32, 8, bd)                                            \
  CFL_PREDICT_X(arch, 32, 16, bd)                                           \
  CFL_PREDICT_X(arch, 32, 32, bd)                                           \
  cfl_predict_##bd##_fn cfl_get_predict_##bd##_fn_##arch(TX_SIZE tx_size) { \
    static const cfl_predict_##bd##_fn pred[TX_SIZES_ALL] = {               \
      cfl_predict_##bd##_4x4_##arch,   /* 4x4 */                            \
      cfl_predict_##bd##_8x8_##arch,   /* 8x8 */                            \
      cfl_predict_##bd##_16x16_##arch, /* 16x16 */                          \
      cfl_predict_##bd##_32x32_##arch, /* 32x32 */                          \
      NULL,                            /* 64x64 (invalid CFL size) */       \
      cfl_predict_##bd##_4x8_##arch,   /* 4x8 */                            \
      cfl_predict_##bd##_8x4_##arch,   /* 8x4 */                            \
      cfl_predict_##bd##_8x16_##arch,  /* 8x16 */                           \
      cfl_predict_##bd##_16x8_##arch,  /* 16x8 */                           \
      cfl_predict_##bd##_16x32_##arch, /* 16x32 */                          \
      cfl_predict_##bd##_32x16_##arch, /* 32x16 */                          \
      NULL,                            /* 32x64 (invalid CFL size) */       \
      NULL,                            /* 64x32 (invalid CFL size) */       \
      cfl_predict_##bd##_4x16_##arch,  /* 4x16 */                           \
      cfl_predict_##bd##_16x4_##arch,  /* 16x4 */                           \
      cfl_predict_##bd##_8x32_##arch,  /* 8x32 */                           \
      cfl_predict_##bd##_32x8_##arch,  /* 32x8 */                           \
      NULL,                            /* 16x64 (invalid CFL size) */       \
      NULL,                            /* 64x16 (invalid CFL size) */       \
    };                                                                      \
    /* Modulo TX_SIZES_ALL to ensure that an attacker won't be able to */   \
    /* index the function pointer array out of bounds. */                   \
    return pred[tx_size % TX_SIZES_ALL];                                    \
  }
287 | | |
288 | | #endif // AOM_AV1_COMMON_CFL_H_ |