/*****************************************************************************
 * rdo.c: rate-distortion optimization
 *****************************************************************************
 * Copyright (C) 2005-2025 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

/* duplicate all the writer functions, just calculating bit cost
 * instead of writing the bitstream.
 * TODO: use these for fast 1st pass too. */

#define RDO_SKIP_BS 1

/* Transition and size tables for abs<9 MVD and residual coding */
/* Consist of i_prefix-2 1s, one zero, and a bypass sign bit */
#define x264_cabac_transition_unary x264_template(cabac_transition_unary)
uint8_t x264_cabac_transition_unary[15][128];
#define x264_cabac_size_unary x264_template(cabac_size_unary)
uint16_t x264_cabac_size_unary[15][128];
/* Transition and size tables for abs>9 MVD */
/* Consist of 5 1s and a bypass sign bit */
static uint8_t cabac_transition_5ones[128];
static uint16_t cabac_size_5ones[128];

/* CAVLC: produces exactly the same bit count as a normal encode */
/* this probably still leaves some unnecessary computations */
#define bs_write1(s,v)     ((s)->i_bits_encoded += 1)
#define bs_write(s,n,v)    ((s)->i_bits_encoded += (n))
#define bs_write_ue(s,v)   ((s)->i_bits_encoded += bs_size_ue(v))
#define bs_write_se(s,v)   ((s)->i_bits_encoded += bs_size_se(v))
#define bs_write_te(s,v,l) ((s)->i_bits_encoded += bs_size_te(v,l))
#undef x264_macroblock_write_cavlc
#define x264_macroblock_write_cavlc static macroblock_size_cavlc
#include "cavlc.c"
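
/* Illustrative sketch (not part of the file): with the counting definitions
 * above in effect, any writer routine compiled against them accumulates code
 * lengths in i_bits_encoded instead of emitting bits, which is how including
 * cavlc.c here turns x264_macroblock_write_cavlc into macroblock_size_cavlc.
 * The toy_* names below are hypothetical. */
#if 0
typedef struct { int i_bits_encoded; } toy_bs_t;
#define toy_bs_write_se( s, v ) ((s)->i_bits_encoded += bs_size_se(v))
static int toy_mvd_size( toy_bs_t *s, int mvdx, int mvdy )
{
    /* same body a real writer would have; after macro substitution it only
     * sums code lengths, so the caller reads back a bit cost */
    toy_bs_write_se( s, mvdx );
    toy_bs_write_se( s, mvdy );
    return s->i_bits_encoded;
}
#endif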

/* CABAC: not exactly the same. x264_cabac_size_decision() keeps track of
 * fractional bits, but only with finite precision. */
#undef x264_cabac_encode_decision
#undef x264_cabac_encode_decision_noup
#undef x264_cabac_encode_bypass
#undef x264_cabac_encode_terminal
#undef x264_cabac_encode_ue_bypass
#define x264_cabac_encode_decision(c,x,v) x264_cabac_size_decision(c,x,v)
#define x264_cabac_encode_decision_noup(c,x,v) x264_cabac_size_decision_noup(c,x,v)
#define x264_cabac_encode_terminal(c) ((c)->f8_bits_encoded += 7)
#define x264_cabac_encode_bypass(c,v) ((c)->f8_bits_encoded += 256)
#define x264_cabac_encode_ue_bypass(c,e,v) ((c)->f8_bits_encoded += (bs_size_ue_big(v+(1<<e)-1)-e)<<8)
#undef x264_macroblock_write_cabac
#define x264_macroblock_write_cabac static macroblock_size_cabac
#include "cabac.c"
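
/* The CABAC counters above work in fixed point: f8_bits_encoded carries 8
 * fractional bits, so a bypass bin adds 256 (exactly one bit) and a terminal
 * bin is approximated as 7/256 of a bit.  A hedged sketch of the conversion
 * used by rd_cost_mb() below, where i_lambda2 also carries 8 fractional bits: */
#if 0
static int toy_f8_to_cost( uint64_t f8_bits_encoded, int i_lambda2 )
{
    /* 8 fractional bits from f8 + 8 from lambda2 -> drop 16 with rounding */
    return ( f8_bits_encoded * i_lambda2 + 32768 ) >> 16;
}
#endif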

#define COPY_CABAC h->mc.memcpy_aligned( &cabac_tmp.f8_bits_encoded, &h->cabac.f8_bits_encoded, \
        sizeof(int) + (CHROMA444 ? 1024+12 : 460) )
#define COPY_CABAC_PART( pos, size ) memcpy( &cb->state[pos], &h->cabac.state[pos], size )

static ALWAYS_INLINE uint64_t cached_hadamard( x264_t *h, int size, int x, int y )
{
    static const uint8_t hadamard_shift_x[4] = {4, 4, 3, 3};
    static const uint8_t hadamard_shift_y[4] = {4-0, 3-0, 4-1, 3-1};
    static const uint8_t hadamard_offset[4] = {0, 1, 3, 5};
    int cache_index = (x >> hadamard_shift_x[size]) + (y >> hadamard_shift_y[size])
                    + hadamard_offset[size];
    uint64_t res = h->mb.pic.fenc_hadamard_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        res = h->pixf.hadamard_ac[size]( fenc, FENC_STRIDE );
        h->mb.pic.fenc_hadamard_cache[cache_index] = res + 1;
        return res;
    }
}

static ALWAYS_INLINE int cached_satd( x264_t *h, int size, int x, int y )
{
    static const uint8_t satd_shift_x[3] = {3, 2, 2};
    static const uint8_t satd_shift_y[3] = {2-1, 3-2, 2-2};
    static const uint8_t satd_offset[3] = {0, 8, 16};
    int cache_index = (x >> satd_shift_x[size - PIXEL_8x4]) + (y >> satd_shift_y[size - PIXEL_8x4])
                    + satd_offset[size - PIXEL_8x4];
    int res = h->mb.pic.fenc_satd_cache[cache_index];
    if( res )
        return res - 1;
    else
    {
        pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
        int dc = h->pixf.sad[size]( fenc, FENC_STRIDE, (pixel*)x264_zero, 0 ) >> 1;
        res = h->pixf.satd[size]( fenc, FENC_STRIDE, (pixel*)x264_zero, 0 ) - dc;
        h->mb.pic.fenc_satd_cache[cache_index] = res + 1;
        return res;
    }
}
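
/* Both caches above store "value + 1" so that a zero entry doubles as the
 * "not computed yet" marker and no separate validity flags are needed; the
 * cache arrays just have to be zeroed beforehand.  The pattern in isolation
 * (toy_* names and expensive_cost() are hypothetical): */
#if 0
static uint64_t toy_cache[8]; /* zeroed before use */
static uint64_t toy_cached_cost( int idx )
{
    if( toy_cache[idx] )
        return toy_cache[idx] - 1;         /* hit: undo the +1 bias */
    uint64_t cost = expensive_cost( idx ); /* hypothetical computation */
    toy_cache[idx] = cost + 1;             /* 0 stays reserved for "empty" */
    return cost;
}
#endif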

/* Psy RD distortion metric: SSD plus "Absolute Difference of Complexities" */
/* SATD and SA8D are used to measure block complexity. */
/* The differences in SATD and in SA8D scores are both used, to avoid bias from the DCT size: using SATD */
/* alone, for example, results in overuse of the 8x8 DCT, while the opposite occurs when using SA8D alone. */

/* FIXME: Is there a better metric than averaged SATD/SA8D difference for complexity difference? */
/* The Hadamard transform is recursive, so a combined SATD+SA8D can be computed faster by exploiting that. */
/* This optimization can also be used in non-RD transform decision. */
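
/* A distilled restatement of the metric as ssd_plane() below implements it
 * (hedged sketch; the complexity inputs are assumed precomputed): psy_rd and
 * its lambda factor are both 8-bit fixed point, hence the +128 and >>8. */
#if 0
static int toy_psy_ssd( int ssd, int cplx_fenc, int cplx_fdec,
                        int psy_rd, int psy_rd_lambda )
{
    int satd = abs( cplx_fdec - cplx_fenc );
    return ssd + (int)(((int64_t)satd * psy_rd * psy_rd_lambda + 128) >> 8);
}
#endif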

static inline int ssd_plane( x264_t *h, int size, int p, int x, int y )
{
    int satd = 0;
    pixel *fdec = h->mb.pic.p_fdec[p] + x + y*FDEC_STRIDE;
    pixel *fenc = h->mb.pic.p_fenc[p] + x + y*FENC_STRIDE;
    if( p == 0 && h->mb.i_psy_rd )
    {
        /* If the plane is smaller than 8x8, we can't do an SA8D; this probably isn't a big problem. */
        if( size <= PIXEL_8x8 )
        {
            uint64_t fdec_acs = h->pixf.hadamard_ac[size]( fdec, FDEC_STRIDE );
            uint64_t fenc_acs = cached_hadamard( h, size, x, y );
            satd = abs((int32_t)fdec_acs - (int32_t)fenc_acs)
                 + abs((int32_t)(fdec_acs>>32) - (int32_t)(fenc_acs>>32));
            satd >>= 1;
        }
        else
        {
            int dc = h->pixf.sad[size]( fdec, FDEC_STRIDE, (pixel*)x264_zero, 0 ) >> 1;
            satd = abs(h->pixf.satd[size]( fdec, FDEC_STRIDE, (pixel*)x264_zero, 0 ) - dc - cached_satd( h, size, x, y ));
        }
        int64_t tmp = ((int64_t)satd * h->mb.i_psy_rd * h->mb.i_psy_rd_lambda + 128) >> 8;
        satd = X264_MIN( tmp, COST_MAX );
    }
    return h->pixf.ssd[size](fenc, FENC_STRIDE, fdec, FDEC_STRIDE) + satd;
}

static inline int ssd_mb( x264_t *h )
{
    int i_ssd = ssd_plane( h, PIXEL_16x16, 0, 0, 0 );
    if( CHROMA_FORMAT )
    {
        int chroma_size = h->luma2chroma_pixel[PIXEL_16x16];
        int chroma_ssd = ssd_plane( h, chroma_size, 1, 0, 0 ) + ssd_plane( h, chroma_size, 2, 0, 0 );
        i_ssd += ((uint64_t)chroma_ssd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
    }
    return i_ssd;
}

static int rd_cost_mb( x264_t *h, int i_lambda2 )
{
    int b_transform_bak = h->mb.b_transform_8x8;
    int i_ssd;
    int i_bits;
    int type_bak = h->mb.i_type;

    x264_macroblock_encode( h );

    if( h->mb.b_deblock_rdo )
        x264_macroblock_deblock( h );

    i_ssd = ssd_mb( h );

    if( IS_SKIP( h->mb.i_type ) )
    {
        i_bits = (1 * i_lambda2 + 128) >> 8;
    }
    else if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        macroblock_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 32768 ) >> 16;
    }
    else
    {
        macroblock_size_cavlc( h );
        i_bits = ( (uint64_t)h->out.bs.i_bits_encoded * i_lambda2 + 128 ) >> 8;
    }

    h->mb.b_transform_8x8 = b_transform_bak;
    h->mb.i_type = type_bak;

    return X264_MIN( i_ssd + i_bits, COST_MAX );
}

/* partition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
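
/* Hedged illustration of that scaling: rd_cost_mb() rounds lambda2*bits down
 * to integer units before adding the SSD, while the partition functions below
 * return (i_ssd<<8) + i_bits with lambda2's 8 fractional bits still in place: */
#if 0
static uint64_t toy_partition_cost( uint64_t i_ssd, uint64_t i_bits_lambda2 )
{
    /* i_bits_lambda2 = bits * i_lambda2, unrounded; i_ssd is shifted up to
     * the same 1/256 scale instead of the bits being shifted down */
    return (i_ssd << 8) + i_bits_lambda2;
}
#endif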

static uint64_t rd_cost_subpart( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;

    x264_macroblock_encode_p4x4( h, i4 );
    if( i_pixel == PIXEL_8x4 )
        x264_macroblock_encode_p4x4( h, i4+1 );
    if( i_pixel == PIXEL_4x8 )
        x264_macroblock_encode_p4x4( h, i4+2 );

    i_ssd = ssd_plane( h, i_pixel, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, i_pixel, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
                      + ssd_plane( h, i_pixel, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        subpartition_size_cabac( h, &cabac_tmp, i4, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)subpartition_size_cavlc( h, i4, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
    uint64_t i_ssd, i_bits;
    int i8 = i4 >> 2;

    if( i_pixel == PIXEL_16x16 )
    {
        int i_cost = rd_cost_mb( h, i_lambda2 );
        return i_cost;
    }

    if( i_pixel > PIXEL_8x8 )
        return rd_cost_subpart( h, i_lambda2, i4, i_pixel );

    h->mb.i_cbp_luma = 0;

    x264_macroblock_encode_p8x8( h, i8 );
    if( i_pixel == PIXEL_16x8 )
        x264_macroblock_encode_p8x8( h, i8+1 );
    if( i_pixel == PIXEL_8x16 )
        x264_macroblock_encode_p8x8( h, i8+2 );

    int ssd_x = 8*(i8&1);
    int ssd_y = 8*(i8>>1);
    i_ssd = ssd_plane( h, i_pixel, 0, ssd_x, ssd_y );
    if( CHROMA_FORMAT )
    {
        int chroma_size = h->luma2chroma_pixel[i_pixel];
        int chroma_ssd = ssd_plane( h, chroma_size, 1, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT )
                       + ssd_plane( h, chroma_size, 2, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT );
        i_ssd += ((uint64_t)chroma_ssd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        partition_size_cabac( h, &cabac_tmp, i8, i_pixel );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)partition_size_cavlc( h, i8, i_pixel ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode, pixel edge[4][32] )
{
    uint64_t i_ssd, i_bits;
    int plane_count = CHROMA444 ? 3 : 1;
    int i_qp = h->mb.i_qp;
    h->mb.i_cbp_luma &= ~(1<<i8);
    h->mb.b_transform_8x8 = 1;

    for( int p = 0; p < plane_count; p++ )
    {
        x264_mb_encode_i8x8( h, p, i8, i_qp, i_mode, edge[p], 1 );
        i_qp = h->mb.i_chroma_qp;
    }

    i_ssd = ssd_plane( h, PIXEL_8x8, 0, (i8&1)*8, (i8>>1)*8 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, PIXEL_8x8, 1, (i8&1)*8, (i8>>1)*8 )
                      + ssd_plane( h, PIXEL_8x8, 2, (i8&1)*8, (i8>>1)*8 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        partition_i8x8_size_cabac( h, &cabac_tmp, i8, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)partition_i8x8_size_cavlc( h, i8, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t rd_cost_i4x4( x264_t *h, int i_lambda2, int i4, int i_mode )
{
    uint64_t i_ssd, i_bits;
    int plane_count = CHROMA444 ? 3 : 1;
    int i_qp = h->mb.i_qp;

    for( int p = 0; p < plane_count; p++ )
    {
        x264_mb_encode_i4x4( h, p, i4, i_qp, i_mode, 1 );
        i_qp = h->mb.i_chroma_qp;
    }

    i_ssd = ssd_plane( h, PIXEL_4x4, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
    if( CHROMA444 )
    {
        int chromassd = ssd_plane( h, PIXEL_4x4, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
                      + ssd_plane( h, PIXEL_4x4, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
        chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
        i_ssd += chromassd;
    }

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        partition_i4x4_size_cabac( h, &cabac_tmp, i4, i_mode );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)partition_i4x4_size_cavlc( h, i4, i_mode ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}

static uint64_t rd_cost_chroma( x264_t *h, int i_lambda2, int i_mode, int b_dct )
{
    uint64_t i_ssd, i_bits;

    if( b_dct )
        x264_mb_encode_chroma( h, 0, h->mb.i_chroma_qp );

    int chromapix = h->luma2chroma_pixel[PIXEL_16x16];
    i_ssd = ssd_plane( h, chromapix, 1, 0, 0 )
          + ssd_plane( h, chromapix, 2, 0, 0 );

    h->mb.i_chroma_pred_mode = i_mode;

    if( h->param.b_cabac )
    {
        x264_cabac_t cabac_tmp;
        COPY_CABAC;
        chroma_size_cabac( h, &cabac_tmp );
        i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
    }
    else
        i_bits = (uint64_t)chroma_size_cavlc( h ) * i_lambda2;

    return (i_ssd<<8) + i_bits;
}
/****************************************************************************
 * Trellis RD quantization
 ****************************************************************************/

#define TRELLIS_SCORE_MAX (~0ULL) // marks the node as invalid
#define TRELLIS_SCORE_BIAS (1ULL<<60) // bias so that all valid scores are positive, even after negative contributions from psy
#define CABAC_SIZE_BITS 8
#define LAMBDA_BITS 4

/* precalculate the cost of coding various combinations of bits in a single context */
void x264_rdo_init( void )
{
    for( int i_prefix = 0; i_prefix < 15; i_prefix++ )
    {
        for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
        {
            int f8_bits = 0;
            uint8_t ctx = i_ctx;

            for( int i = 1; i < i_prefix; i++ )
                f8_bits += x264_cabac_size_decision2( &ctx, 1 );
            if( i_prefix > 0 && i_prefix < 14 )
                f8_bits += x264_cabac_size_decision2( &ctx, 0 );
            f8_bits += 1 << CABAC_SIZE_BITS; //sign

            x264_cabac_size_unary[i_prefix][i_ctx] = f8_bits;
            x264_cabac_transition_unary[i_prefix][i_ctx] = ctx;
        }
    }
    for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
    {
        int f8_bits = 0;
        uint8_t ctx = i_ctx;

        for( int i = 0; i < 5; i++ )
            f8_bits += x264_cabac_size_decision2( &ctx, 1 );
        f8_bits += 1 << CABAC_SIZE_BITS; //sign

        cabac_size_5ones[i_ctx] = f8_bits;
        cabac_transition_5ones[i_ctx] = ctx;
    }
}
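
/* Hedged usage sketch: the tables above reduce the CABAC cost of an abs level
 * to a couple of lookups; x264_cabac_size_unary already includes the bypass
 * sign bit, and levels >= 15 add a bypass-coded Exp-Golomb escape.  This
 * mirrors how trellis_dc_shortcut() below consumes the tables: */
#if 0
static int toy_abs_level_f8_cost( int abs_level, uint8_t gt1_ctx_state )
{
    int prefix = X264_MIN( abs_level - 1, 14 );
    int f8_bits = x264_cabac_size_unary[prefix][gt1_ctx_state];
    if( abs_level >= 15 )
        f8_bits += bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS;
    return f8_bits; /* in 1/256-bit units */
}
#endif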

typedef struct
{
    uint64_t score;
    int level_idx; // index into level_tree[]
    uint8_t cabac_state[4]; // just contexts 0,4,8,9 of the 10 relevant to coding abs_level_m1
} trellis_node_t;

typedef struct
{
    uint16_t next;
    uint16_t abs_level;
} trellis_level_t;
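
/* Candidate levels are stored as a backwards-linked list: each node's
 * level_idx names the head of its path through level_tree[], and following
 * .next walks the levels in scan order (entry 0 self-loops with level 0).
 * A hedged sketch of the read-out, matching the final loop of
 * quant_trellis_cabac(): */
#if 0
static void toy_walk_levels( const trellis_level_t *level_tree, int head,
                             uint16_t *out, int n )
{
    for( int i = 0; i < n; i++ )
    {
        out[i] = level_tree[head].abs_level;
        head = level_tree[head].next;
    }
}
#endif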

// TODO:
//  save cabac state between blocks?
//  use trellis' RD score instead of x264_mb_decimate_score?
//  code 8x8 sig/last flags forwards with deadzone and save the contexts at
//    each position?
//  change weights when using CQMs?

// possible optimizations:
//  make scores fit in 32bit
//  save quantized coefs during rd, to avoid a duplicate trellis in the final encode
//  if trellising all MBRD modes, finish SSD calculation so we can skip all of
//    the normal dequant/idct/ssd/cabac

// the unquant_mf here is not the same as dequant_mf:
// in normal operation (dct->quant->dequant->idct) the dct and idct are not
// normalized. quant/dequant absorb those scaling factors.
// in this function, we just do (quant->unquant) and want the output to be
// comparable to the input. so unquant is the direct inverse of quant,
// and uses the dct scaling factors, not the idct ones.
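/* Hedged numeric illustration of that roundtrip, using the same fixed-point
 * conventions as the code below (quant with a 16-bit shift as in
 * quant_trellis_cavlc(), unquant with an 8-bit shift; coef is assumed
 * non-negative): */
#if 0
static int toy_quant_unquant( int coef, int quant_mf, int quant_bias, int unquant_mf )
{
    int q = ( coef * quant_mf + quant_bias ) >> 16; /* forward quant */
    return ( unquant_mf * q + 128 ) >> 8;           /* direct inverse: ~coef */
}
#endif
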
/* apply the sign of y to x (x is assumed non-negative; y>>31 is 0 or -1) */
#define SIGN(x,y) ((x^(y >> 31))-(y >> 31))

#define SET_LEVEL(ndst, nsrc, l) {\
    if( sizeof(trellis_level_t) == sizeof(uint32_t) )\
        M32( &level_tree[levels_used] ) = pack16to32( nsrc.level_idx, l );\
    else\
        level_tree[levels_used] = (trellis_level_t){ nsrc.level_idx, l };\
    ndst.level_idx = levels_used;\
    levels_used++;\
}

// encode all values of the dc coef in a block which is known to have no ac
static NOINLINE
int trellis_dc_shortcut( int sign_coef, int quant_coef, int unquant_mf, int coef_weight, int lambda2, uint8_t *cabac_state, int cost_sig )
{
    uint64_t bscore = TRELLIS_SCORE_MAX;
    int ret = 0;
    int q = abs( quant_coef );
    for( int abs_level = q-1; abs_level <= q; abs_level++ )
    {
        int unquant_abs_level = (unquant_mf * abs_level + 128) >> 8;

        /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */
        int d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);
        uint64_t score = (int64_t)d*d * coef_weight;

        /* code the proposed level, and count how much entropy it would take */
        if( abs_level )
        {
            unsigned f8_bits = cost_sig;
            int prefix = X264_MIN( abs_level - 1, 14 );
            f8_bits += x264_cabac_size_decision_noup2( cabac_state+1, prefix > 0 );
            f8_bits += x264_cabac_size_unary[prefix][cabac_state[5]];
            if( abs_level >= 15 )
                f8_bits += bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS;
            score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
        }

        COPY2_IF_LT( bscore, score, ret, abs_level );
    }
    return SIGN(ret, sign_coef);
}

// encode one value of one coef in one context
static ALWAYS_INLINE
int trellis_coef( int j, int const_level, int abs_level, int prefix, int suffix_cost,
                  int node_ctx, int level1_ctx, int levelgt1_ctx, uint64_t ssd, int cost_siglast[3],
                  trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                  trellis_level_t *level_tree, int levels_used, int lambda2, uint8_t *level_state )
{
    uint64_t score = nodes_prev[j].score + ssd;
    /* code the proposed level, and count how much entropy it would take */
    unsigned f8_bits = cost_siglast[ j ? 1 : 2 ];
    uint8_t level1_state = (j >= 3) ? nodes_prev[j].cabac_state[level1_ctx>>2] : level_state[level1_ctx];
    f8_bits += x264_cabac_entropy[level1_state ^ (const_level > 1)];
    uint8_t levelgt1_state;
    if( const_level > 1 )
    {
        levelgt1_state = j >= 6 ? nodes_prev[j].cabac_state[levelgt1_ctx-6] : level_state[levelgt1_ctx];
        f8_bits += x264_cabac_size_unary[prefix][levelgt1_state] + suffix_cost;
    }
    else
        f8_bits += 1 << CABAC_SIZE_BITS;
    score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );

    /* save the node if it's better than any existing node with the same cabac ctx */
    if( score < nodes_cur[node_ctx].score )
    {
        nodes_cur[node_ctx].score = score;
        if( j == 2 || (j <= 3 && node_ctx == 4) ) // init from input state
            M32(nodes_cur[node_ctx].cabac_state) = M32(level_state+12);
        else if( j >= 3 )
            M32(nodes_cur[node_ctx].cabac_state) = M32(nodes_prev[j].cabac_state);
        if( j >= 3 ) // skip the transition if we're not going to reuse the context
            nodes_cur[node_ctx].cabac_state[level1_ctx>>2] = x264_cabac_transition[level1_state][const_level > 1];
        if( const_level > 1 && node_ctx == 7 )
            nodes_cur[node_ctx].cabac_state[levelgt1_ctx-6] = x264_cabac_transition_unary[prefix][levelgt1_state];
        nodes_cur[node_ctx].level_idx = nodes_prev[j].level_idx;
        SET_LEVEL( nodes_cur[node_ctx], nodes_prev[j], abs_level );
    }
    return levels_used;
}

// encode one value of one coef in all contexts, templated by which value that is.
// in ctx_lo, the set of live nodes is contiguous and starts at ctx0, so return as soon as we've seen one failure.
// in ctx_hi, they're contiguous within each block of 4 ctxs, but not necessarily starting at the beginning,
// so exploiting that would be more complicated.
static NOINLINE
int trellis_coef0_0( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used )
{
    nodes_cur[0].score = nodes_prev[0].score + ssd0;
    nodes_cur[0].level_idx = nodes_prev[0].level_idx;
    for( int j = 1; j < 4 && (int64_t)nodes_prev[j].score >= 0; j++ )
    {
        nodes_cur[j].score = nodes_prev[j].score;
        if( j >= 3 )
            M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
        SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
    }
    return levels_used;
}

static NOINLINE
int trellis_coef0_1( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used )
{
    for( int j = 1; j < 8; j++ )
        // this branch only affects speed, not function; there's nothing wrong with updating invalid nodes in coef0.
        if( (int64_t)nodes_prev[j].score >= 0 )
        {
            nodes_cur[j].score = nodes_prev[j].score;
            if( j >= 3 )
                M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
            SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
        }
    return levels_used;
}

#define COEF(const_level, ctx_hi, j, ...)\
    if( !j || (int64_t)nodes_prev[j].score >= 0 )\
        levels_used = trellis_coef( j, const_level, abs_level, prefix, suffix_cost, __VA_ARGS__,\
                                    j?ssd1:ssd0, cost_siglast, nodes_cur, nodes_prev,\
                                    level_tree, levels_used, lambda2, level_state );\
    else if( !ctx_hi )\
        return levels_used;

static NOINLINE
int trellis_coef1_0( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state )
{
    int abs_level = 1, prefix = 1, suffix_cost = 0;
    COEF( 1, 0, 0, 1, 1, 0 );
    COEF( 1, 0, 1, 2, 2, 0 );
    COEF( 1, 0, 2, 3, 3, 0 );
    COEF( 1, 0, 3, 3, 4, 0 );
    return levels_used;
}

static NOINLINE
int trellis_coef1_1( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state )
{
    int abs_level = 1, prefix = 1, suffix_cost = 0;
    COEF( 1, 1, 1, 2, 2, 0 );
    COEF( 1, 1, 2, 3, 3, 0 );
    COEF( 1, 1, 3, 3, 4, 0 );
    COEF( 1, 1, 4, 4, 0, 0 );
    COEF( 1, 1, 5, 5, 0, 0 );
    COEF( 1, 1, 6, 6, 0, 0 );
    COEF( 1, 1, 7, 7, 0, 0 );
    return levels_used;
}

static NOINLINE
int trellis_coefn_0( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state, int levelgt1_ctx )
{
    int prefix = X264_MIN( abs_level-1, 14 );
    int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
    COEF( 2, 0, 0, 4, 1, 5 );
    COEF( 2, 0, 1, 4, 2, 5 );
    COEF( 2, 0, 2, 4, 3, 5 );
    COEF( 2, 0, 3, 4, 4, 5 );
    return levels_used;
}

static NOINLINE
int trellis_coefn_1( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
                     trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
                     trellis_level_t *level_tree, int levels_used, int lambda2,
                     uint8_t *level_state, int levelgt1_ctx )
{
    int prefix = X264_MIN( abs_level-1, 14 );
    int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
    COEF( 2, 1, 1, 4, 2, 5 );
    COEF( 2, 1, 2, 4, 3, 5 );
    COEF( 2, 1, 3, 4, 4, 5 );
    COEF( 2, 1, 4, 5, 0, 6 );
    COEF( 2, 1, 5, 6, 0, 7 );
    COEF( 2, 1, 6, 7, 0, 8 );
    COEF( 2, 1, 7, 7, 0, levelgt1_ctx );
    return levels_used;
}

static ALWAYS_INLINE
int quant_trellis_cabac( x264_t *h, dctcoef *dct,
                         udctcoef *quant_mf, udctcoef *quant_bias, const int *unquant_mf,
                         const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
                         int b_chroma, int dc, int num_coefs, int idx )
{
    ALIGNED_ARRAY_64( dctcoef, orig_coefs, [64] );
    ALIGNED_ARRAY_64( dctcoef, quant_coefs, [64] );
    const uint32_t *coef_weight1 = num_coefs == 64 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
    const uint32_t *coef_weight2 = num_coefs == 64 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
    const int b_interlaced = MB_INTERLACED;
    uint8_t *cabac_state_sig = &h->cabac.state[ x264_significant_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
    uint8_t *cabac_state_last = &h->cabac.state[ x264_last_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
    int levelgt1_ctx = b_chroma && dc ? 8 : 9;

    if( dc )
    {
        if( num_coefs == 16 )
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
            if( !h->quantf.quant_4x4_dc( dct, quant_mf[0] >> 1, quant_bias[0] << 1 ) )
                return 0;
            h->zigzagf.scan_4x4( quant_coefs, dct );
        }
        else
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*num_coefs );
            int nz = h->quantf.quant_2x2_dc( &dct[0], quant_mf[0] >> 1, quant_bias[0] << 1 );
            if( num_coefs == 8 )
                nz |= h->quantf.quant_2x2_dc( &dct[4], quant_mf[0] >> 1, quant_bias[0] << 1 );
            if( !nz )
                return 0;
            for( int i = 0; i < num_coefs; i++ )
                quant_coefs[i] = dct[zigzag[i]];
        }
    }
    else
    {
        if( num_coefs == 64 )
        {
            h->mc.memcpy_aligned( orig_coefs, dct, sizeof(dctcoef)*64 );
            if( !h->quantf.quant_8x8( dct, quant_mf, quant_bias ) )
                return 0;
            h->zigzagf.scan_8x8( quant_coefs, dct );
        }
        else //if( num_coefs == 16 )
        {
            memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
            if( !h->quantf.quant_4x4( dct, quant_mf, quant_bias ) )
                return 0;
            h->zigzagf.scan_4x4( quant_coefs, dct );
        }
    }

    int last_nnz = h->quantf.coeff_last[ctx_block_cat]( quant_coefs+b_ac )+b_ac;
    uint8_t *cabac_state = &h->cabac.state[ x264_coeff_abs_level_m1_offset[ctx_block_cat] ];

    /* shortcut for dc-only blocks.
     * this doesn't affect the output, but saves some unnecessary computation. */
    if( last_nnz == 0 && !dc )
    {
        int cost_sig = x264_cabac_size_decision_noup2( &cabac_state_sig[0], 1 )
                     + x264_cabac_size_decision_noup2( &cabac_state_last[0], 1 );
        dct[0] = trellis_dc_shortcut( orig_coefs[0], quant_coefs[0], unquant_mf[0], coef_weight2[0], lambda2, cabac_state, cost_sig );
        return !!dct[0];
    }

#if HAVE_MMX && ARCH_X86_64
    uint64_t level_state0;
    memcpy( &level_state0, cabac_state, sizeof(uint64_t) );
    uint16_t level_state1;
    memcpy( &level_state1, cabac_state+8, sizeof(uint16_t) );
#define TRELLIS_ARGS unquant_mf, zigzag, lambda2, last_nnz, orig_coefs, quant_coefs, dct,\
                     cabac_state_sig, cabac_state_last, level_state0, level_state1
    if( num_coefs == 16 && !dc )
        if( b_chroma || !h->mb.i_psy_trellis )
            return h->quantf.trellis_cabac_4x4( TRELLIS_ARGS, b_ac );
        else
            return h->quantf.trellis_cabac_4x4_psy( TRELLIS_ARGS, b_ac, h->mb.pic.fenc_dct4[idx&15], h->mb.i_psy_trellis );
    else if( num_coefs == 64 && !dc )
        if( b_chroma || !h->mb.i_psy_trellis )
            return h->quantf.trellis_cabac_8x8( TRELLIS_ARGS, b_interlaced );
        else
            return h->quantf.trellis_cabac_8x8_psy( TRELLIS_ARGS, b_interlaced, h->mb.pic.fenc_dct8[idx&3], h->mb.i_psy_trellis );
    else if( num_coefs == 8 && dc )
        return h->quantf.trellis_cabac_chroma_422_dc( TRELLIS_ARGS );
    else if( dc )
        return h->quantf.trellis_cabac_dc( TRELLIS_ARGS, num_coefs-1 );
#endif

    // (# of coefs) * (# of ctx) * (# of levels tried) = 1024
    // we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
    // but it takes more time to remove dead states than you gain in reduced memory.
    trellis_level_t level_tree[64*8*2];
    int levels_used = 1;
    /* init trellis */
    trellis_node_t nodes[2][8] = {0};
    trellis_node_t *nodes_cur = nodes[0];
    trellis_node_t *nodes_prev = nodes[1];
    trellis_node_t *bnode;
    for( int j = 1; j < 8; j++ )
        nodes_cur[j].score = TRELLIS_SCORE_MAX;
    nodes_cur[0].score = TRELLIS_SCORE_BIAS;
    nodes_cur[0].level_idx = 0;
    level_tree[0].abs_level = 0;
    level_tree[0].next = 0;
    ALIGNED_4( uint8_t level_state[16] );
    memcpy( level_state, cabac_state, 10 );
    level_state[12] = cabac_state[0]; // packed subset for copying into trellis_node_t
    level_state[13] = cabac_state[4];
    level_state[14] = cabac_state[8];
    level_state[15] = cabac_state[9];

    idx &= num_coefs == 64 ? 3 : 15;

    // coefs are processed in reverse order, because that's how the abs value is coded.
    // last_coef and significant_coef flags are normally coded in forward order, but
    // we have to reverse them to match the levels.
    // in 4x4 blocks, last_coef and significant_coef use a separate context for each
    // position, so the order doesn't matter, and we don't even have to update their contexts.
    // in 8x8 blocks, some positions share contexts, so we'll just have to hope that
    // cabac isn't too sensitive.
    int i = last_nnz;
#define TRELLIS_LOOP(ctx_hi)\
    for( ; i >= b_ac; i-- )\
    {\
        /* skip 0s: this doesn't affect the output, but saves some unnecessary computation. */\
        if( !quant_coefs[i] )\
        {\
            /* no need to calculate ssd of 0s: it's the same in all nodes.\
             * no need to modify level_tree for ctx=0: it starts with an infinite loop of 0s.\
             * subtracting from one score is equivalent to adding to the rest. */\
            if( !ctx_hi )\
            {\
                int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
                               b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
                uint64_t cost_sig0 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 )\
                                   * (uint64_t)lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
                nodes_cur[0].score -= cost_sig0;\
            }\
            for( int j = 1; j < (ctx_hi?8:4); j++ )\
                SET_LEVEL( nodes_cur[j], nodes_cur[j], 0 );\
            continue;\
        }\
\
        int sign_coef = orig_coefs[zigzag[i]];\
        int abs_coef = abs( sign_coef );\
        int q = abs( quant_coefs[i] );\
        int cost_siglast[3]; /* { zero, nonzero, nonzero-and-last } */\
        XCHG( trellis_node_t*, nodes_cur, nodes_prev );\
        for( int j = ctx_hi; j < 8; j++ )\
            nodes_cur[j].score = TRELLIS_SCORE_MAX;\
\
        if( i < num_coefs-1 || ctx_hi )\
        {\
            int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
                           b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
            int lastindex = !dc && num_coefs == 64 ? x264_last_coeff_flag_offset_8x8[i] :\
                            b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
            cost_siglast[0] = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 );\
            int cost_sig1 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 1 );\
            cost_siglast[1] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 0 ) + cost_sig1;\
            if( !ctx_hi )\
                cost_siglast[2] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 1 ) + cost_sig1;\
        }\
        else\
        {\
            cost_siglast[0] = cost_siglast[1] = cost_siglast[2] = 0;\
        }\
\
        /* there are a few cases where increasing the coeff magnitude helps,\
         * but it's only around .003 dB, and skipping them ~doubles the speed of trellis.\
         * could also try q-2: that sometimes helps, but also sometimes decimates blocks\
         * that are better left coded, especially at QP > 40. */\
        uint64_t ssd0[2], ssd1[2];\
        for( int k = 0; k < 2; k++ )\
        {\
            int abs_level = q-1+k;\
            int unquant_abs_level = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[i]]) * abs_level + 128) >> 8);\
            int d = abs_coef - unquant_abs_level;\
            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */\
            if( h->mb.i_psy_trellis && i && !dc && !b_chroma )\
            {\
                int orig_coef = (num_coefs == 64) ? h->mb.pic.fenc_dct8[idx][zigzag[i]] : h->mb.pic.fenc_dct4[idx][zigzag[i]];\
                int predicted_coef = orig_coef - sign_coef;\
                int psy_value = abs(unquant_abs_level + SIGN(predicted_coef, sign_coef));\
                int psy_weight = coef_weight1[zigzag[i]] * h->mb.i_psy_trellis;\
                int64_t tmp = (int64_t)d*d * coef_weight2[zigzag[i]] - (int64_t)psy_weight * psy_value;\
                ssd1[k] = (uint64_t)tmp;\
            }\
            else\
            /* FIXME: for i16x16 dc is this weight optimal? */\
                ssd1[k] = (int64_t)d*d * (dc?256:coef_weight2[zigzag[i]]);\
            ssd0[k] = ssd1[k];\
            if( !i && !dc && !ctx_hi )\
            {\
                /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */\
                d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);\
                ssd0[k] = (int64_t)d*d * coef_weight2[zigzag[i]];\
            }\
        }\
\
        /* argument passing imposes some significant overhead here. gcc's interprocedural register allocation isn't up to it. */\
        switch( q )\
        {\
        case 1:\
            ssd1[0] += (uint64_t)cost_siglast[0] * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
            levels_used = trellis_coef0_##ctx_hi( ssd0[0]-ssd1[0], nodes_cur, nodes_prev, level_tree, levels_used );\
            levels_used = trellis_coef1_##ctx_hi( ssd0[1]-ssd1[0], ssd1[1]-ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
            goto next##ctx_hi;\
        case 2:\
            levels_used = trellis_coef1_##ctx_hi( ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
            levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
            goto next1;\
        default:\
            levels_used = trellis_coefn_##ctx_hi( q-1, ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
            levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
            goto next1;\
        }\
        next##ctx_hi:;\
    }\
    /* output levels from the best path through the trellis */\
    bnode = &nodes_cur[ctx_hi];\
    for( int j = ctx_hi+1; j < (ctx_hi?8:4); j++ )\
        if( nodes_cur[j].score < bnode->score )\
            bnode = &nodes_cur[j];

    // keep 2 versions of the main quantization loop, depending on which subsets of the node_ctxs are live
    // node_ctx 0..3, i.e. having not yet encountered any coefs that might be quantized to >1
    TRELLIS_LOOP(0);

    if( bnode == &nodes_cur[0] )
    {
        /* We only need to zero an empty 4x4 block. 8x8 can be
           implicitly emptied via zero nnz, as can dc. */
        if( num_coefs == 16 && !dc )
            memset( dct, 0, 16 * sizeof(dctcoef) );
        return 0;
    }

    if( 0 ) // accessible only by goto, not fallthrough
    {
        // node_ctx 1..7 (ctx0 ruled out because we never try both level0 and level2+ on the same coef)
        TRELLIS_LOOP(1);
    }

    int level = bnode->level_idx;
    for( i = b_ac; i <= last_nnz; i++ )
    {
        dct[zigzag[i]] = SIGN(level_tree[level].abs_level, dct[zigzag[i]]);
        level = level_tree[level].next;
    }

    return 1;
}

/* FIXME: This is a gigantic hack. See below.
 *
 * CAVLC is much more difficult to trellis than CABAC.
 *
 * CABAC has only three states to track: significance map, last, and the
 * level state machine.
 * CAVLC, by comparison, has five: coeff_token (trailing + total),
 * total_zeroes, zero_run, and the level state machine.
 *
 * I know of no paper that has managed to design a close-to-optimal trellis
 * that covers all five of these and isn't exponential-time. As a result, this
 * "trellis" isn't: it's just a QNS search. Patches welcome for something better.
 * It's actually surprisingly fast, albeit not quite optimal. It's pretty close
 * though; since CAVLC only has 2^16 possible rounding modes (assuming only two
 * roundings as options), a bruteforce search is feasible. Testing shows
 * that this QNS is reasonably close to optimal in terms of compression.
 *
 * TODO:
 *  Don't bother changing large coefficients when it wouldn't affect bit cost
 *  (e.g. only affecting bypassed suffix bits).
 *  Don't re-run all parts of CAVLC bit cost calculation when not necessary.
 *  e.g. when changing a coefficient from one non-zero value to another in
 *  such a way that trailing ones and suffix length isn't affected. */
static ALWAYS_INLINE
int quant_trellis_cavlc( x264_t *h, dctcoef *dct,
                         const udctcoef *quant_mf, const int *unquant_mf,
                         const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
                         int b_chroma, int dc, int num_coefs, int idx, int b_8x8 )
{
    ALIGNED_ARRAY_16( dctcoef, quant_coefs,[2],[16] );
    ALIGNED_ARRAY_16( dctcoef, coefs,[16] );
    const uint32_t *coef_weight1 = b_8x8 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
    const uint32_t *coef_weight2 = b_8x8 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
    int64_t delta_distortion[16];
    int64_t score = 1ULL<<62;
    int i, j;
    const int f = 1<<15;
    int nC = b_chroma && dc ? 3 + (num_coefs>>2)
                            : ct_index[x264_mb_predict_non_zero_code( h, !b_chroma && dc ? (idx - LUMA_DC)*16 : idx )];

    for( i = 0; i < 16; i += 16/sizeof(*coefs) )
        M128( &coefs[i] ) = M128_ZERO;

    /* Code for handling 8x8dct -> 4x4dct CAVLC munging. Input/output use a different
     * step/start/end than internal processing. */
    int step = 1;
    int start = b_ac;
    int end = num_coefs - 1;
    if( b_8x8 )
    {
        start = idx&3;
        end = 60 + start;
        step = 4;
    }
    idx &= 15;

    lambda2 <<= LAMBDA_BITS;

    /* Find last non-zero coefficient. */
    for( i = end; i >= start; i -= step )
        if( abs(dct[zigzag[i]]) * (dc?quant_mf[0]>>1:quant_mf[zigzag[i]]) >= f )
            break;

    if( i < start )
        goto zeroblock;

    /* Prepare for QNS search: calculate distortion caused by each DCT coefficient
     * rounding to be searched.
     *
     * We only search two roundings (nearest and nearest-1) like in CABAC trellis,
     * so we just store the difference in distortion between them. */
    int last_nnz = b_8x8 ? i >> 2 : i;
    int coef_mask = 0;
    int round_mask = 0;
    for( i = b_ac, j = start; i <= last_nnz; i++, j += step )
    {
        int coef = dct[zigzag[j]];
        int abs_coef = abs(coef);
        int sign = coef < 0 ? -1 : 1;
        int nearest_quant = ( f + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
        quant_coefs[1][i] = quant_coefs[0][i] = sign * nearest_quant;
        coefs[i] = quant_coefs[1][i];
        if( nearest_quant )
        {
            /* We initialize the trellis with a deadzone halfway between nearest rounding
             * and always-round-down. This gives much better results than initializing to either
             * extreme.
             * FIXME: should we initialize to the deadzones used by deadzone quant? */
            int deadzone_quant = ( f/2 + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
            int unquant1 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-0) + 128) >> 8);
            int unquant0 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-1) + 128) >> 8);
            int d1 = abs_coef - unquant1;
            int d0 = abs_coef - unquant0;
            delta_distortion[i] = (int64_t)(d0*d0 - d1*d1) * (dc?256:coef_weight2[zigzag[j]]);

            /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */
            if( h->mb.i_psy_trellis && j && !dc && !b_chroma )
            {
                int orig_coef = b_8x8 ? h->mb.pic.fenc_dct8[idx>>2][zigzag[j]] : h->mb.pic.fenc_dct4[idx][zigzag[j]];
                int predicted_coef = orig_coef - coef;
                int psy_weight = coef_weight1[zigzag[j]];
                int psy_value0 = h->mb.i_psy_trellis * abs(predicted_coef + unquant0 * sign);
                int psy_value1 = h->mb.i_psy_trellis * abs(predicted_coef + unquant1 * sign);
                delta_distortion[i] += (psy_value0 - psy_value1) * psy_weight;
            }

            quant_coefs[0][i] = sign * (nearest_quant-1);
            if( deadzone_quant != nearest_quant )
                coefs[i] = quant_coefs[0][i];
            else
                round_mask |= 1 << i;
        }
        else
            delta_distortion[i] = 0;
        coef_mask |= (!!coefs[i]) << i;
    }

    /* Calculate the cost of the starting state. */
    h->out.bs.i_bits_encoded = 0;
    if( !coef_mask )
        bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
    else
        cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
    score = (int64_t)h->out.bs.i_bits_encoded * lambda2;

    /* QNS loop: pick the change that improves RD the most, apply it, repeat.
     * coef_mask and round_mask are used to simplify tracking of nonzeroness
     * and rounding modes chosen. */
    while( 1 )
    {
        int64_t iter_score = score;
        int64_t iter_distortion_delta = 0;
        int iter_coef = -1;
        int iter_mask = coef_mask;
        int iter_round = round_mask;
        for( i = b_ac; i <= last_nnz; i++ )
        {
            if( !delta_distortion[i] )
                continue;

            /* Set up all the variables for this iteration. */
            int cur_round = round_mask ^ (1 << i);
            int round_change = (cur_round >> i)&1;
            int old_coef = coefs[i];
            int new_coef = quant_coefs[round_change][i];
            int cur_mask = (coef_mask&~(1 << i))|(!!new_coef << i);
            int64_t cur_distortion_delta = delta_distortion[i] * (round_change ? -1 : 1);
            int64_t cur_score = cur_distortion_delta;
            coefs[i] = new_coef;

            /* Count up bits. */
            h->out.bs.i_bits_encoded = 0;
            if( !cur_mask )
                bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
            else
                cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
            cur_score += (int64_t)h->out.bs.i_bits_encoded * lambda2;

            coefs[i] = old_coef;
            if( cur_score < iter_score )
            {
                iter_score = cur_score;
                iter_coef = i;
                iter_mask = cur_mask;
                iter_round = cur_round;
                iter_distortion_delta = cur_distortion_delta;
            }
        }
        if( iter_coef >= 0 )
        {
            score = iter_score - iter_distortion_delta;
            coef_mask = iter_mask;
            round_mask = iter_round;
            coefs[iter_coef] = quant_coefs[((round_mask >> iter_coef)&1)][iter_coef];
            /* Don't try adjusting coefficients we've already adjusted.
             * Testing suggests this doesn't hurt results -- and sometimes actually helps. */
            delta_distortion[iter_coef] = 0;
        }
        else
            break;
    }

    if( coef_mask )
    {
        for( i = b_ac, j = start; i < num_coefs; i++, j += step )
            dct[zigzag[j]] = coefs[i];
        return 1;
    }

zeroblock:
    if( !dc )
    {
        if( b_8x8 )
            for( i = start; i <= end; i+=step )
                dct[zigzag[i]] = 0;
        else
            memset( dct, 0, 16*sizeof(dctcoef) );
    }
    return 0;
}
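
/* The "QNS" search that quant_trellis_cavlc() implements above is a greedy
 * coordinate search: try flipping each coefficient between its two candidate
 * roundings, commit the single flip with the best RD score, repeat until no
 * flip helps.  The skeleton with the CAVLC details stripped out (hedged
 * sketch; toy_rd_score() is hypothetical, n is at most 16): */
#if 0
static void toy_qns( int *coefs, const int cand[][2], int n )
{
    int round[16] = {0}; /* current rounding choice per coefficient */
    while( 1 )
    {
        int64_t best = toy_rd_score( coefs, n );
        int best_i = -1;
        for( int i = 0; i < n; i++ )
        {
            coefs[i] = cand[i][round[i]^1];   /* try the other rounding */
            int64_t s = toy_rd_score( coefs, n );
            if( s < best ) { best = s; best_i = i; }
            coefs[i] = cand[i][round[i]];     /* undo */
        }
        if( best_i < 0 )
            break;                            /* no flip improved the score */
        round[best_i] ^= 1;
        coefs[best_i] = cand[best_i][round[best_i]];
    }
}
#endif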

int x264_quant_luma_dc_trellis( x264_t *h, dctcoef *dct, int i_quant_cat, int i_qp, int ctx_block_cat, int b_intra, int idx )
{
    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
            h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
        DCT_LUMA_DC, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx, 0 );
}

static const uint8_t zigzag_scan2x2[4] = { 0, 1, 2, 3 };
static const uint8_t zigzag_scan2x4[8] = { 0, 2, 1, 4, 6, 3, 5, 7 };

int x264_quant_chroma_dc_trellis( x264_t *h, dctcoef *dct, int i_qp, int b_intra, int idx )
{
    const uint8_t *zigzag;
    int num_coefs;
    int quant_cat = CQM_4IC+1 - b_intra;

    if( CHROMA_FORMAT == CHROMA_422 )
    {
        zigzag = zigzag_scan2x4;
        num_coefs = 8;
    }
    else
    {
        zigzag = zigzag_scan2x2;
        num_coefs = 4;
    }

    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[quant_cat][i_qp], h->quant4_bias0[quant_cat][i_qp],
            h->unquant4_mf[quant_cat][i_qp], zigzag,
            DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[quant_cat][i_qp], h->unquant4_mf[quant_cat][i_qp], zigzag,
        DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx, 0 );
}

int x264_quant_4x4_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
    static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};
    int b_ac = ctx_ac[ctx_block_cat];
    if( h->param.b_cabac )
        return quant_trellis_cabac( h, dct,
            h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
            h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx );

    return quant_trellis_cavlc( h, dct,
        h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
        x264_zigzag_scan4[MB_INTERLACED],
        ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx, 0 );
}

int x264_quant_8x8_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
                            int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
    if( h->param.b_cabac )
    {
        return quant_trellis_cabac( h, dct,
            h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias0[i_quant_cat][i_qp],
            h->unquant8_mf[i_quant_cat][i_qp], x264_zigzag_scan8[MB_INTERLACED],
            ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 64, idx );
    }

    /* 8x8 CAVLC is split into 4 4x4 blocks */
    int nzaccum = 0;
    for( int i = 0; i < 4; i++ )
    {
        int nz = quant_trellis_cavlc( h, dct,
            h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
            x264_zigzag_scan8[MB_INTERLACED],
            DCT_LUMA_4x4, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 16, idx*4+i, 1 );
        /* Set up nonzero count for future calls */
        h->mb.cache.non_zero_count[x264_scan8[idx*4+i]] = nz;
        nzaccum |= nz;
    }
    STORE_8x8_NNZ( 0, idx, 0 );
    return nzaccum;
}