/src/libvpx/vp8/encoder/encodeframe.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2010 The WebM project authors. All Rights Reserved. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license |
5 | | * that can be found in the LICENSE file in the root of the source |
6 | | * tree. An additional intellectual property rights grant can be found |
7 | | * in the file PATENTS. All contributing project authors may |
8 | | * be found in the AUTHORS file in the root of the source tree. |
9 | | */ |
10 | | #include <limits.h> |
11 | | #include <stdio.h> |
12 | | |
13 | | #include "vpx_config.h" |
14 | | |
15 | | #include "vp8/common/common.h" |
16 | | #include "vp8/common/entropymode.h" |
17 | | #include "vp8/common/extend.h" |
18 | | #include "vp8/common/invtrans.h" |
19 | | #include "vp8/common/quant_common.h" |
20 | | #include "vp8/common/reconinter.h" |
21 | | #include "vp8/common/setupintrarecon.h" |
22 | | #include "vp8/common/threading.h" |
23 | | #include "vp8/encoder/bitstream.h" |
24 | | #include "vp8/encoder/encodeframe.h" |
25 | | #include "vp8/encoder/encodeintra.h" |
26 | | #include "vp8/encoder/encodemb.h" |
27 | | #include "vp8/encoder/onyx_int.h" |
28 | | #include "vp8/encoder/pickinter.h" |
29 | | #include "vp8/encoder/rdopt.h" |
30 | | #include "vp8_rtcd.h" |
31 | | #include "vpx/internal/vpx_codec_internal.h" |
32 | | #include "vpx_dsp_rtcd.h" |
33 | | #include "vpx_mem/vpx_mem.h" |
34 | | #include "vpx_ports/vpx_timer.h" |
35 | | |
36 | | #if CONFIG_MULTITHREAD |
37 | | #include "vp8/encoder/ethreading.h" |
38 | | #endif |
39 | | |
40 | | extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t); |
41 | | static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x); |
42 | | |
43 | | #ifdef MODE_STATS |
44 | | unsigned int inter_y_modes[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
45 | | unsigned int inter_uv_modes[4] = { 0, 0, 0, 0 }; |
46 | | unsigned int inter_b_modes[15] = { |
47 | | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 |
48 | | }; |
49 | | unsigned int y_modes[5] = { 0, 0, 0, 0, 0 }; |
50 | | unsigned int uv_modes[4] = { 0, 0, 0, 0 }; |
51 | | unsigned int b_modes[14] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
52 | | #endif |
53 | | |
54 | | /* activity_avg must be positive, or flat regions could get a zero weight |
55 | | * (infinite lambda), which confounds analysis. |
56 | | * This also avoids the need for divide by zero checks in |
57 | | * vp8_activity_masking(). |
58 | | */ |
59 | 0 | #define VP8_ACTIVITY_AVG_MIN (64) |
60 | | |
61 | | /* This is used as a reference when computing the source variance for the |
62 | | * purposes of activity masking. |
63 | | * Eventually this should be replaced by custom no-reference routines, |
64 | | * which will be faster. |
65 | | */ |
66 | | static const unsigned char VP8_VAR_OFFS[16] = { 128, 128, 128, 128, 128, 128, |
67 | | 128, 128, 128, 128, 128, 128, |
68 | | 128, 128, 128, 128 }; |
69 | | |
70 | | /* Original activity measure from Tim T's code. */ |
71 | 0 | static unsigned int tt_activity_measure(MACROBLOCK *x) { |
72 | 0 | unsigned int act; |
73 | 0 | unsigned int sse; |
74 | 0 | /* TODO: This could also be done over smaller areas (8x8), but that would |
75 | 0 | * require extensive changes elsewhere, as lambda is assumed to be fixed |
76 | 0 | * over an entire MB in most of the code. |
77 | 0 | * Another option is to compute four 8x8 variances, and pick a single |
78 | 0 | * lambda using a non-linear combination (e.g., the smallest, or second |
79 | 0 | * smallest, etc.). |
80 | 0 | */ |
81 | 0 | act = vpx_variance16x16(x->src.y_buffer, x->src.y_stride, VP8_VAR_OFFS, 0, |
82 | 0 | &sse); |
83 | 0 | act = act << 4; |
84 | 0 |
85 | 0 | /* If the region is flat, lower the activity some more. */ |
86 | 0 | if (act < 8 << 12) act = act < 5 << 12 ? act : 5 << 12; |
87 | 0 |
88 | 0 | return act; |
89 | 0 | } |
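
The routine above takes the 16x16 variance of the source block against the flat 128 reference (VP8_VAR_OFFS with ref stride 0), scales it by 16, and damps flat regions. A minimal standalone sketch of the same arithmetic, assuming vpx_variance16x16() returns sse - sum^2/256 (activity_sketch is a hypothetical name, not a libvpx routine):

    #include <stdint.h>

    static unsigned int activity_sketch(const uint8_t *src, int stride) {
      int64_t sum = 0, sse = 0;
      unsigned int var, act;
      int r, c;
      for (r = 0; r < 16; ++r) {
        for (c = 0; c < 16; ++c) {
          const int d = src[r * stride + c] - 128; /* flat 128 predictor */
          sum += d;
          sse += d * d;
        }
      }
      /* total (not per-pixel) variance over the 256 luma samples */
      var = (unsigned int)(sse - ((sum * sum) >> 8));
      act = var << 4;
      /* flat region: cap activity at 5<<12 once it falls below 8<<12 */
      if (act < (8 << 12)) act = act < (5 << 12) ? act : (5 << 12);
      return act;
    }
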
90 | | |
91 | | /* Measure the activity of the current macroblock |
92 | | * What we measure here is TBD, so it is abstracted into this function |
93 | | */ |
94 | 0 | #define ALT_ACT_MEASURE 1 |
95 | 0 | static unsigned int mb_activity_measure(MACROBLOCK *x, int mb_row, int mb_col) { |
96 | 0 | unsigned int mb_activity; |
97 | |
98 | 0 | if (ALT_ACT_MEASURE) { |
99 | 0 | int use_dc_pred = (mb_col > 1 && mb_row > 1); |
100 | | /* Or use an alternative. */ |
101 | 0 | mb_activity = vp8_encode_intra(x, use_dc_pred); |
102 | 0 | } else { |
103 | | /* Original activity measure from Tim T's code. */ |
104 | 0 | mb_activity = tt_activity_measure(x); |
105 | 0 | } |
106 | |
107 | 0 | if (mb_activity < VP8_ACTIVITY_AVG_MIN) mb_activity = VP8_ACTIVITY_AVG_MIN; |
108 | |
109 | 0 | return mb_activity; |
110 | 0 | } |
111 | | |
112 | | /* Calculate an "average" mb activity value for the frame */ |
113 | | #define ACT_MEDIAN 0 |
114 | 0 | static void calc_av_activity(VP8_COMP *cpi, int64_t activity_sum) { |
115 | | #if ACT_MEDIAN |
116 | | /* Find median: Simple n^2 algorithm for experimentation */ |
117 | | { |
118 | | unsigned int median; |
119 | | unsigned int i, j; |
120 | | unsigned int *sortlist; |
121 | | unsigned int tmp; |
122 | | |
123 | | /* Create a list to sort to */ |
124 | | CHECK_MEM_ERROR(&cpi->common.error, sortlist, |
125 | | vpx_calloc(sizeof(unsigned int), cpi->common.MBs)); |
126 | | |
127 | | /* Copy map to sort list */ |
128 | | memcpy(sortlist, cpi->mb_activity_map, |
129 | | sizeof(unsigned int) * cpi->common.MBs); |
130 | | |
131 | | /* Ripple each value down to its correct position */ |
132 | | for (i = 1; i < cpi->common.MBs; ++i) { |
133 | | for (j = i; j > 0; j--) { |
134 | | if (sortlist[j] < sortlist[j - 1]) { |
135 | | /* Swap values */ |
136 | | tmp = sortlist[j - 1]; |
137 | | sortlist[j - 1] = sortlist[j]; |
138 | | sortlist[j] = tmp; |
139 | | } else |
140 | | break; |
141 | | } |
142 | | } |
143 | | |
144 | | /* Even number of MBs, so estimate the median as the mean of the two values either side. */ |
145 | | median = (1 + sortlist[cpi->common.MBs >> 1] + |
146 | | sortlist[(cpi->common.MBs >> 1) + 1]) >> |
147 | | 1; |
148 | | |
149 | | cpi->activity_avg = median; |
150 | | |
151 | | vpx_free(sortlist); |
152 | | } |
153 | | #else |
154 | | /* Simple mean for now */ |
155 | 0 | cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs); |
156 | 0 | #endif |
157 | |
158 | 0 | if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN) { |
159 | 0 | cpi->activity_avg = VP8_ACTIVITY_AVG_MIN; |
160 | 0 | } |
161 | | |
162 | | /* Experimental code: return fixed value normalized for several clips */ |
163 | 0 | if (ALT_ACT_MEASURE) cpi->activity_avg = 100000; |
164 | 0 | } |
165 | | |
166 | | #define USE_ACT_INDEX 0 |
167 | | #define OUTPUT_NORM_ACT_STATS 0 |
168 | | |
169 | | #if USE_ACT_INDEX |
170 | | /* Calculate an activity index for each mb */ |
171 | | static void calc_activity_index(VP8_COMP *cpi, MACROBLOCK *x) { |
172 | | VP8_COMMON *const cm = &cpi->common; |
173 | | int mb_row, mb_col; |
174 | | |
175 | | int64_t act; |
176 | | int64_t a; |
177 | | int64_t b; |
178 | | |
179 | | #if OUTPUT_NORM_ACT_STATS |
180 | | FILE *f = fopen("norm_act.stt", "a"); |
181 | | fprintf(f, "\n%12d\n", cpi->activity_avg); |
182 | | #endif |
183 | | |
184 | | /* Reset pointers to start of activity map */ |
185 | | x->mb_activity_ptr = cpi->mb_activity_map; |
186 | | |
187 | | /* Calculate normalized mb activity number. */ |
188 | | for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { |
189 | | /* for each macroblock col in image */ |
190 | | for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { |
191 | | /* Read activity from the map */ |
192 | | act = *(x->mb_activity_ptr); |
193 | | |
194 | | /* Calculate a normalized activity number */ |
195 | | a = act + 4 * cpi->activity_avg; |
196 | | b = 4 * act + cpi->activity_avg; |
197 | | |
198 | | if (b >= a) |
199 | | *(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1; |
200 | | else |
201 | | *(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b); |
202 | | |
203 | | #if OUTPUT_NORM_ACT_STATS |
204 | | fprintf(f, " %6d", *(x->mb_activity_ptr)); |
205 | | #endif |
206 | | /* Increment activity map pointers */ |
207 | | x->mb_activity_ptr++; |
208 | | } |
209 | | |
210 | | #if OUTPUT_NORM_ACT_STATS |
211 | | fprintf(f, "\n"); |
212 | | #endif |
213 | | } |
214 | | |
215 | | #if OUTPUT_NORM_ACT_STATS |
216 | | fclose(f); |
217 | | #endif |
218 | | } |
219 | | #endif |
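
Both branches above reduce to one map: with a = act + 4*avg and b = 4*act + avg, the index is the rounded integer ratio b/a minus 1 when the MB is at least as active as the average, and its negated mirror otherwise, so an average MB gets index 0. A standalone sketch with worked values (activity_index is a hypothetical name):

    #include <stdint.h>

    static int activity_index(int64_t act, int64_t avg) {
      const int64_t a = act + 4 * avg; /* damped denominator */
      const int64_t b = 4 * act + avg; /* damped numerator */
      if (b >= a)
        return (int)((b + (a >> 1)) / a) - 1; /* act >= avg: index >= 0 */
      return 1 - (int)((a + (b >> 1)) / b);   /* act < avg: index < 0 */
    }

    /* e.g. activity_index(1000, 1000) == 0
     *      activity_index(8000, 1000) == 2
     *      activity_index(100, 1000)  == -2 */
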
220 | | |
221 | | /* Loop through all MBs: note the activity of each, compute the frame |
222 | | * average, and calculate a normalized activity for each MB |
223 | | */ |
224 | 0 | static void build_activity_map(VP8_COMP *cpi) { |
225 | 0 | MACROBLOCK *const x = &cpi->mb; |
226 | 0 | MACROBLOCKD *xd = &x->e_mbd; |
227 | 0 | VP8_COMMON *const cm = &cpi->common; |
228 | |
229 | 0 | #if ALT_ACT_MEASURE |
230 | 0 | YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx]; |
231 | 0 | int recon_yoffset; |
232 | 0 | int recon_y_stride = new_yv12->y_stride; |
233 | 0 | #endif |
234 | |
235 | 0 | int mb_row, mb_col; |
236 | 0 | unsigned int mb_activity; |
237 | 0 | int64_t activity_sum = 0; |
238 | | |
239 | | /* for each macroblock row in image */ |
240 | 0 | for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { |
241 | 0 | #if ALT_ACT_MEASURE |
242 | | /* reset above block coeffs */ |
243 | 0 | xd->up_available = (mb_row != 0); |
244 | 0 | recon_yoffset = (mb_row * recon_y_stride * 16); |
245 | 0 | #endif |
246 | | /* for each macroblock col in image */ |
247 | 0 | for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { |
248 | 0 | #if ALT_ACT_MEASURE |
249 | 0 | xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset; |
250 | 0 | xd->left_available = (mb_col != 0); |
251 | 0 | recon_yoffset += 16; |
252 | 0 | #endif |
253 | | /* Copy current mb to a buffer */ |
254 | 0 | vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); |
255 | | |
256 | | /* measure activity */ |
257 | 0 | mb_activity = mb_activity_measure(x, mb_row, mb_col); |
258 | | |
259 | | /* Keep frame sum */ |
260 | 0 | activity_sum += mb_activity; |
261 | | |
262 | | /* Store MB level activity details. */ |
263 | 0 | *x->mb_activity_ptr = mb_activity; |
264 | | |
265 | | /* Increment activity map pointer */ |
266 | 0 | x->mb_activity_ptr++; |
267 | | |
268 | | /* adjust to the next column of source macroblocks */ |
269 | 0 | x->src.y_buffer += 16; |
270 | 0 | } |
271 | | |
272 | | /* adjust to the next row of mbs */ |
273 | 0 | x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; |
274 | |
275 | 0 | #if ALT_ACT_MEASURE |
276 | | /* extend the recon for intra prediction */ |
277 | 0 | vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16, xd->dst.u_buffer + 8, |
278 | 0 | xd->dst.v_buffer + 8); |
279 | 0 | #endif |
280 | 0 | } |
281 | | |
282 | | /* Calculate an "average" MB activity */ |
283 | 0 | calc_av_activity(cpi, activity_sum); |
284 | |
285 | | #if USE_ACT_INDEX |
286 | | /* Calculate an activity index number of each mb */ |
287 | | calc_activity_index(cpi, x); |
288 | | #endif |
289 | 0 | } |
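
The walk above relies on plain stride arithmetic: the source pointer advances 16 pixels per MB, and at the end of each row the accumulated 16 * mb_cols advance is undone before stepping down 16 pixel rows. A small sketch of the same traversal (checksum_mbs is a hypothetical name):

    static long checksum_mbs(const unsigned char *y, int stride,
                             int mb_rows, int mb_cols) {
      long sum = 0;
      int r, c;
      for (r = 0; r < mb_rows; ++r) {
        for (c = 0; c < mb_cols; ++c) {
          sum += y[0]; /* y is the top-left pixel of MB (r, c) here */
          y += 16;     /* advance one MB to the right */
        }
        /* rewind the per-MB advances, then move down 16 pixel rows */
        y += 16 * stride - 16 * mb_cols;
      }
      return sum;
    }
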
290 | | |
291 | | /* Macroblock activity masking */ |
292 | 0 | void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x) { |
293 | | #if USE_ACT_INDEX |
294 | | x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2); |
295 | | x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); |
296 | | x->errorperbit += (x->errorperbit == 0); |
297 | | #else |
298 | 0 | int64_t a; |
299 | 0 | int64_t b; |
300 | 0 | int64_t act = *(x->mb_activity_ptr); |
301 | | |
302 | | /* Apply the masking to the RD multiplier. */ |
303 | 0 | a = act + (2 * cpi->activity_avg); |
304 | 0 | b = (2 * act) + cpi->activity_avg; |
305 | |
306 | 0 | x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a); |
307 | 0 | x->errorperbit = x->rdmult * 100 / (110 * x->rddiv); |
308 | 0 | x->errorperbit += (x->errorperbit == 0); |
309 | 0 | #endif |
310 | | |
311 | | /* Activity based Zbin adjustment */ |
312 | 0 | adjust_act_zbin(cpi, x); |
313 | 0 | } |
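
The non-index path rescales the RD multiplier by b/a = (2*act + avg) / (act + 2*avg), which is bounded in (1/2, 2): a completely flat MB at most halves rdmult and a very busy MB at most doubles it. A sketch of the rescale with worked values (masked_rdmult is a hypothetical name):

    #include <stdint.h>

    static unsigned int masked_rdmult(unsigned int rdmult, int64_t act,
                                      int64_t avg) {
      const int64_t a = act + 2 * avg;
      const int64_t b = 2 * act + avg;
      /* rounded integer scale; b/a -> 1/2 as act -> 0, -> 2 as act grows */
      return (unsigned int)(((int64_t)rdmult * b + (a >> 1)) / a);
    }

    /* e.g. masked_rdmult(100, 0, 1000)      == 50   (flat MB)
     *      masked_rdmult(100, 1000, 1000)   == 100  (average MB)
     *      masked_rdmult(100, 100000, 1000) == 197  (very busy MB) */
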
314 | | |
315 | | static void encode_mb_row(VP8_COMP *cpi, VP8_COMMON *cm, int mb_row, |
316 | | MACROBLOCK *x, MACROBLOCKD *xd, TOKENEXTRA **tp, |
317 | 551k | int *segment_counts, int *totalrate) { |
318 | 551k | int recon_yoffset, recon_uvoffset; |
319 | 551k | int mb_col; |
320 | 551k | int ref_fb_idx = cm->lst_fb_idx; |
321 | 551k | int dst_fb_idx = cm->new_fb_idx; |
322 | 551k | int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride; |
323 | 551k | int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride; |
324 | 551k | int map_index = (mb_row * cpi->common.mb_cols); |
325 | | |
326 | | #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) |
327 | | const int num_part = (1 << cm->multi_token_partition); |
328 | | TOKENEXTRA *tp_start = cpi->tok; |
329 | | vp8_writer *w; |
330 | | #endif |
331 | | |
332 | 551k | #if CONFIG_MULTITHREAD |
333 | 551k | const int nsync = cpi->mt_sync_range; |
334 | 551k | vpx_atomic_int rightmost_col = VPX_ATOMIC_INIT(cm->mb_cols + nsync); |
335 | 551k | const vpx_atomic_int *last_row_current_mb_col; |
336 | 551k | vpx_atomic_int *current_mb_col = NULL; |
337 | | |
338 | 551k | if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) { |
339 | 0 | current_mb_col = &cpi->mt_current_mb_col[mb_row]; |
340 | 0 | } |
341 | 551k | if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0 && mb_row != 0) { |
342 | 0 | last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1]; |
343 | 551k | } else { |
344 | 551k | last_row_current_mb_col = &rightmost_col; |
345 | 551k | } |
346 | 551k | #endif |
347 | | |
348 | | #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) |
349 | | if (num_part > 1) |
350 | | w = &cpi->bc[1 + (mb_row % num_part)]; |
351 | | else |
352 | | w = &cpi->bc[1]; |
353 | | #endif |
354 | | |
355 | | /* reset above block coeffs */ |
356 | 551k | xd->above_context = cm->above_context; |
357 | | |
358 | 551k | xd->up_available = (mb_row != 0); |
359 | 551k | recon_yoffset = (mb_row * recon_y_stride * 16); |
360 | 551k | recon_uvoffset = (mb_row * recon_uv_stride * 8); |
361 | | |
362 | 551k | cpi->tplist[mb_row].start = *tp; |
363 | | /* printf("Main mb_row = %d\n", mb_row); */ |
364 | | |
365 | | /* Distance of Mb to the top & bottom edges, specified in 1/8th pel |
366 | | * units as they are always compared to values that are in 1/8th pel units |
367 | | */ |
368 | 551k | xd->mb_to_top_edge = -((mb_row * 16) << 3); |
369 | 551k | xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3; |
370 | | |
371 | | /* Set up limit values for vertical motion vector components |
372 | | * to prevent them extending beyond the UMV borders |
373 | | */ |
374 | 551k | x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16)); |
375 | 551k | x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16) + (VP8BORDERINPIXELS - 16); |
376 | | |
377 | | /* Set the mb activity pointer to the start of the row. */ |
378 | 551k | x->mb_activity_ptr = &cpi->mb_activity_map[map_index]; |
379 | | |
380 | | /* for each macroblock col in image */ |
381 | 3.47M | for (mb_col = 0; mb_col < cm->mb_cols; ++mb_col) { |
382 | | #if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING) |
383 | | *tp = cpi->tok; |
384 | | #endif |
385 | | /* Distance of Mb to the left & right edges, specified in |
386 | | * 1/8th pel units as they are always compared to values |
387 | | * that are in 1/8th pel units |
388 | | */ |
389 | 2.92M | xd->mb_to_left_edge = -((mb_col * 16) << 3); |
390 | 2.92M | xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3; |
391 | | |
392 | | /* Set up limit values for horizontal motion vector components |
393 | | * to prevent them extending beyond the UMV borders |
394 | | */ |
395 | 2.92M | x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16)); |
396 | 2.92M | x->mv_col_max = |
397 | 2.92M | ((cm->mb_cols - 1 - mb_col) * 16) + (VP8BORDERINPIXELS - 16); |
398 | | |
399 | 2.92M | xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset; |
400 | 2.92M | xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; |
401 | 2.92M | xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; |
402 | 2.92M | xd->left_available = (mb_col != 0); |
403 | | |
404 | 2.92M | x->rddiv = cpi->RDDIV; |
405 | 2.92M | x->rdmult = cpi->RDMULT; |
406 | | |
407 | | /* Copy current mb to a buffer */ |
408 | 2.92M | vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); |
409 | | |
410 | 2.92M | #if CONFIG_MULTITHREAD |
411 | 2.92M | if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) { |
412 | 0 | if (((mb_col - 1) % nsync) == 0) { |
413 | 0 | vpx_atomic_store_release(current_mb_col, mb_col - 1); |
414 | 0 | } |
415 | |
416 | 0 | if (mb_row && !(mb_col & (nsync - 1))) { |
417 | 0 | vp8_atomic_spin_wait(mb_col, last_row_current_mb_col, nsync); |
418 | 0 | } |
419 | 0 | } |
420 | 2.92M | #endif |
421 | | |
422 | 2.92M | if (cpi->oxcf.tuning == VP8_TUNE_SSIM) vp8_activity_masking(cpi, x); |
423 | | |
424 | | /* Is segmentation enabled */ |
425 | | /* MB level adjustment to quantizer */ |
426 | 2.92M | if (xd->segmentation_enabled) { |
427 | | /* Code to set segment id in xd->mbmi.segment_id for current MB |
428 | | * (with range checking) |
429 | | */ |
430 | 0 | if (cpi->segmentation_map[map_index + mb_col] <= 3) { |
431 | 0 | xd->mode_info_context->mbmi.segment_id = |
432 | 0 | cpi->segmentation_map[map_index + mb_col]; |
433 | 0 | } else { |
434 | 0 | xd->mode_info_context->mbmi.segment_id = 0; |
435 | 0 | } |
436 | |
437 | 0 | vp8cx_mb_init_quantizer(cpi, x, 1); |
438 | 2.92M | } else { |
439 | | /* Set to Segment 0 by default */ |
440 | 2.92M | xd->mode_info_context->mbmi.segment_id = 0; |
441 | 2.92M | } |
442 | | |
443 | 2.92M | x->active_ptr = cpi->active_map + map_index + mb_col; |
444 | | |
445 | 2.92M | if (cm->frame_type == KEY_FRAME) { |
446 | 1.40M | const int intra_rate_cost = vp8cx_encode_intra_macroblock(cpi, x, tp); |
447 | 1.40M | if (INT_MAX - *totalrate > intra_rate_cost) |
448 | 1.40M | *totalrate += intra_rate_cost; |
449 | 0 | else |
450 | 0 | *totalrate = INT_MAX; |
451 | | #ifdef MODE_STATS |
452 | | y_modes[xd->mbmi.mode]++; |
453 | | #endif |
454 | 1.52M | } else { |
455 | 1.52M | const int inter_rate_cost = vp8cx_encode_inter_macroblock( |
456 | 1.52M | cpi, x, tp, recon_yoffset, recon_uvoffset, mb_row, mb_col); |
457 | 1.52M | if (INT_MAX - *totalrate > inter_rate_cost) |
458 | 1.52M | *totalrate += inter_rate_cost; |
459 | 0 | else |
460 | 0 | *totalrate = INT_MAX; |
461 | | |
462 | | #ifdef MODE_STATS |
463 | | inter_y_modes[xd->mbmi.mode]++; |
464 | | |
465 | | if (xd->mbmi.mode == SPLITMV) { |
466 | | int b; |
467 | | |
468 | | for (b = 0; b < xd->mbmi.partition_count; ++b) { |
469 | | inter_b_modes[x->partition->bmi[b].mode]++; |
470 | | } |
471 | | } |
472 | | |
473 | | #endif |
474 | | |
475 | | // Keep track of how many (consecutive) times a block is coded |
476 | | // as ZEROMV_LASTREF, for base layer frames. |
477 | | // Reset to 0 if it's coded as anything else. |
478 | 1.52M | if (cpi->current_layer == 0) { |
479 | 1.52M | if (xd->mode_info_context->mbmi.mode == ZEROMV && |
480 | 148k | xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) { |
481 | | // Increment, check for wrap-around. |
482 | 136k | if (cpi->consec_zero_last[map_index + mb_col] < 255) { |
483 | 135k | cpi->consec_zero_last[map_index + mb_col] += 1; |
484 | 135k | } |
485 | 136k | if (cpi->consec_zero_last_mvbias[map_index + mb_col] < 255) { |
486 | 135k | cpi->consec_zero_last_mvbias[map_index + mb_col] += 1; |
487 | 135k | } |
488 | 1.39M | } else { |
489 | 1.39M | cpi->consec_zero_last[map_index + mb_col] = 0; |
490 | 1.39M | cpi->consec_zero_last_mvbias[map_index + mb_col] = 0; |
491 | 1.39M | } |
492 | 1.52M | if (x->zero_last_dot_suppress) { |
493 | 886 | cpi->consec_zero_last_mvbias[map_index + mb_col] = 0; |
494 | 886 | } |
495 | 1.52M | } |
496 | | |
497 | | /* Special case code for cyclic refresh |
498 | | * If cyclic update enabled then copy xd->mbmi.segment_id; (which |
499 | | * may have been updated based on mode during |
500 | | * vp8cx_encode_inter_macroblock()) back into the global |
501 | | * segmentation map |
502 | | */ |
503 | 1.52M | if ((cpi->current_layer == 0) && |
504 | 1.52M | (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled)) { |
505 | 0 | cpi->segmentation_map[map_index + mb_col] = |
506 | 0 | xd->mode_info_context->mbmi.segment_id; |
507 | | |
508 | | /* If the block has been refreshed mark it as clean (the |
509 | | * magnitude of the -ve influences how long it will be before |
510 | | * we consider another refresh): |
511 | | * Else if it was coded (last frame 0,0) and has not already |
512 | | * been refreshed then mark it as a candidate for cleanup |
513 | | * next time (marked 0) else mark it as dirty (1). |
514 | | */ |
515 | 0 | if (xd->mode_info_context->mbmi.segment_id) { |
516 | 0 | cpi->cyclic_refresh_map[map_index + mb_col] = -1; |
517 | 0 | } else if ((xd->mode_info_context->mbmi.mode == ZEROMV) && |
518 | 0 | (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)) { |
519 | 0 | if (cpi->cyclic_refresh_map[map_index + mb_col] == 1) { |
520 | 0 | cpi->cyclic_refresh_map[map_index + mb_col] = 0; |
521 | 0 | } |
522 | 0 | } else { |
523 | 0 | cpi->cyclic_refresh_map[map_index + mb_col] = 1; |
524 | 0 | } |
525 | 0 | } |
526 | 1.52M | } |
527 | | |
528 | 2.92M | cpi->tplist[mb_row].stop = *tp; |
529 | | |
530 | | #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING |
531 | | /* pack tokens for this MB */ |
532 | | { |
533 | | int tok_count = *tp - tp_start; |
534 | | vp8_pack_tokens(w, tp_start, tok_count); |
535 | | } |
536 | | #endif |
537 | | /* Increment pointer into gf usage flags structure. */ |
538 | 2.92M | x->gf_active_ptr++; |
539 | | |
540 | | /* Increment the activity mask pointers. */ |
541 | 2.92M | x->mb_activity_ptr++; |
542 | | |
543 | | /* adjust to the next column of macroblocks */ |
544 | 2.92M | x->src.y_buffer += 16; |
545 | 2.92M | x->src.u_buffer += 8; |
546 | 2.92M | x->src.v_buffer += 8; |
547 | | |
548 | 2.92M | recon_yoffset += 16; |
549 | 2.92M | recon_uvoffset += 8; |
550 | | |
551 | | /* Keep track of segment usage */ |
552 | 2.92M | segment_counts[xd->mode_info_context->mbmi.segment_id]++; |
553 | | |
554 | | /* skip to next mb */ |
555 | 2.92M | xd->mode_info_context++; |
556 | 2.92M | x->partition_info++; |
557 | 2.92M | xd->above_context++; |
558 | 2.92M | } |
559 | | |
560 | | /* extend the recon for intra prediction */ |
561 | 551k | vp8_extend_mb_row(&cm->yv12_fb[dst_fb_idx], xd->dst.y_buffer + 16, |
562 | 551k | xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); |
563 | | |
564 | 551k | #if CONFIG_MULTITHREAD |
565 | 551k | if (vpx_atomic_load_acquire(&cpi->b_multi_threaded) != 0) { |
566 | 0 | vpx_atomic_store_release(current_mb_col, |
567 | 0 | vpx_atomic_load_acquire(&rightmost_col)); |
568 | 0 | } |
569 | 551k | #endif |
570 | | |
571 | | /* this is to account for the border */ |
572 | 551k | xd->mode_info_context++; |
573 | 551k | x->partition_info++; |
574 | 551k | } |
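
The per-MB setup in encode_mb_row() converts pixel distances to 1/8th-pel units with << 3 and clamps motion vectors so a 16x16 prediction stays inside the extended (UMV) border. A sketch of the column-direction computations, assuming VP8BORDERINPIXELS is 32 as in libvpx (compute_limits and mb_limits are hypothetical names):

    typedef struct {
      int mb_to_left_edge, mb_to_right_edge; /* 1/8th-pel units */
      int mv_col_min, mv_col_max;            /* whole-pel units */
    } mb_limits;

    static mb_limits compute_limits(int mb_col, int mb_cols) {
      const int border = 32; /* VP8BORDERINPIXELS, assumed */
      mb_limits m;
      m.mb_to_left_edge = -((mb_col * 16) << 3);
      m.mb_to_right_edge = ((mb_cols - 1 - mb_col) * 16) << 3;
      /* keep the 16x16 prediction inside the extended border */
      m.mv_col_min = -((mb_col * 16) + (border - 16));
      m.mv_col_max = ((mb_cols - 1 - mb_col) * 16) + (border - 16);
      return m;
    }
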
575 | | |
576 | 151k | static void init_encode_frame_mb_context(VP8_COMP *cpi) { |
577 | 151k | MACROBLOCK *const x = &cpi->mb; |
578 | 151k | VP8_COMMON *const cm = &cpi->common; |
579 | 151k | MACROBLOCKD *const xd = &x->e_mbd; |
580 | | |
581 | | /* GF active flags data structure */ |
582 | 151k | x->gf_active_ptr = (signed char *)cpi->gf_active_flags; |
583 | | |
584 | | /* Activity map pointer */ |
585 | 151k | x->mb_activity_ptr = cpi->mb_activity_map; |
586 | | |
587 | 151k | x->act_zbin_adj = 0; |
588 | | |
589 | 151k | x->partition_info = x->pi; |
590 | | |
591 | 151k | xd->mode_info_context = cm->mi; |
592 | 151k | xd->mode_info_stride = cm->mode_info_stride; |
593 | | |
594 | 151k | xd->frame_type = cm->frame_type; |
595 | | |
596 | | /* reset intra mode contexts */ |
597 | 151k | if (cm->frame_type == KEY_FRAME) vp8_init_mbmode_probs(cm); |
598 | | |
599 | | /* Copy data over into macro block data structures. */ |
600 | 151k | x->src = *cpi->Source; |
601 | 151k | xd->pre = cm->yv12_fb[cm->lst_fb_idx]; |
602 | 151k | xd->dst = cm->yv12_fb[cm->new_fb_idx]; |
603 | | |
604 | | /* set up frame for intra coded blocks */ |
605 | 151k | vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]); |
606 | | |
607 | 151k | vp8_build_block_offsets(x); |
608 | | |
609 | 151k | xd->mode_info_context->mbmi.mode = DC_PRED; |
610 | 151k | xd->mode_info_context->mbmi.uv_mode = DC_PRED; |
611 | | |
612 | 151k | xd->left_context = &cm->left_context; |
613 | | |
614 | 151k | x->mvc = cm->fc.mvc; |
615 | | |
616 | 151k | memset(cm->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols); |
617 | | |
618 | | /* Special case treatment when GF and ARF are not sensible options |
619 | | * for reference |
620 | | */ |
621 | 151k | if (cpi->ref_frame_flags == VP8_LAST_FRAME) { |
622 | 36.2k | vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 255, |
623 | 36.2k | 128); |
624 | 115k | } else if ((cpi->oxcf.number_of_layers > 1) && |
625 | 0 | (cpi->ref_frame_flags == VP8_GOLD_FRAME)) { |
626 | 0 | vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 255); |
627 | 115k | } else if ((cpi->oxcf.number_of_layers > 1) && |
628 | 0 | (cpi->ref_frame_flags == VP8_ALTR_FRAME)) { |
629 | 0 | vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, 1, 1); |
630 | 115k | } else { |
631 | 115k | vp8_calc_ref_frame_costs(x->ref_frame_cost, cpi->prob_intra_coded, |
632 | 115k | cpi->prob_last_coded, cpi->prob_gf_coded); |
633 | 115k | } |
634 | | |
635 | 151k | xd->fullpixel_mask = ~0; |
636 | 151k | if (cm->full_pixel) xd->fullpixel_mask = ~7; |
637 | | |
638 | 151k | vp8_zero(x->coef_counts); |
639 | 151k | vp8_zero(x->ymode_count); |
640 | 151k | vp8_zero(x->uv_mode_count); |
641 | 151k | x->prediction_error = 0; |
642 | 151k | x->intra_error = 0; |
643 | 151k | vp8_zero(x->count_mb_ref_frame_usage); |
644 | 151k | } |
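
The three vp8_calc_ref_frame_costs() calls above special-case which references are usable: passing 255 for a probability makes its zero branch nearly free and its one branch cost about 8 bits, so with only LAST available (255, 128) the GOLDEN/ALTREF branches become prohibitively expensive. A sketch of the signalling tree being priced, with log2() approximations standing in for libvpx's fixed-point cost tables (the exact table form is an assumption here):

    #include <math.h>

    /* cost, in 1/256-bit units, of coding the zero (resp. one) branch of
     * a binary node whose zero-probability is p/256, for p in 1..255 */
    static int cost_zero(int p) { return (int)(-256.0 * log2(p / 256.0)); }
    static int cost_one(int p) {
      return (int)(-256.0 * log2((256 - p) / 256.0));
    }

    /* tree: intra/inter, then LAST vs. not, then GOLDEN vs. ALTREF */
    static void ref_frame_costs_sketch(int cost[4], int p_intra, int p_last,
                                       int p_garf) {
      cost[0] = cost_zero(p_intra);                    /* INTRA_FRAME */
      cost[1] = cost_one(p_intra) + cost_zero(p_last); /* LAST_FRAME */
      cost[2] = cost_one(p_intra) + cost_one(p_last) +
                cost_zero(p_garf);                     /* GOLDEN_FRAME */
      cost[3] = cost_one(p_intra) + cost_one(p_last) +
                cost_one(p_garf);                      /* ALTREF_FRAME */
    }
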
645 | | |
646 | | #if CONFIG_MULTITHREAD |
647 | 0 | static void sum_coef_counts(MACROBLOCK *x, MACROBLOCK *x_thread) { |
648 | 0 | int i = 0; |
649 | 0 | do { |
650 | 0 | int j = 0; |
651 | 0 | do { |
652 | 0 | int k = 0; |
653 | 0 | do { |
654 | | /* at every context */ |
655 | | |
656 | | /* calc probs and branch cts for this frame only */ |
657 | 0 | int t = 0; /* token/prob index */ |
658 | |
659 | 0 | do { |
660 | 0 | x->coef_counts[i][j][k][t] += x_thread->coef_counts[i][j][k][t]; |
661 | 0 | } while (++t < ENTROPY_NODES); |
662 | 0 | } while (++k < PREV_COEF_CONTEXTS); |
663 | 0 | } while (++j < COEF_BANDS); |
664 | 0 | } while (++i < BLOCK_TYPES); |
665 | 0 | } |
666 | | #endif // CONFIG_MULTITHREAD |
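
encode_mb_row() pairs with this accumulator when row-based threading is on: each row periodically publishes its progress through mt_current_mb_col, and before encoding a column it spin-waits until the row above is at least nsync columns ahead (the exact condition lives in vp8_atomic_spin_wait(); this C11 restatement of it is an assumption):

    #include <stdatomic.h>

    static void publish_progress(atomic_int *my_col, int mb_col) {
      atomic_store_explicit(my_col, mb_col, memory_order_release);
    }

    static void wait_for_row_above(atomic_int *above_col, int mb_col,
                                   int nsync) {
      while (atomic_load_explicit(above_col, memory_order_acquire) - nsync <
             mb_col) {
        /* spin; the real code inserts pause/yield hints */
      }
    }
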
667 | | |
668 | 151k | void vp8_encode_frame(VP8_COMP *cpi) { |
669 | 151k | int mb_row; |
670 | 151k | MACROBLOCK *const x = &cpi->mb; |
671 | 151k | VP8_COMMON *const cm = &cpi->common; |
672 | 151k | MACROBLOCKD *const xd = &x->e_mbd; |
673 | 151k | TOKENEXTRA *tp = cpi->tok; |
674 | 151k | int segment_counts[MAX_MB_SEGMENTS]; |
675 | 151k | int totalrate; |
676 | | #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING |
677 | | BOOL_CODER *bc = &cpi->bc[1]; /* bc[0] is for control partition */ |
678 | | const int num_part = (1 << cm->multi_token_partition); |
679 | | #endif |
680 | | |
681 | 151k | memset(segment_counts, 0, sizeof(segment_counts)); |
682 | 151k | totalrate = 0; |
683 | | |
684 | 151k | if (cpi->compressor_speed == 2) { |
685 | 59.1k | if (cpi->oxcf.cpu_used < 0) { |
686 | 0 | cpi->Speed = -(cpi->oxcf.cpu_used); |
687 | 59.1k | } else { |
688 | 59.1k | vp8_auto_select_speed(cpi); |
689 | 59.1k | } |
690 | 59.1k | } |
691 | | |
692 | | /* Functions setup for all frame types so we can use MC in AltRef */ |
693 | 151k | if (!cm->use_bilinear_mc_filter) { |
694 | 151k | xd->subpixel_predict = vp8_sixtap_predict4x4; |
695 | 151k | xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; |
696 | 151k | xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; |
697 | 151k | xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; |
698 | 151k | } else { |
699 | 0 | xd->subpixel_predict = vp8_bilinear_predict4x4; |
700 | 0 | xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; |
701 | 0 | xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; |
702 | 0 | xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; |
703 | 0 | } |
704 | | |
705 | 151k | cpi->mb.skip_true_count = 0; |
706 | 151k | cpi->tok_count = 0; |
707 | | |
708 | | #if 0 |
709 | | /* Experimental code */ |
710 | | cpi->frame_distortion = 0; |
711 | | cpi->last_mb_distortion = 0; |
712 | | #endif |
713 | | |
714 | 151k | xd->mode_info_context = cm->mi; |
715 | | |
716 | 151k | vp8_zero(cpi->mb.MVcount); |
717 | | |
718 | 151k | vp8cx_frame_init_quantizer(cpi); |
719 | | |
720 | 151k | vp8_initialize_rd_consts(cpi, x, |
721 | 151k | vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q)); |
722 | | |
723 | 151k | vp8cx_initialize_me_consts(cpi, cm->base_qindex); |
724 | | |
725 | 151k | if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { |
726 | | /* Initialize encode frame context. */ |
727 | 0 | init_encode_frame_mb_context(cpi); |
728 | | |
729 | | /* Build a frame level activity map */ |
730 | 0 | build_activity_map(cpi); |
731 | 0 | } |
732 | | |
733 | | /* re-init encode frame context. */ |
734 | 151k | init_encode_frame_mb_context(cpi); |
735 | | |
736 | | #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING |
737 | | { |
738 | | int i; |
739 | | for (i = 0; i < num_part; ++i) { |
740 | | vp8_start_encode(&bc[i], cpi->partition_d[i + 1], |
741 | | cpi->partition_d_end[i + 1]); |
742 | | bc[i].error = &cm->error; |
743 | | } |
744 | | } |
745 | | |
746 | | #endif |
747 | | |
748 | 151k | { |
749 | | #if CONFIG_INTERNAL_STATS |
750 | | struct vpx_usec_timer emr_timer; |
751 | | vpx_usec_timer_start(&emr_timer); |
752 | | #endif |
753 | | |
754 | 151k | #if CONFIG_MULTITHREAD |
755 | 151k | if (vpx_atomic_load_acquire(&cpi->b_multi_threaded)) { |
756 | 0 | int i; |
757 | |
758 | 0 | vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, |
759 | 0 | cpi->encoding_thread_count); |
760 | |
761 | 0 | if (cpi->mt_current_mb_col_size != cm->mb_rows) { |
762 | 0 | vpx_free(cpi->mt_current_mb_col); |
763 | 0 | cpi->mt_current_mb_col = NULL; |
764 | 0 | cpi->mt_current_mb_col_size = 0; |
765 | 0 | CHECK_MEM_ERROR( |
766 | 0 | &cpi->common.error, cpi->mt_current_mb_col, |
767 | 0 | vpx_malloc(sizeof(*cpi->mt_current_mb_col) * cm->mb_rows)); |
768 | 0 | cpi->mt_current_mb_col_size = cm->mb_rows; |
769 | 0 | } |
770 | 0 | for (i = 0; i < cm->mb_rows; ++i) |
771 | 0 | vpx_atomic_store_release(&cpi->mt_current_mb_col[i], -1); |
772 | |
773 | 0 | for (i = 0; i < cpi->encoding_thread_count; ++i) { |
774 | 0 | vp8_sem_post(&cpi->h_event_start_encoding[i]); |
775 | 0 | } |
776 | |
777 | 0 | for (mb_row = 0; mb_row < cm->mb_rows; |
778 | 0 | mb_row += (cpi->encoding_thread_count + 1)) { |
779 | 0 | vp8_zero(cm->left_context); |
780 | |
781 | | #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING |
782 | | tp = cpi->tok; |
783 | | #else |
784 | 0 | tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24); |
785 | 0 | #endif |
786 | |
787 | 0 | encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); |
788 | | |
789 | | /* adjust to the next row of mbs */ |
790 | 0 | x->src.y_buffer += |
791 | 0 | 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - |
792 | 0 | 16 * cm->mb_cols; |
793 | 0 | x->src.u_buffer += |
794 | 0 | 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - |
795 | 0 | 8 * cm->mb_cols; |
796 | 0 | x->src.v_buffer += |
797 | 0 | 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - |
798 | 0 | 8 * cm->mb_cols; |
799 | |
800 | 0 | xd->mode_info_context += |
801 | 0 | xd->mode_info_stride * cpi->encoding_thread_count; |
802 | 0 | x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count; |
803 | 0 | x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count; |
804 | 0 | } |
805 | | /* Wait for all the threads to finish. */ |
806 | 0 | for (i = 0; i < cpi->encoding_thread_count; ++i) { |
807 | 0 | vp8_sem_wait(&cpi->h_event_end_encoding[i]); |
808 | 0 | } |
809 | |
810 | 0 | for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { |
811 | 0 | cpi->tok_count += (unsigned int)(cpi->tplist[mb_row].stop - |
812 | 0 | cpi->tplist[mb_row].start); |
813 | 0 | } |
814 | |
815 | 0 | if (xd->segmentation_enabled) { |
816 | 0 | int j; |
817 | |
|
818 | 0 | if (xd->segmentation_enabled) { |
819 | 0 | for (i = 0; i < cpi->encoding_thread_count; ++i) { |
820 | 0 | for (j = 0; j < 4; ++j) { |
821 | 0 | segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j]; |
822 | 0 | } |
823 | 0 | } |
824 | 0 | } |
825 | 0 | } |
826 | |
827 | 0 | for (i = 0; i < cpi->encoding_thread_count; ++i) { |
828 | 0 | int mode_count; |
829 | 0 | int c_idx; |
830 | 0 | totalrate += cpi->mb_row_ei[i].totalrate; |
831 | |
832 | 0 | cpi->mb.skip_true_count += cpi->mb_row_ei[i].mb.skip_true_count; |
833 | |
834 | 0 | for (mode_count = 0; mode_count < VP8_YMODES; ++mode_count) { |
835 | 0 | cpi->mb.ymode_count[mode_count] += |
836 | 0 | cpi->mb_row_ei[i].mb.ymode_count[mode_count]; |
837 | 0 | } |
838 | |
839 | 0 | for (mode_count = 0; mode_count < VP8_UV_MODES; ++mode_count) { |
840 | 0 | cpi->mb.uv_mode_count[mode_count] += |
841 | 0 | cpi->mb_row_ei[i].mb.uv_mode_count[mode_count]; |
842 | 0 | } |
843 | |
844 | 0 | for (c_idx = 0; c_idx < MVvals; ++c_idx) { |
845 | 0 | cpi->mb.MVcount[0][c_idx] += cpi->mb_row_ei[i].mb.MVcount[0][c_idx]; |
846 | 0 | cpi->mb.MVcount[1][c_idx] += cpi->mb_row_ei[i].mb.MVcount[1][c_idx]; |
847 | 0 | } |
848 | |
849 | 0 | cpi->mb.prediction_error += cpi->mb_row_ei[i].mb.prediction_error; |
850 | 0 | cpi->mb.intra_error += cpi->mb_row_ei[i].mb.intra_error; |
851 | |
852 | 0 | for (c_idx = 0; c_idx < MAX_REF_FRAMES; ++c_idx) { |
853 | 0 | cpi->mb.count_mb_ref_frame_usage[c_idx] += |
854 | 0 | cpi->mb_row_ei[i].mb.count_mb_ref_frame_usage[c_idx]; |
855 | 0 | } |
856 | |
857 | 0 | for (c_idx = 0; c_idx < MAX_ERROR_BINS; ++c_idx) { |
858 | 0 | cpi->mb.error_bins[c_idx] += cpi->mb_row_ei[i].mb.error_bins[c_idx]; |
859 | 0 | } |
860 | | |
861 | | /* add up counts for each thread */ |
862 | 0 | sum_coef_counts(x, &cpi->mb_row_ei[i].mb); |
863 | 0 | } |
864 | |
865 | 0 | } else |
866 | 151k | #endif // CONFIG_MULTITHREAD |
867 | 151k | { |
868 | | |
869 | | /* for each macroblock row in image */ |
870 | 703k | for (mb_row = 0; mb_row < cm->mb_rows; ++mb_row) { |
871 | 551k | vp8_zero(cm->left_context); |
872 | | |
873 | | #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING |
874 | | tp = cpi->tok; |
875 | | #endif |
876 | | |
877 | 551k | encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate); |
878 | | |
879 | | /* adjust to the next row of mbs */ |
880 | 551k | x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols; |
881 | 551k | x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; |
882 | 551k | x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols; |
883 | 551k | } |
884 | | |
885 | 151k | cpi->tok_count = (unsigned int)(tp - cpi->tok); |
886 | 151k | } |
887 | | |
888 | | #if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING |
889 | | { |
890 | | int i; |
891 | | for (i = 0; i < num_part; ++i) { |
892 | | vp8_stop_encode(&bc[i]); |
893 | | cpi->partition_sz[i + 1] = bc[i].pos; |
894 | | } |
895 | | } |
896 | | #endif |
897 | | |
898 | | #if CONFIG_INTERNAL_STATS |
899 | | vpx_usec_timer_mark(&emr_timer); |
900 | | cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer); |
901 | | #endif |
902 | 151k | } |
903 | | |
904 | | // Work out the segment probabilities if segmentation is enabled |
905 | | // and needs to be updated |
906 | 151k | if (xd->segmentation_enabled && xd->update_mb_segmentation_map) { |
907 | 0 | int tot_count; |
908 | 0 | int i; |
909 | | |
910 | | /* Set to defaults */ |
911 | 0 | memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs)); |
912 | |
913 | 0 | tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + |
914 | 0 | segment_counts[3]; |
915 | |
916 | 0 | if (tot_count) { |
917 | 0 | xd->mb_segment_tree_probs[0] = |
918 | 0 | ((segment_counts[0] + segment_counts[1]) * 255) / tot_count; |
919 | |
920 | 0 | tot_count = segment_counts[0] + segment_counts[1]; |
921 | |
922 | 0 | if (tot_count > 0) { |
923 | 0 | xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) / tot_count; |
924 | 0 | } |
925 | |
926 | 0 | tot_count = segment_counts[2] + segment_counts[3]; |
927 | |
928 | 0 | if (tot_count > 0) { |
929 | 0 | xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) / tot_count; |
930 | 0 | } |
931 | | |
932 | | /* Zero probabilities not allowed */ |
933 | 0 | for (i = 0; i < MB_FEATURE_TREE_PROBS; ++i) { |
934 | 0 | if (xd->mb_segment_tree_probs[i] == 0) xd->mb_segment_tree_probs[i] = 1; |
935 | 0 | } |
936 | 0 | } |
937 | 0 | } |
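
The probabilities above describe a 3-node binary tree over the four segment ids: probs[0] splits {0,1} from {2,3}, probs[1] splits 0 from 1, and probs[2] splits 2 from 3, each scaled to 0..255 with zero disallowed. A standalone sketch with a worked example (segment_tree_probs is a hypothetical name):

    static void segment_tree_probs(const int c[4], unsigned char probs[3]) {
      const int tot = c[0] + c[1] + c[2] + c[3];
      int i;
      probs[0] = probs[1] = probs[2] = 255; /* defaults */
      if (tot) probs[0] = (unsigned char)(((c[0] + c[1]) * 255) / tot);
      if (c[0] + c[1])
        probs[1] = (unsigned char)((c[0] * 255) / (c[0] + c[1]));
      if (c[2] + c[3])
        probs[2] = (unsigned char)((c[2] * 255) / (c[2] + c[3]));
      for (i = 0; i < 3; ++i) /* zero probabilities are not allowed */
        if (probs[i] == 0) probs[i] = 1;
    }

    /* e.g. counts {10, 20, 30, 40} -> probs {76, 85, 109} */
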
938 | | |
939 | | /* projected_frame_size in units of BYTES */ |
940 | 151k | cpi->projected_frame_size = totalrate >> 8; |
941 | | |
942 | | /* Make a note of the percentage of MBs coded Intra. */ |
943 | 151k | if (cm->frame_type == KEY_FRAME) { |
944 | 35.7k | cpi->this_frame_percent_intra = 100; |
945 | 115k | } else { |
946 | 115k | int tot_modes; |
947 | | |
948 | 115k | tot_modes = cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] + |
949 | 115k | cpi->mb.count_mb_ref_frame_usage[LAST_FRAME] + |
950 | 115k | cpi->mb.count_mb_ref_frame_usage[GOLDEN_FRAME] + |
951 | 115k | cpi->mb.count_mb_ref_frame_usage[ALTREF_FRAME]; |
952 | | |
953 | 115k | if (tot_modes) { |
954 | 115k | cpi->this_frame_percent_intra = |
955 | 115k | cpi->mb.count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes; |
956 | 115k | } |
957 | 115k | } |
958 | | |
959 | 151k | #if !CONFIG_REALTIME_ONLY |
960 | | /* Adjust the projected reference frame usage probability numbers to |
961 | | * reflect what we have just seen. This may be useful when we make |
962 | | * multiple iterations of the recode loop rather than continuing to use |
963 | | * values from the previous frame. |
964 | | */ |
965 | 151k | if ((cm->frame_type != KEY_FRAME) && |
966 | 115k | ((cpi->oxcf.number_of_layers > 1) || |
967 | 115k | (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame))) { |
968 | 104k | vp8_convert_rfct_to_prob(cpi); |
969 | 104k | } |
970 | 151k | #endif |
971 | 151k | } |
972 | 6.85k | void vp8_setup_block_ptrs(MACROBLOCK *x) { |
973 | 6.85k | int r, c; |
974 | 6.85k | int i; |
975 | | |
976 | 34.2k | for (r = 0; r < 4; ++r) { |
977 | 137k | for (c = 0; c < 4; ++c) { |
978 | 109k | x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4; |
979 | 109k | } |
980 | 27.4k | } |
981 | | |
982 | 20.5k | for (r = 0; r < 2; ++r) { |
983 | 41.1k | for (c = 0; c < 2; ++c) { |
984 | 27.4k | x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4; |
985 | 27.4k | } |
986 | 13.7k | } |
987 | | |
988 | 20.5k | for (r = 0; r < 2; ++r) { |
989 | 41.1k | for (c = 0; c < 2; ++c) { |
990 | 27.4k | x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4; |
991 | 27.4k | } |
992 | 13.7k | } |
993 | | |
994 | 6.85k | x->block[24].src_diff = x->src_diff + 384; |
995 | | |
996 | 178k | for (i = 0; i < 25; ++i) { |
997 | 171k | x->block[i].coeff = x->coeff + i * 16; |
998 | 171k | } |
999 | 6.85k | } |
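
The pointer wiring above packs all 25 residual blocks into the single 400-entry src_diff buffer: the 16x16 Y residual is stored 16-wide at offset 0, the 8x8 U and V residuals 8-wide at 256 and 320, and the 4x4 Y2 (second-order DC) block at 384. A sketch of the offset map it builds (src_diff_offset is a hypothetical name):

    static int src_diff_offset(int block) {
      if (block < 16) /* Y: 4x4 grid of 4x4 blocks, 16-wide rows */
        return (block / 4) * 4 * 16 + (block % 4) * 4;
      if (block < 20) /* U: 2x2 grid, 8-wide rows */
        return 256 + ((block - 16) / 2) * 4 * 8 + ((block - 16) % 2) * 4;
      if (block < 24) /* V: 2x2 grid, 8-wide rows */
        return 320 + ((block - 20) / 2) * 4 * 8 + ((block - 20) % 2) * 4;
      return 384; /* Y2: second-order DC block */
    }
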
1000 | | |
1001 | 151k | void vp8_build_block_offsets(MACROBLOCK *x) { |
1002 | 151k | int block = 0; |
1003 | 151k | int br, bc; |
1004 | | |
1005 | 151k | vp8_build_block_doffsets(&x->e_mbd); |
1006 | | |
1007 | | /* y blocks */ |
1008 | 151k | x->thismb_ptr = &x->thismb[0]; |
1009 | 758k | for (br = 0; br < 4; ++br) { |
1010 | 3.03M | for (bc = 0; bc < 4; ++bc) { |
1011 | 2.42M | BLOCK *this_block = &x->block[block]; |
1012 | 2.42M | this_block->base_src = &x->thismb_ptr; |
1013 | 2.42M | this_block->src_stride = 16; |
1014 | 2.42M | this_block->src = 4 * br * 16 + 4 * bc; |
1015 | 2.42M | ++block; |
1016 | 2.42M | } |
1017 | 606k | } |
1018 | | |
1019 | | /* u blocks */ |
1020 | 454k | for (br = 0; br < 2; ++br) { |
1021 | 909k | for (bc = 0; bc < 2; ++bc) { |
1022 | 606k | BLOCK *this_block = &x->block[block]; |
1023 | 606k | this_block->base_src = &x->src.u_buffer; |
1024 | 606k | this_block->src_stride = x->src.uv_stride; |
1025 | 606k | this_block->src = 4 * br * this_block->src_stride + 4 * bc; |
1026 | 606k | ++block; |
1027 | 606k | } |
1028 | 303k | } |
1029 | | |
1030 | | /* v blocks */ |
1031 | 454k | for (br = 0; br < 2; ++br) { |
1032 | 909k | for (bc = 0; bc < 2; ++bc) { |
1033 | 606k | BLOCK *this_block = &x->block[block]; |
1034 | 606k | this_block->base_src = &x->src.v_buffer; |
1035 | 606k | this_block->src_stride = x->src.uv_stride; |
1036 | 606k | this_block->src = 4 * br * this_block->src_stride + 4 * bc; |
1037 | 606k | ++block; |
1038 | 606k | } |
1039 | 303k | } |
1040 | 151k | } |
1041 | | |
1042 | 2.23M | static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) { |
1043 | 2.23M | const MACROBLOCKD *xd = &x->e_mbd; |
1044 | 2.23M | const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode; |
1045 | 2.23M | const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode; |
1046 | | |
1047 | | #ifdef MODE_STATS |
1048 | | const int is_key = cpi->common.frame_type == KEY_FRAME; |
1049 | | |
1050 | | ++(is_key ? uv_modes : inter_uv_modes)[uvm]; |
1051 | | |
1052 | | if (m == B_PRED) { |
1053 | | unsigned int *const bct = is_key ? b_modes : inter_b_modes; |
1054 | | |
1055 | | int b = 0; |
1056 | | |
1057 | | do { |
1058 | | ++bct[xd->block[b].bmi.mode]; |
1059 | | } while (++b < 16); |
1060 | | } |
1061 | | |
1062 | | #else |
1063 | 2.23M | (void)cpi; |
1064 | 2.23M | #endif |
1065 | | |
1066 | 2.23M | ++x->ymode_count[m]; |
1067 | 2.23M | ++x->uv_mode_count[uvm]; |
1068 | 2.23M | } |
1069 | | |
1070 | | /* Experimental stub function to create a per MB zbin adjustment based on |
1071 | | * some previously calculated measure of MB activity. |
1072 | | */ |
1073 | 0 | static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) { |
1074 | | #if USE_ACT_INDEX |
1075 | | x->act_zbin_adj = *(x->mb_activity_ptr); |
1076 | | #else |
1077 | 0 | int64_t a; |
1078 | 0 | int64_t b; |
1079 | 0 | int64_t act = *(x->mb_activity_ptr); |
1080 | | |
1081 | | /* Apply the masking to the RD multiplier. */ |
1082 | 0 | a = act + 4 * cpi->activity_avg; |
1083 | 0 | b = 4 * act + cpi->activity_avg; |
1084 | |
1085 | 0 | if (act > cpi->activity_avg) { |
1086 | 0 | x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1; |
1087 | 0 | } else { |
1088 | 0 | x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b); |
1089 | 0 | } |
1090 | 0 | #endif |
1091 | 0 | } |
1092 | | |
1093 | | int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, |
1094 | 1.40M | TOKENEXTRA **t) { |
1095 | 1.40M | MACROBLOCKD *xd = &x->e_mbd; |
1096 | 1.40M | int rate; |
1097 | | |
1098 | 1.40M | if (cpi->sf.RD && cpi->compressor_speed != 2) { |
1099 | 771k | vp8_rd_pick_intra_mode(x, &rate); |
1100 | 771k | } else { |
1101 | 628k | vp8_pick_intra_mode(x, &rate); |
1102 | 628k | } |
1103 | | |
1104 | 1.40M | if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { |
1105 | 0 | adjust_act_zbin(cpi, x); |
1106 | 0 | vp8_update_zbin_extra(cpi, x); |
1107 | 0 | } |
1108 | | |
1109 | 1.40M | if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED) { |
1110 | 686k | vp8_encode_intra4x4mby(x); |
1111 | 713k | } else { |
1112 | 713k | vp8_encode_intra16x16mby(x); |
1113 | 713k | } |
1114 | | |
1115 | 1.40M | vp8_encode_intra16x16mbuv(x); |
1116 | | |
1117 | 1.40M | sum_intra_stats(cpi, x); |
1118 | | |
1119 | 1.40M | vp8_tokenize_mb(cpi, x, t); |
1120 | | |
1121 | 1.40M | if (xd->mode_info_context->mbmi.mode != B_PRED) vp8_inverse_transform_mby(xd); |
1122 | | |
1123 | 1.40M | vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv, |
1124 | 1.40M | xd->dst.u_buffer, xd->dst.v_buffer, |
1125 | 1.40M | xd->dst.uv_stride, xd->eobs + 16); |
1126 | 1.40M | return rate; |
1127 | 1.40M | } |
1128 | | #ifdef SPEEDSTATS |
1129 | | extern int cnt_pm; |
1130 | | #endif |
1131 | | |
1132 | | extern void vp8_fix_contexts(MACROBLOCKD *x); |
1133 | | |
1134 | | int vp8cx_encode_inter_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t, |
1135 | | int recon_yoffset, int recon_uvoffset, |
1136 | 1.52M | int mb_row, int mb_col) { |
1137 | 1.52M | MACROBLOCKD *const xd = &x->e_mbd; |
1138 | 1.52M | int intra_error = 0; |
1139 | 1.52M | int rate; |
1140 | 1.52M | int distortion; |
1141 | | |
1142 | 1.52M | x->skip = 0; |
1143 | | |
1144 | 1.52M | if (xd->segmentation_enabled) { |
1145 | 0 | x->encode_breakout = |
1146 | 0 | cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id]; |
1147 | 1.52M | } else { |
1148 | 1.52M | x->encode_breakout = cpi->oxcf.encode_breakout; |
1149 | 1.52M | } |
1150 | | |
1151 | 1.52M | #if CONFIG_TEMPORAL_DENOISING |
1152 | | /* Reset the best sse mode/mv for each macroblock. */ |
1153 | 1.52M | x->best_reference_frame = INTRA_FRAME; |
1154 | 1.52M | x->best_zeromv_reference_frame = INTRA_FRAME; |
1155 | 1.52M | x->best_sse_inter_mode = 0; |
1156 | 1.52M | x->best_sse_mv.as_int = 0; |
1157 | 1.52M | x->need_to_clamp_best_mvs = 0; |
1158 | 1.52M | #endif |
1159 | | |
1160 | 1.52M | if (cpi->sf.RD) { |
1161 | 769k | int zbin_mode_boost_enabled = x->zbin_mode_boost_enabled; |
1162 | | |
1163 | | /* Are we using the fast quantizer for the mode selection? */ |
1164 | 769k | if (cpi->sf.use_fastquant_for_pick) { |
1165 | 769k | x->quantize_b = vp8_fast_quantize_b; |
1166 | | |
1167 | | /* the fast quantizer does not use zbin_extra, so |
1168 | | * do not recalculate */ |
1169 | 769k | x->zbin_mode_boost_enabled = 0; |
1170 | 769k | } |
1171 | 769k | vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, |
1172 | 769k | &distortion, &intra_error, mb_row, mb_col); |
1173 | | |
1174 | | /* switch back to the regular quantizer for the encode */ |
1175 | 769k | if (cpi->sf.improved_quant) { |
1176 | 769k | x->quantize_b = vp8_regular_quantize_b; |
1177 | 769k | } |
1178 | | |
1179 | | /* restore x->zbin_mode_boost_enabled */ |
1180 | 769k | x->zbin_mode_boost_enabled = zbin_mode_boost_enabled; |
1181 | | |
1182 | 769k | } else { |
1183 | 758k | vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate, |
1184 | 758k | &distortion, &intra_error, mb_row, mb_col); |
1185 | 758k | } |
1186 | | |
1187 | 1.52M | x->prediction_error += distortion; |
1188 | 1.52M | x->intra_error += intra_error; |
1189 | | |
1190 | 1.52M | if (cpi->oxcf.tuning == VP8_TUNE_SSIM) { |
1191 | | /* Adjust the zbin based on this MB rate. */ |
1192 | 0 | adjust_act_zbin(cpi, x); |
1193 | 0 | } |
1194 | | |
1195 | | #if 0 |
1196 | | /* Experimental RD code */ |
1197 | | cpi->frame_distortion += distortion; |
1198 | | cpi->last_mb_distortion = distortion; |
1199 | | #endif |
1200 | | |
1201 | | /* MB level adjustment to quantizer setup */ |
1202 | 1.52M | if (xd->segmentation_enabled) { |
1203 | | /* If cyclic update enabled */ |
1204 | 0 | if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled) { |
1205 | | /* Clear segment_id back to 0 if not coded (last frame 0,0) */ |
1206 | 0 | if ((xd->mode_info_context->mbmi.segment_id == 1) && |
1207 | 0 | ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || |
1208 | 0 | (xd->mode_info_context->mbmi.mode != ZEROMV))) { |
1209 | 0 | xd->mode_info_context->mbmi.segment_id = 0; |
1210 | | |
1211 | | /* segment_id changed, so update */ |
1212 | 0 | vp8cx_mb_init_quantizer(cpi, x, 1); |
1213 | 0 | } |
1214 | 0 | } |
1215 | 0 | } |
1216 | | |
1217 | 1.52M | { |
1218 | | /* Experimental code. |
1219 | | * Special case for gf and arf zeromv modes, for 1 temporal layer. |
1220 | | * Increase zbin size to suppress noise. |
1221 | | */ |
1222 | 1.52M | x->zbin_mode_boost = 0; |
1223 | 1.52M | if (x->zbin_mode_boost_enabled) { |
1224 | 1.52M | if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) { |
1225 | 689k | if (xd->mode_info_context->mbmi.mode == ZEROMV) { |
1226 | 148k | if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME && |
1227 | 12.5k | cpi->oxcf.number_of_layers == 1) { |
1228 | 12.5k | x->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST; |
1229 | 136k | } else { |
1230 | 136k | x->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST; |
1231 | 136k | } |
1232 | 541k | } else if (xd->mode_info_context->mbmi.mode == SPLITMV) { |
1233 | 181k | x->zbin_mode_boost = 0; |
1234 | 359k | } else { |
1235 | 359k | x->zbin_mode_boost = MV_ZBIN_BOOST; |
1236 | 359k | } |
1237 | 689k | } |
1238 | 1.52M | } |
1239 | | |
1240 | | /* The fast quantizer doesn't use zbin_extra, so only update it when |
1241 | | * using the regular quantizer. */ |
1242 | 1.52M | if (cpi->sf.improved_quant) vp8_update_zbin_extra(cpi, x); |
1243 | 1.52M | } |
1244 | | |
1245 | 1.52M | x->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame]++; |
1246 | | |
1247 | 1.52M | if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) { |
1248 | 837k | vp8_encode_intra16x16mbuv(x); |
1249 | | |
1250 | 837k | if (xd->mode_info_context->mbmi.mode == B_PRED) { |
1251 | 351k | vp8_encode_intra4x4mby(x); |
1252 | 485k | } else { |
1253 | 485k | vp8_encode_intra16x16mby(x); |
1254 | 485k | } |
1255 | | |
1256 | 837k | sum_intra_stats(cpi, x); |
1257 | 837k | } else { |
1258 | 689k | int ref_fb_idx; |
1259 | | |
1260 | 689k | if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME) { |
1261 | 609k | ref_fb_idx = cpi->common.lst_fb_idx; |
1262 | 609k | } else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME) { |
1263 | 61.1k | ref_fb_idx = cpi->common.gld_fb_idx; |
1264 | 61.1k | } else { |
1265 | 19.5k | ref_fb_idx = cpi->common.alt_fb_idx; |
1266 | 19.5k | } |
1267 | | |
1268 | 689k | xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset; |
1269 | 689k | xd->pre.u_buffer = |
1270 | 689k | cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset; |
1271 | 689k | xd->pre.v_buffer = |
1272 | 689k | cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset; |
1273 | | |
1274 | 689k | if (!x->skip) { |
1275 | 689k | vp8_encode_inter16x16(x); |
1276 | 689k | } else { |
1277 | 0 | vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer, |
1278 | 0 | xd->dst.v_buffer, xd->dst.y_stride, |
1279 | 0 | xd->dst.uv_stride); |
1280 | 0 | } |
1281 | 689k | } |
1282 | | |
1283 | 1.52M | if (!x->skip) { |
1284 | 1.52M | vp8_tokenize_mb(cpi, x, t); |
1285 | | |
1286 | 1.52M | if (xd->mode_info_context->mbmi.mode != B_PRED) { |
1287 | 1.17M | vp8_inverse_transform_mby(xd); |
1288 | 1.17M | } |
1289 | | |
1290 | 1.52M | vp8_dequant_idct_add_uv_block(xd->qcoeff + 16 * 16, xd->dequant_uv, |
1291 | 1.52M | xd->dst.u_buffer, xd->dst.v_buffer, |
1292 | 1.52M | xd->dst.uv_stride, xd->eobs + 16); |
1293 | 1.52M | } else { |
1294 | | /* always set mb_skip_coeff as it is needed by the loopfilter */ |
1295 | 0 | xd->mode_info_context->mbmi.mb_skip_coeff = 1; |
1296 | |
1297 | 0 | if (cpi->common.mb_no_coeff_skip) { |
1298 | 0 | x->skip_true_count++; |
1299 | 0 | vp8_fix_contexts(xd); |
1300 | 0 | } else { |
1301 | 0 | vp8_stuff_mb(cpi, x, t); |
1302 | 0 | } |
1303 | 0 | } |
1304 | | |
1305 | 1.52M | return rate; |
1306 | 1.52M | } |
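
The zbin_mode_boost block in vp8cx_encode_inter_macroblock() above (active only when zbin_mode_boost_enabled is set) reduces to a small decision table. A sketch as a pure function, using the mode/ref enums and *_ZBIN_BOOST constants from the libvpx headers this file already includes (zbin_boost_for_mode is a hypothetical name):

    static int zbin_boost_for_mode(MB_PREDICTION_MODE mode,
                                   MV_REFERENCE_FRAME ref, int num_layers) {
      if (ref == INTRA_FRAME) return 0;
      if (mode == ZEROMV)
        return (ref != LAST_FRAME && num_layers == 1) ? GF_ZEROMV_ZBIN_BOOST
                                                      : LF_ZEROMV_ZBIN_BOOST;
      if (mode == SPLITMV) return 0;
      return MV_ZBIN_BOOST;
    }
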