/src/libvpx/vp9/common/vp9_thread_common.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2014 The WebM project authors. All Rights Reserved. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license |
5 | | * that can be found in the LICENSE file in the root of the source |
6 | | * tree. An additional intellectual property rights grant can be found |
7 | | * in the file PATENTS. All contributing project authors may |
8 | | * be found in the AUTHORS file in the root of the source tree. |
9 | | */ |
10 | | |
11 | | #include <assert.h> |
12 | | #include <limits.h> |
13 | | #include "./vpx_config.h" |
14 | | #include "vpx_dsp/vpx_dsp_common.h" |
15 | | #include "vpx_mem/vpx_mem.h" |
16 | | #include "vpx_util/vpx_pthread.h" |
17 | | #include "vp9/common/vp9_entropymode.h" |
18 | | #include "vp9/common/vp9_thread_common.h" |
19 | | #include "vp9/common/vp9_reconinter.h" |
20 | | #include "vp9/common/vp9_loopfilter.h" |
21 | | |
22 | 0 | static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) { |
23 | 0 | #if CONFIG_MULTITHREAD |
24 | 0 | const int nsync = lf_sync->sync_range; |
25 | |
26 | 0 | if (r && !(c & (nsync - 1))) { |
27 | 0 | pthread_mutex_t *const mutex = &lf_sync->mutex[r - 1]; |
28 | 0 | pthread_mutex_lock(mutex); |
29 | |
30 | 0 | while (c > lf_sync->cur_sb_col[r - 1] - nsync) { |
31 | 0 | pthread_cond_wait(&lf_sync->cond[r - 1], mutex); |
32 | 0 | } |
33 | 0 | pthread_mutex_unlock(mutex); |
34 | 0 | } |
35 | | #else |
36 | | (void)lf_sync; |
37 | | (void)r; |
38 | | (void)c; |
39 | | #endif // CONFIG_MULTITHREAD |
40 | 0 | } |
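| | // sync_read is the consumer half of the row pipeline: before filtering SB
| | // column c of row r, it waits until row r - 1 has filtered at least nsync
| | // columns past c. The wait is only armed at every nsync-th column
| | // (c & (nsync - 1)) to keep mutex traffic low.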
41 | | |
42 | | static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c, |
43 | 0 | const int sb_cols) { |
44 | 0 | #if CONFIG_MULTITHREAD |
45 | 0 | const int nsync = lf_sync->sync_range; |
46 | 0 | int cur; |
47 | | // Only signal when enough SBs have been filtered for the next row to run.
48 | 0 | int sig = 1; |
49 | |
50 | 0 | if (c < sb_cols - 1) { |
51 | 0 | cur = c; |
52 | 0 | if (c % nsync) sig = 0; |
53 | 0 | } else { |
54 | 0 | cur = sb_cols + nsync; |
55 | 0 | } |
56 | |
57 | 0 | if (sig) { |
58 | 0 | pthread_mutex_lock(&lf_sync->mutex[r]); |
59 | |
60 | 0 | lf_sync->cur_sb_col[r] = cur; |
61 | |
62 | 0 | pthread_cond_signal(&lf_sync->cond[r]); |
63 | 0 | pthread_mutex_unlock(&lf_sync->mutex[r]); |
64 | 0 | } |
65 | | #else |
66 | | (void)lf_sync; |
67 | | (void)r; |
68 | | (void)c; |
69 | | (void)sb_cols; |
70 | | #endif // CONFIG_MULTITHREAD |
71 | 0 | } |
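| | // sync_write is the producer half: it publishes the last filtered SB column
| | // of row r and wakes a waiter on the row below. Signaling is throttled to
| | // every nsync-th column, and the final column publishes sb_cols + nsync so
| | // that any remaining reader's wait condition is satisfied.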
72 | | |
73 | | // Implement row loopfiltering for each thread. |
74 | | static INLINE void thread_loop_filter_rows( |
75 | | const YV12_BUFFER_CONFIG *const frame_buffer, VP9_COMMON *const cm, |
76 | | struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop, |
77 | 0 | int y_only, VP9LfSync *const lf_sync) { |
78 | 0 | const int num_planes = y_only ? 1 : MAX_MB_PLANE; |
79 | 0 | const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2; |
80 | 0 | const int num_active_workers = lf_sync->num_active_workers; |
81 | 0 | int mi_row, mi_col; |
82 | 0 | enum lf_path path; |
83 | 0 | if (y_only) |
84 | 0 | path = LF_PATH_444; |
85 | 0 | else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1) |
86 | 0 | path = LF_PATH_420; |
87 | 0 | else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0) |
88 | 0 | path = LF_PATH_444; |
89 | 0 | else |
90 | 0 | path = LF_PATH_SLOW; |
91 | |
92 | 0 | assert(num_active_workers > 0); |
93 | |
94 | 0 | for (mi_row = start; mi_row < stop; |
95 | 0 | mi_row += num_active_workers * MI_BLOCK_SIZE) { |
96 | 0 | MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride; |
97 | 0 | LOOP_FILTER_MASK *lfm = get_lfm(&cm->lf, mi_row, 0); |
98 | |
99 | 0 | for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE, ++lfm) { |
100 | 0 | const int r = mi_row >> MI_BLOCK_SIZE_LOG2; |
101 | 0 | const int c = mi_col >> MI_BLOCK_SIZE_LOG2; |
102 | 0 | int plane; |
103 | |
104 | 0 | sync_read(lf_sync, r, c); |
105 | |
106 | 0 | vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col); |
107 | |
108 | 0 | vp9_adjust_mask(cm, mi_row, mi_col, lfm); |
109 | |
110 | 0 | vp9_filter_block_plane_ss00(cm, &planes[0], mi_row, lfm); |
111 | 0 | for (plane = 1; plane < num_planes; ++plane) { |
112 | 0 | switch (path) { |
113 | 0 | case LF_PATH_420: |
114 | 0 | vp9_filter_block_plane_ss11(cm, &planes[plane], mi_row, lfm); |
115 | 0 | break; |
116 | 0 | case LF_PATH_444: |
117 | 0 | vp9_filter_block_plane_ss00(cm, &planes[plane], mi_row, lfm); |
118 | 0 | break; |
119 | 0 | case LF_PATH_SLOW: |
120 | 0 | vp9_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, |
121 | 0 | mi_row, mi_col); |
122 | 0 | break; |
123 | 0 | } |
124 | 0 | } |
125 | | |
126 | 0 | sync_write(lf_sync, r, c, sb_cols); |
127 | 0 | } |
128 | 0 | } |
129 | 0 | } |
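| | // SB rows are interleaved across workers: each worker starts at its own
| | // offset and strides by num_active_workers * MI_BLOCK_SIZE, so consecutive
| | // SB rows land on different workers and the sync_read()/sync_write() pair
| | // above serializes the top-to-bottom filtering dependency between them.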
130 | | |
131 | | // Row-based multi-threaded loopfilter hook |
132 | 0 | static int loop_filter_row_worker(void *arg1, void *arg2) { |
133 | 0 | VP9LfSync *const lf_sync = (VP9LfSync *)arg1; |
134 | 0 | LFWorkerData *const lf_data = (LFWorkerData *)arg2; |
135 | 0 | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
136 | 0 | lf_data->start, lf_data->stop, lf_data->y_only, |
137 | 0 | lf_sync); |
138 | 0 | return 1; |
139 | 0 | } |
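| | // VPxWorker hooks return nonzero on success; this hook has no failure path,
| | // so it always reports success.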
140 | | |
141 | | static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm, |
142 | | struct macroblockd_plane planes[MAX_MB_PLANE], |
143 | | int start, int stop, int y_only, |
144 | | VPxWorker *workers, int nworkers, |
145 | 0 | VP9LfSync *lf_sync) { |
146 | 0 | const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); |
147 | | // Number of superblock rows
148 | 0 | const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2; |
149 | 0 | const int num_tile_cols = 1 << cm->log2_tile_cols; |
150 | | // Limit the number of workers to prevent changes in frame dimensions from |
151 | | // causing incorrect sync calculations when sb_rows < threads/tile_cols. |
152 | | // Further restrict them by the number of tile columns should the user |
153 | | // request more as this implementation doesn't scale well beyond that. |
154 | 0 | const int num_workers = VPXMIN(nworkers, VPXMIN(num_tile_cols, sb_rows)); |
155 | 0 | int i; |
156 | |
157 | 0 | if (!lf_sync->sync_range || sb_rows != lf_sync->rows || |
158 | 0 | num_workers > lf_sync->num_workers) { |
159 | 0 | vp9_loop_filter_dealloc(lf_sync); |
160 | 0 | vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers); |
161 | 0 | } |
162 | 0 | lf_sync->num_active_workers = num_workers; |
163 | | |
164 | | // Initialize cur_sb_col to -1 for all SB rows. |
165 | 0 | memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows); |
166 | | |
167 | | // Set up loopfilter thread data. |
168 | | // The decoder is capping num_workers because it has been observed that using |
169 | | // more threads on the loopfilter than there are cores will hurt performance |
170 | | // on Android. This is because the system will only schedule the tile decode |
171 | | // workers on cores equal to the number of tile columns. Then if the decoder |
172 | | // tries to use more threads for the loopfilter, it will hurt performance |
173 | | // because of contention. If the multithreading code changes in the future |
174 | | // then the number of workers used by the loopfilter should be revisited. |
175 | 0 | for (i = 0; i < num_workers; ++i) { |
176 | 0 | VPxWorker *const worker = &workers[i]; |
177 | 0 | LFWorkerData *const lf_data = &lf_sync->lfdata[i]; |
178 | |
179 | 0 | worker->hook = loop_filter_row_worker; |
180 | 0 | worker->data1 = lf_sync; |
181 | 0 | worker->data2 = lf_data; |
182 | | |
183 | | // Loopfilter data |
184 | 0 | vp9_loop_filter_data_reset(lf_data, frame, cm, planes); |
185 | 0 | lf_data->start = start + i * MI_BLOCK_SIZE; |
186 | 0 | lf_data->stop = stop; |
187 | 0 | lf_data->y_only = y_only; |
188 | | |
189 | | // Start loopfiltering |
190 | 0 | if (i == num_workers - 1) { |
191 | 0 | winterface->execute(worker); |
192 | 0 | } else { |
193 | 0 | winterface->launch(worker); |
194 | 0 | } |
195 | 0 | } |
196 | | |
197 | | // Wait till all rows are finished |
198 | 0 | for (i = 0; i < num_workers; ++i) { |
199 | 0 | winterface->sync(&workers[i]); |
200 | 0 | } |
201 | 0 | } |
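| | // The last worker runs via execute() on the calling thread while the others
| | // are launch()ed on their own threads, so the caller contributes a worker
| | // instead of idling until the sync() loop.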
202 | | |
203 | | void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm, |
204 | | struct macroblockd_plane planes[MAX_MB_PLANE], |
205 | | int frame_filter_level, int y_only, |
206 | | int partial_frame, VPxWorker *workers, |
207 | 0 | int num_workers, VP9LfSync *lf_sync) { |
208 | 0 | int start_mi_row, end_mi_row, mi_rows_to_filter; |
209 | |
210 | 0 | if (!frame_filter_level) return; |
211 | | |
212 | 0 | start_mi_row = 0; |
213 | 0 | mi_rows_to_filter = cm->mi_rows; |
214 | 0 | if (partial_frame && cm->mi_rows > 8) { |
215 | 0 | start_mi_row = cm->mi_rows >> 1; |
216 | 0 | start_mi_row &= 0xfffffff8; |
217 | 0 | mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8); |
218 | 0 | } |
219 | 0 | end_mi_row = start_mi_row + mi_rows_to_filter; |
220 | 0 | vp9_loop_filter_frame_init(cm, frame_filter_level); |
221 | |
222 | 0 | loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only, |
223 | 0 | workers, num_workers, lf_sync); |
224 | 0 | } |
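| | // With partial_frame set, only a band around the frame's vertical midpoint
| | // is filtered: start_mi_row is the midpoint rounded down to a multiple of
| | // 8 MI units (one 64x64 superblock row) by the 0xfffffff8 mask, and the
| | // band spans VPXMAX(cm->mi_rows / 8, 8) MI rows.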
225 | | |
226 | | void vp9_lpf_mt_init(VP9LfSync *lf_sync, VP9_COMMON *cm, int frame_filter_level, |
227 | 0 | int num_workers) { |
228 | 0 | const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2; |
229 | |
230 | 0 | if (!frame_filter_level) return; |
231 | | |
232 | 0 | if (!lf_sync->sync_range || sb_rows != lf_sync->rows || |
233 | 0 | num_workers > lf_sync->num_workers) { |
234 | 0 | vp9_loop_filter_dealloc(lf_sync); |
235 | 0 | vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers); |
236 | 0 | } |
237 | | |
238 | | // Initialize cur_sb_col to -1 for all SB rows. |
239 | 0 | memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows); |
240 | |
241 | 0 | lf_sync->corrupted = 0; |
242 | |
243 | 0 | memset(lf_sync->num_tiles_done, 0, |
244 | 0 | sizeof(*lf_sync->num_tiles_done) * sb_rows); |
245 | 0 | cm->lf_row = 0; |
246 | 0 | } |
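| | // Decode-side (row-based) counterpart of the setup in loop_filter_rows_mt():
| | // the sync state is reallocated only when the frame geometry or worker count
| | // has changed, then per-row progress and tile-completion counters are reset
| | // for the new frame.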
247 | | |
248 | | // Set up nsync by width. |
249 | 0 | static INLINE int get_sync_range(int width) { |
250 | | // nsync numbers are picked by testing. For example, for 4k |
251 | | // video, using 4 gives best performance. |
252 | 0 | if (width < 640) |
253 | 0 | return 1; |
254 | 0 | else if (width <= 1280) |
255 | 0 | return 2; |
256 | 0 | else if (width <= 4096) |
257 | 0 | return 4; |
258 | 0 | else |
259 | 0 | return 8; |
260 | 0 | } |
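| | // The returned sync range must be a power of two: sync_read() decides when
| | // to take the mutex with (c & (nsync - 1)), which only works for
| | // power-of-two values.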
261 | | |
262 | | // Allocate memory for lf row synchronization |
263 | | void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows, |
264 | 0 | int width, int num_workers) { |
265 | 0 | lf_sync->rows = rows; |
266 | 0 | #if CONFIG_MULTITHREAD |
267 | 0 | { |
268 | 0 | int i; |
269 | |
270 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->mutex, |
271 | 0 | vpx_malloc(sizeof(*lf_sync->mutex) * rows)); |
272 | 0 | if (lf_sync->mutex) { |
273 | 0 | for (i = 0; i < rows; ++i) { |
274 | 0 | pthread_mutex_init(&lf_sync->mutex[i], NULL); |
275 | 0 | } |
276 | 0 | } |
277 | |
278 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->cond, |
279 | 0 | vpx_malloc(sizeof(*lf_sync->cond) * rows)); |
280 | 0 | if (lf_sync->cond) { |
281 | 0 | for (i = 0; i < rows; ++i) { |
282 | 0 | pthread_cond_init(&lf_sync->cond[i], NULL); |
283 | 0 | } |
284 | 0 | } |
285 | |
286 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->lf_mutex, |
287 | 0 | vpx_malloc(sizeof(*lf_sync->lf_mutex))); |
288 | 0 | pthread_mutex_init(lf_sync->lf_mutex, NULL); |
289 | |
290 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->recon_done_mutex, |
291 | 0 | vpx_malloc(sizeof(*lf_sync->recon_done_mutex) * rows)); |
292 | 0 | if (lf_sync->recon_done_mutex) { |
293 | 0 | for (i = 0; i < rows; ++i) { |
294 | 0 | pthread_mutex_init(&lf_sync->recon_done_mutex[i], NULL); |
295 | 0 | } |
296 | 0 | } |
297 | |
298 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->recon_done_cond, |
299 | 0 | vpx_malloc(sizeof(*lf_sync->recon_done_cond) * rows)); |
300 | 0 | if (lf_sync->recon_done_cond) { |
301 | 0 | for (i = 0; i < rows; ++i) { |
302 | 0 | pthread_cond_init(&lf_sync->recon_done_cond[i], NULL); |
303 | 0 | } |
304 | 0 | } |
305 | 0 | } |
306 | 0 | #endif // CONFIG_MULTITHREAD |
307 | |
308 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->lfdata, |
309 | 0 | vpx_malloc(num_workers * sizeof(*lf_sync->lfdata))); |
310 | 0 | lf_sync->num_workers = num_workers; |
311 | 0 | lf_sync->num_active_workers = lf_sync->num_workers; |
312 | |
313 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->cur_sb_col, |
314 | 0 | vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows)); |
315 | |
316 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->num_tiles_done, |
317 | 0 | vpx_malloc(sizeof(*lf_sync->num_tiles_done) * |
318 | 0 | mi_cols_aligned_to_sb(cm->mi_rows) >> |
319 | 0 | MI_BLOCK_SIZE_LOG2)); |
320 | | |
321 | | // Set up nsync. |
322 | 0 | lf_sync->sync_range = get_sync_range(width); |
323 | 0 | } |
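| | // CHECK_MEM_ERROR reports allocation failure through cm->error
| | // (vpx_internal_error), so the NULL checks above are defensive. Each SB row
| | // gets its own mutex/cond pair so that row-to-row signaling stays
| | // independent.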
324 | | |
325 | | // Deallocate the lf synchronization-related mutexes and data.
326 | 3.99k | void vp9_loop_filter_dealloc(VP9LfSync *lf_sync) { |
327 | 3.99k | assert(lf_sync != NULL); |
328 | | |
329 | 3.99k | #if CONFIG_MULTITHREAD |
330 | 3.99k | if (lf_sync->mutex != NULL) { |
331 | 0 | int i; |
332 | 0 | for (i = 0; i < lf_sync->rows; ++i) { |
333 | 0 | pthread_mutex_destroy(&lf_sync->mutex[i]); |
334 | 0 | } |
335 | 0 | vpx_free(lf_sync->mutex); |
336 | 0 | } |
337 | 3.99k | if (lf_sync->cond != NULL) { |
338 | 0 | int i; |
339 | 0 | for (i = 0; i < lf_sync->rows; ++i) { |
340 | 0 | pthread_cond_destroy(&lf_sync->cond[i]); |
341 | 0 | } |
342 | 0 | vpx_free(lf_sync->cond); |
343 | 0 | } |
344 | 3.99k | if (lf_sync->recon_done_mutex != NULL) { |
345 | 0 | int i; |
346 | 0 | for (i = 0; i < lf_sync->rows; ++i) { |
347 | 0 | pthread_mutex_destroy(&lf_sync->recon_done_mutex[i]); |
348 | 0 | } |
349 | 0 | vpx_free(lf_sync->recon_done_mutex); |
350 | 0 | } |
351 | | |
352 | 3.99k | if (lf_sync->lf_mutex != NULL) { |
353 | 0 | pthread_mutex_destroy(lf_sync->lf_mutex); |
354 | 0 | vpx_free(lf_sync->lf_mutex); |
355 | 0 | } |
356 | 3.99k | if (lf_sync->recon_done_cond != NULL) { |
357 | 0 | int i; |
358 | 0 | for (i = 0; i < lf_sync->rows; ++i) { |
359 | 0 | pthread_cond_destroy(&lf_sync->recon_done_cond[i]); |
360 | 0 | } |
361 | 0 | vpx_free(lf_sync->recon_done_cond); |
362 | 0 | } |
363 | 3.99k | #endif // CONFIG_MULTITHREAD |
364 | | |
365 | 3.99k | vpx_free(lf_sync->lfdata); |
366 | 3.99k | vpx_free(lf_sync->cur_sb_col); |
367 | 3.99k | vpx_free(lf_sync->num_tiles_done); |
368 | | // Clear the structure, as the source of this call may be a resize, in
369 | | // which case this call will be followed by an _alloc() that may fail.
370 | 3.99k | vp9_zero(*lf_sync); |
371 | 3.99k | } |
372 | | |
373 | 0 | static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) { |
374 | 0 | int return_val = -1; |
375 | 0 | const int max_rows = cm->mi_rows; |
376 | |
377 | 0 | #if CONFIG_MULTITHREAD |
378 | 0 | int cur_row; |
379 | 0 | const int tile_cols = 1 << cm->log2_tile_cols; |
380 | |
381 | 0 | pthread_mutex_lock(lf_sync->lf_mutex); |
382 | 0 | if (cm->lf_row < max_rows) { |
383 | 0 | cur_row = cm->lf_row >> MI_BLOCK_SIZE_LOG2; |
384 | 0 | return_val = cm->lf_row; |
385 | 0 | cm->lf_row += MI_BLOCK_SIZE; |
386 | 0 | if (cm->lf_row < max_rows) { |
387 | | /* If this is not the last row, make sure the next row is also decoded.
388 | | * This is because intra prediction has to happen before the loop filter. */
389 | 0 | cur_row += 1; |
390 | 0 | } |
391 | 0 | } |
392 | 0 | pthread_mutex_unlock(lf_sync->lf_mutex); |
393 | |
394 | 0 | if (return_val == -1) return return_val; |
395 | | |
396 | 0 | pthread_mutex_lock(&lf_sync->recon_done_mutex[cur_row]); |
397 | 0 | if (lf_sync->num_tiles_done[cur_row] < tile_cols) { |
398 | 0 | pthread_cond_wait(&lf_sync->recon_done_cond[cur_row], |
399 | 0 | &lf_sync->recon_done_mutex[cur_row]); |
400 | 0 | } |
401 | 0 | pthread_mutex_unlock(&lf_sync->recon_done_mutex[cur_row]); |
402 | 0 | pthread_mutex_lock(lf_sync->lf_mutex); |
403 | 0 | if (lf_sync->corrupted) { |
404 | 0 | int row = return_val >> MI_BLOCK_SIZE_LOG2; |
405 | 0 | pthread_mutex_lock(&lf_sync->mutex[row]); |
406 | 0 | lf_sync->cur_sb_col[row] = INT_MAX; |
407 | 0 | pthread_cond_signal(&lf_sync->cond[row]); |
408 | 0 | pthread_mutex_unlock(&lf_sync->mutex[row]); |
409 | 0 | return_val = -1; |
410 | 0 | } |
411 | 0 | pthread_mutex_unlock(lf_sync->lf_mutex); |
412 | | #else |
413 | | (void)lf_sync; |
414 | | if (cm->lf_row < max_rows) { |
415 | | return_val = cm->lf_row; |
416 | | cm->lf_row += MI_BLOCK_SIZE; |
417 | | } |
418 | | #endif // CONFIG_MULTITHREAD |
419 | |
420 | 0 | return return_val; |
421 | 0 | } |
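| | // cur_row is bumped to the row below the one handed out (except at the last
| | // row): filtering a row touches pixels that the next row's intra prediction
| | // must read unfiltered, so that row's reconstruction has to finish first.
| | // On corruption, cur_sb_col is forced to INT_MAX to release any worker
| | // blocked in sync_read().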
422 | | |
423 | 0 | void vp9_loopfilter_rows(LFWorkerData *lf_data, VP9LfSync *lf_sync) { |
424 | 0 | int mi_row; |
425 | 0 | VP9_COMMON *cm = lf_data->cm; |
426 | |
427 | 0 | while ((mi_row = get_next_row(cm, lf_sync)) != -1 && mi_row < cm->mi_rows) { |
428 | 0 | lf_data->start = mi_row; |
429 | 0 | lf_data->stop = mi_row + MI_BLOCK_SIZE; |
430 | |
431 | 0 | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
432 | 0 | lf_data->start, lf_data->stop, lf_data->y_only, |
433 | 0 | lf_sync); |
434 | 0 | } |
435 | 0 | } |
436 | | |
437 | | void vp9_set_row(VP9LfSync *lf_sync, int num_tiles, int row, int is_last_row, |
438 | 0 | int corrupted) { |
439 | 0 | #if CONFIG_MULTITHREAD |
440 | 0 | pthread_mutex_lock(lf_sync->lf_mutex); |
441 | 0 | lf_sync->corrupted |= corrupted; |
442 | 0 | pthread_mutex_unlock(lf_sync->lf_mutex); |
443 | 0 | pthread_mutex_lock(&lf_sync->recon_done_mutex[row]); |
444 | 0 | lf_sync->num_tiles_done[row] += 1; |
445 | 0 | if (num_tiles == lf_sync->num_tiles_done[row]) { |
446 | 0 | if (is_last_row) { |
447 | | /* The last two rows wait on the last row to be done,
448 | | * so the signal has to be broadcast in this case.
449 | | */
450 | 0 | pthread_cond_broadcast(&lf_sync->recon_done_cond[row]); |
451 | 0 | } else { |
452 | 0 | pthread_cond_signal(&lf_sync->recon_done_cond[row]); |
453 | 0 | } |
454 | 0 | } |
455 | 0 | pthread_mutex_unlock(&lf_sync->recon_done_mutex[row]); |
456 | | #else |
457 | | (void)lf_sync; |
458 | | (void)num_tiles; |
459 | | (void)row; |
460 | | (void)is_last_row; |
461 | | (void)corrupted; |
462 | | #endif // CONFIG_MULTITHREAD |
463 | 0 | } |
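| | // Called by tile workers as each finishes reconstructing SB row 'row'; the
| | // row is released to the loop filter once all num_tiles tiles have
| | // reported. The last row is broadcast rather than signaled because more
| | // than one filter worker can be waiting on it (see get_next_row()).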
464 | | |
465 | 0 | void vp9_loopfilter_job(LFWorkerData *lf_data, VP9LfSync *lf_sync) { |
466 | 0 | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
467 | 0 | lf_data->start, lf_data->stop, lf_data->y_only, |
468 | 0 | lf_sync); |
469 | 0 | } |
470 | | |
471 | | // Accumulate frame counts. |
472 | | void vp9_accumulate_frame_counts(FRAME_COUNTS *accum, |
473 | 0 | const FRAME_COUNTS *counts, int is_dec) { |
474 | 0 | int i, j, k, l, m; |
475 | |
476 | 0 | for (i = 0; i < BLOCK_SIZE_GROUPS; i++) |
477 | 0 | for (j = 0; j < INTRA_MODES; j++) |
478 | 0 | accum->y_mode[i][j] += counts->y_mode[i][j]; |
479 | |
480 | 0 | for (i = 0; i < INTRA_MODES; i++) |
481 | 0 | for (j = 0; j < INTRA_MODES; j++) |
482 | 0 | accum->uv_mode[i][j] += counts->uv_mode[i][j]; |
483 | |
484 | 0 | for (i = 0; i < PARTITION_CONTEXTS; i++) |
485 | 0 | for (j = 0; j < PARTITION_TYPES; j++) |
486 | 0 | accum->partition[i][j] += counts->partition[i][j]; |
487 | |
488 | 0 | if (is_dec) { |
489 | 0 | int n; |
490 | 0 | for (i = 0; i < TX_SIZES; i++) |
491 | 0 | for (j = 0; j < PLANE_TYPES; j++) |
492 | 0 | for (k = 0; k < REF_TYPES; k++) |
493 | 0 | for (l = 0; l < COEF_BANDS; l++) |
494 | 0 | for (m = 0; m < COEFF_CONTEXTS; m++) { |
495 | 0 | accum->eob_branch[i][j][k][l][m] += |
496 | 0 | counts->eob_branch[i][j][k][l][m]; |
497 | 0 | for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) |
498 | 0 | accum->coef[i][j][k][l][m][n] += counts->coef[i][j][k][l][m][n]; |
499 | 0 | } |
500 | 0 | } else { |
501 | 0 | for (i = 0; i < TX_SIZES; i++) |
502 | 0 | for (j = 0; j < PLANE_TYPES; j++) |
503 | 0 | for (k = 0; k < REF_TYPES; k++) |
504 | 0 | for (l = 0; l < COEF_BANDS; l++) |
505 | 0 | for (m = 0; m < COEFF_CONTEXTS; m++) |
506 | 0 | accum->eob_branch[i][j][k][l][m] += |
507 | 0 | counts->eob_branch[i][j][k][l][m]; |
508 | | // In the encoder, coef is only updated at frame |
509 | | // level, so there is no need to accumulate it here.
510 | | // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) |
511 | | // accum->coef[i][j][k][l][m][n] += |
512 | | // counts->coef[i][j][k][l][m][n]; |
513 | 0 | } |
514 | |
515 | 0 | for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) |
516 | 0 | for (j = 0; j < SWITCHABLE_FILTERS; j++) |
517 | 0 | accum->switchable_interp[i][j] += counts->switchable_interp[i][j]; |
518 | |
519 | 0 | for (i = 0; i < INTER_MODE_CONTEXTS; i++) |
520 | 0 | for (j = 0; j < INTER_MODES; j++) |
521 | 0 | accum->inter_mode[i][j] += counts->inter_mode[i][j]; |
522 | |
|
523 | 0 | for (i = 0; i < INTRA_INTER_CONTEXTS; i++) |
524 | 0 | for (j = 0; j < 2; j++) |
525 | 0 | accum->intra_inter[i][j] += counts->intra_inter[i][j]; |
526 | |
527 | 0 | for (i = 0; i < COMP_INTER_CONTEXTS; i++) |
528 | 0 | for (j = 0; j < 2; j++) accum->comp_inter[i][j] += counts->comp_inter[i][j]; |
529 | |
|
530 | 0 | for (i = 0; i < REF_CONTEXTS; i++) |
531 | 0 | for (j = 0; j < 2; j++) |
532 | 0 | for (k = 0; k < 2; k++) |
533 | 0 | accum->single_ref[i][j][k] += counts->single_ref[i][j][k]; |
534 | |
535 | 0 | for (i = 0; i < REF_CONTEXTS; i++) |
536 | 0 | for (j = 0; j < 2; j++) accum->comp_ref[i][j] += counts->comp_ref[i][j]; |
537 | |
538 | 0 | for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
539 | 0 | for (j = 0; j < TX_SIZES; j++) |
540 | 0 | accum->tx.p32x32[i][j] += counts->tx.p32x32[i][j]; |
541 | |
542 | 0 | for (j = 0; j < TX_SIZES - 1; j++) |
543 | 0 | accum->tx.p16x16[i][j] += counts->tx.p16x16[i][j]; |
544 | |
545 | 0 | for (j = 0; j < TX_SIZES - 2; j++) |
546 | 0 | accum->tx.p8x8[i][j] += counts->tx.p8x8[i][j]; |
547 | 0 | } |
548 | |
549 | 0 | for (i = 0; i < TX_SIZES; i++) |
550 | 0 | accum->tx.tx_totals[i] += counts->tx.tx_totals[i]; |
551 | |
552 | 0 | for (i = 0; i < SKIP_CONTEXTS; i++) |
553 | 0 | for (j = 0; j < 2; j++) accum->skip[i][j] += counts->skip[i][j]; |
554 | |
555 | 0 | for (i = 0; i < MV_JOINTS; i++) accum->mv.joints[i] += counts->mv.joints[i]; |
556 | |
557 | 0 | for (k = 0; k < 2; k++) { |
558 | 0 | nmv_component_counts *const comps = &accum->mv.comps[k]; |
559 | 0 | const nmv_component_counts *const comps_t = &counts->mv.comps[k]; |
560 | |
561 | 0 | for (i = 0; i < 2; i++) { |
562 | 0 | comps->sign[i] += comps_t->sign[i]; |
563 | 0 | comps->class0_hp[i] += comps_t->class0_hp[i]; |
564 | 0 | comps->hp[i] += comps_t->hp[i]; |
565 | 0 | } |
566 | |
567 | 0 | for (i = 0; i < MV_CLASSES; i++) comps->classes[i] += comps_t->classes[i]; |
568 | |
569 | 0 | for (i = 0; i < CLASS0_SIZE; i++) { |
570 | 0 | comps->class0[i] += comps_t->class0[i]; |
571 | 0 | for (j = 0; j < MV_FP_SIZE; j++) |
572 | 0 | comps->class0_fp[i][j] += comps_t->class0_fp[i][j]; |
573 | 0 | } |
574 | |
575 | 0 | for (i = 0; i < MV_OFFSET_BITS; i++) |
576 | 0 | for (j = 0; j < 2; j++) comps->bits[i][j] += comps_t->bits[i][j]; |
577 | |
578 | 0 | for (i = 0; i < MV_FP_SIZE; i++) comps->fp[i] += comps_t->fp[i]; |
579 | 0 | } |
580 | 0 | } |
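| | // Merges per-thread FRAME_COUNTS into a single total for entropy
| | // adaptation. is_dec gates the coef accumulation: the decoder accumulates
| | // both eob_branch and coef, while the encoder updates coef only at frame
| | // level (see the note above), so only eob_branch is summed there.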