/src/libvpx/vp9/common/vp9_thread_common.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2014 The WebM project authors. All Rights Reserved. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license |
5 | | * that can be found in the LICENSE file in the root of the source |
6 | | * tree. An additional intellectual property rights grant can be found |
7 | | * in the file PATENTS. All contributing project authors may |
8 | | * be found in the AUTHORS file in the root of the source tree. |
9 | | */ |
10 | | |
11 | | #include <assert.h> |
12 | | #include <limits.h> |
13 | | #include "./vpx_config.h" |
14 | | #include "vpx_dsp/vpx_dsp_common.h" |
15 | | #include "vpx_mem/vpx_mem.h" |
16 | | #include "vpx_util/vpx_pthread.h" |
17 | | #include "vp9/common/vp9_entropymode.h" |
18 | | #include "vp9/common/vp9_thread_common.h" |
19 | | #include "vp9/common/vp9_reconinter.h" |
20 | | #include "vp9/common/vp9_loopfilter.h" |
21 | | |
22 | | #if CONFIG_MULTITHREAD |
23 | 0 | static INLINE void mutex_lock(pthread_mutex_t *const mutex) { |
24 | 0 | const int kMaxTryLocks = 4000; |
25 | 0 | int locked = 0; |
26 | 0 | int i; |
27 | |
28 | 0 | for (i = 0; i < kMaxTryLocks; ++i) { |
29 | 0 | if (!pthread_mutex_trylock(mutex)) { |
30 | 0 | locked = 1; |
31 | 0 | break; |
32 | 0 | } |
33 | 0 | } |
34 | |
35 | 0 | if (!locked) pthread_mutex_lock(mutex); |
36 | 0 | } |
37 | | #endif // CONFIG_MULTITHREAD |
38 | | |
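// [Editor's sketch; not part of the instrumented source.] mutex_lock() above
// spins on pthread_mutex_trylock() before falling back to a blocking
// pthread_mutex_lock(): for the short critical sections in this file, a
// successful trylock avoids putting the thread to sleep in the kernel. The
// same spin-then-block pattern reduced to its essentials, assuming only
// <pthread.h> (the helper name is hypothetical):

#include <pthread.h>

static void spin_then_block_lock(pthread_mutex_t *m) {
  int i;
  for (i = 0; i < 4000; ++i) {                  // bounded spin, cf. kMaxTryLocks
    if (pthread_mutex_trylock(m) == 0) return;  // acquired without sleeping
  }
  pthread_mutex_lock(m);  // heavily contended: block until available
}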
39 | 0 | static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) { |
40 | 0 | #if CONFIG_MULTITHREAD |
41 | 0 | const int nsync = lf_sync->sync_range; |
42 | |
43 | 0 | if (r && !(c & (nsync - 1))) { |
44 | 0 | pthread_mutex_t *const mutex = &lf_sync->mutex[r - 1]; |
45 | 0 | mutex_lock(mutex); |
46 | |
47 | 0 | while (c > lf_sync->cur_sb_col[r - 1] - nsync) { |
48 | 0 | pthread_cond_wait(&lf_sync->cond[r - 1], mutex); |
49 | 0 | } |
50 | 0 | pthread_mutex_unlock(mutex); |
51 | 0 | } |
52 | | #else |
53 | | (void)lf_sync; |
54 | | (void)r; |
55 | | (void)c; |
56 | | #endif // CONFIG_MULTITHREAD |
57 | 0 | } |
58 | | |
59 | | static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c, |
60 | 0 | const int sb_cols) { |
61 | 0 | #if CONFIG_MULTITHREAD |
62 | 0 | const int nsync = lf_sync->sync_range; |
63 | 0 | int cur; |
64 | | // Only signal when there are enough filtered SBs for the next row to run. |
65 | 0 | int sig = 1; |
66 | |
67 | 0 | if (c < sb_cols - 1) { |
68 | 0 | cur = c; |
69 | 0 | if (c % nsync) sig = 0; |
70 | 0 | } else { |
71 | 0 | cur = sb_cols + nsync; |
72 | 0 | } |
73 | |
74 | 0 | if (sig) { |
75 | 0 | mutex_lock(&lf_sync->mutex[r]); |
76 | |
77 | 0 | lf_sync->cur_sb_col[r] = cur; |
78 | |
79 | 0 | pthread_cond_signal(&lf_sync->cond[r]); |
80 | 0 | pthread_mutex_unlock(&lf_sync->mutex[r]); |
81 | 0 | } |
82 | | #else |
83 | | (void)lf_sync; |
84 | | (void)r; |
85 | | (void)c; |
86 | | (void)sb_cols; |
87 | | #endif // CONFIG_MULTITHREAD |
88 | 0 | } |
89 | | |
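// [Editor's sketch; not part of the instrumented source.] sync_read() and
// sync_write() above implement a wavefront: the worker filtering superblock
// row r may enter column c only after row r - 1 has published progress past
// column c + nsync - 1 via cur_sb_col[]. A minimal, self-contained simulation
// of the same handshake; ROWS/COLS/NSYNC and all names here are hypothetical,
// assuming only <pthread.h>, <stdint.h> and <stdio.h>:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define ROWS 4
#define COLS 16
#define NSYNC 2 /* must stay a power of two, cf. get_sync_range() */

static pthread_mutex_t row_mutex[ROWS];
static pthread_cond_t row_cond[ROWS];
static int cur_col[ROWS]; /* last column published per row; mutex-guarded */

static void *row_worker(void *arg) {
  const int r = (int)(intptr_t)arg;
  int c;
  for (c = 0; c < COLS; ++c) {
    if (r > 0 && !(c & (NSYNC - 1))) { /* same gate as sync_read() */
      pthread_mutex_lock(&row_mutex[r - 1]);
      while (c > cur_col[r - 1] - NSYNC)
        pthread_cond_wait(&row_cond[r - 1], &row_mutex[r - 1]);
      pthread_mutex_unlock(&row_mutex[r - 1]);
    }
    /* ... filter superblock (r, c) here ... */
    if (c == COLS - 1 || !(c % NSYNC)) { /* same gate as sync_write() */
      pthread_mutex_lock(&row_mutex[r]);
      cur_col[r] = (c == COLS - 1) ? COLS + NSYNC : c;
      pthread_cond_signal(&row_cond[r]);
      pthread_mutex_unlock(&row_mutex[r]);
    }
  }
  return NULL;
}

int main(void) {
  pthread_t tid[ROWS];
  int r;
  for (r = 0; r < ROWS; ++r) {
    pthread_mutex_init(&row_mutex[r], NULL);
    pthread_cond_init(&row_cond[r], NULL);
    cur_col[r] = -1; /* mirrors memset(cur_sb_col, -1, ...) in this file */
  }
  for (r = 0; r < ROWS; ++r)
    pthread_create(&tid[r], NULL, row_worker, (void *)(intptr_t)r);
  for (r = 0; r < ROWS; ++r) pthread_join(tid[r], NULL);
  printf("filtered %d x %d superblocks in wavefront order\n", ROWS, COLS);
  return 0;
}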
90 | | // Implement row loopfiltering for each thread. |
91 | | static INLINE void thread_loop_filter_rows( |
92 | | const YV12_BUFFER_CONFIG *const frame_buffer, VP9_COMMON *const cm, |
93 | | struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop, |
94 | 0 | int y_only, VP9LfSync *const lf_sync) { |
95 | 0 | const int num_planes = y_only ? 1 : MAX_MB_PLANE; |
96 | 0 | const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2; |
97 | 0 | const int num_active_workers = lf_sync->num_active_workers; |
98 | 0 | int mi_row, mi_col; |
99 | 0 | enum lf_path path; |
100 | 0 | if (y_only) |
101 | 0 | path = LF_PATH_444; |
102 | 0 | else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1) |
103 | 0 | path = LF_PATH_420; |
104 | 0 | else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0) |
105 | 0 | path = LF_PATH_444; |
106 | 0 | else |
107 | 0 | path = LF_PATH_SLOW; |
108 | |
109 | 0 | assert(num_active_workers > 0); |
110 | |
111 | 0 | for (mi_row = start; mi_row < stop; |
112 | 0 | mi_row += num_active_workers * MI_BLOCK_SIZE) { |
113 | 0 | MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride; |
114 | 0 | LOOP_FILTER_MASK *lfm = get_lfm(&cm->lf, mi_row, 0); |
115 | |
116 | 0 | for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE, ++lfm) { |
117 | 0 | const int r = mi_row >> MI_BLOCK_SIZE_LOG2; |
118 | 0 | const int c = mi_col >> MI_BLOCK_SIZE_LOG2; |
119 | 0 | int plane; |
120 | |
121 | 0 | sync_read(lf_sync, r, c); |
122 | |
123 | 0 | vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col); |
124 | |
125 | 0 | vp9_adjust_mask(cm, mi_row, mi_col, lfm); |
126 | |
127 | 0 | vp9_filter_block_plane_ss00(cm, &planes[0], mi_row, lfm); |
128 | 0 | for (plane = 1; plane < num_planes; ++plane) { |
129 | 0 | switch (path) { |
130 | 0 | case LF_PATH_420: |
131 | 0 | vp9_filter_block_plane_ss11(cm, &planes[plane], mi_row, lfm); |
132 | 0 | break; |
133 | 0 | case LF_PATH_444: |
134 | 0 | vp9_filter_block_plane_ss00(cm, &planes[plane], mi_row, lfm); |
135 | 0 | break; |
136 | 0 | case LF_PATH_SLOW: |
137 | 0 | vp9_filter_block_plane_non420(cm, &planes[plane], mi + mi_col, |
138 | 0 | mi_row, mi_col); |
139 | 0 | break; |
140 | 0 | } |
141 | 0 | } |
142 | | |
143 | 0 | sync_write(lf_sync, r, c, sb_cols); |
144 | 0 | } |
145 | 0 | } |
146 | 0 | } |
147 | | |
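// [Editor's note; not part of the instrumented source.] thread_loop_filter_rows()
// interleaves superblock rows across workers rather than splitting the frame
// into contiguous bands: with num_active_workers == 4 and MI_BLOCK_SIZE == 8,
// the worker started at mi_row 0 filters rows 0, 32, 64, ..., the worker
// started at mi_row 8 filters rows 8, 40, 72, ..., and so on, while
// sync_read()/sync_write() order the columns of vertically adjacent rows.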
148 | | // Row-based multi-threaded loopfilter hook |
149 | 0 | static int loop_filter_row_worker(void *arg1, void *arg2) { |
150 | 0 | VP9LfSync *const lf_sync = (VP9LfSync *)arg1; |
151 | 0 | LFWorkerData *const lf_data = (LFWorkerData *)arg2; |
152 | 0 | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
153 | 0 | lf_data->start, lf_data->stop, lf_data->y_only, |
154 | 0 | lf_sync); |
155 | 0 | return 1; |
156 | 0 | } |
157 | | |
158 | | static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm, |
159 | | struct macroblockd_plane planes[MAX_MB_PLANE], |
160 | | int start, int stop, int y_only, |
161 | | VPxWorker *workers, int nworkers, |
162 | 0 | VP9LfSync *lf_sync) { |
163 | 0 | const VPxWorkerInterface *const winterface = vpx_get_worker_interface(); |
164 | | // Number of superblock rows and tile columns |
165 | 0 | const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2; |
166 | 0 | const int num_tile_cols = 1 << cm->log2_tile_cols; |
167 | | // Limit the number of workers to prevent changes in frame dimensions from |
168 | | // causing incorrect sync calculations when sb_rows < threads/tile_cols. |
169 | | // Further restrict them to the number of tile columns, should the user |
170 | | // request more, as this implementation doesn't scale well beyond that. |
171 | 0 | const int num_workers = VPXMIN(nworkers, VPXMIN(num_tile_cols, sb_rows)); |
172 | 0 | int i; |
173 | |
174 | 0 | if (!lf_sync->sync_range || sb_rows != lf_sync->rows || |
175 | 0 | num_workers > lf_sync->num_workers) { |
176 | 0 | vp9_loop_filter_dealloc(lf_sync); |
177 | 0 | vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers); |
178 | 0 | } |
179 | 0 | lf_sync->num_active_workers = num_workers; |
180 | | |
181 | | // Initialize cur_sb_col to -1 for all SB rows. |
182 | 0 | memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows); |
183 | | |
184 | | // Set up loopfilter thread data. |
185 | | // The decoder is capping num_workers because it has been observed that using |
186 | | // more threads on the loopfilter than there are cores will hurt performance |
187 | | // on Android. This is because the system will only schedule the tile decode |
188 | | // workers on cores equal to the number of tile columns. Then if the decoder |
189 | | // tries to use more threads for the loopfilter, it will hurt performance |
190 | | // because of contention. If the multithreading code changes in the future |
191 | | // then the number of workers used by the loopfilter should be revisited. |
192 | 0 | for (i = 0; i < num_workers; ++i) { |
193 | 0 | VPxWorker *const worker = &workers[i]; |
194 | 0 | LFWorkerData *const lf_data = &lf_sync->lfdata[i]; |
195 | |
196 | 0 | worker->hook = loop_filter_row_worker; |
197 | 0 | worker->data1 = lf_sync; |
198 | 0 | worker->data2 = lf_data; |
199 | | |
200 | | // Loopfilter data |
201 | 0 | vp9_loop_filter_data_reset(lf_data, frame, cm, planes); |
202 | 0 | lf_data->start = start + i * MI_BLOCK_SIZE; |
203 | 0 | lf_data->stop = stop; |
204 | 0 | lf_data->y_only = y_only; |
205 | | |
206 | | // Start loopfiltering |
207 | 0 | if (i == num_workers - 1) { |
208 | 0 | winterface->execute(worker); |
209 | 0 | } else { |
210 | 0 | winterface->launch(worker); |
211 | 0 | } |
212 | 0 | } |
213 | | |
214 | | // Wait till all rows are finished |
215 | 0 | for (i = 0; i < num_workers; ++i) { |
216 | 0 | winterface->sync(&workers[i]); |
217 | 0 | } |
218 | 0 | } |
219 | | |
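// [Editor's note; not part of the instrumented source.] In the launch loop
// above, launch() runs a worker's hook on its own thread while execute() runs
// it synchronously on the calling thread; handing the last slice to execute()
// keeps the main thread doing useful work instead of idling, and the sync()
// loop that follows waits for the launched workers to finish.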
220 | | void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm, |
221 | | struct macroblockd_plane planes[MAX_MB_PLANE], |
222 | | int frame_filter_level, int y_only, |
223 | | int partial_frame, VPxWorker *workers, |
224 | 0 | int num_workers, VP9LfSync *lf_sync) { |
225 | 0 | int start_mi_row, end_mi_row, mi_rows_to_filter; |
226 | |
227 | 0 | if (!frame_filter_level) return; |
228 | | |
229 | 0 | start_mi_row = 0; |
230 | 0 | mi_rows_to_filter = cm->mi_rows; |
231 | 0 | if (partial_frame && cm->mi_rows > 8) { |
232 | 0 | start_mi_row = cm->mi_rows >> 1; |
233 | 0 | start_mi_row &= 0xfffffff8; |
234 | 0 | mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8); |
235 | 0 | } |
236 | 0 | end_mi_row = start_mi_row + mi_rows_to_filter; |
237 | 0 | vp9_loop_filter_frame_init(cm, frame_filter_level); |
238 | |
239 | 0 | loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only, |
240 | 0 | workers, num_workers, lf_sync); |
241 | 0 | } |
242 | | |
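// [Editor's note; not part of the instrumented source.] A worked example of
// the partial_frame arithmetic above: with cm->mi_rows == 135, start_mi_row
// becomes (135 >> 1) & ~7 == 64 and mi_rows_to_filter becomes
// VPXMAX(135 / 8, 8) == 16, so only mi_rows 64 through 79 are filtered.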
243 | | void vp9_lpf_mt_init(VP9LfSync *lf_sync, VP9_COMMON *cm, int frame_filter_level, |
244 | 0 | int num_workers) { |
245 | 0 | const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2; |
246 | |
247 | 0 | if (!frame_filter_level) return; |
248 | | |
249 | 0 | if (!lf_sync->sync_range || sb_rows != lf_sync->rows || |
250 | 0 | num_workers > lf_sync->num_workers) { |
251 | 0 | vp9_loop_filter_dealloc(lf_sync); |
252 | 0 | vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers); |
253 | 0 | } |
254 | | |
255 | | // Initialize cur_sb_col to -1 for all SB rows. |
256 | 0 | memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows); |
257 | |
258 | 0 | lf_sync->corrupted = 0; |
259 | |
260 | 0 | memset(lf_sync->num_tiles_done, 0, |
261 | 0 | sizeof(*lf_sync->num_tiles_done) * sb_rows); |
262 | 0 | cm->lf_row = 0; |
263 | 0 | } |
264 | | |
265 | | // Set up nsync by width. |
266 | 0 | static INLINE int get_sync_range(int width) { |
267 | | // nsync values were chosen by testing. For example, for 4k |
268 | | // video, using 4 gives the best performance. |
269 | 0 | if (width < 640) |
270 | 0 | return 1; |
271 | 0 | else if (width <= 1280) |
272 | 0 | return 2; |
273 | 0 | else if (width <= 4096) |
274 | 0 | return 4; |
275 | 0 | else |
276 | 0 | return 8; |
277 | 0 | } |
278 | | |
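// [Editor's note; not part of the instrumented source.] For example, widths
// 320, 1280, 1920 and 7680 map to nsync values of 1, 2, 4 and 8. Because
// sync_read() masks the column index with (nsync - 1), these return values
// must remain powers of two.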
279 | | // Allocate memory for lf row synchronization |
280 | | void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows, |
281 | 0 | int width, int num_workers) { |
282 | 0 | lf_sync->rows = rows; |
283 | 0 | #if CONFIG_MULTITHREAD |
284 | 0 | { |
285 | 0 | int i; |
286 | |
287 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->mutex, |
288 | 0 | vpx_malloc(sizeof(*lf_sync->mutex) * rows)); |
289 | 0 | if (lf_sync->mutex) { |
290 | 0 | for (i = 0; i < rows; ++i) { |
291 | 0 | pthread_mutex_init(&lf_sync->mutex[i], NULL); |
292 | 0 | } |
293 | 0 | } |
294 | |
295 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->cond, |
296 | 0 | vpx_malloc(sizeof(*lf_sync->cond) * rows)); |
297 | 0 | if (lf_sync->cond) { |
298 | 0 | for (i = 0; i < rows; ++i) { |
299 | 0 | pthread_cond_init(&lf_sync->cond[i], NULL); |
300 | 0 | } |
301 | 0 | } |
302 | |
303 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->lf_mutex, |
304 | 0 | vpx_malloc(sizeof(*lf_sync->lf_mutex))); |
305 | 0 | pthread_mutex_init(lf_sync->lf_mutex, NULL); |
306 | |
307 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->recon_done_mutex, |
308 | 0 | vpx_malloc(sizeof(*lf_sync->recon_done_mutex) * rows)); |
309 | 0 | if (lf_sync->recon_done_mutex) { |
310 | 0 | for (i = 0; i < rows; ++i) { |
311 | 0 | pthread_mutex_init(&lf_sync->recon_done_mutex[i], NULL); |
312 | 0 | } |
313 | 0 | } |
314 | |
315 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->recon_done_cond, |
316 | 0 | vpx_malloc(sizeof(*lf_sync->recon_done_cond) * rows)); |
317 | 0 | if (lf_sync->recon_done_cond) { |
318 | 0 | for (i = 0; i < rows; ++i) { |
319 | 0 | pthread_cond_init(&lf_sync->recon_done_cond[i], NULL); |
320 | 0 | } |
321 | 0 | } |
322 | 0 | } |
323 | 0 | #endif // CONFIG_MULTITHREAD |
324 | |
325 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->lfdata, |
326 | 0 | vpx_malloc(num_workers * sizeof(*lf_sync->lfdata))); |
327 | 0 | lf_sync->num_workers = num_workers; |
328 | 0 | lf_sync->num_active_workers = lf_sync->num_workers; |
329 | |
330 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->cur_sb_col, |
331 | 0 | vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows)); |
332 | |
333 | 0 | CHECK_MEM_ERROR(&cm->error, lf_sync->num_tiles_done, |
334 | 0 | vpx_malloc(sizeof(*lf_sync->num_tiles_done) * |
335 | 0 | mi_cols_aligned_to_sb(cm->mi_rows) >> |
336 | 0 | MI_BLOCK_SIZE_LOG2)); |
337 | | |
338 | | // Set up nsync. |
339 | 0 | lf_sync->sync_range = get_sync_range(width); |
340 | 0 | } |
341 | | |
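// [Editor's note; not part of the instrumented source.] Despite its name,
// mi_cols_aligned_to_sb() simply rounds up to a superblock multiple, so
// applying it to cm->mi_rows in the num_tiles_done allocation above yields
// the superblock row count.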
342 | | // Deallocate the lf synchronization mutexes and related data |
343 | 2.98k | void vp9_loop_filter_dealloc(VP9LfSync *lf_sync) { |
344 | 2.98k | assert(lf_sync != NULL); |
345 | | |
346 | 2.98k | #if CONFIG_MULTITHREAD |
347 | 2.98k | if (lf_sync->mutex != NULL) { |
348 | 0 | int i; |
349 | 0 | for (i = 0; i < lf_sync->rows; ++i) { |
350 | 0 | pthread_mutex_destroy(&lf_sync->mutex[i]); |
351 | 0 | } |
352 | 0 | vpx_free(lf_sync->mutex); |
353 | 0 | } |
354 | 2.98k | if (lf_sync->cond != NULL) { |
355 | 0 | int i; |
356 | 0 | for (i = 0; i < lf_sync->rows; ++i) { |
357 | 0 | pthread_cond_destroy(&lf_sync->cond[i]); |
358 | 0 | } |
359 | 0 | vpx_free(lf_sync->cond); |
360 | 0 | } |
361 | 2.98k | if (lf_sync->recon_done_mutex != NULL) { |
362 | 0 | int i; |
363 | 0 | for (i = 0; i < lf_sync->rows; ++i) { |
364 | 0 | pthread_mutex_destroy(&lf_sync->recon_done_mutex[i]); |
365 | 0 | } |
366 | 0 | vpx_free(lf_sync->recon_done_mutex); |
367 | 0 | } |
368 | | |
369 | 2.98k | if (lf_sync->lf_mutex != NULL) { |
370 | 0 | pthread_mutex_destroy(lf_sync->lf_mutex); |
371 | 0 | vpx_free(lf_sync->lf_mutex); |
372 | 0 | } |
373 | 2.98k | if (lf_sync->recon_done_cond != NULL) { |
374 | 0 | int i; |
375 | 0 | for (i = 0; i < lf_sync->rows; ++i) { |
376 | 0 | pthread_cond_destroy(&lf_sync->recon_done_cond[i]); |
377 | 0 | } |
378 | 0 | vpx_free(lf_sync->recon_done_cond); |
379 | 0 | } |
380 | 2.98k | #endif // CONFIG_MULTITHREAD |
381 | | |
382 | 2.98k | vpx_free(lf_sync->lfdata); |
383 | 2.98k | vpx_free(lf_sync->cur_sb_col); |
384 | 2.98k | vpx_free(lf_sync->num_tiles_done); |
385 | | // Clear the structure: the source of this call may be a resize, in which |
386 | | // case this call will be followed by an _alloc() that may fail. |
387 | 2.98k | vp9_zero(*lf_sync); |
388 | 2.98k | } |
389 | | |
390 | 0 | static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) { |
391 | 0 | int return_val = -1; |
392 | 0 | const int max_rows = cm->mi_rows; |
393 | |
394 | 0 | #if CONFIG_MULTITHREAD |
395 | 0 | int cur_row; |
396 | 0 | const int tile_cols = 1 << cm->log2_tile_cols; |
397 | |
398 | 0 | pthread_mutex_lock(lf_sync->lf_mutex); |
399 | 0 | if (cm->lf_row < max_rows) { |
400 | 0 | cur_row = cm->lf_row >> MI_BLOCK_SIZE_LOG2; |
401 | 0 | return_val = cm->lf_row; |
402 | 0 | cm->lf_row += MI_BLOCK_SIZE; |
403 | 0 | if (cm->lf_row < max_rows) { |
404 | | /* If this is not the last row, make sure the next row has also been |
405 | | * decoded, since intra prediction must happen before the loop filter. */ |
406 | 0 | cur_row += 1; |
407 | 0 | } |
408 | 0 | } |
409 | 0 | pthread_mutex_unlock(lf_sync->lf_mutex); |
410 | |
411 | 0 | if (return_val == -1) return return_val; |
412 | | |
413 | 0 | pthread_mutex_lock(&lf_sync->recon_done_mutex[cur_row]); |
414 | 0 | if (lf_sync->num_tiles_done[cur_row] < tile_cols) { |
415 | 0 | pthread_cond_wait(&lf_sync->recon_done_cond[cur_row], |
416 | 0 | &lf_sync->recon_done_mutex[cur_row]); |
417 | 0 | } |
418 | 0 | pthread_mutex_unlock(&lf_sync->recon_done_mutex[cur_row]); |
419 | 0 | pthread_mutex_lock(lf_sync->lf_mutex); |
420 | 0 | if (lf_sync->corrupted) { |
421 | 0 | int row = return_val >> MI_BLOCK_SIZE_LOG2; |
422 | 0 | pthread_mutex_lock(&lf_sync->mutex[row]); |
423 | 0 | lf_sync->cur_sb_col[row] = INT_MAX; |
424 | 0 | pthread_cond_signal(&lf_sync->cond[row]); |
425 | 0 | pthread_mutex_unlock(&lf_sync->mutex[row]); |
426 | 0 | return_val = -1; |
427 | 0 | } |
428 | 0 | pthread_mutex_unlock(lf_sync->lf_mutex); |
429 | | #else |
430 | | (void)lf_sync; |
431 | | if (cm->lf_row < max_rows) { |
432 | | return_val = cm->lf_row; |
433 | | cm->lf_row += MI_BLOCK_SIZE; |
434 | | } |
435 | | #endif // CONFIG_MULTITHREAD |
436 | |
437 | 0 | return return_val; |
438 | 0 | } |
439 | | |
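// [Editor's note; not part of the instrumented source.] get_next_row() acts
// as a shared work queue: a filter worker claims the next MI_BLOCK_SIZE-high
// row under lf_mutex, then blocks on recon_done_cond until every tile column
// has reconstructed the row after it, because filtering row N modifies pixels
// that intra prediction of row N + 1 still needs unfiltered. On corruption,
// the worker publishes cur_sb_col = INT_MAX to release any thread blocked in
// sync_read() on this row and returns -1.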
440 | 0 | void vp9_loopfilter_rows(LFWorkerData *lf_data, VP9LfSync *lf_sync) { |
441 | 0 | int mi_row; |
442 | 0 | VP9_COMMON *cm = lf_data->cm; |
443 | |
444 | 0 | while ((mi_row = get_next_row(cm, lf_sync)) != -1 && mi_row < cm->mi_rows) { |
445 | 0 | lf_data->start = mi_row; |
446 | 0 | lf_data->stop = mi_row + MI_BLOCK_SIZE; |
447 | |
448 | 0 | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
449 | 0 | lf_data->start, lf_data->stop, lf_data->y_only, |
450 | 0 | lf_sync); |
451 | 0 | } |
452 | 0 | } |
453 | | |
454 | | void vp9_set_row(VP9LfSync *lf_sync, int num_tiles, int row, int is_last_row, |
455 | 0 | int corrupted) { |
456 | 0 | #if CONFIG_MULTITHREAD |
457 | 0 | pthread_mutex_lock(lf_sync->lf_mutex); |
458 | 0 | lf_sync->corrupted |= corrupted; |
459 | 0 | pthread_mutex_unlock(lf_sync->lf_mutex); |
460 | 0 | pthread_mutex_lock(&lf_sync->recon_done_mutex[row]); |
461 | 0 | lf_sync->num_tiles_done[row] += 1; |
462 | 0 | if (num_tiles == lf_sync->num_tiles_done[row]) { |
463 | 0 | if (is_last_row) { |
464 | | /* The last two rows wait on the last row to be done, |
465 | | * so we have to broadcast the signal in this case. |
466 | | */ |
467 | 0 | pthread_cond_broadcast(&lf_sync->recon_done_cond[row]); |
468 | 0 | } else { |
469 | 0 | pthread_cond_signal(&lf_sync->recon_done_cond[row]); |
470 | 0 | } |
471 | 0 | } |
472 | 0 | pthread_mutex_unlock(&lf_sync->recon_done_mutex[row]); |
473 | | #else |
474 | | (void)lf_sync; |
475 | | (void)num_tiles; |
476 | | (void)row; |
477 | | (void)is_last_row; |
478 | | (void)corrupted; |
479 | | #endif // CONFIG_MULTITHREAD |
480 | 0 | } |
481 | | |
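// [Editor's note; not part of the instrumented source.] The broadcast case
// exists because get_next_row() does not advance cur_row past the final row:
// the workers that claim the last two rows both wait on the final row's
// recon_done_cond, and a plain signal could wake only one of them.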
482 | 0 | void vp9_loopfilter_job(LFWorkerData *lf_data, VP9LfSync *lf_sync) { |
483 | 0 | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
484 | 0 | lf_data->start, lf_data->stop, lf_data->y_only, |
485 | 0 | lf_sync); |
486 | 0 | } |
487 | | |
488 | | // Accumulate frame counts. |
489 | | void vp9_accumulate_frame_counts(FRAME_COUNTS *accum, |
490 | 0 | const FRAME_COUNTS *counts, int is_dec) { |
491 | 0 | int i, j, k, l, m; |
492 | |
493 | 0 | for (i = 0; i < BLOCK_SIZE_GROUPS; i++) |
494 | 0 | for (j = 0; j < INTRA_MODES; j++) |
495 | 0 | accum->y_mode[i][j] += counts->y_mode[i][j]; |
496 | |
497 | 0 | for (i = 0; i < INTRA_MODES; i++) |
498 | 0 | for (j = 0; j < INTRA_MODES; j++) |
499 | 0 | accum->uv_mode[i][j] += counts->uv_mode[i][j]; |
500 | |
501 | 0 | for (i = 0; i < PARTITION_CONTEXTS; i++) |
502 | 0 | for (j = 0; j < PARTITION_TYPES; j++) |
503 | 0 | accum->partition[i][j] += counts->partition[i][j]; |
504 | |
505 | 0 | if (is_dec) { |
506 | 0 | int n; |
507 | 0 | for (i = 0; i < TX_SIZES; i++) |
508 | 0 | for (j = 0; j < PLANE_TYPES; j++) |
509 | 0 | for (k = 0; k < REF_TYPES; k++) |
510 | 0 | for (l = 0; l < COEF_BANDS; l++) |
511 | 0 | for (m = 0; m < COEFF_CONTEXTS; m++) { |
512 | 0 | accum->eob_branch[i][j][k][l][m] += |
513 | 0 | counts->eob_branch[i][j][k][l][m]; |
514 | 0 | for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) |
515 | 0 | accum->coef[i][j][k][l][m][n] += counts->coef[i][j][k][l][m][n]; |
516 | 0 | } |
517 | 0 | } else { |
518 | 0 | for (i = 0; i < TX_SIZES; i++) |
519 | 0 | for (j = 0; j < PLANE_TYPES; j++) |
520 | 0 | for (k = 0; k < REF_TYPES; k++) |
521 | 0 | for (l = 0; l < COEF_BANDS; l++) |
522 | 0 | for (m = 0; m < COEFF_CONTEXTS; m++) |
523 | 0 | accum->eob_branch[i][j][k][l][m] += |
524 | 0 | counts->eob_branch[i][j][k][l][m]; |
525 | | // In the encoder, coef is only updated at the frame |
526 | | // level, so there is no need to accumulate it here. |
527 | | // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++) |
528 | | // accum->coef[i][j][k][l][m][n] += |
529 | | // counts->coef[i][j][k][l][m][n]; |
530 | 0 | } |
531 | |
532 | 0 | for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) |
533 | 0 | for (j = 0; j < SWITCHABLE_FILTERS; j++) |
534 | 0 | accum->switchable_interp[i][j] += counts->switchable_interp[i][j]; |
535 | |
536 | 0 | for (i = 0; i < INTER_MODE_CONTEXTS; i++) |
537 | 0 | for (j = 0; j < INTER_MODES; j++) |
538 | 0 | accum->inter_mode[i][j] += counts->inter_mode[i][j]; |
539 | |
540 | 0 | for (i = 0; i < INTRA_INTER_CONTEXTS; i++) |
541 | 0 | for (j = 0; j < 2; j++) |
542 | 0 | accum->intra_inter[i][j] += counts->intra_inter[i][j]; |
543 | |
544 | 0 | for (i = 0; i < COMP_INTER_CONTEXTS; i++) |
545 | 0 | for (j = 0; j < 2; j++) accum->comp_inter[i][j] += counts->comp_inter[i][j]; |
546 | |
547 | 0 | for (i = 0; i < REF_CONTEXTS; i++) |
548 | 0 | for (j = 0; j < 2; j++) |
549 | 0 | for (k = 0; k < 2; k++) |
550 | 0 | accum->single_ref[i][j][k] += counts->single_ref[i][j][k]; |
551 | |
552 | 0 | for (i = 0; i < REF_CONTEXTS; i++) |
553 | 0 | for (j = 0; j < 2; j++) accum->comp_ref[i][j] += counts->comp_ref[i][j]; |
554 | |
555 | 0 | for (i = 0; i < TX_SIZE_CONTEXTS; i++) { |
556 | 0 | for (j = 0; j < TX_SIZES; j++) |
557 | 0 | accum->tx.p32x32[i][j] += counts->tx.p32x32[i][j]; |
558 | |
559 | 0 | for (j = 0; j < TX_SIZES - 1; j++) |
560 | 0 | accum->tx.p16x16[i][j] += counts->tx.p16x16[i][j]; |
561 | |
562 | 0 | for (j = 0; j < TX_SIZES - 2; j++) |
563 | 0 | accum->tx.p8x8[i][j] += counts->tx.p8x8[i][j]; |
564 | 0 | } |
565 | |
566 | 0 | for (i = 0; i < TX_SIZES; i++) |
567 | 0 | accum->tx.tx_totals[i] += counts->tx.tx_totals[i]; |
568 | |
569 | 0 | for (i = 0; i < SKIP_CONTEXTS; i++) |
570 | 0 | for (j = 0; j < 2; j++) accum->skip[i][j] += counts->skip[i][j]; |
571 | |
572 | 0 | for (i = 0; i < MV_JOINTS; i++) accum->mv.joints[i] += counts->mv.joints[i]; |
573 | |
574 | 0 | for (k = 0; k < 2; k++) { |
575 | 0 | nmv_component_counts *const comps = &accum->mv.comps[k]; |
576 | 0 | const nmv_component_counts *const comps_t = &counts->mv.comps[k]; |
577 | |
578 | 0 | for (i = 0; i < 2; i++) { |
579 | 0 | comps->sign[i] += comps_t->sign[i]; |
580 | 0 | comps->class0_hp[i] += comps_t->class0_hp[i]; |
581 | 0 | comps->hp[i] += comps_t->hp[i]; |
582 | 0 | } |
583 | |
584 | 0 | for (i = 0; i < MV_CLASSES; i++) comps->classes[i] += comps_t->classes[i]; |
585 | |
586 | 0 | for (i = 0; i < CLASS0_SIZE; i++) { |
587 | 0 | comps->class0[i] += comps_t->class0[i]; |
588 | 0 | for (j = 0; j < MV_FP_SIZE; j++) |
589 | 0 | comps->class0_fp[i][j] += comps_t->class0_fp[i][j]; |
590 | 0 | } |
591 | |
592 | 0 | for (i = 0; i < MV_OFFSET_BITS; i++) |
593 | 0 | for (j = 0; j < 2; j++) comps->bits[i][j] += comps_t->bits[i][j]; |
594 | |
595 | 0 | for (i = 0; i < MV_FP_SIZE; i++) comps->fp[i] += comps_t->fp[i]; |
596 | 0 | } |
597 | 0 | } |