/src/libvpx/vp9/common/vp9_thread_common.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2014 The WebM project authors. All Rights Reserved. |
3 | | * |
4 | | * Use of this source code is governed by a BSD-style license |
5 | | * that can be found in the LICENSE file in the root of the source |
6 | | * tree. An additional intellectual property rights grant can be found |
7 | | * in the file PATENTS. All contributing project authors may |
8 | | * be found in the AUTHORS file in the root of the source tree. |
9 | | */ |
10 | | |
11 | | #include <assert.h> |
12 | | #include <limits.h> |
13 | | #include "./vpx_config.h" |
14 | | #include "vpx_dsp/vpx_dsp_common.h" |
15 | | #include "vpx_mem/vpx_mem.h" |
16 | | #include "vpx_util/vpx_pthread.h" |
17 | | #include "vp9/common/vp9_entropymode.h" |
18 | | #include "vp9/common/vp9_thread_common.h" |
19 | | #include "vp9/common/vp9_reconinter.h" |
20 | | #include "vp9/common/vp9_loopfilter.h" |
21 | | |
// Block until the superblock row above (r - 1) has been filtered far enough
// ahead of column c that superblock (r, c) may be filtered safely.
// Row 0 never waits. No-op when built without CONFIG_MULTITHREAD.
static INLINE void sync_read(VP9LfSync *const lf_sync, int r, int c) {
#if CONFIG_MULTITHREAD
  const int nsync = lf_sync->sync_range;

  // Only check once per nsync columns; the mask trick requires nsync to be a
  // power of two (get_sync_range() returns 1, 2, 4 or 8).
  if (r && !(c & (nsync - 1))) {
    pthread_mutex_t *const mutex = &lf_sync->mutex[r - 1];
    pthread_mutex_lock(mutex);

    // Loop (not if): re-check the predicate after every wakeup, since
    // pthread_cond_wait() may wake spuriously. The row above must be at
    // least nsync columns ahead before this row proceeds.
    while (c > lf_sync->cur_sb_col[r - 1] - nsync) {
      pthread_cond_wait(&lf_sync->cond[r - 1], mutex);
    }
    pthread_mutex_unlock(mutex);
  }
#else
  (void)lf_sync;
  (void)r;
  (void)c;
#endif  // CONFIG_MULTITHREAD
}
41 | | |
// Publish filtering progress for superblock row r: record that column c is
// done and wake any worker blocked in sync_read() on this row.
// No-op when built without CONFIG_MULTITHREAD.
static INLINE void sync_write(VP9LfSync *const lf_sync, int r, int c,
                              const int sb_cols) {
#if CONFIG_MULTITHREAD
  const int nsync = lf_sync->sync_range;
  int cur;
  // Only signal when there are enough filtered SB for next row to run.
  int sig = 1;

  if (c < sb_cols - 1) {
    cur = c;
    // Signal only once every nsync columns to reduce lock traffic.
    if (c % nsync) sig = 0;
  } else {
    // Last column of the row: publish a value past the end of the row so
    // readers of the next row can never block on this row again.
    cur = sb_cols + nsync;
  }

  if (sig) {
    pthread_mutex_lock(&lf_sync->mutex[r]);

    lf_sync->cur_sb_col[r] = cur;

    pthread_cond_signal(&lf_sync->cond[r]);
    pthread_mutex_unlock(&lf_sync->mutex[r]);
  }
#else
  (void)lf_sync;
  (void)r;
  (void)c;
  (void)sb_cols;
#endif  // CONFIG_MULTITHREAD
}
72 | | |
// Implement row loopfiltering for each thread.
// Filters superblock rows [start, stop) of frame_buffer, with each worker
// taking every num_active_workers-th SB row. Left-to-right progress within a
// row is coordinated with the row above via sync_read()/sync_write().
// y_only restricts filtering to the luma plane.
static INLINE void thread_loop_filter_rows(
    const YV12_BUFFER_CONFIG *const frame_buffer, VP9_COMMON *const cm,
    struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop,
    int y_only, VP9LfSync *const lf_sync) {
  const int num_planes = y_only ? 1 : MAX_MB_PLANE;
  const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
  const int num_active_workers = lf_sync->num_active_workers;
  int mi_row, mi_col;
  enum lf_path path;
  // Select the chroma filtering path once, from plane 1's subsampling:
  // 4:2:0 and 4:4:4 have fast mask-based paths; anything else (e.g. 4:2:2)
  // falls back to the slow non-420 path.
  if (y_only)
    path = LF_PATH_444;
  else if (planes[1].subsampling_y == 1 && planes[1].subsampling_x == 1)
    path = LF_PATH_420;
  else if (planes[1].subsampling_y == 0 && planes[1].subsampling_x == 0)
    path = LF_PATH_444;
  else
    path = LF_PATH_SLOW;

  assert(num_active_workers > 0);

  // Each worker strides over the frame by num_active_workers SB rows.
  for (mi_row = start; mi_row < stop;
       mi_row += num_active_workers * MI_BLOCK_SIZE) {
    MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
    LOOP_FILTER_MASK *lfm = get_lfm(&cm->lf, mi_row, 0);

    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE, ++lfm) {
      const int r = mi_row >> MI_BLOCK_SIZE_LOG2;
      const int c = mi_col >> MI_BLOCK_SIZE_LOG2;
      int plane;

      // Wait for the row above to get far enough ahead.
      sync_read(lf_sync, r, c);

      vp9_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);

      vp9_adjust_mask(cm, mi_row, mi_col, lfm);

      // Luma always uses the ss00 (unsubsampled) path.
      vp9_filter_block_plane_ss00(cm, &planes[0], mi_row, lfm);
      for (plane = 1; plane < num_planes; ++plane) {
        switch (path) {
          case LF_PATH_420:
            vp9_filter_block_plane_ss11(cm, &planes[plane], mi_row, lfm);
            break;
          case LF_PATH_444:
            vp9_filter_block_plane_ss00(cm, &planes[plane], mi_row, lfm);
            break;
          case LF_PATH_SLOW:
            vp9_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
                                          mi_row, mi_col);
            break;
        }
      }

      // Announce this column as done so the row below may proceed.
      sync_write(lf_sync, r, c, sb_cols);
    }
  }
}
130 | | |
131 | | // Row-based multi-threaded loopfilter hook |
132 | 15.3k | static int loop_filter_row_worker(void *arg1, void *arg2) { |
133 | 15.3k | VP9LfSync *const lf_sync = (VP9LfSync *)arg1; |
134 | 15.3k | LFWorkerData *const lf_data = (LFWorkerData *)arg2; |
135 | 15.3k | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
136 | 15.3k | lf_data->start, lf_data->stop, lf_data->y_only, |
137 | 15.3k | lf_sync); |
138 | 15.3k | return 1; |
139 | 15.3k | } |
140 | | |
// Split frame rows [start, stop) across up to nworkers loopfilter workers,
// launch them, and block until all rows are filtered. (Re)allocates the
// lf_sync state when the frame geometry or worker count changed.
static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm,
                                struct macroblockd_plane planes[MAX_MB_PLANE],
                                int start, int stop, int y_only,
                                VPxWorker *workers, int nworkers,
                                VP9LfSync *lf_sync) {
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  // Number of superblock rows and cols
  const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
  const int num_tile_cols = 1 << cm->log2_tile_cols;
  // Limit the number of workers to prevent changes in frame dimensions from
  // causing incorrect sync calculations when sb_rows < threads/tile_cols.
  // Further restrict them by the number of tile columns should the user
  // request more as this implementation doesn't scale well beyond that.
  const int num_workers = VPXMIN(nworkers, VPXMIN(num_tile_cols, sb_rows));
  int i;

  // Re-create the sync state on first use, on a row-count change, or when
  // more workers are requested than were allocated for.
  if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
      num_workers > lf_sync->num_workers) {
    vp9_loop_filter_dealloc(lf_sync);
    vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
  }
  lf_sync->num_active_workers = num_workers;

  // Initialize cur_sb_col to -1 for all SB rows.
  memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);

  // Set up loopfilter thread data.
  // The decoder is capping num_workers because it has been observed that using
  // more threads on the loopfilter than there are cores will hurt performance
  // on Android. This is because the system will only schedule the tile decode
  // workers on cores equal to the number of tile columns. Then if the decoder
  // tries to use more threads for the loopfilter, it will hurt performance
  // because of contention. If the multithreading code changes in the future
  // then the number of workers used by the loopfilter should be revisited.
  for (i = 0; i < num_workers; ++i) {
    VPxWorker *const worker = &workers[i];
    LFWorkerData *const lf_data = &lf_sync->lfdata[i];

    worker->hook = loop_filter_row_worker;
    worker->data1 = lf_sync;
    worker->data2 = lf_data;

    // Loopfilter data: worker i starts i SB rows below `start`; the workers
    // then interleave rows via thread_loop_filter_rows().
    vp9_loop_filter_data_reset(lf_data, frame, cm, planes);
    lf_data->start = start + i * MI_BLOCK_SIZE;
    lf_data->stop = stop;
    lf_data->y_only = y_only;

    // Start loopfiltering. The last worker runs on the current thread
    // (execute) while the others run asynchronously (launch).
    if (i == num_workers - 1) {
      winterface->execute(worker);
    } else {
      winterface->launch(worker);
    }
  }

  // Wait till all rows are finished
  for (i = 0; i < num_workers; ++i) {
    winterface->sync(&workers[i]);
  }
}
202 | | |
// Multi-threaded loopfilter entry point for a whole frame. Computes the
// mi-row range to filter (full frame, or a centered band when partial_frame
// is set), initializes filter levels, and dispatches to the row workers.
// Returns immediately when the filter level is 0 (filtering disabled).
void vp9_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP9_COMMON *cm,
                              struct macroblockd_plane planes[MAX_MB_PLANE],
                              int frame_filter_level, int y_only,
                              int partial_frame, VPxWorker *workers,
                              int num_workers, VP9LfSync *lf_sync) {
  int start_mi_row, end_mi_row, mi_rows_to_filter;

  if (!frame_filter_level) return;

  start_mi_row = 0;
  mi_rows_to_filter = cm->mi_rows;
  if (partial_frame && cm->mi_rows > 8) {
    // Partial filtering: a band starting at mid-frame, aligned down to a
    // multiple of 8 mi units, covering at least 8 rows.
    start_mi_row = cm->mi_rows >> 1;
    start_mi_row &= 0xfffffff8;
    mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
  }
  end_mi_row = start_mi_row + mi_rows_to_filter;
  vp9_loop_filter_frame_init(cm, frame_filter_level);

  loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only,
                      workers, num_workers, lf_sync);
}
225 | | |
// Prepare lf_sync for row-interleaved loopfiltering of a new frame:
// (re)allocate sync state if geometry/worker count changed, reset per-row
// progress and tile-completion counters, clear the corruption flag, and
// rewind cm->lf_row. Returns immediately when filtering is disabled.
void vp9_lpf_mt_init(VP9LfSync *lf_sync, VP9_COMMON *cm, int frame_filter_level,
                     int num_workers) {
  const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;

  if (!frame_filter_level) return;

  // Same reallocation condition as loop_filter_rows_mt().
  if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
      num_workers > lf_sync->num_workers) {
    vp9_loop_filter_dealloc(lf_sync);
    vp9_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
  }

  // Initialize cur_sb_col to -1 for all SB rows.
  memset(lf_sync->cur_sb_col, -1, sizeof(*lf_sync->cur_sb_col) * sb_rows);

  lf_sync->corrupted = 0;

  // No tiles of any SB row have finished reconstruction yet.
  memset(lf_sync->num_tiles_done, 0,
         sizeof(*lf_sync->num_tiles_done) * sb_rows);
  cm->lf_row = 0;
}
247 | | |
// Set up nsync by width.
// Returns the column stride (in superblocks) at which adjacent rows
// synchronize; always a power of two, as required by sync_read()'s mask.
static int get_sync_range(int width) {
  // nsync numbers are picked by testing. For example, for 4k
  // video, using 4 gives best performance.
  if (width < 640) return 1;
  if (width <= 1280) return 2;
  if (width <= 4096) return 4;
  return 8;
}
261 | | |
// Allocate memory for lf row synchronization
// `rows` is the number of superblock rows; `width` picks the sync range;
// `num_workers` sizes the per-worker LFWorkerData array. Allocation failures
// are reported through cm->error via CHECK_MEM_ERROR.
void vp9_loop_filter_alloc(VP9LfSync *lf_sync, VP9_COMMON *cm, int rows,
                           int width, int num_workers) {
  lf_sync->rows = rows;
#if CONFIG_MULTITHREAD
  {
    int i;

    // Per-row mutex/cond pairs used by sync_read()/sync_write().
    CHECK_MEM_ERROR(&cm->error, lf_sync->mutex,
                    vpx_malloc(sizeof(*lf_sync->mutex) * rows));
    if (lf_sync->mutex) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&lf_sync->mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(&cm->error, lf_sync->cond,
                    vpx_malloc(sizeof(*lf_sync->cond) * rows));
    if (lf_sync->cond) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&lf_sync->cond[i], NULL);
      }
    }

    // Single mutex guarding cm->lf_row and the corrupted flag.
    CHECK_MEM_ERROR(&cm->error, lf_sync->lf_mutex,
                    vpx_malloc(sizeof(*lf_sync->lf_mutex)));
    pthread_mutex_init(lf_sync->lf_mutex, NULL);

    // Per-row mutex/cond pairs signaling reconstruction completion
    // (see vp9_set_row() / get_next_row()).
    CHECK_MEM_ERROR(&cm->error, lf_sync->recon_done_mutex,
                    vpx_malloc(sizeof(*lf_sync->recon_done_mutex) * rows));
    if (lf_sync->recon_done_mutex) {
      for (i = 0; i < rows; ++i) {
        pthread_mutex_init(&lf_sync->recon_done_mutex[i], NULL);
      }
    }

    CHECK_MEM_ERROR(&cm->error, lf_sync->recon_done_cond,
                    vpx_malloc(sizeof(*lf_sync->recon_done_cond) * rows));
    if (lf_sync->recon_done_cond) {
      for (i = 0; i < rows; ++i) {
        pthread_cond_init(&lf_sync->recon_done_cond[i], NULL);
      }
    }
  }
#endif  // CONFIG_MULTITHREAD

  CHECK_MEM_ERROR(&cm->error, lf_sync->lfdata,
                  vpx_malloc(num_workers * sizeof(*lf_sync->lfdata)));
  lf_sync->num_workers = num_workers;
  lf_sync->num_active_workers = lf_sync->num_workers;

  CHECK_MEM_ERROR(&cm->error, lf_sync->cur_sb_col,
                  vpx_malloc(sizeof(*lf_sync->cur_sb_col) * rows));

  // NOTE(review): `*` binds tighter than `>>`, so this computes
  // (sizeof * aligned_mi_rows) >> MI_BLOCK_SIZE_LOG2 — equivalent to
  // sizeof * sb_rows when the aligned row count is a multiple of the
  // superblock size; confirm mi_cols_aligned_to_sb() guarantees that.
  CHECK_MEM_ERROR(&cm->error, lf_sync->num_tiles_done,
                  vpx_malloc(sizeof(*lf_sync->num_tiles_done) *
                                 mi_cols_aligned_to_sb(cm->mi_rows) >>
                             MI_BLOCK_SIZE_LOG2));

  // Set up nsync.
  lf_sync->sync_range = get_sync_range(width);
}
324 | | |
325 | | // Deallocate lf synchronization related mutex and data |
326 | 24.8k | void vp9_loop_filter_dealloc(VP9LfSync *lf_sync) { |
327 | 24.8k | assert(lf_sync != NULL); |
328 | | |
329 | 24.8k | #if CONFIG_MULTITHREAD |
330 | 24.8k | if (lf_sync->mutex != NULL) { |
331 | 6.96k | int i; |
332 | 48.9k | for (i = 0; i < lf_sync->rows; ++i) { |
333 | 41.9k | pthread_mutex_destroy(&lf_sync->mutex[i]); |
334 | 41.9k | } |
335 | 6.96k | vpx_free(lf_sync->mutex); |
336 | 6.96k | } |
337 | 24.8k | if (lf_sync->cond != NULL) { |
338 | 6.96k | int i; |
339 | 48.9k | for (i = 0; i < lf_sync->rows; ++i) { |
340 | 41.9k | pthread_cond_destroy(&lf_sync->cond[i]); |
341 | 41.9k | } |
342 | 6.96k | vpx_free(lf_sync->cond); |
343 | 6.96k | } |
344 | 24.8k | if (lf_sync->recon_done_mutex != NULL) { |
345 | 6.96k | int i; |
346 | 48.9k | for (i = 0; i < lf_sync->rows; ++i) { |
347 | 41.9k | pthread_mutex_destroy(&lf_sync->recon_done_mutex[i]); |
348 | 41.9k | } |
349 | 6.96k | vpx_free(lf_sync->recon_done_mutex); |
350 | 6.96k | } |
351 | | |
352 | 24.8k | if (lf_sync->lf_mutex != NULL) { |
353 | 6.96k | pthread_mutex_destroy(lf_sync->lf_mutex); |
354 | 6.96k | vpx_free(lf_sync->lf_mutex); |
355 | 6.96k | } |
356 | 24.8k | if (lf_sync->recon_done_cond != NULL) { |
357 | 6.96k | int i; |
358 | 48.9k | for (i = 0; i < lf_sync->rows; ++i) { |
359 | 41.9k | pthread_cond_destroy(&lf_sync->recon_done_cond[i]); |
360 | 41.9k | } |
361 | 6.96k | vpx_free(lf_sync->recon_done_cond); |
362 | 6.96k | } |
363 | 24.8k | #endif // CONFIG_MULTITHREAD |
364 | | |
365 | 24.8k | vpx_free(lf_sync->lfdata); |
366 | 24.8k | vpx_free(lf_sync->cur_sb_col); |
367 | 24.8k | vpx_free(lf_sync->num_tiles_done); |
368 | | // clear the structure as the source of this call may be a resize in which |
369 | | // case this call will be followed by an _alloc() which may fail. |
370 | 24.8k | vp9_zero(*lf_sync); |
371 | 24.8k | } |
372 | | |
373 | 49.9k | static int get_next_row(VP9_COMMON *cm, VP9LfSync *lf_sync) { |
374 | 49.9k | int return_val = -1; |
375 | 49.9k | const int max_rows = cm->mi_rows; |
376 | | |
377 | 49.9k | #if CONFIG_MULTITHREAD |
378 | 49.9k | int cur_row; |
379 | 49.9k | const int tile_cols = 1 << cm->log2_tile_cols; |
380 | | |
381 | 49.9k | pthread_mutex_lock(lf_sync->lf_mutex); |
382 | 49.9k | if (cm->lf_row < max_rows) { |
383 | 31.3k | cur_row = cm->lf_row >> MI_BLOCK_SIZE_LOG2; |
384 | 31.3k | return_val = cm->lf_row; |
385 | 31.3k | cm->lf_row += MI_BLOCK_SIZE; |
386 | 31.3k | if (cm->lf_row < max_rows) { |
387 | | /* If this is not the last row, make sure the next row is also decoded. |
388 | | * This is because the intra predict has to happen before loop filter */ |
389 | 22.4k | cur_row += 1; |
390 | 22.4k | } |
391 | 31.3k | } |
392 | 49.9k | pthread_mutex_unlock(lf_sync->lf_mutex); |
393 | | |
394 | 49.9k | if (return_val == -1) return return_val; |
395 | | |
396 | 31.3k | pthread_mutex_lock(&lf_sync->recon_done_mutex[cur_row]); |
397 | 31.3k | if (lf_sync->num_tiles_done[cur_row] < tile_cols) { |
398 | 6.08k | pthread_cond_wait(&lf_sync->recon_done_cond[cur_row], |
399 | 6.08k | &lf_sync->recon_done_mutex[cur_row]); |
400 | 6.08k | } |
401 | 31.3k | pthread_mutex_unlock(&lf_sync->recon_done_mutex[cur_row]); |
402 | 31.3k | pthread_mutex_lock(lf_sync->lf_mutex); |
403 | 31.3k | if (lf_sync->corrupted) { |
404 | 392 | int row = return_val >> MI_BLOCK_SIZE_LOG2; |
405 | 392 | pthread_mutex_lock(&lf_sync->mutex[row]); |
406 | 392 | lf_sync->cur_sb_col[row] = INT_MAX; |
407 | 392 | pthread_cond_signal(&lf_sync->cond[row]); |
408 | 392 | pthread_mutex_unlock(&lf_sync->mutex[row]); |
409 | 392 | return_val = -1; |
410 | 392 | } |
411 | 31.3k | pthread_mutex_unlock(lf_sync->lf_mutex); |
412 | | #else |
413 | | (void)lf_sync; |
414 | | if (cm->lf_row < max_rows) { |
415 | | return_val = cm->lf_row; |
416 | | cm->lf_row += MI_BLOCK_SIZE; |
417 | | } |
418 | | #endif // CONFIG_MULTITHREAD |
419 | | |
420 | 31.3k | return return_val; |
421 | 49.9k | } |
422 | | |
423 | 19.0k | void vp9_loopfilter_rows(LFWorkerData *lf_data, VP9LfSync *lf_sync) { |
424 | 19.0k | int mi_row; |
425 | 19.0k | VP9_COMMON *cm = lf_data->cm; |
426 | | |
427 | 49.9k | while ((mi_row = get_next_row(cm, lf_sync)) != -1 && mi_row < cm->mi_rows) { |
428 | 30.9k | lf_data->start = mi_row; |
429 | 30.9k | lf_data->stop = mi_row + MI_BLOCK_SIZE; |
430 | | |
431 | 30.9k | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
432 | 30.9k | lf_data->start, lf_data->stop, lf_data->y_only, |
433 | 30.9k | lf_sync); |
434 | 30.9k | } |
435 | 19.0k | } |
436 | | |
// Called when one tile of superblock row `row` finishes reconstruction:
// merge `corrupted` into the shared flag, bump the row's tile counter, and
// once all `num_tiles` tiles are done wake the loopfilter worker(s) waiting
// in get_next_row(). No-op when built without CONFIG_MULTITHREAD.
void vp9_set_row(VP9LfSync *lf_sync, int num_tiles, int row, int is_last_row,
                 int corrupted) {
#if CONFIG_MULTITHREAD
  // The corrupted flag is guarded by lf_mutex (same lock as get_next_row).
  pthread_mutex_lock(lf_sync->lf_mutex);
  lf_sync->corrupted |= corrupted;
  pthread_mutex_unlock(lf_sync->lf_mutex);
  pthread_mutex_lock(&lf_sync->recon_done_mutex[row]);
  lf_sync->num_tiles_done[row] += 1;
  if (num_tiles == lf_sync->num_tiles_done[row]) {
    if (is_last_row) {
      /* The last 2 rows wait on the last row to be done.
       * So, we have to broadcast the signal in this case.
       */
      pthread_cond_broadcast(&lf_sync->recon_done_cond[row]);
    } else {
      pthread_cond_signal(&lf_sync->recon_done_cond[row]);
    }
  }
  pthread_mutex_unlock(&lf_sync->recon_done_mutex[row]);
#else
  (void)lf_sync;
  (void)num_tiles;
  (void)row;
  (void)is_last_row;
  (void)corrupted;
#endif  // CONFIG_MULTITHREAD
}
464 | | |
465 | 0 | void vp9_loopfilter_job(LFWorkerData *lf_data, VP9LfSync *lf_sync) { |
466 | 0 | thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes, |
467 | 0 | lf_data->start, lf_data->stop, lf_data->y_only, |
468 | 0 | lf_sync); |
469 | 0 | } |
470 | | |
// Accumulate frame counts.
// Element-wise adds every counter in `counts` (typically one thread's or
// tile's statistics) into `accum`. `is_dec` selects decoder behavior: the
// decoder accumulates both eob_branch and coef counts, while the encoder
// accumulates only eob_branch (coef is updated at frame level, see below).
void vp9_accumulate_frame_counts(FRAME_COUNTS *accum,
                                 const FRAME_COUNTS *counts, int is_dec) {
  int i, j, k, l, m;

  // Intra luma mode counts per block-size group.
  for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
    for (j = 0; j < INTRA_MODES; j++)
      accum->y_mode[i][j] += counts->y_mode[i][j];

  // Chroma mode counts conditioned on the luma mode.
  for (i = 0; i < INTRA_MODES; i++)
    for (j = 0; j < INTRA_MODES; j++)
      accum->uv_mode[i][j] += counts->uv_mode[i][j];

  for (i = 0; i < PARTITION_CONTEXTS; i++)
    for (j = 0; j < PARTITION_TYPES; j++)
      accum->partition[i][j] += counts->partition[i][j];

  if (is_dec) {
    // Decoder: accumulate the 5-D coefficient statistics along with the
    // end-of-block branch counts.
    int n;
    for (i = 0; i < TX_SIZES; i++)
      for (j = 0; j < PLANE_TYPES; j++)
        for (k = 0; k < REF_TYPES; k++)
          for (l = 0; l < COEF_BANDS; l++)
            for (m = 0; m < COEFF_CONTEXTS; m++) {
              accum->eob_branch[i][j][k][l][m] +=
                  counts->eob_branch[i][j][k][l][m];
              for (n = 0; n < UNCONSTRAINED_NODES + 1; n++)
                accum->coef[i][j][k][l][m][n] += counts->coef[i][j][k][l][m][n];
            }
  } else {
    // Encoder: eob_branch only.
    for (i = 0; i < TX_SIZES; i++)
      for (j = 0; j < PLANE_TYPES; j++)
        for (k = 0; k < REF_TYPES; k++)
          for (l = 0; l < COEF_BANDS; l++)
            for (m = 0; m < COEFF_CONTEXTS; m++)
              accum->eob_branch[i][j][k][l][m] +=
                  counts->eob_branch[i][j][k][l][m];
    // In the encoder, coef is only updated at frame
    // level, so not need to accumulate it here.
    // for (n = 0; n < UNCONSTRAINED_NODES + 1; n++)
    //   accum->coef[i][j][k][l][m][n] +=
    //     counts->coef[i][j][k][l][m][n];
  }

  for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
    for (j = 0; j < SWITCHABLE_FILTERS; j++)
      accum->switchable_interp[i][j] += counts->switchable_interp[i][j];

  for (i = 0; i < INTER_MODE_CONTEXTS; i++)
    for (j = 0; j < INTER_MODES; j++)
      accum->inter_mode[i][j] += counts->inter_mode[i][j];

  for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
    for (j = 0; j < 2; j++)
      accum->intra_inter[i][j] += counts->intra_inter[i][j];

  for (i = 0; i < COMP_INTER_CONTEXTS; i++)
    for (j = 0; j < 2; j++) accum->comp_inter[i][j] += counts->comp_inter[i][j];

  // Reference-frame selection counts.
  for (i = 0; i < REF_CONTEXTS; i++)
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        accum->single_ref[i][j][k] += counts->single_ref[i][j][k];

  for (i = 0; i < REF_CONTEXTS; i++)
    for (j = 0; j < 2; j++) accum->comp_ref[i][j] += counts->comp_ref[i][j];

  // Transform-size selection trees (separate trees for 32x32/16x16/8x8 roots).
  for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
    for (j = 0; j < TX_SIZES; j++)
      accum->tx.p32x32[i][j] += counts->tx.p32x32[i][j];

    for (j = 0; j < TX_SIZES - 1; j++)
      accum->tx.p16x16[i][j] += counts->tx.p16x16[i][j];

    for (j = 0; j < TX_SIZES - 2; j++)
      accum->tx.p8x8[i][j] += counts->tx.p8x8[i][j];
  }

  for (i = 0; i < TX_SIZES; i++)
    accum->tx.tx_totals[i] += counts->tx.tx_totals[i];

  for (i = 0; i < SKIP_CONTEXTS; i++)
    for (j = 0; j < 2; j++) accum->skip[i][j] += counts->skip[i][j];

  // Motion-vector statistics: joint distribution plus per-component counts.
  for (i = 0; i < MV_JOINTS; i++) accum->mv.joints[i] += counts->mv.joints[i];

  for (k = 0; k < 2; k++) {
    nmv_component_counts *const comps = &accum->mv.comps[k];
    const nmv_component_counts *const comps_t = &counts->mv.comps[k];

    for (i = 0; i < 2; i++) {
      comps->sign[i] += comps_t->sign[i];
      comps->class0_hp[i] += comps_t->class0_hp[i];
      comps->hp[i] += comps_t->hp[i];
    }

    for (i = 0; i < MV_CLASSES; i++) comps->classes[i] += comps_t->classes[i];

    for (i = 0; i < CLASS0_SIZE; i++) {
      comps->class0[i] += comps_t->class0[i];
      for (j = 0; j < MV_FP_SIZE; j++)
        comps->class0_fp[i][j] += comps_t->class0_fp[i][j];
    }

    for (i = 0; i < MV_OFFSET_BITS; i++)
      for (j = 0; j < 2; j++) comps->bits[i][j] += comps_t->bits[i][j];

    for (i = 0; i < MV_FP_SIZE; i++) comps->fp[i] += comps_t->fp[i];
  }
}