/src/gstreamer/subprojects/gst-plugins-base/gst-libs/gst/video/video-converter.c
Line | Count | Source |
1 | | /* GStreamer |
2 | | * Copyright (C) 2010 David Schleef <ds@schleef.org> |
3 | | * Copyright (C) 2010 Sebastian Dröge <sebastian.droege@collabora.co.uk> |
4 | | * |
5 | | * This library is free software; you can redistribute it and/or |
6 | | * modify it under the terms of the GNU Library General Public |
7 | | * License as published by the Free Software Foundation; either |
8 | | * version 2 of the License, or (at your option) any later version. |
9 | | * |
10 | | * This library is distributed in the hope that it will be useful, |
11 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
13 | | * Library General Public License for more details. |
14 | | * |
15 | | * You should have received a copy of the GNU Library General Public |
16 | | * License along with this library; if not, write to the |
17 | | * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, |
18 | | * Boston, MA 02110-1301, USA. |
19 | | */ |
20 | | |
21 | | #ifdef HAVE_CONFIG_H |
22 | | #include "config.h" |
23 | | #endif |
24 | | |
25 | | #if 0 |
26 | | #ifdef HAVE_PTHREAD |
27 | | #define _GNU_SOURCE |
28 | | #include <pthread.h> |
29 | | #endif |
30 | | #endif |
31 | | |
32 | | #include "video-converter.h" |
33 | | |
34 | | #include <glib.h> |
35 | | #include <string.h> |
36 | | #include <math.h> |
37 | | #include <gst/base/base.h> |
38 | | |
39 | | #include "video-orc.h" |
40 | | |
41 | | /** |
42 | | * SECTION:videoconverter |
43 | | * @title: GstVideoConverter |
44 | | * @short_description: Generic video conversion |
45 | | * |
46 | | * This object is used to convert video frames from one format to another. |
47 | | * The object can perform conversion of: |
48 | | * |
49 | | * * video format |
50 | | * * video colorspace |
51 | | * * chroma-siting |
52 | | * * video size |
53 | | * |
54 | | */ |
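As a quick orientation before the implementation, here is a minimal usage sketch of the public API this file implements (the wrapper name convert_one_frame and its variables are illustrative only, not part of this file):

#include <gst/video/video.h>

/* Convert one mapped frame; in_info/out_info must describe the mapped frames. */
static void
convert_one_frame (GstVideoInfo * in_info, GstVideoInfo * out_info,
    const GstVideoFrame * src, GstVideoFrame * dest)
{
  GstVideoConverter *converter;

  /* a NULL config is expected to leave every option at the DEFAULT_OPT_*
   * values defined further down in this file */
  converter = gst_video_converter_new (in_info, out_info, NULL);
  gst_video_converter_frame (converter, src, dest);
  gst_video_converter_free (converter);
}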
55 | | |
56 | | /* |
57 | | * (a) unpack |
58 | | * (b) chroma upsample |
59 | | * (c) (convert Y'CbCr to R'G'B') |
60 | | * (d) gamma decode |
61 | | * (e) downscale |
62 | | * (f) colorspace convert through XYZ |
63 | | * (g) upscale |
64 | | * (h) gamma encode |
65 | | * (i) (convert R'G'B' to Y'CbCr) |
66 | | * (j) chroma downsample |
67 | | * (k) pack |
68 | | * |
69 | | * quality options |
70 | | * |
71 | | * (a) range truncate, range expand |
72 | | * (b) full upsample, 1-1 non-cosited upsample, no upsample |
73 | | * (c) 8 bits, 16 bits |
74 | | * (d) |
75 | | * (e) 8 bits, 16 bits |
76 | | * (f) 8 bits, 16 bits |
77 | | * (g) 8 bits, 16 bits |
78 | | * (h) |
79 | | * (i) 8 bits, 16 bits |
80 | | * (j) 1-1 cosited downsample, no downsample |
81 | | * (k) |
82 | | * |
83 | | * |
84 | | * 1 : a -> -> -> -> e -> f -> g -> -> -> -> k |
85 | | * 2 : a -> -> -> -> e -> f* -> g -> -> -> -> k |
86 | | * 3 : a -> -> -> -> e* -> f* -> g* -> -> -> -> k |
87 | | * 4 : a -> b -> -> -> e -> f -> g -> -> -> j -> k |
88 | | * 5 : a -> b -> -> -> e* -> f* -> g* -> -> -> j -> k |
89 | | * 6 : a -> b -> c -> d -> e -> f -> g -> h -> i -> j -> k |
90 | | * 7 : a -> b -> c -> d -> e* -> f* -> g* -> h -> i -> j -> k |
91 | | * |
92 | | * 8 : a -> b -> c -> d -> e* -> f* -> g* -> h -> i -> j -> k |
93 | | * 9 : a -> b -> c -> d -> e* -> f* -> g* -> h -> i -> j -> k |
94 | | * 10 : a -> b -> c -> d -> e* -> f* -> g* -> h -> i -> j -> k |
95 | | */ |
96 | | |
97 | | #ifndef GST_DISABLE_GST_DEBUG |
98 | | #define GST_CAT_DEFAULT ensure_debug_category() |
99 | | static GstDebugCategory * |
100 | | ensure_debug_category (void) |
101 | 0 | { |
102 | 0 | static gsize cat_gonce = 0; |
103 | |
104 | 0 | if (g_once_init_enter (&cat_gonce)) { |
105 | 0 | gsize cat_done; |
106 | |
107 | 0 | cat_done = (gsize) _gst_debug_category_new ("video-converter", 0, |
108 | 0 | "video-converter object"); |
109 | |
110 | 0 | g_once_init_leave (&cat_gonce, cat_done); |
111 | 0 | } |
112 | |
113 | 0 | return (GstDebugCategory *) cat_gonce; |
114 | 0 | } |
115 | | #else |
116 | | #define ensure_debug_category() /* NOOP */ |
117 | | #endif /* GST_DISABLE_GST_DEBUG */ |
118 | | |
119 | | typedef void (*GstParallelizedTaskFunc) (gpointer user_data); |
120 | | |
121 | | typedef struct _GstParallelizedTaskRunner GstParallelizedTaskRunner; |
122 | | typedef struct _GstParallelizedWorkItem GstParallelizedWorkItem; |
123 | | |
124 | | struct _GstParallelizedWorkItem |
125 | | { |
126 | | GstParallelizedTaskRunner *self; |
127 | | GstParallelizedTaskFunc func; |
128 | | gpointer user_data; |
129 | | }; |
130 | | |
131 | | struct _GstParallelizedTaskRunner |
132 | | { |
133 | | GstTaskPool *pool; |
134 | | gboolean own_pool; |
135 | | guint n_threads; |
136 | | |
137 | | GstVecDeque *tasks; |
138 | | GstVecDeque *work_items; |
139 | | |
140 | | GMutex lock; |
141 | | |
142 | | gboolean async_tasks; |
143 | | }; |
144 | | |
145 | | static void |
146 | | gst_parallelized_task_thread_func (gpointer data) |
147 | 0 | { |
148 | 0 | GstParallelizedTaskRunner *runner = data; |
149 | 0 | GstParallelizedWorkItem *work_item; |
150 | |
151 | 0 | g_mutex_lock (&runner->lock); |
152 | 0 | work_item = gst_vec_deque_pop_head (runner->work_items); |
153 | 0 | g_mutex_unlock (&runner->lock); |
154 | |
155 | 0 | g_assert (work_item != NULL); |
156 | 0 | g_assert (work_item->func != NULL); |
157 | | |
158 | | |
159 | 0 | work_item->func (work_item->user_data); |
160 | 0 | if (runner->async_tasks) |
161 | 0 | g_free (work_item); |
162 | 0 | } |
163 | | |
164 | | static void |
165 | | gst_parallelized_task_runner_join (GstParallelizedTaskRunner * self) |
166 | 0 | { |
167 | 0 | gboolean joined = FALSE; |
168 | |
169 | 0 | while (!joined) { |
170 | 0 | g_mutex_lock (&self->lock); |
171 | 0 | if (!(joined = gst_vec_deque_is_empty (self->tasks))) { |
172 | 0 | gpointer task = gst_vec_deque_pop_head (self->tasks); |
173 | 0 | g_mutex_unlock (&self->lock); |
174 | 0 | gst_task_pool_join (self->pool, task); |
175 | 0 | } else { |
176 | 0 | g_mutex_unlock (&self->lock); |
177 | 0 | } |
178 | 0 | } |
179 | 0 | } |
180 | | |
181 | | static void |
182 | | gst_parallelized_task_runner_free (GstParallelizedTaskRunner * self) |
183 | 0 | { |
184 | 0 | gst_parallelized_task_runner_join (self); |
185 | |
186 | 0 | gst_vec_deque_free (self->work_items); |
187 | 0 | gst_vec_deque_free (self->tasks); |
188 | 0 | if (self->own_pool) |
189 | 0 | gst_task_pool_cleanup (self->pool); |
190 | 0 | gst_object_unref (self->pool); |
191 | 0 | g_mutex_clear (&self->lock); |
192 | 0 | g_free (self); |
193 | 0 | } |
194 | | |
195 | | static GstParallelizedTaskRunner * |
196 | | gst_parallelized_task_runner_new (guint n_threads, GstTaskPool * pool, |
197 | | gboolean async_tasks) |
198 | 0 | { |
199 | 0 | GstParallelizedTaskRunner *self; |
200 | |
201 | 0 | if (n_threads == 0) |
202 | 0 | n_threads = g_get_num_processors (); |
203 | |
204 | 0 | self = g_new0 (GstParallelizedTaskRunner, 1); |
205 | |
206 | 0 | if (pool) { |
207 | 0 | self->pool = g_object_ref (pool); |
208 | 0 | self->own_pool = FALSE; |
209 | | |
210 | | /* No reason to split up the work between more threads than the |
211 | | * pool can spawn */ |
212 | 0 | if (GST_IS_SHARED_TASK_POOL (pool)) |
213 | 0 | n_threads = |
214 | 0 | MIN (n_threads, |
215 | 0 | gst_shared_task_pool_get_max_threads (GST_SHARED_TASK_POOL (pool))); |
216 | 0 | } else { |
217 | 0 | self->pool = gst_shared_task_pool_new (); |
218 | 0 | self->own_pool = TRUE; |
219 | 0 | gst_shared_task_pool_set_max_threads (GST_SHARED_TASK_POOL (self->pool), |
220 | 0 | n_threads); |
221 | 0 | gst_task_pool_prepare (self->pool, NULL); |
222 | 0 | } |
223 | |
224 | 0 | self->tasks = gst_vec_deque_new (n_threads); |
225 | 0 | self->work_items = gst_vec_deque_new (n_threads); |
226 | |
227 | 0 | self->n_threads = n_threads; |
228 | |
229 | 0 | g_mutex_init (&self->lock); |
230 | | |
231 | | /* Set when scheduling a job */ |
232 | 0 | self->async_tasks = async_tasks; |
233 | |
234 | 0 | return self; |
235 | 0 | } |
236 | | |
237 | | static void |
238 | | gst_parallelized_task_runner_finish (GstParallelizedTaskRunner * self) |
239 | 0 | { |
240 | 0 | gst_parallelized_task_runner_join (self); |
241 | 0 | } |
242 | | |
243 | | static void |
244 | | gst_parallelized_task_runner_run (GstParallelizedTaskRunner * self, |
245 | | GstParallelizedTaskFunc func, gpointer * task_data) |
246 | 0 | { |
247 | 0 | guint n_threads = self->n_threads; |
248 | |
249 | 0 | if (n_threads > 1 || self->async_tasks) { |
250 | 0 | guint i = 0; |
251 | 0 | g_mutex_lock (&self->lock); |
252 | 0 | if (!self->async_tasks) { |
253 | | /* if not async, perform one of the functions in the current thread */ |
254 | 0 | i = 1; |
255 | 0 | } |
256 | 0 | for (; i < n_threads; i++) { |
257 | 0 | gpointer task; |
258 | 0 | GstParallelizedWorkItem *work_item; |
259 | |
260 | 0 | if (!self->async_tasks) |
261 | 0 | work_item = g_newa (GstParallelizedWorkItem, 1); |
262 | 0 | else |
263 | 0 | work_item = g_new0 (GstParallelizedWorkItem, 1); |
264 | |
265 | 0 | work_item->self = self; |
266 | 0 | work_item->func = func; |
267 | 0 | work_item->user_data = task_data[i]; |
268 | 0 | gst_vec_deque_push_tail (self->work_items, work_item); |
269 | |
270 | 0 | task = |
271 | 0 | gst_task_pool_push (self->pool, gst_parallelized_task_thread_func, |
272 | 0 | self, NULL); |
273 | | |
274 | | /* The return value of push() is unfortunately nullable, and we can't deal with that */ |
275 | 0 | g_assert (task != NULL); |
276 | 0 | gst_vec_deque_push_tail (self->tasks, task); |
277 | 0 | } |
278 | 0 | g_mutex_unlock (&self->lock); |
279 | 0 | } |
280 | | |
281 | 0 | if (!self->async_tasks) { |
282 | 0 | func (task_data[0]); |
283 | |
284 | 0 | gst_parallelized_task_runner_finish (self); |
285 | 0 | } |
286 | 0 | } |
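The runner above is private to this file; for orientation, the fan-out/join pattern it builds on can be sketched with the public GstTaskPool API alone (the Demo*/demo_* names below are hypothetical, and GstSharedTaskPool requires GStreamer >= 1.20):

#include <gst/gst.h>

typedef struct { gint plane; } DemoTask;

static void
demo_func (gpointer data)
{
  DemoTask *task = data;
  g_print ("processing plane %d\n", task->plane);
}

static void
demo_fan_out (void)
{
  GstTaskPool *pool = gst_shared_task_pool_new ();
  DemoTask tasks[2] = { { 0 }, { 1 } };
  gpointer ids[2];
  gint i;

  gst_shared_task_pool_set_max_threads (GST_SHARED_TASK_POOL (pool), 2);
  gst_task_pool_prepare (pool, NULL);

  /* push one work item per plane, then join them all, which is what
   * gst_parallelized_task_runner_run() and _join() do above */
  for (i = 0; i < 2; i++)
    ids[i] = gst_task_pool_push (pool, demo_func, &tasks[i], NULL);
  for (i = 0; i < 2; i++)
    gst_task_pool_join (pool, ids[i]);

  gst_task_pool_cleanup (pool);
  gst_object_unref (pool);
}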
287 | | |
288 | | typedef struct _GstLineCache GstLineCache; |
289 | | |
290 | 0 | #define SCALE (8) |
291 | 0 | #define SCALE_F ((float) (1 << SCALE)) |
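SCALE fixes the Q8 fixed-point precision used by the 8-bit matrix paths below; a tiny illustration (the helper name and the 0.587 coefficient are examples only):

static inline gint
q8_mul_example (gint sample)
{
  gint coeff = (gint) (0.587 * SCALE_F + 0.5); /* 0.587 in Q8 is 150 */
  return (coeff * sample) >> SCALE;            /* approximately 0.587 * sample */
}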
292 | | |
293 | | typedef struct _MatrixData MatrixData; |
294 | | |
295 | | struct _MatrixData |
296 | | { |
297 | | gdouble dm[4][4]; |
298 | | gint im[4][4]; |
299 | | gint width; |
300 | | guint64 orc_p1; |
301 | | guint64 orc_p2; |
302 | | guint64 orc_p3; |
303 | | guint64 orc_p4; |
304 | | gint64 *t_r; |
305 | | gint64 *t_g; |
306 | | gint64 *t_b; |
307 | | gint64 t_c; |
308 | | void (*matrix_func) (MatrixData * data, gpointer pixels); |
309 | | }; |
310 | | |
311 | | typedef struct _GammaData GammaData; |
312 | | |
313 | | struct _GammaData |
314 | | { |
315 | | gpointer gamma_table; |
316 | | gint width; |
317 | | void (*gamma_func) (GammaData * data, gpointer dest, gpointer src); |
318 | | }; |
319 | | |
320 | | typedef enum |
321 | | { |
322 | | ALPHA_MODE_NONE = 0, |
323 | | ALPHA_MODE_COPY = (1 << 0), |
324 | | ALPHA_MODE_SET = (1 << 1), |
325 | | ALPHA_MODE_MULT = (1 << 2) |
326 | | } AlphaMode; |
327 | | |
328 | | typedef struct |
329 | | { |
330 | | guint8 *data; |
331 | | guint stride; |
332 | | guint n_lines; |
333 | | guint idx; |
334 | | gpointer user_data; |
335 | | GDestroyNotify notify; |
336 | | } ConverterAlloc; |
337 | | |
338 | | typedef void (*FastConvertFunc) (GstVideoConverter * convert, |
339 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane); |
340 | | |
341 | | struct _GstVideoConverter |
342 | | { |
343 | | gint flags; |
344 | | |
345 | | GstVideoInfo in_info; |
346 | | GstVideoInfo out_info; |
347 | | |
348 | | gint in_x; |
349 | | gint in_y; |
350 | | gint in_width; |
351 | | gint in_height; |
352 | | gint in_maxwidth; |
353 | | gint in_maxheight; |
354 | | gint out_x; |
355 | | gint out_y; |
356 | | gint out_width; |
357 | | gint out_height; |
358 | | gint out_maxwidth; |
359 | | gint out_maxheight; |
360 | | |
361 | | gint current_pstride; |
362 | | gint current_width; |
363 | | gint current_height; |
364 | | GstVideoFormat current_format; |
365 | | gint current_bits; |
366 | | |
367 | | GstStructure *config; |
368 | | |
369 | | GstParallelizedTaskRunner *conversion_runner; |
370 | | |
371 | | guint16 **tmpline; |
372 | | |
373 | | gboolean fill_border; |
374 | | gpointer borderline; |
375 | | guint64 borders[4]; |
376 | | guint32 border_argb; |
377 | | guint32 alpha_value; |
378 | | AlphaMode alpha_mode; |
379 | | |
380 | | void (*convert) (GstVideoConverter * convert, const GstVideoFrame * src, |
381 | | GstVideoFrame * dest); |
382 | | |
383 | | /* data for unpack */ |
384 | | GstLineCache **unpack_lines; |
385 | | GstVideoFormat unpack_format; |
386 | | guint unpack_bits; |
387 | | gboolean unpack_rgb; |
388 | | gboolean identity_unpack; |
389 | | gint unpack_pstride; |
390 | | |
391 | | /* chroma upsample */ |
392 | | GstLineCache **upsample_lines; |
393 | | GstVideoChromaResample **upsample; |
394 | | GstVideoChromaResample **upsample_p; |
395 | | GstVideoChromaResample **upsample_i; |
396 | | guint up_n_lines; |
397 | | gint up_offset; |
398 | | |
399 | | /* to R'G'B */ |
400 | | GstLineCache **to_RGB_lines; |
401 | | MatrixData to_RGB_matrix; |
402 | | /* gamma decode */ |
403 | | GammaData gamma_dec; |
404 | | |
405 | | /* scaling */ |
406 | | GstLineCache **hscale_lines; |
407 | | GstVideoScaler **h_scaler; |
408 | | gint h_scale_format; |
409 | | GstLineCache **vscale_lines; |
410 | | GstVideoScaler **v_scaler; |
411 | | GstVideoScaler **v_scaler_p; |
412 | | GstVideoScaler **v_scaler_i; |
413 | | gint v_scale_width; |
414 | | gint v_scale_format; |
415 | | |
416 | | /* color space conversion */ |
417 | | GstLineCache **convert_lines; |
418 | | MatrixData convert_matrix; |
419 | | gint in_bits; |
420 | | gint out_bits; |
421 | | |
422 | | /* alpha correction */ |
423 | | GstLineCache **alpha_lines; |
424 | | void (*alpha_func) (GstVideoConverter * convert, gpointer pixels, gint width); |
425 | | |
426 | | /* gamma encode */ |
427 | | GammaData gamma_enc; |
428 | | /* to Y'CbCr */ |
429 | | GstLineCache **to_YUV_lines; |
430 | | MatrixData to_YUV_matrix; |
431 | | |
432 | | /* chroma downsample */ |
433 | | GstLineCache **downsample_lines; |
434 | | GstVideoChromaResample **downsample; |
435 | | GstVideoChromaResample **downsample_p; |
436 | | GstVideoChromaResample **downsample_i; |
437 | | guint down_n_lines; |
438 | | gint down_offset; |
439 | | |
440 | | /* dither */ |
441 | | GstLineCache **dither_lines; |
442 | | GstVideoDither **dither; |
443 | | |
444 | | /* pack */ |
445 | | GstLineCache **pack_lines; |
446 | | guint pack_nlines; |
447 | | GstVideoFormat pack_format; |
448 | | guint pack_bits; |
449 | | gboolean pack_rgb; |
450 | | gboolean identity_pack; |
451 | | gint pack_pstride; |
452 | | gconstpointer pack_pal; |
453 | | gsize pack_palsize; |
454 | | |
455 | | const GstVideoFrame *src; |
456 | | GstVideoFrame *dest; |
457 | | |
458 | | /* fastpath */ |
459 | | GstVideoFormat fformat[4]; |
460 | | gint fin_x[4]; |
461 | | gint fin_y[4]; |
462 | | gint fout_x[4]; |
463 | | gint fout_y[4]; |
464 | | gint fout_width[4]; |
465 | | gint fout_height[4]; |
466 | | gint fsplane[4]; |
467 | | gint ffill[4]; |
468 | | |
469 | | struct |
470 | | { |
471 | | GstVideoScaler **scaler; |
472 | | } fh_scaler[4]; |
473 | | struct |
474 | | { |
475 | | GstVideoScaler **scaler; |
476 | | } fv_scaler[4]; |
477 | | FastConvertFunc fconvert[4]; |
478 | | |
479 | | /* for parallel async running */ |
480 | | gpointer tasks[4]; |
481 | | gpointer tasks_p[4]; |
482 | | }; |
483 | | |
484 | | typedef gpointer (*GstLineCacheAllocLineFunc) (GstLineCache * cache, gint idx, |
485 | | gpointer user_data); |
486 | | typedef gboolean (*GstLineCacheNeedLineFunc) (GstLineCache * cache, gint idx, |
487 | | gint out_line, gint in_line, gpointer user_data); |
488 | | |
489 | | struct _GstLineCache |
490 | | { |
491 | | gint first; |
492 | | gint backlog; |
493 | | GPtrArray *lines; |
494 | | |
495 | | GstLineCache *prev; |
496 | | gboolean write_input; |
497 | | gboolean pass_alloc; |
498 | | gboolean alloc_writable; |
499 | | |
500 | | GstLineCacheNeedLineFunc need_line; |
501 | | gint need_line_idx; |
502 | | gpointer need_line_data; |
503 | | GDestroyNotify need_line_notify; |
504 | | |
505 | | guint n_lines; |
506 | | guint stride; |
507 | | GstLineCacheAllocLineFunc alloc_line; |
508 | | gpointer alloc_line_data; |
509 | | GDestroyNotify alloc_line_notify; |
510 | | }; |
511 | | |
512 | | static GstLineCache * |
513 | | gst_line_cache_new (GstLineCache * prev) |
514 | 0 | { |
515 | 0 | GstLineCache *result; |
516 | |
517 | 0 | result = g_new0 (GstLineCache, 1); |
518 | 0 | result->lines = g_ptr_array_new (); |
519 | 0 | result->prev = prev; |
520 | |
521 | 0 | return result; |
522 | 0 | } |
523 | | |
524 | | static void |
525 | | gst_line_cache_clear (GstLineCache * cache) |
526 | 0 | { |
527 | 0 | g_return_if_fail (cache != NULL); |
528 | | |
529 | 0 | g_ptr_array_set_size (cache->lines, 0); |
530 | 0 | cache->first = 0; |
531 | 0 | } |
532 | | |
533 | | static void |
534 | | gst_line_cache_free (GstLineCache * cache) |
535 | 0 | { |
536 | 0 | if (cache->need_line_notify) |
537 | 0 | cache->need_line_notify (cache->need_line_data); |
538 | 0 | if (cache->alloc_line_notify) |
539 | 0 | cache->alloc_line_notify (cache->alloc_line_data); |
540 | 0 | gst_line_cache_clear (cache); |
541 | 0 | g_ptr_array_unref (cache->lines); |
542 | 0 | g_free (cache); |
543 | 0 | } |
544 | | |
545 | | static void |
546 | | gst_line_cache_set_need_line_func (GstLineCache * cache, |
547 | | GstLineCacheNeedLineFunc need_line, gint idx, gpointer user_data, |
548 | | GDestroyNotify notify) |
549 | 0 | { |
550 | 0 | cache->need_line = need_line; |
551 | 0 | cache->need_line_idx = idx; |
552 | 0 | cache->need_line_data = user_data; |
553 | 0 | cache->need_line_notify = notify; |
554 | 0 | } |
555 | | |
556 | | static void |
557 | | gst_line_cache_set_alloc_line_func (GstLineCache * cache, |
558 | | GstLineCacheAllocLineFunc alloc_line, gpointer user_data, |
559 | | GDestroyNotify notify) |
560 | 0 | { |
561 | 0 | cache->alloc_line = alloc_line; |
562 | 0 | cache->alloc_line_data = user_data; |
563 | 0 | cache->alloc_line_notify = notify; |
564 | 0 | } |
565 | | |
566 | | /* keep this much backlog for interlaced video */ |
567 | 0 | #define BACKLOG 2 |
568 | | |
569 | | static gpointer * |
570 | | gst_line_cache_get_lines (GstLineCache * cache, gint idx, gint out_line, |
571 | | gint in_line, gint n_lines) |
572 | 0 | { |
573 | 0 | if (cache->first + cache->backlog < in_line) { |
574 | 0 | gint to_remove = |
575 | 0 | MIN (in_line - (cache->first + cache->backlog), cache->lines->len); |
576 | 0 | if (to_remove > 0) { |
577 | 0 | g_ptr_array_remove_range (cache->lines, 0, to_remove); |
578 | 0 | } |
579 | 0 | cache->first += to_remove; |
580 | 0 | } else if (in_line < cache->first) { |
581 | 0 | gst_line_cache_clear (cache); |
582 | 0 | cache->first = in_line; |
583 | 0 | } |
584 | |
585 | 0 | while (TRUE) { |
586 | 0 | gint oline; |
587 | |
588 | 0 | if (cache->first <= in_line |
589 | 0 | && in_line + n_lines <= cache->first + (gint) cache->lines->len) { |
590 | 0 | return cache->lines->pdata + (in_line - cache->first); |
591 | 0 | } |
592 | | |
593 | 0 | if (cache->need_line == NULL) |
594 | 0 | break; |
595 | | |
596 | | /* We may be able to skip ahead to the earliest line needed */ |
597 | 0 | if (cache->lines->len == 0 && cache->first + cache->backlog < in_line) |
598 | 0 | cache->first = in_line - cache->backlog; |
599 | |
600 | 0 | oline = out_line + cache->first + cache->lines->len - in_line; |
601 | |
602 | 0 | if (!cache->need_line (cache, idx, oline, cache->first + cache->lines->len, |
603 | 0 | cache->need_line_data)) |
604 | 0 | break; |
605 | 0 | } |
606 | 0 | GST_LOG ("no lines"); |
607 | 0 | return NULL; |
608 | 0 | } |
609 | | |
610 | | static void |
611 | | gst_line_cache_add_line (GstLineCache * cache, gint idx, gpointer line) |
612 | 0 | { |
613 | 0 | if (cache->first + cache->lines->len != idx) { |
614 | 0 | gst_line_cache_clear (cache); |
615 | 0 | cache->first = idx; |
616 | 0 | } |
617 | 0 | g_ptr_array_add (cache->lines, line); |
618 | 0 | } |
619 | | |
620 | | static gpointer |
621 | | gst_line_cache_alloc_line (GstLineCache * cache, gint idx) |
622 | 0 | { |
623 | 0 | gpointer res; |
624 | |
625 | 0 | if (cache->alloc_line) |
626 | 0 | res = cache->alloc_line (cache, idx, cache->alloc_line_data); |
627 | 0 | else |
628 | 0 | res = NULL; |
629 | |
630 | 0 | return res; |
631 | 0 | } |
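Taken together, these helpers define the contract each chain stage below follows from its need_line callback: pull input lines from the previous cache, write the processed result into a freshly allocated line, and publish it. A hypothetical pass-through stage (demo_need_line is not part of this file; real stages do format-specific work instead of a plain memcpy) would look roughly like:

static gboolean
demo_need_line (GstLineCache * cache, gint idx, gint out_line, gint in_line,
    gpointer user_data)
{
  gpointer *lines, destline;

  /* ask the previous stage for one input line */
  lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1);
  if (lines == NULL)
    return FALSE;

  /* produce an output line and publish it for downstream consumers */
  destline = gst_line_cache_alloc_line (cache, out_line);
  memcpy (destline, lines[0], cache->stride);
  gst_line_cache_add_line (cache, in_line, destline);

  return TRUE;
}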
632 | | |
633 | | static void video_converter_generic (GstVideoConverter * convert, |
634 | | const GstVideoFrame * src, GstVideoFrame * dest); |
635 | | static gboolean video_converter_lookup_fastpath (GstVideoConverter * convert); |
636 | | static void video_converter_compute_matrix (GstVideoConverter * convert); |
637 | | static void video_converter_compute_resample (GstVideoConverter * convert, |
638 | | gint idx); |
639 | | |
640 | | static gpointer get_dest_line (GstLineCache * cache, gint idx, |
641 | | gpointer user_data); |
642 | | |
643 | | static gboolean do_unpack_lines (GstLineCache * cache, gint idx, gint out_line, |
644 | | gint in_line, gpointer user_data); |
645 | | static gboolean do_downsample_lines (GstLineCache * cache, gint idx, |
646 | | gint out_line, gint in_line, gpointer user_data); |
647 | | static gboolean do_convert_to_RGB_lines (GstLineCache * cache, gint idx, |
648 | | gint out_line, gint in_line, gpointer user_data); |
649 | | static gboolean do_convert_lines (GstLineCache * cache, gint idx, gint out_line, |
650 | | gint in_line, gpointer user_data); |
651 | | static gboolean do_alpha_lines (GstLineCache * cache, gint idx, gint out_line, |
652 | | gint in_line, gpointer user_data); |
653 | | static gboolean do_convert_to_YUV_lines (GstLineCache * cache, gint idx, |
654 | | gint out_line, gint in_line, gpointer user_data); |
655 | | static gboolean do_upsample_lines (GstLineCache * cache, gint idx, |
656 | | gint out_line, gint in_line, gpointer user_data); |
657 | | static gboolean do_vscale_lines (GstLineCache * cache, gint idx, gint out_line, |
658 | | gint in_line, gpointer user_data); |
659 | | static gboolean do_hscale_lines (GstLineCache * cache, gint idx, gint out_line, |
660 | | gint in_line, gpointer user_data); |
661 | | static gboolean do_dither_lines (GstLineCache * cache, gint idx, gint out_line, |
662 | | gint in_line, gpointer user_data); |
663 | | |
664 | | static ConverterAlloc * |
665 | | converter_alloc_new (guint stride, guint n_lines, gpointer user_data, |
666 | | GDestroyNotify notify) |
667 | 0 | { |
668 | 0 | ConverterAlloc *alloc; |
669 | |
670 | 0 | GST_LOG ("stride %d, n_lines %d", stride, n_lines); |
671 | 0 | alloc = g_new0 (ConverterAlloc, 1); |
672 | 0 | alloc->data = g_malloc (stride * n_lines); |
673 | 0 | alloc->stride = stride; |
674 | 0 | alloc->n_lines = n_lines; |
675 | 0 | alloc->idx = 0; |
676 | 0 | alloc->user_data = user_data; |
677 | 0 | alloc->notify = notify; |
678 | |
679 | 0 | return alloc; |
680 | 0 | } |
681 | | |
682 | | static void |
683 | | converter_alloc_free (ConverterAlloc * alloc) |
684 | 0 | { |
685 | 0 | if (alloc->notify) |
686 | 0 | alloc->notify (alloc->user_data); |
687 | 0 | g_free (alloc->data); |
688 | 0 | g_free (alloc); |
689 | 0 | } |
690 | | |
691 | | static void |
692 | | setup_border_alloc (GstVideoConverter * convert, ConverterAlloc * alloc) |
693 | 0 | { |
694 | 0 | gint i; |
695 | |
696 | 0 | if (convert->borderline) { |
697 | 0 | for (i = 0; i < alloc->n_lines; i++) |
698 | 0 | memcpy (&alloc->data[i * alloc->stride], convert->borderline, |
699 | 0 | alloc->stride); |
700 | 0 | } |
701 | 0 | } |
702 | | |
703 | | static gpointer |
704 | | get_temp_line (GstLineCache * cache, gint idx, gpointer user_data) |
705 | 0 | { |
706 | 0 | ConverterAlloc *alloc = user_data; |
707 | 0 | gpointer tmpline; |
708 | |
709 | 0 | GST_LOG ("get temp line %d (%p %d)", idx, alloc, alloc->idx); |
710 | 0 | tmpline = &alloc->data[alloc->stride * alloc->idx]; |
711 | 0 | alloc->idx = (alloc->idx + 1) % alloc->n_lines; |
712 | |
713 | 0 | return tmpline; |
714 | 0 | } |
715 | | |
716 | | static gpointer |
717 | | get_border_temp_line (GstLineCache * cache, gint idx, gpointer user_data) |
718 | 0 | { |
719 | 0 | ConverterAlloc *alloc = user_data; |
720 | 0 | GstVideoConverter *convert = alloc->user_data; |
721 | 0 | gpointer tmpline; |
722 | |
723 | 0 | GST_LOG ("get temp line %d (%p %d)", idx, alloc, alloc->idx); |
724 | 0 | tmpline = &alloc->data[alloc->stride * alloc->idx] + |
725 | 0 | (convert->out_x * convert->pack_pstride); |
726 | 0 | alloc->idx = (alloc->idx + 1) % alloc->n_lines; |
727 | |
728 | 0 | return tmpline; |
729 | 0 | } |
730 | | |
731 | | static gint |
732 | | get_opt_int (GstVideoConverter * convert, const gchar * opt, gint def) |
733 | 0 | { |
734 | 0 | gint res; |
735 | 0 | if (!gst_structure_get_int (convert->config, opt, &res)) |
736 | 0 | res = def; |
737 | 0 | return res; |
738 | 0 | } |
739 | | |
740 | | static guint |
741 | | get_opt_uint (GstVideoConverter * convert, const gchar * opt, guint def) |
742 | 0 | { |
743 | 0 | guint res; |
744 | 0 | if (!gst_structure_get_uint (convert->config, opt, &res)) |
745 | 0 | res = def; |
746 | 0 | return res; |
747 | 0 | } |
748 | | |
749 | | static gdouble |
750 | | get_opt_double (GstVideoConverter * convert, const gchar * opt, gdouble def) |
751 | 0 | { |
752 | 0 | gdouble res; |
753 | 0 | if (!gst_structure_get_double (convert->config, opt, &res)) |
754 | 0 | res = def; |
755 | 0 | return res; |
756 | 0 | } |
757 | | |
758 | | static gboolean |
759 | | get_opt_bool (GstVideoConverter * convert, const gchar * opt, gboolean def) |
760 | 0 | { |
761 | 0 | gboolean res; |
762 | 0 | if (!gst_structure_get_boolean (convert->config, opt, &res)) |
763 | 0 | res = def; |
764 | 0 | return res; |
765 | 0 | } |
766 | | |
767 | | static gint |
768 | | get_opt_enum (GstVideoConverter * convert, const gchar * opt, GType type, |
769 | | gint def) |
770 | 0 | { |
771 | 0 | gint res; |
772 | 0 | if (!gst_structure_get_enum (convert->config, opt, type, &res)) |
773 | 0 | res = def; |
774 | 0 | return res; |
775 | 0 | } |
776 | | |
777 | 0 | #define DEFAULT_OPT_FILL_BORDER TRUE |
778 | 0 | #define DEFAULT_OPT_ALPHA_VALUE 1.0 |
779 | | /* options copy, set, mult */ |
780 | 0 | #define DEFAULT_OPT_ALPHA_MODE GST_VIDEO_ALPHA_MODE_COPY |
781 | 0 | #define DEFAULT_OPT_BORDER_ARGB 0xff000000 |
782 | | /* options full, input-only, output-only, none */ |
783 | 0 | #define DEFAULT_OPT_MATRIX_MODE GST_VIDEO_MATRIX_MODE_FULL |
784 | | /* none, remap */ |
785 | 0 | #define DEFAULT_OPT_GAMMA_MODE GST_VIDEO_GAMMA_MODE_NONE |
786 | | /* none, merge-only, fast */ |
787 | 0 | #define DEFAULT_OPT_PRIMARIES_MODE GST_VIDEO_PRIMARIES_MODE_NONE |
788 | | /* options full, upsample-only, downsample-only, none */ |
789 | 0 | #define DEFAULT_OPT_CHROMA_MODE GST_VIDEO_CHROMA_MODE_FULL |
790 | 0 | #define DEFAULT_OPT_RESAMPLER_METHOD GST_VIDEO_RESAMPLER_METHOD_CUBIC |
791 | 0 | #define DEFAULT_OPT_CHROMA_RESAMPLER_METHOD GST_VIDEO_RESAMPLER_METHOD_LINEAR |
792 | 0 | #define DEFAULT_OPT_RESAMPLER_TAPS 0 |
793 | 0 | #define DEFAULT_OPT_DITHER_METHOD GST_VIDEO_DITHER_BAYER |
794 | 0 | #define DEFAULT_OPT_DITHER_QUANTIZATION 1 |
795 | 0 | #define DEFAULT_OPT_ASYNC_TASKS FALSE |
796 | | |
797 | 0 | #define GET_OPT_FILL_BORDER(c) get_opt_bool(c, \ |
798 | 0 | GST_VIDEO_CONVERTER_OPT_FILL_BORDER, DEFAULT_OPT_FILL_BORDER) |
799 | 0 | #define GET_OPT_ALPHA_VALUE(c) get_opt_double(c, \ |
800 | 0 | GST_VIDEO_CONVERTER_OPT_ALPHA_VALUE, DEFAULT_OPT_ALPHA_VALUE) |
801 | 0 | #define GET_OPT_ALPHA_MODE(c) get_opt_enum(c, \ |
802 | 0 | GST_VIDEO_CONVERTER_OPT_ALPHA_MODE, GST_TYPE_VIDEO_ALPHA_MODE, DEFAULT_OPT_ALPHA_MODE) |
803 | | #define GET_OPT_BORDER_ARGB(c) get_opt_uint(c, \ |
804 | | GST_VIDEO_CONVERTER_OPT_BORDER_ARGB, DEFAULT_OPT_BORDER_ARGB) |
805 | 0 | #define GET_OPT_MATRIX_MODE(c) get_opt_enum(c, \ |
806 | 0 | GST_VIDEO_CONVERTER_OPT_MATRIX_MODE, GST_TYPE_VIDEO_MATRIX_MODE, DEFAULT_OPT_MATRIX_MODE) |
807 | 0 | #define GET_OPT_GAMMA_MODE(c) get_opt_enum(c, \ |
808 | 0 | GST_VIDEO_CONVERTER_OPT_GAMMA_MODE, GST_TYPE_VIDEO_GAMMA_MODE, DEFAULT_OPT_GAMMA_MODE) |
809 | 0 | #define GET_OPT_PRIMARIES_MODE(c) get_opt_enum(c, \ |
810 | 0 | GST_VIDEO_CONVERTER_OPT_PRIMARIES_MODE, GST_TYPE_VIDEO_PRIMARIES_MODE, DEFAULT_OPT_PRIMARIES_MODE) |
811 | 0 | #define GET_OPT_CHROMA_MODE(c) get_opt_enum(c, \ |
812 | 0 | GST_VIDEO_CONVERTER_OPT_CHROMA_MODE, GST_TYPE_VIDEO_CHROMA_MODE, DEFAULT_OPT_CHROMA_MODE) |
813 | 0 | #define GET_OPT_RESAMPLER_METHOD(c) get_opt_enum(c, \ |
814 | 0 | GST_VIDEO_CONVERTER_OPT_RESAMPLER_METHOD, GST_TYPE_VIDEO_RESAMPLER_METHOD, \ |
815 | 0 | DEFAULT_OPT_RESAMPLER_METHOD) |
816 | 0 | #define GET_OPT_CHROMA_RESAMPLER_METHOD(c) get_opt_enum(c, \ |
817 | 0 | GST_VIDEO_CONVERTER_OPT_CHROMA_RESAMPLER_METHOD, GST_TYPE_VIDEO_RESAMPLER_METHOD, \ |
818 | 0 | DEFAULT_OPT_CHROMA_RESAMPLER_METHOD) |
819 | 0 | #define GET_OPT_RESAMPLER_TAPS(c) get_opt_uint(c, \ |
820 | 0 | GST_VIDEO_CONVERTER_OPT_RESAMPLER_TAPS, DEFAULT_OPT_RESAMPLER_TAPS) |
821 | 0 | #define GET_OPT_DITHER_METHOD(c) get_opt_enum(c, \ |
822 | 0 | GST_VIDEO_CONVERTER_OPT_DITHER_METHOD, GST_TYPE_VIDEO_DITHER_METHOD, \ |
823 | 0 | DEFAULT_OPT_DITHER_METHOD) |
824 | 0 | #define GET_OPT_DITHER_QUANTIZATION(c) get_opt_uint(c, \ |
825 | 0 | GST_VIDEO_CONVERTER_OPT_DITHER_QUANTIZATION, DEFAULT_OPT_DITHER_QUANTIZATION) |
826 | 0 | #define GET_OPT_ASYNC_TASKS(c) get_opt_bool(c, \ |
827 | 0 | GST_VIDEO_CONVERTER_OPT_ASYNC_TASKS, DEFAULT_OPT_ASYNC_TASKS) |
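All of the options read by the getters above live in the config GstStructure handed to gst_video_converter_new(); a sketch of overriding a few of the defaults (demo_new_converter is illustrative only):

static GstVideoConverter *
demo_new_converter (GstVideoInfo * in_info, GstVideoInfo * out_info)
{
  GstStructure *config;

  config = gst_structure_new ("GstVideoConverter",
      GST_VIDEO_CONVERTER_OPT_RESAMPLER_METHOD,
      GST_TYPE_VIDEO_RESAMPLER_METHOD, GST_VIDEO_RESAMPLER_METHOD_LANCZOS,
      GST_VIDEO_CONVERTER_OPT_DITHER_METHOD,
      GST_TYPE_VIDEO_DITHER_METHOD, GST_VIDEO_DITHER_NONE,
      GST_VIDEO_CONVERTER_OPT_FILL_BORDER, G_TYPE_BOOLEAN, FALSE, NULL);

  /* the converter takes ownership of config */
  return gst_video_converter_new (in_info, out_info, config);
}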
828 | | |
829 | 0 | #define CHECK_ALPHA_COPY(c) (GET_OPT_ALPHA_MODE(c) == GST_VIDEO_ALPHA_MODE_COPY) |
830 | | #define CHECK_ALPHA_SET(c) (GET_OPT_ALPHA_MODE(c) == GST_VIDEO_ALPHA_MODE_SET) |
831 | 0 | #define CHECK_ALPHA_MULT(c) (GET_OPT_ALPHA_MODE(c) == GST_VIDEO_ALPHA_MODE_MULT) |
832 | | |
833 | | #define CHECK_MATRIX_FULL(c) (GET_OPT_MATRIX_MODE(c) == GST_VIDEO_MATRIX_MODE_FULL) |
834 | 0 | #define CHECK_MATRIX_INPUT(c) (GET_OPT_MATRIX_MODE(c) == GST_VIDEO_MATRIX_MODE_INPUT_ONLY) |
835 | 0 | #define CHECK_MATRIX_OUTPUT(c) (GET_OPT_MATRIX_MODE(c) == GST_VIDEO_MATRIX_MODE_OUTPUT_ONLY) |
836 | 0 | #define CHECK_MATRIX_NONE(c) (GET_OPT_MATRIX_MODE(c) == GST_VIDEO_MATRIX_MODE_NONE) |
837 | | |
838 | | #define CHECK_GAMMA_NONE(c) (GET_OPT_GAMMA_MODE(c) == GST_VIDEO_GAMMA_MODE_NONE) |
839 | 0 | #define CHECK_GAMMA_REMAP(c) (GET_OPT_GAMMA_MODE(c) == GST_VIDEO_GAMMA_MODE_REMAP) |
840 | | |
841 | 0 | #define CHECK_PRIMARIES_NONE(c) (GET_OPT_PRIMARIES_MODE(c) == GST_VIDEO_PRIMARIES_MODE_NONE) |
842 | | #define CHECK_PRIMARIES_MERGE(c) (GET_OPT_PRIMARIES_MODE(c) == GST_VIDEO_PRIMARIES_MODE_MERGE_ONLY) |
843 | | #define CHECK_PRIMARIES_FAST(c) (GET_OPT_PRIMARIES_MODE(c) == GST_VIDEO_PRIMARIES_MODE_FAST) |
844 | | |
845 | | #define CHECK_CHROMA_FULL(c) (GET_OPT_CHROMA_MODE(c) == GST_VIDEO_CHROMA_MODE_FULL) |
846 | 0 | #define CHECK_CHROMA_UPSAMPLE(c) (GET_OPT_CHROMA_MODE(c) == GST_VIDEO_CHROMA_MODE_UPSAMPLE_ONLY) |
847 | 0 | #define CHECK_CHROMA_DOWNSAMPLE(c) (GET_OPT_CHROMA_MODE(c) == GST_VIDEO_CHROMA_MODE_DOWNSAMPLE_ONLY) |
848 | 0 | #define CHECK_CHROMA_NONE(c) (GET_OPT_CHROMA_MODE(c) == GST_VIDEO_CHROMA_MODE_NONE) |
849 | | |
850 | | static GstLineCache * |
851 | | chain_unpack_line (GstVideoConverter * convert, gint idx) |
852 | 0 | { |
853 | 0 | GstLineCache *prev; |
854 | 0 | GstVideoInfo *info; |
855 | |
856 | 0 | info = &convert->in_info; |
857 | |
858 | 0 | convert->current_format = convert->unpack_format; |
859 | 0 | convert->current_bits = convert->unpack_bits; |
860 | 0 | convert->current_pstride = convert->current_bits >> 1; |
861 | |
862 | 0 | convert->unpack_pstride = convert->current_pstride; |
863 | 0 | convert->identity_unpack = (convert->current_format == info->finfo->format); |
864 | |
865 | 0 | GST_LOG ("chain unpack line format %s, pstride %d, identity_unpack %d", |
866 | 0 | gst_video_format_to_string (convert->current_format), |
867 | 0 | convert->current_pstride, convert->identity_unpack); |
868 | |
869 | 0 | prev = convert->unpack_lines[idx] = gst_line_cache_new (NULL); |
870 | 0 | prev->write_input = FALSE; |
871 | 0 | prev->pass_alloc = FALSE; |
872 | 0 | prev->n_lines = 1; |
873 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
874 | 0 | gst_line_cache_set_need_line_func (prev, do_unpack_lines, idx, convert, NULL); |
875 | |
876 | 0 | return prev; |
877 | 0 | } |
878 | | |
879 | | static GstLineCache * |
880 | | chain_upsample (GstVideoConverter * convert, GstLineCache * prev, gint idx) |
881 | 0 | { |
882 | 0 | video_converter_compute_resample (convert, idx); |
883 | |
884 | 0 | if (convert->upsample_p[idx] || convert->upsample_i[idx]) { |
885 | 0 | GST_LOG ("chain upsample"); |
886 | 0 | prev = convert->upsample_lines[idx] = gst_line_cache_new (prev); |
887 | 0 | prev->write_input = TRUE; |
888 | 0 | prev->pass_alloc = TRUE; |
889 | | /* XXX: why this hardcoded value? */ |
890 | 0 | prev->n_lines = 5; |
891 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
892 | 0 | gst_line_cache_set_need_line_func (prev, |
893 | 0 | do_upsample_lines, idx, convert, NULL); |
894 | 0 | } |
895 | 0 | return prev; |
896 | 0 | } |
897 | | |
898 | | static void |
899 | | color_matrix_set_identity (MatrixData * m) |
900 | 0 | { |
901 | 0 | int i, j; |
902 | |
903 | 0 | for (i = 0; i < 4; i++) { |
904 | 0 | for (j = 0; j < 4; j++) { |
905 | 0 | m->dm[i][j] = (i == j); |
906 | 0 | } |
907 | 0 | } |
908 | 0 | } |
909 | | |
910 | | static void |
911 | | color_matrix_copy (MatrixData * d, const MatrixData * s) |
912 | 0 | { |
913 | 0 | gint i, j; |
914 | |
915 | 0 | for (i = 0; i < 4; i++) |
916 | 0 | for (j = 0; j < 4; j++) |
917 | 0 | d->dm[i][j] = s->dm[i][j]; |
918 | 0 | } |
919 | | |
920 | | /* Perform 4x4 matrix multiplication: |
921 | | * - @dst@ = @a@ * @b@ |
922 | | * - @dst@ may be a pointer to @a@ and/or @b@ |
923 | | */ |
924 | | static void |
925 | | color_matrix_multiply (MatrixData * dst, MatrixData * a, MatrixData * b) |
926 | 0 | { |
927 | 0 | MatrixData tmp; |
928 | 0 | int i, j, k; |
929 | |
930 | 0 | for (i = 0; i < 4; i++) { |
931 | 0 | for (j = 0; j < 4; j++) { |
932 | 0 | double x = 0; |
933 | 0 | for (k = 0; k < 4; k++) { |
934 | 0 | x += a->dm[i][k] * b->dm[k][j]; |
935 | 0 | } |
936 | 0 | tmp.dm[i][j] = x; |
937 | 0 | } |
938 | 0 | } |
939 | 0 | color_matrix_copy (dst, &tmp); |
940 | 0 | } |
941 | | |
942 | | static void |
943 | | color_matrix_invert (MatrixData * d, MatrixData * s) |
944 | 0 | { |
945 | 0 | MatrixData tmp; |
946 | 0 | int i, j; |
947 | 0 | double det; |
948 | |
949 | 0 | color_matrix_set_identity (&tmp); |
950 | 0 | for (j = 0; j < 3; j++) { |
951 | 0 | for (i = 0; i < 3; i++) { |
952 | 0 | tmp.dm[j][i] = |
953 | 0 | s->dm[(i + 1) % 3][(j + 1) % 3] * s->dm[(i + 2) % 3][(j + 2) % 3] - |
954 | 0 | s->dm[(i + 1) % 3][(j + 2) % 3] * s->dm[(i + 2) % 3][(j + 1) % 3]; |
955 | 0 | } |
956 | 0 | } |
957 | 0 | det = |
958 | 0 | tmp.dm[0][0] * s->dm[0][0] + tmp.dm[0][1] * s->dm[1][0] + |
959 | 0 | tmp.dm[0][2] * s->dm[2][0]; |
960 | 0 | for (j = 0; j < 3; j++) { |
961 | 0 | for (i = 0; i < 3; i++) { |
962 | 0 | tmp.dm[i][j] /= det; |
963 | 0 | } |
964 | 0 | } |
965 | 0 | color_matrix_copy (d, &tmp); |
966 | 0 | } |
967 | | |
968 | | static void |
969 | | color_matrix_offset_components (MatrixData * m, double a1, double a2, double a3) |
970 | 0 | { |
971 | 0 | MatrixData a; |
972 | |
973 | 0 | color_matrix_set_identity (&a); |
974 | 0 | a.dm[0][3] = a1; |
975 | 0 | a.dm[1][3] = a2; |
976 | 0 | a.dm[2][3] = a3; |
977 | 0 | color_matrix_multiply (m, &a, m); |
978 | 0 | } |
979 | | |
980 | | static void |
981 | | color_matrix_scale_components (MatrixData * m, double a1, double a2, double a3) |
982 | 0 | { |
983 | 0 | MatrixData a; |
984 | |
985 | 0 | color_matrix_set_identity (&a); |
986 | 0 | a.dm[0][0] = a1; |
987 | 0 | a.dm[1][1] = a2; |
988 | 0 | a.dm[2][2] = a3; |
989 | 0 | color_matrix_multiply (m, &a, m); |
990 | 0 | } |
991 | | |
992 | | static void |
993 | | color_matrix_debug (const MatrixData * s) |
994 | 0 | { |
995 | 0 | GST_LOG ("[%f %f %f %f]", s->dm[0][0], s->dm[0][1], s->dm[0][2], s->dm[0][3]); |
996 | 0 | GST_LOG ("[%f %f %f %f]", s->dm[1][0], s->dm[1][1], s->dm[1][2], s->dm[1][3]); |
997 | 0 | GST_LOG ("[%f %f %f %f]", s->dm[2][0], s->dm[2][1], s->dm[2][2], s->dm[2][3]); |
998 | 0 | GST_LOG ("[%f %f %f %f]", s->dm[3][0], s->dm[3][1], s->dm[3][2], s->dm[3][3]); |
999 | 0 | } |
1000 | | |
1001 | | static void |
1002 | | color_matrix_convert (MatrixData * s) |
1003 | 0 | { |
1004 | 0 | gint i, j; |
1005 | |
1006 | 0 | for (i = 0; i < 4; i++) |
1007 | 0 | for (j = 0; j < 4; j++) |
1008 | 0 | s->im[i][j] = rint (s->dm[i][j]); |
1009 | |
1010 | 0 | GST_LOG ("[%6d %6d %6d %6d]", s->im[0][0], s->im[0][1], s->im[0][2], |
1011 | 0 | s->im[0][3]); |
1012 | 0 | GST_LOG ("[%6d %6d %6d %6d]", s->im[1][0], s->im[1][1], s->im[1][2], |
1013 | 0 | s->im[1][3]); |
1014 | 0 | GST_LOG ("[%6d %6d %6d %6d]", s->im[2][0], s->im[2][1], s->im[2][2], |
1015 | 0 | s->im[2][3]); |
1016 | 0 | GST_LOG ("[%6d %6d %6d %6d]", s->im[3][0], s->im[3][1], s->im[3][2], |
1017 | 0 | s->im[3][3]); |
1018 | 0 | } |
1019 | | |
1020 | | static void |
1021 | | color_matrix_YCbCr_to_RGB (MatrixData * m, double Kr, double Kb) |
1022 | 0 | { |
1023 | 0 | double Kg = 1.0 - Kr - Kb; |
1024 | 0 | MatrixData k = { |
1025 | 0 | { |
1026 | 0 | {1., 0., 2 * (1 - Kr), 0.}, |
1027 | 0 | {1., -2 * Kb * (1 - Kb) / Kg, -2 * Kr * (1 - Kr) / Kg, 0.}, |
1028 | 0 | {1., 2 * (1 - Kb), 0., 0.}, |
1029 | 0 | {0., 0., 0., 1.}, |
1030 | 0 | } |
1031 | 0 | }; |
1032 | |
1033 | 0 | color_matrix_multiply (m, &k, m); |
1034 | 0 | } |
1035 | | |
1036 | | static void |
1037 | | color_matrix_RGB_to_YCbCr (MatrixData * m, double Kr, double Kb) |
1038 | 0 | { |
1039 | 0 | double Kg = 1.0 - Kr - Kb; |
1040 | 0 | MatrixData k; |
1041 | 0 | double x; |
1042 | |
1043 | 0 | k.dm[0][0] = Kr; |
1044 | 0 | k.dm[0][1] = Kg; |
1045 | 0 | k.dm[0][2] = Kb; |
1046 | 0 | k.dm[0][3] = 0; |
1047 | |
1048 | 0 | x = 1 / (2 * (1 - Kb)); |
1049 | 0 | k.dm[1][0] = -x * Kr; |
1050 | 0 | k.dm[1][1] = -x * Kg; |
1051 | 0 | k.dm[1][2] = x * (1 - Kb); |
1052 | 0 | k.dm[1][3] = 0; |
1053 | |
1054 | 0 | x = 1 / (2 * (1 - Kr)); |
1055 | 0 | k.dm[2][0] = x * (1 - Kr); |
1056 | 0 | k.dm[2][1] = -x * Kg; |
1057 | 0 | k.dm[2][2] = -x * Kb; |
1058 | 0 | k.dm[2][3] = 0; |
1059 | |
1060 | 0 | k.dm[3][0] = 0; |
1061 | 0 | k.dm[3][1] = 0; |
1062 | 0 | k.dm[3][2] = 0; |
1063 | 0 | k.dm[3][3] = 1; |
1064 | |
1065 | 0 | color_matrix_multiply (m, &k, m); |
1066 | 0 | } |
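For reference, with the BT.709 constants Kr = 0.2126 and Kb = 0.0722 (so Kg = 0.7152), the first row of k gives the familiar luma equation Y' = 0.2126 R' + 0.7152 G' + 0.0722 B', while the second and third rows reduce to the scaled colour differences Cb = (B' - Y') / 1.8556 and Cr = (R' - Y') / 1.5748, the divisors being 2 (1 - Kb) and 2 (1 - Kr).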
1067 | | |
1068 | | static void |
1069 | | color_matrix_RGB_to_XYZ (MatrixData * dst, double Rx, double Ry, double Gx, |
1070 | | double Gy, double Bx, double By, double Wx, double Wy) |
1071 | 0 | { |
1072 | 0 | MatrixData m, im; |
1073 | 0 | double sx, sy, sz; |
1074 | 0 | double wx, wy, wz; |
1075 | |
1076 | 0 | color_matrix_set_identity (&m); |
1077 | |
1078 | 0 | m.dm[0][0] = Rx; |
1079 | 0 | m.dm[1][0] = Ry; |
1080 | 0 | m.dm[2][0] = (1.0 - Rx - Ry); |
1081 | 0 | m.dm[0][1] = Gx; |
1082 | 0 | m.dm[1][1] = Gy; |
1083 | 0 | m.dm[2][1] = (1.0 - Gx - Gy); |
1084 | 0 | m.dm[0][2] = Bx; |
1085 | 0 | m.dm[1][2] = By; |
1086 | 0 | m.dm[2][2] = (1.0 - Bx - By); |
1087 | |
1088 | 0 | color_matrix_invert (&im, &m); |
1089 | |
1090 | 0 | wx = Wx / Wy; |
1091 | 0 | wy = 1.0; |
1092 | 0 | wz = (1.0 - Wx - Wy) / Wy; |
1093 | |
1094 | 0 | sx = im.dm[0][0] * wx + im.dm[0][1] * wy + im.dm[0][2] * wz; |
1095 | 0 | sy = im.dm[1][0] * wx + im.dm[1][1] * wy + im.dm[1][2] * wz; |
1096 | 0 | sz = im.dm[2][0] * wx + im.dm[2][1] * wy + im.dm[2][2] * wz; |
1097 | |
1098 | 0 | m.dm[0][0] *= sx; |
1099 | 0 | m.dm[1][0] *= sx; |
1100 | 0 | m.dm[2][0] *= sx; |
1101 | 0 | m.dm[0][1] *= sy; |
1102 | 0 | m.dm[1][1] *= sy; |
1103 | 0 | m.dm[2][1] *= sy; |
1104 | 0 | m.dm[0][2] *= sz; |
1105 | 0 | m.dm[1][2] *= sz; |
1106 | 0 | m.dm[2][2] *= sz; |
1107 | |
1108 | 0 | color_matrix_copy (dst, &m); |
1109 | 0 | } |
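As a concrete check, feeding in the BT.709/sRGB primaries (Rx, Ry) = (0.64, 0.33), (Gx, Gy) = (0.30, 0.60), (Bx, By) = (0.15, 0.06) with the D65 white point (Wx, Wy) = (0.3127, 0.3290) yields the well-known sRGB-to-XYZ matrix, whose middle (Y) row is approximately (0.2126, 0.7152, 0.0722) -- the same luma weights used by the Y'CbCr matrices above.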
1110 | | |
1111 | | static void |
1112 | | videoconvert_convert_init_tables (MatrixData * data) |
1113 | 0 | { |
1114 | 0 | gint i, j; |
1115 | |
1116 | 0 | data->t_r = g_new (gint64, 256); |
1117 | 0 | data->t_g = g_new (gint64, 256); |
1118 | 0 | data->t_b = g_new (gint64, 256); |
1119 | |
1120 | 0 | for (i = 0; i < 256; i++) { |
1121 | 0 | gint64 r = 0, g = 0, b = 0; |
1122 | |
1123 | 0 | for (j = 0; j < 3; j++) { |
1124 | 0 | r = (r << 16) + data->im[j][0] * i; |
1125 | 0 | g = (g << 16) + data->im[j][1] * i; |
1126 | 0 | b = (b << 16) + data->im[j][2] * i; |
1127 | 0 | } |
1128 | 0 | data->t_r[i] = r; |
1129 | 0 | data->t_g[i] = g; |
1130 | 0 | data->t_b[i] = b; |
1131 | 0 | } |
1132 | 0 | data->t_c = ((gint64) data->im[0][3] << 32) |
1133 | 0 | + ((gint64) data->im[1][3] << 16) |
1134 | 0 | + ((gint64) data->im[2][3] << 0); |
1135 | 0 | } |
1136 | | |
1137 | | void |
1138 | | _custom_video_orc_matrix8 (guint8 * ORC_RESTRICT d1, |
1139 | | const guint8 * ORC_RESTRICT s1, orc_int64 p1, orc_int64 p2, orc_int64 p3, |
1140 | | orc_int64 p4, int n) |
1141 | 0 | { |
1142 | 0 | gint i; |
1143 | 0 | gint r, g, b; |
1144 | 0 | gint y, u, v; |
1145 | 0 | gint a00, a01, a02, a03; |
1146 | 0 | gint a10, a11, a12, a13; |
1147 | 0 | gint a20, a21, a22, a23; |
1148 | |
1149 | 0 | a00 = (gint16) (p1 >> 16); |
1150 | 0 | a01 = (gint16) (p2 >> 16); |
1151 | 0 | a02 = (gint16) (p3 >> 16); |
1152 | 0 | a03 = (gint16) (p4 >> 16); |
1153 | 0 | a10 = (gint16) (p1 >> 32); |
1154 | 0 | a11 = (gint16) (p2 >> 32); |
1155 | 0 | a12 = (gint16) (p3 >> 32); |
1156 | 0 | a13 = (gint16) (p4 >> 32); |
1157 | 0 | a20 = (gint16) (p1 >> 48); |
1158 | 0 | a21 = (gint16) (p2 >> 48); |
1159 | 0 | a22 = (gint16) (p3 >> 48); |
1160 | 0 | a23 = (gint16) (p4 >> 48); |
1161 | |
1162 | 0 | for (i = 0; i < n; i++) { |
1163 | 0 | r = s1[i * 4 + 1]; |
1164 | 0 | g = s1[i * 4 + 2]; |
1165 | 0 | b = s1[i * 4 + 3]; |
1166 | |
1167 | 0 | y = ((a00 * r + a01 * g + a02 * b) >> SCALE) + a03; |
1168 | 0 | u = ((a10 * r + a11 * g + a12 * b) >> SCALE) + a13; |
1169 | 0 | v = ((a20 * r + a21 * g + a22 * b) >> SCALE) + a23; |
1170 | |
1171 | 0 | d1[i * 4 + 1] = CLAMP (y, 0, 255); |
1172 | 0 | d1[i * 4 + 2] = CLAMP (u, 0, 255); |
1173 | 0 | d1[i * 4 + 3] = CLAMP (v, 0, 255); |
1174 | 0 | } |
1175 | 0 | } |
1176 | | |
1177 | | static void |
1178 | | video_converter_matrix8 (MatrixData * data, gpointer pixels) |
1179 | 0 | { |
1180 | 0 | gpointer d = pixels; |
1181 | 0 | video_orc_matrix8 (d, pixels, data->orc_p1, data->orc_p2, |
1182 | 0 | data->orc_p3, data->orc_p4, data->width); |
1183 | 0 | } |
1184 | | |
1185 | | static void |
1186 | | video_converter_matrix8_table (MatrixData * data, gpointer pixels) |
1187 | 0 | { |
1188 | 0 | gint i, width = data->width * 4; |
1189 | 0 | guint8 r, g, b; |
1190 | 0 | gint64 c = data->t_c; |
1191 | 0 | guint8 *p = pixels; |
1192 | 0 | gint64 x; |
1193 | |
1194 | 0 | for (i = 0; i < width; i += 4) { |
1195 | 0 | r = p[i + 1]; |
1196 | 0 | g = p[i + 2]; |
1197 | 0 | b = p[i + 3]; |
1198 | |
1199 | 0 | x = data->t_r[r] + data->t_g[g] + data->t_b[b] + c; |
1200 | |
1201 | 0 | p[i + 1] = x >> (32 + SCALE); |
1202 | 0 | p[i + 2] = x >> (16 + SCALE); |
1203 | 0 | p[i + 3] = x >> (0 + SCALE); |
1204 | 0 | } |
1205 | 0 | } |
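The table path above works by packing the three per-row products for one input value into disjoint 16-bit lanes of a 64-bit word: t_r[i] holds im[0][0]*i in bits 32..47, im[1][0]*i in bits 16..31 and im[2][0]*i in bits 0..15, and t_c packs the three offsets the same way. A single 64-bit add of t_r[r] + t_g[g] + t_b[b] + c therefore evaluates all three matrix rows at once, and the shifts by 32+SCALE, 16+SCALE and SCALE extract Y, U and V. This is only valid when every per-row sum, before the SCALE shift, stays within 0..65535 so that no carries or borrows cross lane boundaries -- exactly the property is_no_clip_matrix() verifies before prepare_matrix() selects this function.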
1206 | | |
1207 | | static void |
1208 | | video_converter_matrix8_AYUV_ARGB (MatrixData * data, gpointer pixels) |
1209 | 0 | { |
1210 | 0 | gpointer d = pixels; |
1211 | |
1212 | 0 | video_orc_convert_AYUV_ARGB (d, 0, pixels, 0, |
1213 | 0 | data->im[0][0], data->im[0][2], |
1214 | 0 | data->im[2][1], data->im[1][1], data->im[1][2], data->width, 1); |
1215 | 0 | } |
1216 | | |
1217 | | static gboolean |
1218 | | is_ayuv_to_rgb_matrix (MatrixData * data) |
1219 | 0 | { |
1220 | 0 | if (data->im[0][0] != data->im[1][0] || data->im[1][0] != data->im[2][0]) |
1221 | 0 | return FALSE; |
1222 | | |
1223 | 0 | if (data->im[0][1] != 0 || data->im[2][2] != 0) |
1224 | 0 | return FALSE; |
1225 | | |
1226 | 0 | return TRUE; |
1227 | 0 | } |
1228 | | |
1229 | | static gboolean |
1230 | | is_identity_matrix (MatrixData * data) |
1231 | 0 | { |
1232 | 0 | gint i, j; |
1233 | 0 | gint c = data->im[0][0]; |
1234 | | |
1235 | | /* not really checking identity because of rounding errors but given |
1236 | | * the conversions we do we just check for anything that looks like: |
1237 | | * |
1238 | | * c 0 0 0 |
1239 | | * 0 c 0 0 |
1240 | | * 0 0 c 0 |
1241 | | * 0 0 0 1 |
1242 | | */ |
1243 | 0 | for (i = 0; i < 4; i++) { |
1244 | 0 | for (j = 0; j < 4; j++) { |
1245 | 0 | if (i == j) { |
1246 | 0 | if (i == 3 && data->im[i][j] != 1) |
1247 | 0 | return FALSE; |
1248 | 0 | else if (data->im[i][j] != c) |
1249 | 0 | return FALSE; |
1250 | 0 | } else if (data->im[i][j] != 0) |
1251 | 0 | return FALSE; |
1252 | 0 | } |
1253 | 0 | } |
1254 | 0 | return TRUE; |
1255 | 0 | } |
1256 | | |
1257 | | static gboolean |
1258 | | is_no_clip_matrix (MatrixData * data) |
1259 | 0 | { |
1260 | 0 | gint i; |
1261 | 0 | static const guint8 test[8][3] = { |
1262 | 0 | {0, 0, 0}, |
1263 | 0 | {0, 0, 255}, |
1264 | 0 | {0, 255, 0}, |
1265 | 0 | {0, 255, 255}, |
1266 | 0 | {255, 0, 0}, |
1267 | 0 | {255, 0, 255}, |
1268 | 0 | {255, 255, 0}, |
1269 | 0 | {255, 255, 255} |
1270 | 0 | }; |
1271 | |
1272 | 0 | for (i = 0; i < 8; i++) { |
1273 | 0 | gint r, g, b; |
1274 | 0 | gint y, u, v; |
1275 | |
1276 | 0 | r = test[i][0]; |
1277 | 0 | g = test[i][1]; |
1278 | 0 | b = test[i][2]; |
1279 | |
1280 | 0 | y = (data->im[0][0] * r + data->im[0][1] * g + |
1281 | 0 | data->im[0][2] * b + data->im[0][3]) >> SCALE; |
1282 | 0 | u = (data->im[1][0] * r + data->im[1][1] * g + |
1283 | 0 | data->im[1][2] * b + data->im[1][3]) >> SCALE; |
1284 | 0 | v = (data->im[2][0] * r + data->im[2][1] * g + |
1285 | 0 | data->im[2][2] * b + data->im[2][3]) >> SCALE; |
1286 | |
1287 | 0 | if (y != CLAMP (y, 0, 255) || u != CLAMP (u, 0, 255) |
1288 | 0 | || v != CLAMP (v, 0, 255)) |
1289 | 0 | return FALSE; |
1290 | 0 | } |
1291 | 0 | return TRUE; |
1292 | 0 | } |
1293 | | |
1294 | | static void |
1295 | | video_converter_matrix16 (MatrixData * data, gpointer pixels) |
1296 | 0 | { |
1297 | 0 | int i; |
1298 | 0 | int r, g, b; |
1299 | 0 | int y, u, v; |
1300 | 0 | guint16 *p = pixels; |
1301 | 0 | gint width = data->width; |
1302 | |
1303 | 0 | for (i = 0; i < width; i++) { |
1304 | 0 | r = p[i * 4 + 1]; |
1305 | 0 | g = p[i * 4 + 2]; |
1306 | 0 | b = p[i * 4 + 3]; |
1307 | |
1308 | 0 | y = (data->im[0][0] * r + data->im[0][1] * g + |
1309 | 0 | data->im[0][2] * b + data->im[0][3]) >> SCALE; |
1310 | 0 | u = (data->im[1][0] * r + data->im[1][1] * g + |
1311 | 0 | data->im[1][2] * b + data->im[1][3]) >> SCALE; |
1312 | 0 | v = (data->im[2][0] * r + data->im[2][1] * g + |
1313 | 0 | data->im[2][2] * b + data->im[2][3]) >> SCALE; |
1314 | |
1315 | 0 | p[i * 4 + 1] = CLAMP (y, 0, 65535); |
1316 | 0 | p[i * 4 + 2] = CLAMP (u, 0, 65535); |
1317 | 0 | p[i * 4 + 3] = CLAMP (v, 0, 65535); |
1318 | 0 | } |
1319 | 0 | } |
1320 | | |
1321 | | |
1322 | | static void |
1323 | | prepare_matrix (GstVideoConverter * convert, MatrixData * data) |
1324 | 0 | { |
1325 | 0 | if (is_identity_matrix (data)) |
1326 | 0 | return; |
1327 | | |
1328 | 0 | color_matrix_scale_components (data, SCALE_F, SCALE_F, SCALE_F); |
1329 | 0 | color_matrix_convert (data); |
1330 | |
1331 | 0 | data->width = convert->current_width; |
1332 | |
1333 | 0 | if (convert->current_bits == 8) { |
1334 | 0 | if (!convert->unpack_rgb && convert->pack_rgb |
1335 | 0 | && is_ayuv_to_rgb_matrix (data)) { |
1336 | 0 | GST_LOG ("use fast AYUV -> RGB matrix"); |
1337 | 0 | data->matrix_func = video_converter_matrix8_AYUV_ARGB; |
1338 | 0 | } else if (is_no_clip_matrix (data)) { |
1339 | 0 | GST_LOG ("use 8bit table"); |
1340 | 0 | data->matrix_func = video_converter_matrix8_table; |
1341 | 0 | videoconvert_convert_init_tables (data); |
1342 | 0 | } else { |
1343 | 0 | gint a03, a13, a23; |
1344 | |
1345 | 0 | GST_LOG ("use 8bit matrix"); |
1346 | 0 | data->matrix_func = video_converter_matrix8; |
1347 | |
1348 | 0 | data->orc_p1 = (((guint64) (guint16) data->im[2][0]) << 48) | |
1349 | 0 | (((guint64) (guint16) data->im[1][0]) << 32) | |
1350 | 0 | (((guint64) (guint16) data->im[0][0]) << 16); |
1351 | 0 | data->orc_p2 = (((guint64) (guint16) data->im[2][1]) << 48) | |
1352 | 0 | (((guint64) (guint16) data->im[1][1]) << 32) | |
1353 | 0 | (((guint64) (guint16) data->im[0][1]) << 16); |
1354 | 0 | data->orc_p3 = (((guint64) (guint16) data->im[2][2]) << 48) | |
1355 | 0 | (((guint64) (guint16) data->im[1][2]) << 32) | |
1356 | 0 | (((guint64) (guint16) data->im[0][2]) << 16); |
1357 | |
1358 | 0 | a03 = data->im[0][3] >> SCALE; |
1359 | 0 | a13 = data->im[1][3] >> SCALE; |
1360 | 0 | a23 = data->im[2][3] >> SCALE; |
1361 | |
1362 | 0 | data->orc_p4 = (((guint64) (guint16) a23) << 48) | |
1363 | 0 | (((guint64) (guint16) a13) << 32) | (((guint64) (guint16) a03) << 16); |
1364 | 0 | } |
1365 | 0 | } else { |
1366 | 0 | GST_LOG ("use 16bit matrix"); |
1367 | 0 | data->matrix_func = video_converter_matrix16; |
1368 | 0 | } |
1369 | 0 | } |
1370 | | |
1371 | | static void |
1372 | | compute_matrix_to_RGB (GstVideoConverter * convert, MatrixData * data) |
1373 | 0 | { |
1374 | 0 | GstVideoInfo *info; |
1375 | 0 | gdouble Kr = 0, Kb = 0; |
1376 | |
1377 | 0 | info = &convert->in_info; |
1378 | |
1379 | 0 | { |
1380 | 0 | const GstVideoFormatInfo *uinfo; |
1381 | 0 | gint offset[4], scale[4]; |
1382 | |
1383 | 0 | uinfo = gst_video_format_get_info (convert->unpack_format); |
1384 | | |
1385 | | /* bring color components to [0..1.0] range */ |
1386 | 0 | gst_video_color_range_offsets (info->colorimetry.range, uinfo, offset, |
1387 | 0 | scale); |
1388 | |
1389 | 0 | color_matrix_offset_components (data, -offset[0], -offset[1], -offset[2]); |
1390 | 0 | color_matrix_scale_components (data, 1 / ((float) scale[0]), |
1391 | 0 | 1 / ((float) scale[1]), 1 / ((float) scale[2])); |
1392 | 0 | } |
1393 | |
1394 | 0 | if (!convert->unpack_rgb && !CHECK_MATRIX_NONE (convert)) { |
1395 | 0 | if (CHECK_MATRIX_OUTPUT (convert)) |
1396 | 0 | info = &convert->out_info; |
1397 | | |
1398 | | /* bring components to R'G'B' space */ |
1399 | 0 | if (gst_video_color_matrix_get_Kr_Kb (info->colorimetry.matrix, &Kr, &Kb)) |
1400 | 0 | color_matrix_YCbCr_to_RGB (data, Kr, Kb); |
1401 | 0 | } |
1402 | 0 | color_matrix_debug (data); |
1403 | 0 | } |
1404 | | |
1405 | | static void |
1406 | | compute_matrix_to_YUV (GstVideoConverter * convert, MatrixData * data, |
1407 | | gboolean force) |
1408 | 0 | { |
1409 | 0 | GstVideoInfo *info; |
1410 | 0 | gdouble Kr = 0, Kb = 0; |
1411 | |
1412 | 0 | if (force || (!convert->pack_rgb && !CHECK_MATRIX_NONE (convert))) { |
1413 | 0 | if (CHECK_MATRIX_INPUT (convert)) |
1414 | 0 | info = &convert->in_info; |
1415 | 0 | else |
1416 | 0 | info = &convert->out_info; |
1417 | | |
1418 | | /* bring components to YCbCr space */ |
1419 | 0 | if (gst_video_color_matrix_get_Kr_Kb (info->colorimetry.matrix, &Kr, &Kb)) |
1420 | 0 | color_matrix_RGB_to_YCbCr (data, Kr, Kb); |
1421 | 0 | } |
1422 | |
1423 | 0 | info = &convert->out_info; |
1424 | |
1425 | 0 | { |
1426 | 0 | const GstVideoFormatInfo *uinfo; |
1427 | 0 | gint offset[4], scale[4]; |
1428 | |
1429 | 0 | uinfo = gst_video_format_get_info (convert->pack_format); |
1430 | | |
1431 | | /* bring color components to nominal range */ |
1432 | 0 | gst_video_color_range_offsets (info->colorimetry.range, uinfo, offset, |
1433 | 0 | scale); |
1434 | |
1435 | 0 | color_matrix_scale_components (data, (float) scale[0], (float) scale[1], |
1436 | 0 | (float) scale[2]); |
1437 | 0 | color_matrix_offset_components (data, offset[0], offset[1], offset[2]); |
1438 | 0 | } |
1439 | |
1440 | 0 | color_matrix_debug (data); |
1441 | 0 | } |
1442 | | |
1443 | | |
1444 | | static void |
1445 | | gamma_convert_u8_u16 (GammaData * data, gpointer dest, gpointer src) |
1446 | 0 | { |
1447 | 0 | gint i; |
1448 | 0 | guint8 *s = src; |
1449 | 0 | guint16 *d = dest; |
1450 | 0 | guint16 *table = data->gamma_table; |
1451 | 0 | gint width = data->width * 4; |
1452 | |
1453 | 0 | for (i = 0; i < width; i += 4) { |
1454 | 0 | d[i + 0] = (s[i] << 8) | s[i]; |
1455 | 0 | d[i + 1] = table[s[i + 1]]; |
1456 | 0 | d[i + 2] = table[s[i + 2]]; |
1457 | 0 | d[i + 3] = table[s[i + 3]]; |
1458 | 0 | } |
1459 | 0 | } |
1460 | | |
1461 | | static void |
1462 | | gamma_convert_u16_u8 (GammaData * data, gpointer dest, gpointer src) |
1463 | 0 | { |
1464 | 0 | gint i; |
1465 | 0 | guint16 *s = src; |
1466 | 0 | guint8 *d = dest; |
1467 | 0 | guint8 *table = data->gamma_table; |
1468 | 0 | gint width = data->width * 4; |
1469 | |
1470 | 0 | for (i = 0; i < width; i += 4) { |
1471 | 0 | d[i + 0] = s[i] >> 8; |
1472 | 0 | d[i + 1] = table[s[i + 1]]; |
1473 | 0 | d[i + 2] = table[s[i + 2]]; |
1474 | 0 | d[i + 3] = table[s[i + 3]]; |
1475 | 0 | } |
1476 | 0 | } |
1477 | | |
1478 | | static void |
1479 | | gamma_convert_u16_u16 (GammaData * data, gpointer dest, gpointer src) |
1480 | 0 | { |
1481 | 0 | gint i; |
1482 | 0 | guint16 *s = src; |
1483 | 0 | guint16 *d = dest; |
1484 | 0 | guint16 *table = data->gamma_table; |
1485 | 0 | gint width = data->width * 4; |
1486 | |
1487 | 0 | for (i = 0; i < width; i += 4) { |
1488 | 0 | d[i + 0] = s[i]; |
1489 | 0 | d[i + 1] = table[s[i + 1]]; |
1490 | 0 | d[i + 2] = table[s[i + 2]]; |
1491 | 0 | d[i + 3] = table[s[i + 3]]; |
1492 | 0 | } |
1493 | 0 | } |
1494 | | |
1495 | | static void |
1496 | | setup_gamma_decode (GstVideoConverter * convert) |
1497 | 0 | { |
1498 | 0 | GstVideoTransferFunction func; |
1499 | 0 | guint16 *t; |
1500 | 0 | gint i; |
1501 | |
1502 | 0 | func = convert->in_info.colorimetry.transfer; |
1503 | |
1504 | 0 | convert->gamma_dec.width = convert->current_width; |
1505 | 0 | if (convert->gamma_dec.gamma_table) { |
1506 | 0 | GST_LOG ("gamma decode already set up"); |
1507 | 0 | } else if (convert->current_bits == 8) { |
1508 | 0 | GST_LOG ("gamma decode 8->16: %d", func); |
1509 | 0 | convert->gamma_dec.gamma_func = gamma_convert_u8_u16; |
1510 | 0 | t = convert->gamma_dec.gamma_table = g_malloc (sizeof (guint16) * 256); |
1511 | |
1512 | 0 | for (i = 0; i < 256; i++) |
1513 | 0 | t[i] = |
1514 | 0 | rint (gst_video_transfer_function_decode (func, i / 255.0) * 65535.0); |
1515 | 0 | } else { |
1516 | 0 | GST_LOG ("gamma decode 16->16: %d", func); |
1517 | 0 | convert->gamma_dec.gamma_func = gamma_convert_u16_u16; |
1518 | 0 | t = convert->gamma_dec.gamma_table = g_malloc (sizeof (guint16) * 65536); |
1519 | |
1520 | 0 | for (i = 0; i < 65536; i++) |
1521 | 0 | t[i] = |
1522 | 0 | rint (gst_video_transfer_function_decode (func, |
1523 | 0 | i / 65535.0) * 65535.0); |
1524 | 0 | } |
1525 | 0 | convert->current_bits = 16; |
1526 | 0 | convert->current_pstride = 8; |
1527 | 0 | convert->current_format = GST_VIDEO_FORMAT_ARGB64; |
1528 | 0 | } |
1529 | | |
1530 | | static void |
1531 | | setup_gamma_encode (GstVideoConverter * convert, gint target_bits) |
1532 | 0 | { |
1533 | 0 | GstVideoTransferFunction func; |
1534 | 0 | gint i; |
1535 | |
|
1536 | 0 | func = convert->out_info.colorimetry.transfer; |
1537 | |
|
1538 | 0 | convert->gamma_enc.width = convert->current_width; |
1539 | 0 | if (convert->gamma_enc.gamma_table) { |
1540 | 0 | GST_LOG ("gamma encode already set up"); |
1541 | 0 | } else if (target_bits == 8) { |
1542 | 0 | guint8 *t; |
1543 | |
1544 | 0 | GST_LOG ("gamma encode 16->8: %d", func); |
1545 | 0 | convert->gamma_enc.gamma_func = gamma_convert_u16_u8; |
1546 | 0 | t = convert->gamma_enc.gamma_table = g_malloc (sizeof (guint8) * 65536); |
1547 | |
1548 | 0 | for (i = 0; i < 65536; i++) |
1549 | 0 | t[i] = |
1550 | 0 | rint (gst_video_transfer_function_encode (func, i / 65535.0) * 255.0); |
1551 | 0 | } else { |
1552 | 0 | guint16 *t; |
1553 | |
1554 | 0 | GST_LOG ("gamma encode 16->16: %d", func); |
1555 | 0 | convert->gamma_enc.gamma_func = gamma_convert_u16_u16; |
1556 | 0 | t = convert->gamma_enc.gamma_table = g_malloc (sizeof (guint16) * 65536); |
1557 | |
1558 | 0 | for (i = 0; i < 65536; i++) |
1559 | 0 | t[i] = |
1560 | 0 | rint (gst_video_transfer_function_encode (func, |
1561 | 0 | i / 65535.0) * 65535.0); |
1562 | 0 | } |
1563 | 0 | } |
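Each table entry built above is just a cached evaluation of the colorimetry transfer function; computing one entry of the 8->16 decode table directly would look like this (demo_gamma_decode_entry is illustrative, and gst_video_transfer_function_decode() requires GStreamer >= 1.18):

static guint16
demo_gamma_decode_entry (GstVideoTransferFunction func, guint8 sample)
{
  /* normalize to [0,1], linearize, then requantize to 16 bits, matching
   * the 8->16 loop in setup_gamma_decode() above */
  return (guint16) rint (gst_video_transfer_function_decode (func,
          sample / 255.0) * 65535.0);
}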
1564 | | |
1565 | | static GstLineCache * |
1566 | | chain_convert_to_RGB (GstVideoConverter * convert, GstLineCache * prev, |
1567 | | gint idx) |
1568 | 0 | { |
1569 | 0 | gboolean do_gamma; |
1570 | |
1571 | 0 | do_gamma = CHECK_GAMMA_REMAP (convert); |
1572 | |
1573 | 0 | if (do_gamma) { |
1574 | 0 | gint scale; |
1575 | | |
1576 | | /* Set up conversion matrices if needed, but only for the first thread */ |
1577 | 0 | if (idx == 0 && !convert->unpack_rgb) { |
1578 | 0 | color_matrix_set_identity (&convert->to_RGB_matrix); |
1579 | 0 | compute_matrix_to_RGB (convert, &convert->to_RGB_matrix); |
1580 | | |
1581 | | /* matrix is in 0..1 range, scale to current bits */ |
1582 | 0 | GST_LOG ("chain RGB convert"); |
1583 | 0 | scale = 1 << convert->current_bits; |
1584 | 0 | color_matrix_scale_components (&convert->to_RGB_matrix, |
1585 | 0 | (float) scale, (float) scale, (float) scale); |
1586 | |
|
1587 | 0 | prepare_matrix (convert, &convert->to_RGB_matrix); |
1588 | |
|
1589 | 0 | if (convert->current_bits == 8) |
1590 | 0 | convert->current_format = GST_VIDEO_FORMAT_ARGB; |
1591 | 0 | else |
1592 | 0 | convert->current_format = GST_VIDEO_FORMAT_ARGB64; |
1593 | 0 | } |
1594 | |
|
1595 | 0 | prev = convert->to_RGB_lines[idx] = gst_line_cache_new (prev); |
1596 | 0 | prev->write_input = TRUE; |
1597 | 0 | prev->pass_alloc = FALSE; |
1598 | 0 | prev->n_lines = 1; |
1599 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
1600 | 0 | gst_line_cache_set_need_line_func (prev, |
1601 | 0 | do_convert_to_RGB_lines, idx, convert, NULL); |
1602 | |
|
1603 | 0 | GST_LOG ("chain gamma decode"); |
1604 | 0 | setup_gamma_decode (convert); |
1605 | 0 | } |
1606 | 0 | return prev; |
1607 | 0 | } |
1608 | | |
1609 | | static GstLineCache * |
1610 | | chain_hscale (GstVideoConverter * convert, GstLineCache * prev, gint idx) |
1611 | 0 | { |
1612 | 0 | gint method; |
1613 | 0 | guint taps; |
1614 | |
|
1615 | 0 | method = GET_OPT_RESAMPLER_METHOD (convert); |
1616 | 0 | taps = GET_OPT_RESAMPLER_TAPS (convert); |
1617 | |
|
1618 | 0 | convert->h_scaler[idx] = |
1619 | 0 | gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_NONE, taps, |
1620 | 0 | convert->in_width, convert->out_width, convert->config); |
1621 | |
|
1622 | 0 | gst_video_scaler_get_coeff (convert->h_scaler[idx], 0, NULL, &taps); |
1623 | |
|
1624 | 0 | GST_LOG ("chain hscale %d->%d, taps %d, method %d", |
1625 | 0 | convert->in_width, convert->out_width, taps, method); |
1626 | |
|
1627 | 0 | convert->current_width = convert->out_width; |
1628 | 0 | convert->h_scale_format = convert->current_format; |
1629 | |
|
1630 | 0 | prev = convert->hscale_lines[idx] = gst_line_cache_new (prev); |
1631 | 0 | prev->write_input = FALSE; |
1632 | 0 | prev->pass_alloc = FALSE; |
1633 | 0 | prev->n_lines = 1; |
1634 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
1635 | 0 | gst_line_cache_set_need_line_func (prev, do_hscale_lines, idx, convert, NULL); |
1636 | |
|
1637 | 0 | return prev; |
1638 | 0 | } |
1639 | | |
1640 | | static GstLineCache * |
1641 | | chain_vscale (GstVideoConverter * convert, GstLineCache * prev, gint idx) |
1642 | 0 | { |
1643 | 0 | gint method; |
1644 | 0 | guint taps, taps_i = 0; |
1645 | 0 | gint backlog = 0; |
1646 | |
|
1647 | 0 | method = GET_OPT_RESAMPLER_METHOD (convert); |
1648 | 0 | taps = GET_OPT_RESAMPLER_TAPS (convert); |
1649 | |
|
1650 | 0 | if (GST_VIDEO_INFO_IS_INTERLACED (&convert->in_info) |
1651 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&convert->in_info) != |
1652 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE)) { |
1653 | 0 | convert->v_scaler_i[idx] = |
1654 | 0 | gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_INTERLACED, taps, |
1655 | 0 | convert->in_height, convert->out_height, convert->config); |
1656 | |
|
1657 | 0 | gst_video_scaler_get_coeff (convert->v_scaler_i[idx], 0, NULL, &taps_i); |
1658 | 0 | backlog = taps_i; |
1659 | 0 | } |
1660 | 0 | convert->v_scaler_p[idx] = |
1661 | 0 | gst_video_scaler_new (method, 0, taps, convert->in_height, |
1662 | 0 | convert->out_height, convert->config); |
1663 | 0 | convert->v_scale_width = convert->current_width; |
1664 | 0 | convert->v_scale_format = convert->current_format; |
1665 | 0 | convert->current_height = convert->out_height; |
1666 | |
|
1667 | 0 | gst_video_scaler_get_coeff (convert->v_scaler_p[idx], 0, NULL, &taps); |
1668 | |
|
1669 | 0 | GST_LOG ("chain vscale %d->%d, taps %d, method %d, backlog %d", |
1670 | 0 | convert->in_height, convert->out_height, taps, method, backlog); |
1671 | |
|
1672 | 0 | prev->backlog = backlog; |
1673 | 0 | prev = convert->vscale_lines[idx] = gst_line_cache_new (prev); |
1674 | 0 | prev->pass_alloc = (taps == 1); |
1675 | 0 | prev->write_input = FALSE; |
1676 | 0 | prev->n_lines = MAX (taps_i, taps); |
1677 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
1678 | 0 | gst_line_cache_set_need_line_func (prev, do_vscale_lines, idx, convert, NULL); |
1679 | |
|
1680 | 0 | return prev; |
1681 | 0 | } |
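
/* Illustrative sketch (not part of the upstream file): the horizontal and
 * vertical scaler chains above are driven by the resampler options in the
 * converter config.  A caller could, for example, request Lanczos with an
 * explicit tap count; the values below are assumptions for the example. */
#if 0
static GstStructure *
example_lanczos_config (void)
{
  return gst_structure_new ("GstVideoConverter",
      GST_VIDEO_CONVERTER_OPT_RESAMPLER_METHOD,
      GST_TYPE_VIDEO_RESAMPLER_METHOD, GST_VIDEO_RESAMPLER_METHOD_LANCZOS,
      GST_VIDEO_CONVERTER_OPT_RESAMPLER_TAPS, G_TYPE_UINT, 4, NULL);
}
#endif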
1682 | | |
1683 | | static GstLineCache * |
1684 | | chain_scale (GstVideoConverter * convert, GstLineCache * prev, gboolean force, |
1685 | | gint idx) |
1686 | 0 | { |
1687 | 0 | gint s0, s1, s2, s3; |
1688 | |
|
1689 | 0 | s0 = convert->current_width * convert->current_height; |
1690 | 0 | s3 = convert->out_width * convert->out_height; |
1691 | |
|
1692 | 0 | GST_LOG ("in pixels %d <> out pixels %d", s0, s3); |
1693 | |
|
1694 | 0 | if (s3 <= s0 || force) { |
1695 | | /* we are making the image smaller or are forced to resample */ |
1696 | 0 | s1 = convert->out_width * convert->current_height; |
1697 | 0 | s2 = convert->current_width * convert->out_height; |
1698 | |
|
1699 | 0 | GST_LOG ("%d <> %d", s1, s2); |
1700 | |
|
1701 | 0 | if (s1 <= s2) { |
1702 | | /* h scaling first produces fewer pixels */
1703 | 0 | if (convert->current_width != convert->out_width) |
1704 | 0 | prev = chain_hscale (convert, prev, idx); |
1705 | 0 | if (convert->current_height != convert->out_height) |
1706 | 0 | prev = chain_vscale (convert, prev, idx); |
1707 | 0 | } else { |
1708 | | /* v scaling first produces fewer pixels */
1709 | 0 | if (convert->current_height != convert->out_height) |
1710 | 0 | prev = chain_vscale (convert, prev, idx); |
1711 | 0 | if (convert->current_width != convert->out_width) |
1712 | 0 | prev = chain_hscale (convert, prev, idx); |
1713 | 0 | } |
1714 | 0 | } |
1715 | 0 | return prev; |
1716 | 0 | } |
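
/* Worked example (editorial, not from the upstream file): for a
 * 1920x1080 -> 640x480 downscale, s0 = 1920*1080 = 2073600 and
 * s3 = 640*480 = 307200, so we are making the image smaller.  Then
 * s1 = 640*1080 = 691200 (hscale first) and s2 = 1920*480 = 921600
 * (vscale first); since s1 <= s2, the horizontal scaler runs first and the
 * vertical scaler only has to process 640-pixel lines. */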
1717 | | |
1718 | | static GstLineCache * |
1719 | | chain_convert (GstVideoConverter * convert, GstLineCache * prev, gint idx) |
1720 | 0 | { |
1721 | 0 | gboolean do_gamma, do_conversion, pass_alloc = FALSE; |
1722 | 0 | gboolean same_matrix, same_primaries, same_bits; |
1723 | 0 | MatrixData p1, p2; |
1724 | |
|
1725 | 0 | same_bits = convert->unpack_bits == convert->pack_bits; |
1726 | 0 | if (CHECK_MATRIX_NONE (convert)) { |
1727 | 0 | same_matrix = TRUE; |
1728 | 0 | } else { |
1729 | 0 | same_matrix = |
1730 | 0 | convert->in_info.colorimetry.matrix == |
1731 | 0 | convert->out_info.colorimetry.matrix; |
1732 | 0 | } |
1733 | |
|
1734 | 0 | if (CHECK_PRIMARIES_NONE (convert)) { |
1735 | 0 | same_primaries = TRUE; |
1736 | 0 | } else { |
1737 | 0 | same_primaries = |
1738 | 0 | gst_video_color_primaries_is_equivalent (convert->in_info. |
1739 | 0 | colorimetry.primaries, convert->out_info.colorimetry.primaries); |
1740 | 0 | } |
1741 | |
|
1742 | 0 | GST_LOG ("matrix %d -> %d (%d)", convert->in_info.colorimetry.matrix, |
1743 | 0 | convert->out_info.colorimetry.matrix, same_matrix); |
1744 | 0 | GST_LOG ("bits %d -> %d (%d)", convert->unpack_bits, convert->pack_bits, |
1745 | 0 | same_bits); |
1746 | 0 | GST_LOG ("primaries %d -> %d (%d)", convert->in_info.colorimetry.primaries, |
1747 | 0 | convert->out_info.colorimetry.primaries, same_primaries); |
1748 | |
|
1749 | 0 | color_matrix_set_identity (&convert->convert_matrix); |
1750 | |
|
1751 | 0 | if (!same_primaries) { |
1752 | 0 | const GstVideoColorPrimariesInfo *pi; |
1753 | | |
1754 | | /* Convert from RGB_input to RGB_output via XYZ |
1755 | | * res = XYZ_to_RGB_output ( RGB_to_XYZ_input ( input ) ) |
1756 | | * or in matrix form:
1757 | | * RGB_output = XYZ_to_RGB_output_matrix * RGB_TO_XYZ_input_matrix * RGB_input |
1758 | | * |
1759 | | * RGB_input here is whatever convert_matrix already holds;
1760 | | * after this step convert_matrix holds the RGB_output transform.
1761 | | */ |
1762 | | |
1763 | | /* Convert input RGB to XYZ */ |
1764 | 0 | pi = gst_video_color_primaries_get_info (convert->in_info.colorimetry. |
1765 | 0 | primaries); |
1766 | | /* Get the RGB_TO_XYZ_input_matrix */ |
1767 | 0 | color_matrix_RGB_to_XYZ (&p1, pi->Rx, pi->Ry, pi->Gx, pi->Gy, pi->Bx, |
1768 | 0 | pi->By, pi->Wx, pi->Wy); |
1769 | 0 | GST_LOG ("to XYZ matrix"); |
1770 | 0 | color_matrix_debug (&p1); |
1771 | 0 | GST_LOG ("current matrix"); |
1772 | | /* convert_matrix = RGB_TO_XYZ_input_matrix * input_RGB */ |
1773 | 0 | color_matrix_multiply (&convert->convert_matrix, &convert->convert_matrix, |
1774 | 0 | &p1); |
1775 | 0 | color_matrix_debug (&convert->convert_matrix); |
1776 | | |
1777 | | /* Convert XYZ to output RGB */ |
1778 | 0 | pi = gst_video_color_primaries_get_info (convert->out_info.colorimetry. |
1779 | 0 | primaries); |
1780 | | /* Calculate the XYZ_to_RGB_output_matrix |
1781 | | * * Get the RGB_TO_XYZ_output_matrix |
1782 | | * * invert it |
1783 | | * * store in p2 |
1784 | | */ |
1785 | 0 | color_matrix_RGB_to_XYZ (&p2, pi->Rx, pi->Ry, pi->Gx, pi->Gy, pi->Bx, |
1786 | 0 | pi->By, pi->Wx, pi->Wy); |
1787 | 0 | color_matrix_invert (&p2, &p2); |
1788 | 0 | GST_LOG ("to RGB matrix"); |
1789 | 0 | color_matrix_debug (&p2); |
1790 | | /* Finally: |
1791 | | * convert_matrix = XYZ_to_RGB_output_matrix * RGB_TO_XYZ_input_matrix * RGB_input |
1792 | | * = XYZ_to_RGB_output_matrix * convert_matrix |
1793 | | * = p2 * convert_matrix |
1794 | | */ |
1795 | 0 | color_matrix_multiply (&convert->convert_matrix, &p2, |
1796 | 0 | &convert->convert_matrix); |
1797 | 0 | GST_LOG ("current matrix"); |
1798 | 0 | color_matrix_debug (&convert->convert_matrix); |
1799 | 0 | } |
1800 | |
|
1801 | 0 | do_gamma = CHECK_GAMMA_REMAP (convert); |
1802 | 0 | if (!do_gamma) { |
1803 | |
|
1804 | 0 | convert->in_bits = convert->unpack_bits; |
1805 | 0 | convert->out_bits = convert->pack_bits; |
1806 | |
|
1807 | 0 | if (!same_bits || !same_matrix || !same_primaries) { |
1808 | | /* no gamma, combine all conversions into 1 */ |
1809 | 0 | if (convert->in_bits < convert->out_bits) { |
1810 | 0 | gint scale = 1 << (convert->out_bits - convert->in_bits); |
1811 | 0 | color_matrix_scale_components (&convert->convert_matrix, |
1812 | 0 | 1 / (float) scale, 1 / (float) scale, 1 / (float) scale); |
1813 | 0 | } |
1814 | 0 | GST_LOG ("to RGB matrix"); |
1815 | 0 | compute_matrix_to_RGB (convert, &convert->convert_matrix); |
1816 | 0 | GST_LOG ("current matrix"); |
1817 | 0 | color_matrix_debug (&convert->convert_matrix); |
1818 | |
|
1819 | 0 | GST_LOG ("to YUV matrix"); |
1820 | 0 | compute_matrix_to_YUV (convert, &convert->convert_matrix, FALSE); |
1821 | 0 | GST_LOG ("current matrix"); |
1822 | 0 | color_matrix_debug (&convert->convert_matrix); |
1823 | 0 | if (convert->in_bits > convert->out_bits) { |
1824 | 0 | gint scale = 1 << (convert->in_bits - convert->out_bits); |
1825 | 0 | color_matrix_scale_components (&convert->convert_matrix, |
1826 | 0 | (float) scale, (float) scale, (float) scale); |
1827 | 0 | } |
1828 | 0 | convert->current_bits = MAX (convert->in_bits, convert->out_bits); |
1829 | |
|
1830 | 0 | do_conversion = TRUE; |
1831 | 0 | if (!same_matrix || !same_primaries) { |
1832 | 0 | if (idx == 0) |
1833 | 0 | prepare_matrix (convert, &convert->convert_matrix); |
1834 | 0 | } |
1835 | 0 | if (convert->in_bits == convert->out_bits) |
1836 | 0 | pass_alloc = TRUE; |
1837 | 0 | } else |
1838 | 0 | do_conversion = FALSE; |
1839 | |
|
1840 | 0 | convert->current_bits = convert->pack_bits; |
1841 | 0 | convert->current_format = convert->pack_format; |
1842 | 0 | convert->current_pstride = convert->current_bits >> 1; |
1843 | 0 | } else { |
1844 | | /* we did gamma, just do colorspace conversion if needed */ |
1845 | 0 | if (same_primaries) { |
1846 | 0 | do_conversion = FALSE; |
1847 | 0 | } else { |
1848 | 0 | if (idx == 0) |
1849 | 0 | prepare_matrix (convert, &convert->convert_matrix); |
1850 | 0 | convert->in_bits = convert->out_bits = 16; |
1851 | 0 | pass_alloc = TRUE; |
1852 | 0 | do_conversion = TRUE; |
1853 | 0 | } |
1854 | 0 | } |
1855 | |
|
1856 | 0 | if (do_conversion) { |
1857 | 0 | GST_LOG ("chain conversion"); |
1858 | 0 | prev = convert->convert_lines[idx] = gst_line_cache_new (prev); |
1859 | 0 | prev->write_input = TRUE; |
1860 | 0 | prev->pass_alloc = pass_alloc; |
1861 | 0 | prev->n_lines = 1; |
1862 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
1863 | 0 | gst_line_cache_set_need_line_func (prev, |
1864 | 0 | do_convert_lines, idx, convert, NULL); |
1865 | 0 | } |
1866 | 0 | return prev; |
1867 | 0 | } |
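
/* Illustrative sketch (not part of the upstream file): whether chain_convert()
 * builds the XYZ-based primaries conversion above is governed by the
 * primaries-mode option (checked via CHECK_PRIMARIES_NONE).  A caller that
 * wants primaries conversion, e.g. for BT.709 -> BT.2020 content, could
 * request it like this: */
#if 0
static GstStructure *
example_primaries_config (void)
{
  return gst_structure_new ("GstVideoConverter",
      GST_VIDEO_CONVERTER_OPT_PRIMARIES_MODE,
      GST_TYPE_VIDEO_PRIMARIES_MODE, GST_VIDEO_PRIMARIES_MODE_FAST, NULL);
}
#endif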
1868 | | |
1869 | | static void |
1870 | | convert_set_alpha_u8 (GstVideoConverter * convert, gpointer pixels, gint width) |
1871 | 0 | { |
1872 | 0 | guint8 *p = pixels; |
1873 | 0 | guint8 alpha = MIN (convert->alpha_value, 255); |
1874 | 0 | int i; |
1875 | |
|
1876 | 0 | for (i = 0; i < width; i++) |
1877 | 0 | p[i * 4] = alpha; |
1878 | 0 | } |
1879 | | |
1880 | | static void |
1881 | | convert_set_alpha_u16 (GstVideoConverter * convert, gpointer pixels, gint width) |
1882 | 0 | { |
1883 | 0 | guint16 *p = pixels; |
1884 | 0 | guint16 alpha; |
1885 | 0 | int i; |
1886 | |
|
1887 | 0 | alpha = MIN (convert->alpha_value, 255); |
1888 | 0 | alpha |= alpha << 8; |
1889 | |
|
1890 | 0 | for (i = 0; i < width; i++) |
1891 | 0 | p[i * 4] = alpha; |
1892 | 0 | } |
1893 | | |
1894 | | static void |
1895 | | convert_mult_alpha_u8 (GstVideoConverter * convert, gpointer pixels, gint width) |
1896 | 0 | { |
1897 | 0 | guint8 *p = pixels; |
1898 | 0 | guint alpha = convert->alpha_value; |
1899 | 0 | int i; |
1900 | |
|
1901 | 0 | for (i = 0; i < width; i++) { |
1902 | 0 | gint a = (p[i * 4] * alpha) / 255; |
1903 | 0 | p[i * 4] = CLAMP (a, 0, 255); |
1904 | 0 | } |
1905 | 0 | } |
1906 | | |
1907 | | static void |
1908 | | convert_mult_alpha_u16 (GstVideoConverter * convert, gpointer pixels, |
1909 | | gint width) |
1910 | 0 | { |
1911 | 0 | guint16 *p = pixels; |
1912 | 0 | guint alpha = convert->alpha_value; |
1913 | 0 | int i; |
1914 | |
|
1915 | 0 | for (i = 0; i < width; i++) { |
1916 | 0 | gint a = (p[i * 4] * alpha) / 255; |
1917 | 0 | p[i * 4] = CLAMP (a, 0, 65535); |
1918 | 0 | } |
1919 | 0 | } |
1920 | | |
1921 | | static GstLineCache * |
1922 | | chain_alpha (GstVideoConverter * convert, GstLineCache * prev, gint idx) |
1923 | 0 | { |
1924 | 0 | switch (convert->alpha_mode) { |
1925 | 0 | case ALPHA_MODE_NONE: |
1926 | 0 | case ALPHA_MODE_COPY: |
1927 | 0 | return prev; |
1928 | | |
1929 | 0 | case ALPHA_MODE_SET: |
1930 | 0 | if (convert->current_bits == 8) |
1931 | 0 | convert->alpha_func = convert_set_alpha_u8; |
1932 | 0 | else |
1933 | 0 | convert->alpha_func = convert_set_alpha_u16; |
1934 | 0 | break; |
1935 | 0 | case ALPHA_MODE_MULT: |
1936 | 0 | if (convert->current_bits == 8) |
1937 | 0 | convert->alpha_func = convert_mult_alpha_u8; |
1938 | 0 | else |
1939 | 0 | convert->alpha_func = convert_mult_alpha_u16; |
1940 | 0 | break; |
1941 | 0 | } |
1942 | | |
1943 | 0 | GST_LOG ("chain alpha mode %d", convert->alpha_mode); |
1944 | 0 | prev = convert->alpha_lines[idx] = gst_line_cache_new (prev); |
1945 | 0 | prev->write_input = TRUE; |
1946 | 0 | prev->pass_alloc = TRUE; |
1947 | 0 | prev->n_lines = 1; |
1948 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
1949 | 0 | gst_line_cache_set_need_line_func (prev, do_alpha_lines, idx, convert, NULL); |
1950 | |
|
1951 | 0 | return prev; |
1952 | 0 | } |
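
/* Illustrative sketch (not part of the upstream file): the alpha modes handled
 * above map onto the converter's alpha options.  Scaling the output alpha to
 * 50% could be requested like this; the value is an example only. */
#if 0
static GstStructure *
example_alpha_config (void)
{
  return gst_structure_new ("GstVideoConverter",
      GST_VIDEO_CONVERTER_OPT_ALPHA_MODE,
      GST_TYPE_VIDEO_ALPHA_MODE, GST_VIDEO_ALPHA_MODE_MULT,
      GST_VIDEO_CONVERTER_OPT_ALPHA_VALUE, G_TYPE_DOUBLE, 0.5, NULL);
}
#endif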
1953 | | |
1954 | | static GstLineCache * |
1955 | | chain_convert_to_YUV (GstVideoConverter * convert, GstLineCache * prev, |
1956 | | gint idx) |
1957 | 0 | { |
1958 | 0 | gboolean do_gamma; |
1959 | |
|
1960 | 0 | do_gamma = CHECK_GAMMA_REMAP (convert); |
1961 | |
|
1962 | 0 | if (do_gamma) { |
1963 | 0 | gint scale; |
1964 | |
|
1965 | 0 | GST_LOG ("chain gamma encode"); |
1966 | 0 | setup_gamma_encode (convert, convert->pack_bits); |
1967 | |
|
1968 | 0 | convert->current_bits = convert->pack_bits; |
1969 | 0 | convert->current_pstride = convert->current_bits >> 1; |
1970 | |
|
1971 | 0 | if (idx == 0 && !convert->pack_rgb) { |
1972 | 0 | color_matrix_set_identity (&convert->to_YUV_matrix); |
1973 | | |
1974 | | /* When gamma remap is enabled, we
1975 | |  * 1) convert to ARGB64 linear RGB
1976 | |  *    - if the input is 8 bits, convert to ARGB and scale to 16 bits
1977 | |  *      with gamma decoding in one step
1978 | |  *    - otherwise convert to ARGB64 and gamma decode
1979 | |  * 2) scale/convert etc.
1980 | |  * 3) gamma encode
1981 | |  *
1982 | |  * So the source data for the do_convert_to_YUV_lines() method is always
1983 | |  * ARGB64.
1984 | |  *
1985 | |  * Then, if the output unpack format is 8 bits, setup_gamma_encode() scales
1986 | |  * ARGB64 down to ARGB as part of gamma encoding; otherwise it stays
1987 | |  * ARGB64.
1988 | |  *
1989 | |  * Finally this to_YUV_matrix is applied. Since compute_matrix_to_YUV()
1990 | |  * expects RGB in the [0, 1.0] range as input, scale the identity matrix
1991 | |  * down to that range here, otherwise the offsets of the matrix would be
1992 | |  * very wrong.
1993 | |  */
1994 | 0 | GST_LOG ("chain YUV convert"); |
1995 | 0 | scale = 1 << convert->pack_bits; |
1996 | 0 | color_matrix_scale_components (&convert->to_YUV_matrix, |
1997 | 0 | 1 / (float) scale, 1 / (float) scale, 1 / (float) scale); |
1998 | |
|
1999 | 0 | compute_matrix_to_YUV (convert, &convert->to_YUV_matrix, FALSE); |
2000 | 0 | prepare_matrix (convert, &convert->to_YUV_matrix); |
2001 | 0 | } |
2002 | 0 | convert->current_format = convert->pack_format; |
2003 | |
|
2004 | 0 | prev = convert->to_YUV_lines[idx] = gst_line_cache_new (prev); |
2005 | 0 | prev->write_input = FALSE; |
2006 | 0 | prev->pass_alloc = FALSE; |
2007 | 0 | prev->n_lines = 1; |
2008 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
2009 | 0 | gst_line_cache_set_need_line_func (prev, |
2010 | 0 | do_convert_to_YUV_lines, idx, convert, NULL); |
2011 | 0 | } |
2012 | |
|
2013 | 0 | return prev; |
2014 | 0 | } |
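
/* Illustrative sketch (not part of the upstream file): the gamma decode and
 * encode stages wired up above only become active when gamma remapping is
 * enabled (CHECK_GAMMA_REMAP), which a caller can request with the
 * gamma-mode option: */
#if 0
static GstStructure *
example_gamma_remap_config (void)
{
  return gst_structure_new ("GstVideoConverter",
      GST_VIDEO_CONVERTER_OPT_GAMMA_MODE,
      GST_TYPE_VIDEO_GAMMA_MODE, GST_VIDEO_GAMMA_MODE_REMAP, NULL);
}
#endif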
2015 | | |
2016 | | static GstLineCache * |
2017 | | chain_downsample (GstVideoConverter * convert, GstLineCache * prev, gint idx) |
2018 | 0 | { |
2019 | 0 | if (convert->downsample_p[idx] || convert->downsample_i[idx]) { |
2020 | 0 | GST_LOG ("chain downsample"); |
2021 | 0 | prev = convert->downsample_lines[idx] = gst_line_cache_new (prev); |
2022 | 0 | prev->write_input = TRUE; |
2023 | 0 | prev->pass_alloc = TRUE; |
2024 | | /* XXX: why this hardcoded value? */ |
2025 | 0 | prev->n_lines = 5; |
2026 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
2027 | 0 | gst_line_cache_set_need_line_func (prev, |
2028 | 0 | do_downsample_lines, idx, convert, NULL); |
2029 | 0 | } |
2030 | 0 | return prev; |
2031 | 0 | } |
2032 | | |
2033 | | static GstLineCache * |
2034 | | chain_dither (GstVideoConverter * convert, GstLineCache * prev, gint idx) |
2035 | 0 | { |
2036 | 0 | gint i; |
2037 | 0 | gboolean do_dither = FALSE; |
2038 | 0 | GstVideoDitherFlags flags = 0; |
2039 | 0 | GstVideoDitherMethod method; |
2040 | 0 | guint quant[4], target_quant; |
2041 | |
|
2042 | 0 | method = GET_OPT_DITHER_METHOD (convert); |
2043 | 0 | if (method == GST_VIDEO_DITHER_NONE) |
2044 | 0 | return prev; |
2045 | | |
2046 | 0 | target_quant = GET_OPT_DITHER_QUANTIZATION (convert); |
2047 | 0 | GST_LOG ("method %d, target-quantization %d", method, target_quant); |
2048 | |
|
2049 | 0 | if (convert->pack_pal) { |
2050 | 0 | quant[0] = 47; |
2051 | 0 | quant[1] = 47; |
2052 | 0 | quant[2] = 47; |
2053 | 0 | quant[3] = 1; |
2054 | 0 | do_dither = TRUE; |
2055 | 0 | } else { |
2056 | 0 | for (i = 0; i < GST_VIDEO_MAX_COMPONENTS; i++) { |
2057 | 0 | gint depth; |
2058 | |
|
2059 | 0 | depth = convert->out_info.finfo->depth[i]; |
2060 | |
|
2061 | 0 | if (depth == 0) { |
2062 | 0 | quant[i] = 0; |
2063 | 0 | continue; |
2064 | 0 | } |
2065 | | |
2066 | 0 | if (convert->current_bits >= depth) { |
2067 | 0 | quant[i] = 1 << (convert->current_bits - depth); |
2068 | 0 | if (target_quant > quant[i]) { |
2069 | 0 | flags |= GST_VIDEO_DITHER_FLAG_QUANTIZE; |
2070 | 0 | quant[i] = target_quant; |
2071 | 0 | } |
2072 | 0 | } else { |
2073 | 0 | quant[i] = 0; |
2074 | 0 | } |
2075 | 0 | if (quant[i] > 1) |
2076 | 0 | do_dither = TRUE; |
2077 | 0 | } |
2078 | 0 | } |
2079 | |
|
2080 | 0 | if (do_dither) { |
2081 | 0 | GST_LOG ("chain dither"); |
2082 | |
|
2083 | 0 | convert->dither[idx] = gst_video_dither_new (method, |
2084 | 0 | flags, convert->pack_format, quant, convert->current_width); |
2085 | |
|
2086 | 0 | prev = convert->dither_lines[idx] = gst_line_cache_new (prev); |
2087 | 0 | prev->write_input = TRUE; |
2088 | 0 | prev->pass_alloc = TRUE; |
2089 | 0 | prev->n_lines = 1; |
2090 | 0 | prev->stride = convert->current_pstride * convert->current_width; |
2091 | 0 | gst_line_cache_set_need_line_func (prev, do_dither_lines, idx, convert, |
2092 | 0 | NULL); |
2093 | 0 | } |
2094 | 0 | return prev; |
2095 | 0 | } |
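
/* Worked example (editorial, not from the upstream file): with a 16-bit
 * internal format and an 8-bit output depth, quant[i] becomes
 * 1 << (16 - 8) = 256, so dithering spreads the error of dropping the low
 * 8 bits.  A Bayer dither with an explicit target quantization could be
 * requested like this; the values are assumptions for the example. */
#if 0
static GstStructure *
example_dither_config (void)
{
  return gst_structure_new ("GstVideoConverter",
      GST_VIDEO_CONVERTER_OPT_DITHER_METHOD,
      GST_TYPE_VIDEO_DITHER_METHOD, GST_VIDEO_DITHER_BAYER,
      GST_VIDEO_CONVERTER_OPT_DITHER_QUANTIZATION, G_TYPE_UINT, 256, NULL);
}
#endif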
2096 | | |
2097 | | static GstLineCache * |
2098 | | chain_pack (GstVideoConverter * convert, GstLineCache * prev, gint idx) |
2099 | 0 | { |
2100 | 0 | convert->pack_nlines = convert->out_info.finfo->pack_lines; |
2101 | 0 | convert->pack_pstride = convert->current_pstride; |
2102 | 0 | convert->identity_pack = |
2103 | 0 | (convert->out_info.finfo->format == |
2104 | 0 | convert->out_info.finfo->unpack_format); |
2105 | 0 | GST_LOG ("chain pack line format %s, pstride %d, identity_pack %d (%d %d)", |
2106 | 0 | gst_video_format_to_string (convert->current_format), |
2107 | 0 | convert->current_pstride, convert->identity_pack, |
2108 | 0 | convert->out_info.finfo->format, convert->out_info.finfo->unpack_format); |
2109 | |
|
2110 | 0 | return prev; |
2111 | 0 | } |
2112 | | |
2113 | | static void |
2114 | | setup_allocators (GstVideoConverter * convert) |
2115 | 0 | { |
2116 | 0 | GstLineCache *cache, *prev; |
2117 | 0 | GstLineCacheAllocLineFunc alloc_line; |
2118 | 0 | gboolean alloc_writable; |
2119 | 0 | gpointer user_data; |
2120 | 0 | GDestroyNotify notify; |
2121 | 0 | gint width; |
2122 | 0 | gint i; |
2123 | |
|
2124 | 0 | width = MAX (convert->in_maxwidth, convert->out_maxwidth); |
2125 | 0 | width += convert->out_x; |
2126 | |
|
2127 | 0 | for (i = 0; i < convert->conversion_runner->n_threads; i++) { |
2128 | | /* start with using dest lines if we can write into them directly */
2129 | 0 | if (convert->identity_pack) { |
2130 | 0 | alloc_line = get_dest_line; |
2131 | 0 | alloc_writable = TRUE; |
2132 | 0 | user_data = convert; |
2133 | 0 | notify = NULL; |
2134 | 0 | } else { |
2135 | 0 | user_data = |
2136 | 0 | converter_alloc_new (sizeof (guint16) * width * 4, 4 + BACKLOG, |
2137 | 0 | convert, NULL); |
2138 | 0 | setup_border_alloc (convert, user_data); |
2139 | 0 | notify = (GDestroyNotify) converter_alloc_free; |
2140 | 0 | alloc_line = get_border_temp_line; |
2141 | | /* when we add a border, we need to write */ |
2142 | 0 | alloc_writable = convert->borderline != NULL; |
2143 | 0 | } |
2144 | | |
2145 | | /* First step, try to calculate how many temp lines we need. Go backwards, |
2146 | | * keep track of the maximum number of lines we need for each intermediate |
2147 | | * step. */ |
2148 | 0 | for (prev = cache = convert->pack_lines[i]; cache; cache = cache->prev) { |
2149 | 0 | GST_LOG ("looking at cache %p, %d lines, %d backlog", cache, |
2150 | 0 | cache->n_lines, cache->backlog); |
2151 | 0 | prev->n_lines = MAX (prev->n_lines, cache->n_lines); |
2152 | 0 | if (!cache->pass_alloc) { |
2153 | 0 | GST_LOG ("cache %p, needs %d lines", prev, prev->n_lines); |
2154 | 0 | prev = cache; |
2155 | 0 | } |
2156 | 0 | } |
2157 | | |
2158 | | /* now walk backwards; we try to write into the dest lines directly
2159 | | * and keep track of whether the source needs to be writable */
2160 | 0 | for (cache = convert->pack_lines[i]; cache; cache = cache->prev) { |
2161 | 0 | gst_line_cache_set_alloc_line_func (cache, alloc_line, user_data, notify); |
2162 | 0 | cache->alloc_writable = alloc_writable; |
2163 | | |
2164 | | /* make sure only one cache frees the allocator */ |
2165 | 0 | notify = NULL; |
2166 | |
|
2167 | 0 | if (!cache->pass_alloc) { |
2168 | | /* can't pass allocator, make new temp line allocator */ |
2169 | 0 | user_data = |
2170 | 0 | converter_alloc_new (sizeof (guint16) * width * 4, |
2171 | 0 | cache->n_lines + cache->backlog, convert, NULL); |
2172 | 0 | notify = (GDestroyNotify) converter_alloc_free; |
2173 | 0 | alloc_line = get_temp_line; |
2174 | 0 | alloc_writable = FALSE; |
2175 | 0 | } |
2176 | | /* if someone writes to the input, we need a writable line from the |
2177 | | * previous cache */ |
2178 | 0 | if (cache->write_input) |
2179 | 0 | alloc_writable = TRUE; |
2180 | 0 | } |
2181 | | /* free leftover allocator */ |
2182 | 0 | if (notify) |
2183 | 0 | notify (user_data); |
2184 | 0 | } |
2185 | 0 | } |
2186 | | |
2187 | | static void |
2188 | | setup_borderline (GstVideoConverter * convert) |
2189 | 0 | { |
2190 | 0 | gint width; |
2191 | |
|
2192 | 0 | width = MAX (convert->in_maxwidth, convert->out_maxwidth); |
2193 | 0 | width += convert->out_x; |
2194 | |
|
2195 | 0 | if (convert->fill_border && (convert->out_height < convert->out_maxheight || |
2196 | 0 | convert->out_width < convert->out_maxwidth)) { |
2197 | 0 | guint32 border_val; |
2198 | 0 | gint i, w_sub; |
2199 | 0 | const GstVideoFormatInfo *out_finfo; |
2200 | 0 | gpointer planes[GST_VIDEO_MAX_PLANES]; |
2201 | 0 | gint strides[GST_VIDEO_MAX_PLANES]; |
2202 | |
|
2203 | 0 | convert->borderline = g_malloc0 (sizeof (guint16) * width * 4); |
2204 | |
|
2205 | 0 | out_finfo = convert->out_info.finfo; |
2206 | |
|
2207 | 0 | if (GST_VIDEO_INFO_IS_YUV (&convert->out_info)) { |
2208 | 0 | MatrixData cm; |
2209 | 0 | gint a, r, g, b; |
2210 | 0 | gint y, u, v; |
2211 | | |
2212 | | /* Get Color matrix. */ |
2213 | 0 | color_matrix_set_identity (&cm); |
2214 | 0 | compute_matrix_to_YUV (convert, &cm, TRUE); |
2215 | 0 | color_matrix_convert (&cm); |
2216 | |
|
2217 | 0 | border_val = GINT32_FROM_BE (convert->border_argb); |
2218 | |
|
2219 | 0 | b = (0xFF000000 & border_val) >> 24; |
2220 | 0 | g = (0x00FF0000 & border_val) >> 16; |
2221 | 0 | r = (0x0000FF00 & border_val) >> 8; |
2222 | 0 | a = (0x000000FF & border_val); |
2223 | |
|
2224 | 0 | y = 16 + ((r * cm.im[0][0] + g * cm.im[0][1] + b * cm.im[0][2]) >> 8); |
2225 | 0 | u = 128 + ((r * cm.im[1][0] + g * cm.im[1][1] + b * cm.im[1][2]) >> 8); |
2226 | 0 | v = 128 + ((r * cm.im[2][0] + g * cm.im[2][1] + b * cm.im[2][2]) >> 8); |
2227 | |
|
2228 | 0 | a = CLAMP (a, 0, 255); |
2229 | 0 | y = CLAMP (y, 0, 255); |
2230 | 0 | u = CLAMP (u, 0, 255); |
2231 | 0 | v = CLAMP (v, 0, 255); |
2232 | |
|
2233 | 0 | border_val = a | (y << 8) | (u << 16) | ((guint32) v << 24); |
2234 | 0 | } else { |
2235 | 0 | border_val = GINT32_FROM_BE (convert->border_argb); |
2236 | 0 | } |
2237 | 0 | if (convert->pack_bits == 8) |
2238 | 0 | video_orc_splat_u32 (convert->borderline, border_val, width); |
2239 | 0 | else |
2240 | 0 | video_orc_splat2_u64 (convert->borderline, border_val, width); |
2241 | | |
2242 | | /* pack the border pixel into the per-plane border values */
2243 | 0 | for (i = 0; i < out_finfo->n_planes; i++) { |
2244 | 0 | planes[i] = &convert->borders[i]; |
2245 | 0 | strides[i] = sizeof (guint64); |
2246 | 0 | } |
2247 | 0 | w_sub = 0; |
2248 | 0 | if (out_finfo->n_planes == 1) { |
2249 | | /* for packed formats, convert based on subsampling so that we |
2250 | | * get a complete group of pixels */ |
2251 | 0 | for (i = 0; i < out_finfo->n_components; i++) { |
2252 | 0 | w_sub = MAX (w_sub, out_finfo->w_sub[i]); |
2253 | 0 | } |
2254 | 0 | } |
2255 | 0 | out_finfo->pack_func (out_finfo, GST_VIDEO_PACK_FLAG_NONE, |
2256 | 0 | convert->borderline, 0, planes, strides, |
2257 | 0 | GST_VIDEO_CHROMA_SITE_UNKNOWN, 0, 1 << w_sub); |
2258 | 0 | } else { |
2259 | 0 | convert->borderline = NULL; |
2260 | 0 | } |
2261 | 0 | } |
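
/* Worked example (editorial, not from the upstream file): for an opaque
 * black border, the extracted r = g = b = 0 make all the matrix products
 * vanish, so the border pixel becomes a = 255, y = 16, u = 128, v = 128
 * before being packed into each output plane.  A different border colour can
 * be chosen with GST_VIDEO_CONVERTER_OPT_BORDER_ARGB, and border filling can
 * be disabled with GST_VIDEO_CONVERTER_OPT_FILL_BORDER. */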
2262 | | |
2263 | | static AlphaMode |
2264 | | convert_get_alpha_mode (GstVideoConverter * convert) |
2265 | 0 | { |
2266 | 0 | gboolean in_alpha, out_alpha; |
2267 | |
|
2268 | 0 | in_alpha = GST_VIDEO_INFO_HAS_ALPHA (&convert->in_info); |
2269 | 0 | out_alpha = GST_VIDEO_INFO_HAS_ALPHA (&convert->out_info); |
2270 | | |
2271 | | /* no output alpha, do nothing */ |
2272 | 0 | if (!out_alpha) |
2273 | 0 | return ALPHA_MODE_NONE; |
2274 | | |
2275 | 0 | if (in_alpha) { |
2276 | | /* in and out */ |
2277 | 0 | if (CHECK_ALPHA_COPY (convert)) |
2278 | 0 | return ALPHA_MODE_COPY; |
2279 | | |
2280 | 0 | if (CHECK_ALPHA_MULT (convert)) { |
2281 | 0 | if (GET_OPT_ALPHA_VALUE (convert) == 1.0) |
2282 | 0 | return ALPHA_MODE_COPY; |
2283 | 0 | else |
2284 | 0 | return ALPHA_MODE_MULT; |
2285 | 0 | } |
2286 | 0 | } |
2287 | | /* nothing special, this is what unpack etc does automatically */ |
2288 | 0 | if (GET_OPT_ALPHA_VALUE (convert) == 1.0) |
2289 | 0 | return ALPHA_MODE_NONE; |
2290 | | |
2291 | | /* everything else becomes SET */ |
2292 | 0 | return ALPHA_MODE_SET; |
2293 | 0 | } |
2294 | | |
2295 | | static void |
2296 | | gst_video_converter_init_from_config (GstVideoConverter * convert) |
2297 | 0 | { |
2298 | 0 | const GstVideoFormatInfo *fin, *fout, *finfo; |
2299 | 0 | GstVideoInfo *in_info = &convert->in_info; |
2300 | 0 | GstVideoInfo *out_info = &convert->out_info; |
2301 | 0 | gdouble alpha_value; |
2302 | |
|
2303 | 0 | fin = in_info->finfo; |
2304 | 0 | fout = out_info->finfo; |
2305 | |
|
2306 | 0 | convert->in_x = get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_SRC_X, 0); |
2307 | 0 | convert->in_y = get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_SRC_Y, 0); |
2308 | 0 | convert->in_x &= ~((1 << fin->w_sub[1]) - 1); |
2309 | 0 | convert->in_y &= ~((1 << fin->h_sub[1]) - 1); |
2310 | |
|
2311 | 0 | convert->in_width = get_opt_int (convert, |
2312 | 0 | GST_VIDEO_CONVERTER_OPT_SRC_WIDTH, convert->in_maxwidth - convert->in_x); |
2313 | 0 | convert->in_height = get_opt_int (convert, |
2314 | 0 | GST_VIDEO_CONVERTER_OPT_SRC_HEIGHT, |
2315 | 0 | convert->in_maxheight - convert->in_y); |
2316 | |
|
2317 | 0 | convert->in_width = |
2318 | 0 | MIN (convert->in_width, convert->in_maxwidth - convert->in_x); |
2319 | 0 | if (convert->in_width + convert->in_x < 0 || |
2320 | 0 | convert->in_width + convert->in_x > convert->in_maxwidth) { |
2321 | 0 | convert->in_width = 0; |
2322 | 0 | } |
2323 | |
|
2324 | 0 | convert->in_height = |
2325 | 0 | MIN (convert->in_height, convert->in_maxheight - convert->in_y); |
2326 | 0 | if (convert->in_height + convert->in_y < 0 || |
2327 | 0 | convert->in_height + convert->in_y > convert->in_maxheight) { |
2328 | 0 | convert->in_height = 0; |
2329 | 0 | } |
2330 | |
|
2331 | 0 | convert->out_x = get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_DEST_X, 0); |
2332 | 0 | convert->out_y = get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_DEST_Y, 0); |
2333 | 0 | convert->out_x &= ~((1 << fout->w_sub[1]) - 1); |
2334 | 0 | convert->out_y &= ~((1 << fout->h_sub[1]) - 1); |
2335 | |
|
2336 | 0 | convert->out_width = get_opt_int (convert, |
2337 | 0 | GST_VIDEO_CONVERTER_OPT_DEST_WIDTH, |
2338 | 0 | convert->out_maxwidth - convert->out_x); |
2339 | 0 | convert->out_height = |
2340 | 0 | get_opt_int (convert, GST_VIDEO_CONVERTER_OPT_DEST_HEIGHT, |
2341 | 0 | convert->out_maxheight - convert->out_y); |
2342 | |
|
2343 | 0 | if (convert->out_width > convert->out_maxwidth - convert->out_x) |
2344 | 0 | convert->out_width = convert->out_maxwidth - convert->out_x; |
2345 | 0 | convert->out_width = CLAMP (convert->out_width, 0, convert->out_maxwidth); |
2346 | | |
2347 | | /* Check if completely outside the framebuffer */ |
2348 | 0 | if (convert->out_width + convert->out_x < 0 || |
2349 | 0 | convert->out_width + convert->out_x > convert->out_maxwidth) { |
2350 | 0 | convert->out_width = 0; |
2351 | 0 | } |
2352 | | |
2353 | | /* Same for height */ |
2354 | 0 | if (convert->out_height > convert->out_maxheight - convert->out_y) |
2355 | 0 | convert->out_height = convert->out_maxheight - convert->out_y; |
2356 | 0 | convert->out_height = CLAMP (convert->out_height, 0, convert->out_maxheight); |
2357 | |
|
2358 | 0 | if (convert->out_height + convert->out_y < 0 || |
2359 | 0 | convert->out_height + convert->out_y > convert->out_maxheight) { |
2360 | 0 | convert->out_height = 0; |
2361 | 0 | } |
2362 | |
|
2363 | 0 | convert->fill_border = GET_OPT_FILL_BORDER (convert); |
2364 | 0 | convert->border_argb = get_opt_uint (convert, |
2365 | 0 | GST_VIDEO_CONVERTER_OPT_BORDER_ARGB, DEFAULT_OPT_BORDER_ARGB); |
2366 | |
|
2367 | 0 | alpha_value = GET_OPT_ALPHA_VALUE (convert); |
2368 | 0 | convert->alpha_value = 255 * alpha_value; |
2369 | 0 | convert->alpha_mode = convert_get_alpha_mode (convert); |
2370 | |
|
2371 | 0 | convert->unpack_format = in_info->finfo->unpack_format; |
2372 | 0 | finfo = gst_video_format_get_info (convert->unpack_format); |
2373 | 0 | convert->unpack_bits = GST_VIDEO_FORMAT_INFO_DEPTH (finfo, 0); |
2374 | 0 | convert->unpack_rgb = GST_VIDEO_FORMAT_INFO_IS_RGB (finfo); |
2375 | 0 | if (convert->unpack_rgb |
2376 | 0 | && in_info->colorimetry.matrix != GST_VIDEO_COLOR_MATRIX_RGB) { |
2377 | | /* force identity matrix for RGB input */ |
2378 | 0 | GST_WARNING ("invalid matrix %d for input RGB format, using RGB", |
2379 | 0 | in_info->colorimetry.matrix); |
2380 | 0 | convert->in_info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_RGB; |
2381 | 0 | } |
2382 | |
|
2383 | 0 | convert->pack_format = out_info->finfo->unpack_format; |
2384 | 0 | finfo = gst_video_format_get_info (convert->pack_format); |
2385 | 0 | convert->pack_bits = GST_VIDEO_FORMAT_INFO_DEPTH (finfo, 0); |
2386 | 0 | convert->pack_rgb = GST_VIDEO_FORMAT_INFO_IS_RGB (finfo); |
2387 | 0 | convert->pack_pal = |
2388 | 0 | gst_video_format_get_palette (GST_VIDEO_INFO_FORMAT (out_info), |
2389 | 0 | &convert->pack_palsize); |
2390 | 0 | if (convert->pack_rgb |
2391 | 0 | && out_info->colorimetry.matrix != GST_VIDEO_COLOR_MATRIX_RGB) { |
2392 | | /* force identity matrix for RGB output */ |
2393 | 0 | GST_WARNING ("invalid matrix %d for output RGB format, using RGB", |
2394 | 0 | out_info->colorimetry.matrix); |
2395 | 0 | convert->out_info.colorimetry.matrix = GST_VIDEO_COLOR_MATRIX_RGB; |
2396 | 0 | } |
2397 | 0 | } |
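
/* Worked example (editorial, not from the upstream file): the src/dest
 * rectangles above are aligned to the chroma subsampling of the formats.
 * For I420 (w_sub[1] = h_sub[1] = 1) a requested src-x of 3 is masked with
 * ~((1 << 1) - 1) and becomes 2, so cropping always starts on a full chroma
 * sample.  A cropping conversion could be configured like this; the values
 * are assumptions for the example. */
#if 0
static GstStructure *
example_crop_config (void)
{
  return gst_structure_new ("GstVideoConverter",
      GST_VIDEO_CONVERTER_OPT_SRC_X, G_TYPE_INT, 2,
      GST_VIDEO_CONVERTER_OPT_SRC_Y, G_TYPE_INT, 2,
      GST_VIDEO_CONVERTER_OPT_SRC_WIDTH, G_TYPE_INT, 640,
      GST_VIDEO_CONVERTER_OPT_SRC_HEIGHT, G_TYPE_INT, 360, NULL);
}
#endif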
2398 | | |
2399 | | /** |
2400 | | * gst_video_converter_new_with_pool: (constructor) (skip) |
2401 | | * @in_info: a #GstVideoInfo |
2402 | | * @out_info: a #GstVideoInfo |
2403 | | * @config: (transfer full): a #GstStructure with configuration options |
2404 | | * @pool: (nullable): a #GstTaskPool to spawn threads from |
2405 | | * |
2406 | | * Create a new converter object to convert between @in_info and @out_info |
2407 | | * with @config. |
2408 | | * |
2409 | | * The optional @pool can be used to spawn threads; this is useful when
2410 | | * creating new converters rapidly, for example when updating cropping.
2411 | | * |
2412 | | * If @config is not provided, and a @pool is provided, the number of threads for |
2413 | | * the converter will be set to the maximum number of threads in the pool. |
2414 | | * |
2415 | | * Returns: (nullable): a #GstVideoConverter, or %NULL if conversion is not possible.
2416 | | * |
2417 | | * Since: 1.20 |
2418 | | */ |
2419 | | GstVideoConverter * |
2420 | | gst_video_converter_new_with_pool (const GstVideoInfo * in_info, |
2421 | | const GstVideoInfo * out_info, GstStructure * config, GstTaskPool * pool) |
2422 | 0 | { |
2423 | 0 | GstVideoConverter *convert; |
2424 | 0 | GstLineCache *prev; |
2425 | 0 | gint n_threads, i; |
2426 | 0 | gboolean async_tasks; |
2427 | |
|
2428 | 0 | g_return_val_if_fail (in_info != NULL, NULL); |
2429 | 0 | g_return_val_if_fail (out_info != NULL, NULL); |
2430 | | /* we won't ever do framerate conversion */ |
2431 | 0 | g_return_val_if_fail (in_info->fps_n == out_info->fps_n, NULL); |
2432 | 0 | g_return_val_if_fail (in_info->fps_d == out_info->fps_d, NULL); |
2433 | | /* we won't ever do deinterlace */ |
2434 | 0 | g_return_val_if_fail (in_info->interlace_mode == out_info->interlace_mode, |
2435 | 0 | NULL); |
2436 | | |
2437 | 0 | convert = g_new0 (GstVideoConverter, 1); |
2438 | |
|
2439 | 0 | convert->in_info = *in_info; |
2440 | 0 | convert->out_info = *out_info; |
2441 | |
|
2442 | 0 | convert->in_maxwidth = GST_VIDEO_INFO_WIDTH (in_info); |
2443 | 0 | convert->in_maxheight = GST_VIDEO_INFO_FIELD_HEIGHT (in_info); |
2444 | 0 | convert->out_maxwidth = GST_VIDEO_INFO_WIDTH (out_info); |
2445 | 0 | convert->out_maxheight = GST_VIDEO_INFO_FIELD_HEIGHT (out_info); |
2446 | |
|
2447 | 0 | convert->config = gst_structure_new_static_str_empty ("GstVideoConverter"); |
2448 | 0 | if (config) { |
2449 | 0 | gst_video_converter_set_config (convert, config); |
2450 | 0 | n_threads = get_opt_uint (convert, GST_VIDEO_CONVERTER_OPT_THREADS, 1); |
2451 | 0 | } else { |
2452 | | /* No config provided. If a pool is available, use its thread count */ |
2453 | 0 | gst_video_converter_init_from_config (convert); |
2454 | 0 | if (pool && GST_IS_SHARED_TASK_POOL (pool)) { |
2455 | 0 | n_threads = |
2456 | 0 | gst_shared_task_pool_get_max_threads (GST_SHARED_TASK_POOL (pool)); |
2457 | 0 | GST_LOG ("setting n-threads from max threads %d from provided pool", |
2458 | 0 | n_threads); |
2459 | 0 | gst_structure_set (convert->config, |
2460 | 0 | GST_VIDEO_CONVERTER_OPT_THREADS, G_TYPE_UINT, n_threads, NULL); |
2461 | 0 | } else |
2462 | 0 | n_threads = get_opt_uint (convert, GST_VIDEO_CONVERTER_OPT_THREADS, 1); |
2463 | 0 | } |
2464 | |
|
2465 | 0 | if (n_threads == 0 || n_threads > g_get_num_processors ()) |
2466 | 0 | n_threads = g_get_num_processors (); |
2467 | | /* Magic number: aim for at least ~200 lines of work per thread */
2468 | 0 | if (MAX (convert->out_height, convert->in_height) / n_threads < 200) |
2469 | 0 | n_threads = (MAX (convert->out_height, convert->in_height) + 199) / 200; |
2470 | 0 | if (n_threads < 1) |
2471 | 0 | n_threads = 1; |
2472 | |
|
2473 | 0 | async_tasks = GET_OPT_ASYNC_TASKS (convert); |
2474 | 0 | convert->conversion_runner = |
2475 | 0 | gst_parallelized_task_runner_new (n_threads, pool, async_tasks); |
2476 | |
|
2477 | 0 | if (video_converter_lookup_fastpath (convert)) |
2478 | 0 | goto done; |
2479 | | |
2480 | 0 | if (in_info->finfo->unpack_func == NULL) |
2481 | 0 | goto no_unpack_func; |
2482 | | |
2483 | 0 | if (out_info->finfo->pack_func == NULL) |
2484 | 0 | goto no_pack_func; |
2485 | | |
2486 | 0 | convert->convert = video_converter_generic; |
2487 | |
|
2488 | 0 | convert->upsample_p = g_new0 (GstVideoChromaResample *, n_threads); |
2489 | 0 | convert->upsample_i = g_new0 (GstVideoChromaResample *, n_threads); |
2490 | 0 | convert->downsample_p = g_new0 (GstVideoChromaResample *, n_threads); |
2491 | 0 | convert->downsample_i = g_new0 (GstVideoChromaResample *, n_threads); |
2492 | 0 | convert->v_scaler_p = g_new0 (GstVideoScaler *, n_threads); |
2493 | 0 | convert->v_scaler_i = g_new0 (GstVideoScaler *, n_threads); |
2494 | 0 | convert->h_scaler = g_new0 (GstVideoScaler *, n_threads); |
2495 | 0 | convert->unpack_lines = g_new0 (GstLineCache *, n_threads); |
2496 | 0 | convert->pack_lines = g_new0 (GstLineCache *, n_threads); |
2497 | 0 | convert->upsample_lines = g_new0 (GstLineCache *, n_threads); |
2498 | 0 | convert->to_RGB_lines = g_new0 (GstLineCache *, n_threads); |
2499 | 0 | convert->hscale_lines = g_new0 (GstLineCache *, n_threads); |
2500 | 0 | convert->vscale_lines = g_new0 (GstLineCache *, n_threads); |
2501 | 0 | convert->convert_lines = g_new0 (GstLineCache *, n_threads); |
2502 | 0 | convert->alpha_lines = g_new0 (GstLineCache *, n_threads); |
2503 | 0 | convert->to_YUV_lines = g_new0 (GstLineCache *, n_threads); |
2504 | 0 | convert->downsample_lines = g_new0 (GstLineCache *, n_threads); |
2505 | 0 | convert->dither_lines = g_new0 (GstLineCache *, n_threads); |
2506 | 0 | convert->dither = g_new0 (GstVideoDither *, n_threads); |
2507 | |
|
2508 | 0 | if (convert->in_width > 0 && convert->out_width > 0 && convert->in_height > 0 |
2509 | 0 | && convert->out_height > 0) { |
2510 | 0 | for (i = 0; i < n_threads; i++) { |
2511 | 0 | convert->current_format = GST_VIDEO_INFO_FORMAT (in_info); |
2512 | 0 | convert->current_width = convert->in_width; |
2513 | 0 | convert->current_height = convert->in_height; |
2514 | | |
2515 | | /* unpack */ |
2516 | 0 | prev = chain_unpack_line (convert, i); |
2517 | | /* upsample chroma */ |
2518 | 0 | prev = chain_upsample (convert, prev, i); |
2519 | | /* convert to gamma decoded RGB */ |
2520 | 0 | prev = chain_convert_to_RGB (convert, prev, i); |
2521 | | /* do all downscaling */ |
2522 | 0 | prev = chain_scale (convert, prev, FALSE, i); |
2523 | | /* do conversion between color spaces */ |
2524 | 0 | prev = chain_convert (convert, prev, i); |
2525 | | /* do alpha channels */ |
2526 | 0 | prev = chain_alpha (convert, prev, i); |
2527 | | /* do all remaining (up)scaling */ |
2528 | 0 | prev = chain_scale (convert, prev, TRUE, i); |
2529 | | /* convert to gamma encoded Y'Cb'Cr' */ |
2530 | 0 | prev = chain_convert_to_YUV (convert, prev, i); |
2531 | | /* downsample chroma */ |
2532 | 0 | prev = chain_downsample (convert, prev, i); |
2533 | | /* dither */ |
2534 | 0 | prev = chain_dither (convert, prev, i); |
2535 | | /* pack into final format */ |
2536 | 0 | convert->pack_lines[i] = chain_pack (convert, prev, i); |
2537 | 0 | } |
2538 | 0 | } |
2539 | |
|
2540 | 0 | setup_borderline (convert); |
2541 | | /* now figure out allocators */ |
2542 | 0 | setup_allocators (convert); |
2543 | |
|
2544 | 0 | done: |
2545 | 0 | return convert; |
2546 | | |
2547 | | /* ERRORS */ |
2548 | 0 | no_unpack_func: |
2549 | 0 | { |
2550 | 0 | GST_ERROR ("no unpack_func for format %s", |
2551 | 0 | gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (in_info))); |
2552 | 0 | gst_video_converter_free (convert); |
2553 | 0 | return NULL; |
2554 | 0 | } |
2555 | 0 | no_pack_func: |
2556 | 0 | { |
2557 | 0 | GST_ERROR ("no pack_func for format %s", |
2558 | 0 | gst_video_format_to_string (GST_VIDEO_INFO_FORMAT (out_info))); |
2559 | 0 | gst_video_converter_free (convert); |
2560 | 0 | return NULL; |
2561 | 0 | } |
2562 | 0 | } |
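
/* Minimal usage sketch (illustrative, not part of the upstream file):
 * creating a converter that runs on a shared task pool, as described in the
 * documentation above.  The thread count is an example and error handling is
 * omitted; the caller keeps the pool alive while the converter is in use and
 * cleans it up (gst_task_pool_cleanup() + gst_object_unref()) after
 * gst_video_converter_free(). */
#if 0
#include <gst/gst.h>
#include <gst/video/video.h>

static GstVideoConverter *
example_pooled_converter (const GstVideoInfo * in_info,
    const GstVideoInfo * out_info, GstTaskPool ** pool_out)
{
  GstTaskPool *pool = gst_shared_task_pool_new ();
  GstVideoConverter *convert;

  gst_shared_task_pool_set_max_threads (GST_SHARED_TASK_POOL (pool), 4);
  gst_task_pool_prepare (pool, NULL);

  /* a NULL config lets the converter pick up the pool's max thread count */
  convert = gst_video_converter_new_with_pool (in_info, out_info, NULL, pool);

  *pool_out = pool;
  return convert;
}
#endif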
2563 | | |
2564 | | /** |
2565 | | * gst_video_converter_new: (constructor) (skip) |
2566 | | * @in_info: a #GstVideoInfo |
2567 | | * @out_info: a #GstVideoInfo |
2568 | | * @config: (transfer full): a #GstStructure with configuration options |
2569 | | * |
2570 | | * Create a new converter object to convert between @in_info and @out_info |
2571 | | * with @config. |
2572 | | * |
2573 | | * Returns: (nullable): a #GstVideoConverter, or %NULL if conversion is not possible.
2574 | | * |
2575 | | * Since: 1.6 |
2576 | | */ |
2577 | | GstVideoConverter * |
2578 | | gst_video_converter_new (const GstVideoInfo * in_info, |
2579 | | const GstVideoInfo * out_info, GstStructure * config) |
2580 | 0 | { |
2581 | 0 | return gst_video_converter_new_with_pool (in_info, out_info, config, NULL); |
2582 | 0 | } |
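
/* Minimal usage sketch (illustrative, not part of the upstream file): an
 * end-to-end use of the converter API above, converting one I420 frame to
 * BGRA.  Formats, sizes and the NULL config are assumptions for the example;
 * the buffers must be large enough for their respective #GstVideoInfo, and
 * error handling is omitted. */
#if 0
#include <gst/gst.h>
#include <gst/video/video.h>

static void
example_convert_one_frame (GstBuffer * in_buf, GstBuffer * out_buf)
{
  GstVideoInfo in_info, out_info;
  GstVideoConverter *convert;
  GstVideoFrame in_frame, out_frame;

  gst_video_info_set_format (&in_info, GST_VIDEO_FORMAT_I420, 1920, 1080);
  gst_video_info_set_format (&out_info, GST_VIDEO_FORMAT_BGRA, 1280, 720);

  /* a NULL config uses the default options */
  convert = gst_video_converter_new (&in_info, &out_info, NULL);

  gst_video_frame_map (&in_frame, &in_info, in_buf, GST_MAP_READ);
  gst_video_frame_map (&out_frame, &out_info, out_buf, GST_MAP_WRITE);

  gst_video_converter_frame (convert, &in_frame, &out_frame);

  gst_video_frame_unmap (&out_frame);
  gst_video_frame_unmap (&in_frame);
  gst_video_converter_free (convert);
}
#endif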
2583 | | |
2584 | | static void |
2585 | | clear_matrix_data (MatrixData * data) |
2586 | 0 | { |
2587 | 0 | g_free (data->t_r); |
2588 | 0 | g_free (data->t_g); |
2589 | 0 | g_free (data->t_b); |
2590 | 0 | } |
2591 | | |
2592 | | /** |
2593 | | * gst_video_converter_free: |
2594 | | * @convert: a #GstVideoConverter |
2595 | | * |
2596 | | * Free @convert |
2597 | | * |
2598 | | * Since: 1.6 |
2599 | | */ |
2600 | | void |
2601 | | gst_video_converter_free (GstVideoConverter * convert) |
2602 | 0 | { |
2603 | 0 | guint i, j; |
2604 | |
|
2605 | 0 | g_return_if_fail (convert != NULL); |
2606 | | |
2607 | 0 | for (i = 0; i < convert->conversion_runner->n_threads; i++) { |
2608 | 0 | if (convert->upsample_p && convert->upsample_p[i]) |
2609 | 0 | gst_video_chroma_resample_free (convert->upsample_p[i]); |
2610 | 0 | if (convert->upsample_i && convert->upsample_i[i]) |
2611 | 0 | gst_video_chroma_resample_free (convert->upsample_i[i]); |
2612 | 0 | if (convert->downsample_p && convert->downsample_p[i]) |
2613 | 0 | gst_video_chroma_resample_free (convert->downsample_p[i]); |
2614 | 0 | if (convert->downsample_i && convert->downsample_i[i]) |
2615 | 0 | gst_video_chroma_resample_free (convert->downsample_i[i]); |
2616 | 0 | if (convert->v_scaler_p && convert->v_scaler_p[i]) |
2617 | 0 | gst_video_scaler_free (convert->v_scaler_p[i]); |
2618 | 0 | if (convert->v_scaler_i && convert->v_scaler_i[i]) |
2619 | 0 | gst_video_scaler_free (convert->v_scaler_i[i]); |
2620 | 0 | if (convert->h_scaler && convert->h_scaler[i]) |
2621 | 0 | gst_video_scaler_free (convert->h_scaler[i]); |
2622 | 0 | if (convert->unpack_lines && convert->unpack_lines[i]) |
2623 | 0 | gst_line_cache_free (convert->unpack_lines[i]); |
2624 | 0 | if (convert->upsample_lines && convert->upsample_lines[i]) |
2625 | 0 | gst_line_cache_free (convert->upsample_lines[i]); |
2626 | 0 | if (convert->to_RGB_lines && convert->to_RGB_lines[i]) |
2627 | 0 | gst_line_cache_free (convert->to_RGB_lines[i]); |
2628 | 0 | if (convert->hscale_lines && convert->hscale_lines[i]) |
2629 | 0 | gst_line_cache_free (convert->hscale_lines[i]); |
2630 | 0 | if (convert->vscale_lines && convert->vscale_lines[i]) |
2631 | 0 | gst_line_cache_free (convert->vscale_lines[i]); |
2632 | 0 | if (convert->convert_lines && convert->convert_lines[i]) |
2633 | 0 | gst_line_cache_free (convert->convert_lines[i]); |
2634 | 0 | if (convert->alpha_lines && convert->alpha_lines[i]) |
2635 | 0 | gst_line_cache_free (convert->alpha_lines[i]); |
2636 | 0 | if (convert->to_YUV_lines && convert->to_YUV_lines[i]) |
2637 | 0 | gst_line_cache_free (convert->to_YUV_lines[i]); |
2638 | 0 | if (convert->downsample_lines && convert->downsample_lines[i]) |
2639 | 0 | gst_line_cache_free (convert->downsample_lines[i]); |
2640 | 0 | if (convert->dither_lines && convert->dither_lines[i]) |
2641 | 0 | gst_line_cache_free (convert->dither_lines[i]); |
2642 | 0 | if (convert->dither && convert->dither[i]) |
2643 | 0 | gst_video_dither_free (convert->dither[i]); |
2644 | 0 | } |
2645 | 0 | g_free (convert->upsample_p); |
2646 | 0 | g_free (convert->upsample_i); |
2647 | 0 | g_free (convert->downsample_p); |
2648 | 0 | g_free (convert->downsample_i); |
2649 | 0 | g_free (convert->v_scaler_p); |
2650 | 0 | g_free (convert->v_scaler_i); |
2651 | 0 | g_free (convert->h_scaler); |
2652 | 0 | g_free (convert->unpack_lines); |
2653 | 0 | g_free (convert->pack_lines); |
2654 | 0 | g_free (convert->upsample_lines); |
2655 | 0 | g_free (convert->to_RGB_lines); |
2656 | 0 | g_free (convert->hscale_lines); |
2657 | 0 | g_free (convert->vscale_lines); |
2658 | 0 | g_free (convert->convert_lines); |
2659 | 0 | g_free (convert->alpha_lines); |
2660 | 0 | g_free (convert->to_YUV_lines); |
2661 | 0 | g_free (convert->downsample_lines); |
2662 | 0 | g_free (convert->dither_lines); |
2663 | 0 | g_free (convert->dither); |
2664 | |
|
2665 | 0 | g_free (convert->gamma_dec.gamma_table); |
2666 | 0 | g_free (convert->gamma_enc.gamma_table); |
2667 | |
|
2668 | 0 | if (convert->tmpline) { |
2669 | 0 | for (i = 0; i < convert->conversion_runner->n_threads; i++) |
2670 | 0 | g_free (convert->tmpline[i]); |
2671 | 0 | g_free (convert->tmpline); |
2672 | 0 | } |
2673 | |
|
2674 | 0 | g_free (convert->borderline); |
2675 | |
|
2676 | 0 | if (convert->config) |
2677 | 0 | gst_structure_free (convert->config); |
2678 | |
|
2679 | 0 | for (i = 0; i < 4; i++) { |
2680 | 0 | for (j = 0; j < convert->conversion_runner->n_threads; j++) { |
2681 | 0 | if (convert->fv_scaler[i].scaler) |
2682 | 0 | gst_video_scaler_free (convert->fv_scaler[i].scaler[j]); |
2683 | 0 | if (convert->fh_scaler[i].scaler) |
2684 | 0 | gst_video_scaler_free (convert->fh_scaler[i].scaler[j]); |
2685 | 0 | } |
2686 | 0 | g_free (convert->fv_scaler[i].scaler); |
2687 | 0 | g_free (convert->fh_scaler[i].scaler); |
2688 | 0 | } |
2689 | |
|
2690 | 0 | if (convert->conversion_runner) |
2691 | 0 | gst_parallelized_task_runner_free (convert->conversion_runner); |
2692 | |
|
2693 | 0 | clear_matrix_data (&convert->to_RGB_matrix); |
2694 | 0 | clear_matrix_data (&convert->convert_matrix); |
2695 | 0 | clear_matrix_data (&convert->to_YUV_matrix); |
2696 | |
|
2697 | 0 | for (i = 0; i < 4; i++) { |
2698 | 0 | g_free (convert->tasks[i]); |
2699 | 0 | g_free (convert->tasks_p[i]); |
2700 | 0 | } |
2701 | |
|
2702 | 0 | g_free (convert); |
2703 | 0 | } |
2704 | | |
2705 | | static gboolean |
2706 | | copy_config (const GstIdStr * fieldname, const GValue * value, |
2707 | | gpointer user_data) |
2708 | 0 | { |
2709 | 0 | GstVideoConverter *convert = user_data; |
2710 | |
|
2711 | 0 | gst_structure_id_str_set_value (convert->config, fieldname, value); |
2712 | |
|
2713 | 0 | return TRUE; |
2714 | 0 | } |
2715 | | |
2716 | | /** |
2717 | | * gst_video_converter_set_config: |
2718 | | * @convert: a #GstVideoConverter |
2719 | | * @config: (transfer full): a #GstStructure |
2720 | | * |
2721 | | * Set @config as extra configuration for @convert. |
2722 | | * |
2723 | | * If the parameters in @config cannot be set exactly, this function returns
2724 | | * %FALSE and will try to update as much state as possible. The new state can |
2725 | | * then be retrieved and refined with gst_video_converter_get_config(). |
2726 | | * |
2727 | | * Look at the `GST_VIDEO_CONVERTER_OPT_*` fields to check valid configuration |
2728 | | * options and values.
2729 | | * |
2730 | | * Returns: %TRUE when @config could be set. |
2731 | | * |
2732 | | * Since: 1.6 |
2733 | | */ |
2734 | | gboolean |
2735 | | gst_video_converter_set_config (GstVideoConverter * convert, |
2736 | | GstStructure * config) |
2737 | 0 | { |
2738 | 0 | g_return_val_if_fail (convert != NULL, FALSE); |
2739 | 0 | g_return_val_if_fail (config != NULL, FALSE); |
2740 | | |
2741 | 0 | gst_structure_foreach_id_str (config, copy_config, convert); |
2742 | 0 | gst_structure_free (config); |
2743 | |
|
2744 | 0 | gst_video_converter_init_from_config (convert); |
2745 | |
|
2746 | 0 | return TRUE; |
2747 | 0 | } |
2748 | | |
2749 | | /** |
2750 | | * gst_video_converter_get_config: |
2751 | | * @convert: a #GstVideoConverter |
2752 | | * |
2753 | | * Get the current configuration of @convert. |
2754 | | * |
2755 | | * Returns: a #GstStructure that remains valid for as long as @convert is valid |
2756 | | * or until gst_video_converter_set_config() is called. |
2757 | | */ |
2758 | | const GstStructure * |
2759 | | gst_video_converter_get_config (GstVideoConverter * convert) |
2760 | 0 | { |
2761 | 0 | g_return_val_if_fail (convert != NULL, NULL); |
2762 | | |
2763 | 0 | return convert->config; |
2764 | 0 | } |
2765 | | |
2766 | | /** |
2767 | | * gst_video_converter_frame: |
2768 | | * @convert: a #GstVideoConverter |
2769 | | * @dest: a #GstVideoFrame |
2770 | | * @src: a #GstVideoFrame |
2771 | | * |
2772 | | * Convert the pixels of @src into @dest using @convert. |
2773 | | * |
2774 | | * If #GST_VIDEO_CONVERTER_OPT_ASYNC_TASKS is %TRUE then this function will |
2775 | | * return immediately and needs to be followed by a call to |
2776 | | * gst_video_converter_frame_finish(). |
2777 | | * |
2778 | | * Since: 1.6 |
2779 | | */ |
2780 | | void |
2781 | | gst_video_converter_frame (GstVideoConverter * convert, |
2782 | | const GstVideoFrame * src, GstVideoFrame * dest) |
2783 | 0 | { |
2784 | 0 | g_return_if_fail (convert != NULL); |
2785 | 0 | g_return_if_fail (src != NULL); |
2786 | 0 | g_return_if_fail (dest != NULL); |
2787 | | |
2788 | | /* Check that the frames we've been passed match the layout
2789 | | * we were configured for, or we might go out of bounds */
2790 | 0 | if (G_UNLIKELY (GST_VIDEO_INFO_FORMAT (&convert->in_info) != |
2791 | 0 | GST_VIDEO_FRAME_FORMAT (src) |
2792 | 0 | || GST_VIDEO_INFO_WIDTH (&convert->in_info) > |
2793 | 0 | GST_VIDEO_FRAME_WIDTH (src) |
2794 | 0 | || GST_VIDEO_INFO_FIELD_HEIGHT (&convert->in_info) > |
2795 | 0 | GST_VIDEO_FRAME_HEIGHT (src))) { |
2796 | 0 | g_critical ("Input video frame does not match configuration"); |
2797 | 0 | return; |
2798 | 0 | } |
2799 | 0 | if (G_UNLIKELY (GST_VIDEO_INFO_FORMAT (&convert->out_info) != |
2800 | 0 | GST_VIDEO_FRAME_FORMAT (dest) |
2801 | 0 | || GST_VIDEO_INFO_WIDTH (&convert->out_info) > |
2802 | 0 | GST_VIDEO_FRAME_WIDTH (dest) |
2803 | 0 | || GST_VIDEO_INFO_FIELD_HEIGHT (&convert->out_info) > |
2804 | 0 | GST_VIDEO_FRAME_HEIGHT (dest))) { |
2805 | 0 | g_critical ("Output video frame does not match configuration"); |
2806 | 0 | return; |
2807 | 0 | } |
2808 | | |
2809 | 0 | if (G_UNLIKELY (convert->in_width == 0 || convert->in_height == 0 || |
2810 | 0 | convert->out_width == 0 || convert->out_height == 0)) |
2811 | 0 | return; |
2812 | | |
2813 | 0 | convert->convert (convert, src, dest); |
2814 | 0 | } |
2815 | | |
2816 | | /** |
2817 | | * gst_video_converter_frame_finish: |
2818 | | * @convert: a #GstVideoConverter |
2819 | | * |
2820 | | * Wait for a previous async conversion performed using |
2821 | | * gst_video_converter_frame() to complete. |
2822 | | * |
2823 | | * Since: 1.20 |
2824 | | */ |
2825 | | void |
2826 | | gst_video_converter_frame_finish (GstVideoConverter * convert) |
2827 | 0 | { |
2828 | 0 | g_return_if_fail (convert); |
2829 | 0 | g_return_if_fail (convert->conversion_runner); |
2830 | 0 | g_return_if_fail (convert->conversion_runner->async_tasks); |
2831 | | |
2832 | 0 | gst_parallelized_task_runner_finish (convert->conversion_runner); |
2833 | 0 | } |
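
/* Usage sketch (illustrative, not part of the upstream file): asynchronous
 * conversion as described above.  The config values are assumptions for the
 * example, and the converter is assumed to have been created with such a
 * config (and typically a task pool) before example_async_convert() is
 * called. */
#if 0
static GstStructure *
example_async_config (void)
{
  return gst_structure_new ("GstVideoConverter",
      GST_VIDEO_CONVERTER_OPT_ASYNC_TASKS, G_TYPE_BOOLEAN, TRUE,
      GST_VIDEO_CONVERTER_OPT_THREADS, G_TYPE_UINT, 4, NULL);
}

static void
example_async_convert (GstVideoConverter * convert,
    const GstVideoFrame * src, GstVideoFrame * dest)
{
  gst_video_converter_frame (convert, src, dest);       /* queues the work */
  /* ... other work can happen on this thread here ... */
  gst_video_converter_frame_finish (convert);   /* waits for completion */
}
#endif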
2834 | | |
2835 | | static void |
2836 | | video_converter_compute_matrix (GstVideoConverter * convert) |
2837 | 0 | { |
2838 | 0 | MatrixData *dst = &convert->convert_matrix; |
2839 | |
|
2840 | 0 | color_matrix_set_identity (dst); |
2841 | 0 | compute_matrix_to_RGB (convert, dst); |
2842 | 0 | compute_matrix_to_YUV (convert, dst, FALSE); |
2843 | |
|
2844 | 0 | convert->current_bits = 8; |
2845 | 0 | prepare_matrix (convert, dst); |
2846 | 0 | } |
2847 | | |
2848 | | static void |
2849 | | video_converter_compute_resample (GstVideoConverter * convert, gint idx) |
2850 | 0 | { |
2851 | 0 | GstVideoInfo *in_info, *out_info; |
2852 | 0 | const GstVideoFormatInfo *sfinfo, *dfinfo; |
2853 | |
|
2854 | 0 | if (CHECK_CHROMA_NONE (convert)) |
2855 | 0 | return; |
2856 | | |
2857 | 0 | in_info = &convert->in_info; |
2858 | 0 | out_info = &convert->out_info; |
2859 | |
|
2860 | 0 | sfinfo = in_info->finfo; |
2861 | 0 | dfinfo = out_info->finfo; |
2862 | |
|
2863 | 0 | GST_LOG ("site: %d->%d, w_sub: %d->%d, h_sub: %d->%d", in_info->chroma_site, |
2864 | 0 | out_info->chroma_site, sfinfo->w_sub[2], dfinfo->w_sub[2], |
2865 | 0 | sfinfo->h_sub[2], dfinfo->h_sub[2]); |
2866 | |
|
2867 | 0 | if (sfinfo->w_sub[2] != dfinfo->w_sub[2] || |
2868 | 0 | sfinfo->h_sub[2] != dfinfo->h_sub[2] || |
2869 | 0 | in_info->chroma_site != out_info->chroma_site || |
2870 | 0 | in_info->width != out_info->width || |
2871 | 0 | in_info->height != out_info->height) { |
2872 | 0 | if (GST_VIDEO_INFO_IS_INTERLACED (in_info) |
2873 | 0 | && GST_VIDEO_INFO_INTERLACE_MODE (in_info) != |
2874 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE) { |
2875 | 0 | if (!CHECK_CHROMA_DOWNSAMPLE (convert)) |
2876 | 0 | convert->upsample_i[idx] = gst_video_chroma_resample_new (0, |
2877 | 0 | in_info->chroma_site, GST_VIDEO_CHROMA_FLAG_INTERLACED, |
2878 | 0 | sfinfo->unpack_format, sfinfo->w_sub[2], sfinfo->h_sub[2]); |
2879 | 0 | if (!CHECK_CHROMA_UPSAMPLE (convert)) |
2880 | 0 | convert->downsample_i[idx] = |
2881 | 0 | gst_video_chroma_resample_new (0, out_info->chroma_site, |
2882 | 0 | GST_VIDEO_CHROMA_FLAG_INTERLACED, dfinfo->unpack_format, |
2883 | 0 | -dfinfo->w_sub[2], -dfinfo->h_sub[2]); |
2884 | 0 | } |
2885 | 0 | if (!CHECK_CHROMA_DOWNSAMPLE (convert)) |
2886 | 0 | convert->upsample_p[idx] = gst_video_chroma_resample_new (0, |
2887 | 0 | in_info->chroma_site, 0, sfinfo->unpack_format, sfinfo->w_sub[2], |
2888 | 0 | sfinfo->h_sub[2]); |
2889 | 0 | if (!CHECK_CHROMA_UPSAMPLE (convert)) |
2890 | 0 | convert->downsample_p[idx] = gst_video_chroma_resample_new (0, |
2891 | 0 | out_info->chroma_site, 0, dfinfo->unpack_format, -dfinfo->w_sub[2], |
2892 | 0 | -dfinfo->h_sub[2]); |
2893 | 0 | } |
2894 | 0 | } |
2895 | | |
2896 | | #define FRAME_GET_PLANE_STRIDE(frame, plane) \ |
2897 | 0 | GST_VIDEO_FRAME_PLANE_STRIDE (frame, plane) |
2898 | | #define FRAME_GET_PLANE_LINE(frame, plane, line) \ |
2899 | 0 | (gpointer)(((guint8*)(GST_VIDEO_FRAME_PLANE_DATA (frame, plane))) + \ |
2900 | 0 | FRAME_GET_PLANE_STRIDE (frame, plane) * (line)) |
2901 | | |
2902 | | #define FRAME_GET_COMP_STRIDE(frame, comp) \ |
2903 | 0 | GST_VIDEO_FRAME_COMP_STRIDE (frame, comp) |
2904 | | #define FRAME_GET_COMP_LINE(frame, comp, line) \ |
2905 | 0 | (gpointer)(((guint8*)(GST_VIDEO_FRAME_COMP_DATA (frame, comp))) + \ |
2906 | 0 | FRAME_GET_COMP_STRIDE (frame, comp) * (line)) |
2907 | | |
2908 | 0 | #define FRAME_GET_STRIDE(frame) FRAME_GET_PLANE_STRIDE (frame, 0) |
2909 | 0 | #define FRAME_GET_LINE(frame,line) FRAME_GET_PLANE_LINE (frame, 0, line) |
2910 | | |
2911 | 0 | #define FRAME_GET_Y_LINE(frame,line) FRAME_GET_COMP_LINE(frame, GST_VIDEO_COMP_Y, line) |
2912 | 0 | #define FRAME_GET_U_LINE(frame,line) FRAME_GET_COMP_LINE(frame, GST_VIDEO_COMP_U, line) |
2913 | 0 | #define FRAME_GET_V_LINE(frame,line) FRAME_GET_COMP_LINE(frame, GST_VIDEO_COMP_V, line) |
2914 | 0 | #define FRAME_GET_A_LINE(frame,line) FRAME_GET_COMP_LINE(frame, GST_VIDEO_COMP_A, line) |
2915 | | |
2916 | 0 | #define FRAME_GET_Y_STRIDE(frame) FRAME_GET_COMP_STRIDE(frame, GST_VIDEO_COMP_Y) |
2917 | 0 | #define FRAME_GET_U_STRIDE(frame) FRAME_GET_COMP_STRIDE(frame, GST_VIDEO_COMP_U) |
2918 | 0 | #define FRAME_GET_V_STRIDE(frame) FRAME_GET_COMP_STRIDE(frame, GST_VIDEO_COMP_V) |
2919 | | #define FRAME_GET_A_STRIDE(frame) FRAME_GET_COMP_STRIDE(frame, GST_VIDEO_COMP_A) |
2920 | | |
2921 | | |
2922 | | #define UNPACK_FRAME(frame,dest,line,x,width) \ |
2923 | 0 | frame->info.finfo->unpack_func (frame->info.finfo, \ |
2924 | 0 | (GST_VIDEO_FRAME_IS_INTERLACED (frame) ? \ |
2925 | 0 | GST_VIDEO_PACK_FLAG_INTERLACED : \ |
2926 | 0 | GST_VIDEO_PACK_FLAG_NONE), \ |
2927 | 0 | dest, frame->data, frame->info.stride, x, \ |
2928 | 0 | line, width) |
2929 | | #define PACK_FRAME(frame,src,line,width) \ |
2930 | 0 | frame->info.finfo->pack_func (frame->info.finfo, \ |
2931 | 0 | (GST_VIDEO_FRAME_IS_INTERLACED (frame) ? \ |
2932 | 0 | GST_VIDEO_PACK_FLAG_INTERLACED : \ |
2933 | 0 | GST_VIDEO_PACK_FLAG_NONE), \ |
2934 | 0 | src, 0, frame->data, frame->info.stride, \ |
2935 | 0 | frame->info.chroma_site, line, width); |
2936 | | |
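The two macros above are the core of the generic path: UNPACK_FRAME turns one source scanline into the format's intermediate representation (8- or 16-bit AYUV/ARGB) and PACK_FRAME writes such a line back into the destination planes. The sketch below shows roughly what they expand to, using the public GstVideoFormatInfo function pointers. It is a minimal illustration, not code from this file; roundtrip_one_line is a hypothetical helper, error handling is omitted, and tmpline must be large enough for one line in the unpack format (up to 8 bytes per pixel for the 16-bit intermediates).

    #include <gst/video/video.h>

    /* Hypothetical helper: unpack one line of a mapped frame into the
     * intermediate format, then pack it straight back. */
    static void
    roundtrip_one_line (GstVideoFrame * frame, gpointer tmpline, gint line)
    {
      const GstVideoFormatInfo *finfo = frame->info.finfo;
      GstVideoPackFlags flags = GST_VIDEO_FRAME_IS_INTERLACED (frame) ?
          GST_VIDEO_PACK_FLAG_INTERLACED : GST_VIDEO_PACK_FLAG_NONE;
      gint width = GST_VIDEO_FRAME_WIDTH (frame);

      /* unpack: frame planes -> one line in the intermediate format */
      finfo->unpack_func (finfo, flags, tmpline, frame->data,
          frame->info.stride, 0, line, width);

      /* ... a per-line operation would run on tmpline here ... */

      /* pack: intermediate line -> back into the frame planes */
      finfo->pack_func (finfo, flags, tmpline, 0, frame->data,
          frame->info.stride, frame->info.chroma_site, line, width);
    }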
2937 | | static gpointer |
2938 | | get_dest_line (GstLineCache * cache, gint idx, gpointer user_data) |
2939 | 0 | { |
2940 | 0 | GstVideoConverter *convert = user_data; |
2941 | 0 | guint8 *line; |
2942 | 0 | gint pstride = convert->pack_pstride; |
2943 | 0 | gint out_x = convert->out_x; |
2944 | 0 | guint cline; |
2945 | |
|
2946 | 0 | cline = CLAMP (idx, 0, convert->out_maxheight - 1); |
2947 | |
|
2948 | 0 | line = FRAME_GET_LINE (convert->dest, cline); |
2949 | 0 | GST_LOG ("get dest line %d %p", cline, line); |
2950 | |
|
2951 | 0 | if (convert->borderline) { |
2952 | 0 | gint r_border = (out_x + convert->out_width) * pstride; |
2953 | 0 | gint rb_width = convert->out_maxwidth * pstride - r_border; |
2954 | 0 | gint lb_width = out_x * pstride; |
2955 | |
|
2956 | 0 | memcpy (line, convert->borderline, lb_width); |
2957 | 0 | memcpy (line + r_border, convert->borderline, rb_width); |
2958 | 0 | } |
2959 | 0 | line += out_x * pstride; |
2960 | |
|
2961 | 0 | return line; |
2962 | 0 | } |
2963 | | |
2964 | | static gboolean |
2965 | | do_unpack_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line, |
2966 | | gpointer user_data) |
2967 | 0 | { |
2968 | 0 | GstVideoConverter *convert = user_data; |
2969 | 0 | gpointer tmpline; |
2970 | 0 | guint cline; |
2971 | |
|
2972 | 0 | cline = CLAMP (in_line + convert->in_y, 0, convert->in_maxheight - 1); |
2973 | |
|
2974 | 0 | if (cache->alloc_writable || !convert->identity_unpack) { |
2975 | 0 | tmpline = gst_line_cache_alloc_line (cache, out_line); |
2976 | 0 | GST_LOG ("unpack line %d (%u) %p", in_line, cline, tmpline); |
2977 | 0 | UNPACK_FRAME (convert->src, tmpline, cline, convert->in_x, |
2978 | 0 | convert->in_width); |
2979 | 0 | } else { |
2980 | 0 | tmpline = ((guint8 *) FRAME_GET_LINE (convert->src, cline)) + |
2981 | 0 | convert->in_x * convert->unpack_pstride; |
2982 | 0 | GST_LOG ("get src line %d (%u) %p", in_line, cline, tmpline); |
2983 | 0 | } |
2984 | 0 | gst_line_cache_add_line (cache, in_line, tmpline); |
2985 | |
|
2986 | 0 | return TRUE; |
2987 | 0 | } |
2988 | | |
2989 | | static gboolean |
2990 | | do_upsample_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line, |
2991 | | gpointer user_data) |
2992 | 0 | { |
2993 | 0 | GstVideoConverter *convert = user_data; |
2994 | 0 | gpointer *lines; |
2995 | 0 | gint i, start_line, n_lines; |
2996 | |
|
2997 | 0 | n_lines = convert->up_n_lines; |
2998 | 0 | start_line = in_line; |
2999 | 0 | if (start_line < n_lines + convert->up_offset) { |
3000 | 0 | start_line += convert->up_offset; |
3001 | 0 | out_line += convert->up_offset; |
3002 | 0 | } |
3003 | | |
3004 | | /* get the lines needed for chroma upsample */ |
3005 | 0 | lines = |
3006 | 0 | gst_line_cache_get_lines (cache->prev, idx, out_line, start_line, |
3007 | 0 | n_lines); |
3008 | |
|
3009 | 0 | if (convert->upsample[idx]) { |
3010 | 0 | GST_LOG ("doing upsample %d-%d %p", start_line, start_line + n_lines - 1, |
3011 | 0 | lines[0]); |
3012 | 0 | gst_video_chroma_resample (convert->upsample[idx], lines, |
3013 | 0 | convert->in_width); |
3014 | 0 | } |
3015 | |
|
3016 | 0 | for (i = 0; i < n_lines; i++) |
3017 | 0 | gst_line_cache_add_line (cache, start_line + i, lines[i]); |
3018 | |
|
3019 | 0 | return TRUE; |
3020 | 0 | } |
3021 | | |
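do_upsample_lines() drives the public chroma resampler: it asks how many consecutive lines one call consumes, fetches exactly those lines from the previous cache stage, and lets the resampler work on them in place. A hypothetical standalone sketch of that pattern is shown below; upsample_420_chroma is not part of this file, and the factors (1, 1) describe 4:2:0 subsampling (a shift of one in both directions), matching what w_sub[2] / h_sub[2] report for I420.

    #include <gst/video/video.h>

    /* Hypothetical helper: upsample the 4:2:0-subsampled chroma of a group
     * of unpacked AYUV lines, in place. */
    static void
    upsample_420_chroma (gpointer lines[], gint width)
    {
      GstVideoChromaResample *up;
      guint n_lines;
      gint offset;

      up = gst_video_chroma_resample_new (GST_VIDEO_CHROMA_METHOD_LINEAR,
          GST_VIDEO_CHROMA_SITE_MPEG2, 0, GST_VIDEO_FORMAT_AYUV, 1, 1);

      /* how many consecutive lines one call consumes and at which offset
       * the group starts; `lines` must contain at least n_lines entries */
      gst_video_chroma_resample_get_info (up, &n_lines, &offset);

      /* operates in place on the unpacked AYUV lines */
      gst_video_chroma_resample (up, lines, width);

      gst_video_chroma_resample_free (up);
    }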
3022 | | static gboolean |
3023 | | do_convert_to_RGB_lines (GstLineCache * cache, gint idx, gint out_line, |
3024 | | gint in_line, gpointer user_data) |
3025 | 0 | { |
3026 | 0 | GstVideoConverter *convert = user_data; |
3027 | 0 | MatrixData *data = &convert->to_RGB_matrix; |
3028 | 0 | gpointer *lines, destline; |
3029 | |
|
3030 | 0 | lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1); |
3031 | 0 | destline = lines[0]; |
3032 | |
|
3033 | 0 | if (data->matrix_func) { |
3034 | 0 | GST_LOG ("to RGB line %d %p", in_line, destline); |
3035 | 0 | data->matrix_func (data, destline); |
3036 | 0 | } |
3037 | 0 | if (convert->gamma_dec.gamma_func) { |
3038 | 0 | destline = gst_line_cache_alloc_line (cache, out_line); |
3039 | |
|
3040 | 0 | GST_LOG ("gamma decode line %d %p->%p", in_line, lines[0], destline); |
3041 | 0 | convert->gamma_dec.gamma_func (&convert->gamma_dec, destline, lines[0]); |
3042 | 0 | } |
3043 | 0 | gst_line_cache_add_line (cache, in_line, destline); |
3044 | |
|
3045 | 0 | return TRUE; |
3046 | 0 | } |
3047 | | |
3048 | | static gboolean |
3049 | | do_hscale_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line, |
3050 | | gpointer user_data) |
3051 | 0 | { |
3052 | 0 | GstVideoConverter *convert = user_data; |
3053 | 0 | gpointer *lines, destline; |
3054 | |
|
3055 | 0 | lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1); |
3056 | |
|
3057 | 0 | destline = gst_line_cache_alloc_line (cache, out_line); |
3058 | |
|
3059 | 0 | GST_LOG ("hresample line %d %p->%p", in_line, lines[0], destline); |
3060 | 0 | gst_video_scaler_horizontal (convert->h_scaler[idx], convert->h_scale_format, |
3061 | 0 | lines[0], destline, 0, convert->out_width); |
3062 | |
|
3063 | 0 | gst_line_cache_add_line (cache, in_line, destline); |
3064 | |
|
3065 | 0 | return TRUE; |
3066 | 0 | } |
3067 | | |
3068 | | static gboolean |
3069 | | do_vscale_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line, |
3070 | | gpointer user_data) |
3071 | 0 | { |
3072 | 0 | GstVideoConverter *convert = user_data; |
3073 | 0 | gpointer *lines, destline; |
3074 | 0 | guint sline, n_lines; |
3075 | 0 | guint cline; |
3076 | |
|
3077 | 0 | cline = CLAMP (in_line, 0, convert->out_height - 1); |
3078 | |
|
3079 | 0 | gst_video_scaler_get_coeff (convert->v_scaler[idx], cline, &sline, &n_lines); |
3080 | 0 | lines = gst_line_cache_get_lines (cache->prev, idx, out_line, sline, n_lines); |
3081 | |
|
3082 | 0 | destline = gst_line_cache_alloc_line (cache, out_line); |
3083 | |
|
3084 | 0 | GST_LOG ("vresample line %d %d-%d %p->%p", in_line, sline, |
3085 | 0 | sline + n_lines - 1, lines[0], destline); |
3086 | 0 | gst_video_scaler_vertical (convert->v_scaler[idx], convert->v_scale_format, |
3087 | 0 | lines, destline, cline, convert->v_scale_width); |
3088 | |
|
3089 | 0 | gst_line_cache_add_line (cache, in_line, destline); |
3090 | |
|
3091 | 0 | return TRUE; |
3092 | 0 | } |
3093 | | |
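do_vscale_lines() relies on gst_video_scaler_get_coeff() to learn which source lines feed a given output line, so that only those lines have to be produced by the earlier cache stages. A hypothetical sketch of that pattern follows; vscale_one_output_line is not part of this file, and the AYUV format and linear method are example choices only.

    #include <gst/video/video.h>

    /* Hypothetical helper: produce one vertically scaled output line from an
     * array of source line pointers indexed by absolute source line. */
    static void
    vscale_one_output_line (GstVideoScaler * vscale, gpointer in_lines[],
        gpointer destline, guint out_line, guint width)
    {
      guint first, n_taps;

      /* which source lines contribute to this output line, and how many */
      gst_video_scaler_get_coeff (vscale, out_line, &first, &n_taps);

      /* blend those n_taps lines into the destination line; `width` is the
       * width of the (already horizontally scaled) AYUV lines */
      gst_video_scaler_vertical (vscale, GST_VIDEO_FORMAT_AYUV,
          in_lines + first, destline, out_line, width);
    }

    /* The scaler itself would be created once per direction, for example:
     *   vscale = gst_video_scaler_new (GST_VIDEO_RESAMPLER_METHOD_LINEAR,
     *       GST_VIDEO_SCALER_FLAG_NONE, 2, in_height, out_height, NULL);
     */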
3094 | | static gboolean |
3095 | | do_convert_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line, |
3096 | | gpointer user_data) |
3097 | 0 | { |
3098 | 0 | GstVideoConverter *convert = user_data; |
3099 | 0 | MatrixData *data = &convert->convert_matrix; |
3100 | 0 | gpointer *lines, destline; |
3101 | 0 | guint in_bits, out_bits; |
3102 | 0 | gint width; |
3103 | |
|
3104 | 0 | lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1); |
3105 | |
|
3106 | 0 | destline = lines[0]; |
3107 | |
|
3108 | 0 | in_bits = convert->in_bits; |
3109 | 0 | out_bits = convert->out_bits; |
3110 | |
|
3111 | 0 | width = MIN (convert->in_width, convert->out_width); |
3112 | |
|
3113 | 0 | if (out_bits == 16 || in_bits == 16) { |
3114 | 0 | gpointer srcline = lines[0]; |
3115 | |
|
3116 | 0 | if (out_bits != in_bits) |
3117 | 0 | destline = gst_line_cache_alloc_line (cache, out_line); |
3118 | | |
3119 | | /* FIXME, we can scale in the conversion matrix */ |
3120 | 0 | if (in_bits == 8) { |
3121 | 0 | GST_LOG ("8->16 line %d %p->%p", in_line, srcline, destline); |
3122 | 0 | video_orc_convert_u8_to_u16 (destline, srcline, width * 4); |
3123 | 0 | srcline = destline; |
3124 | 0 | } |
3125 | |
|
3126 | 0 | if (data->matrix_func) { |
3127 | 0 | GST_LOG ("matrix line %d %p", in_line, srcline); |
3128 | 0 | data->matrix_func (data, srcline); |
3129 | 0 | } |
3130 | | |
3131 | | /* FIXME, dither here */ |
3132 | 0 | if (out_bits == 8) { |
3133 | 0 | GST_LOG ("16->8 line %d %p->%p", in_line, srcline, destline); |
3134 | 0 | video_orc_convert_u16_to_u8 (destline, srcline, width * 4); |
3135 | 0 | } |
3136 | 0 | } else { |
3137 | 0 | if (data->matrix_func) { |
3138 | 0 | GST_LOG ("matrix line %d %p", in_line, destline); |
3139 | 0 | data->matrix_func (data, destline); |
3140 | 0 | } |
3141 | 0 | } |
3142 | 0 | gst_line_cache_add_line (cache, in_line, destline); |
3143 | |
|
3144 | 0 | return TRUE; |
3145 | 0 | } |
3146 | | |
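When either side of the conversion matrix works at 16 bits, the line is widened before and narrowed after the matrix by the Orc kernels called above. The scalar code below is a rough equivalent, under the assumption that widening replicates each byte into both halves (so 0xff maps to 0xffff) and narrowing keeps the high byte; the length passed in do_convert_lines() is width * 4 because the intermediate format carries four components per pixel.

    #include <glib.h>

    /* Assumed scalar equivalents of the Orc widening/narrowing kernels. */
    static void
    convert_u8_to_u16_c (guint16 * dest, const guint8 * src, gint n)
    {
      gint i;

      for (i = 0; i < n; i++)
        dest[i] = (src[i] << 8) | src[i];
    }

    static void
    convert_u16_to_u8_c (guint8 * dest, const guint16 * src, gint n)
    {
      gint i;

      for (i = 0; i < n; i++)
        dest[i] = src[i] >> 8;
    }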
3147 | | static gboolean |
3148 | | do_alpha_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line, |
3149 | | gpointer user_data) |
3150 | 0 | { |
3151 | 0 | gpointer *lines, destline; |
3152 | 0 | GstVideoConverter *convert = user_data; |
3153 | 0 | gint width = MIN (convert->in_width, convert->out_width); |
3154 | |
|
3155 | 0 | lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1); |
3156 | 0 | destline = lines[0]; |
3157 | |
|
3158 | 0 | GST_LOG ("alpha line %d %p", in_line, destline); |
3159 | 0 | convert->alpha_func (convert, destline, width); |
3160 | |
|
3161 | 0 | gst_line_cache_add_line (cache, in_line, destline); |
3162 | |
|
3163 | 0 | return TRUE; |
3164 | 0 | } |
3165 | | |
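convert->alpha_func runs on unpacked lines and either sets, copies or multiplies the alpha component. As a hypothetical scalar stand-in for the 8-bit "set" variant (assuming the AYUV intermediate layout with alpha in the first byte of each pixel; set_alpha_u8_c is not a function from this file):

    #include <glib.h>

    /* Hypothetical stand-in: overwrite the A byte of every pixel of an
     * unpacked AYUV line with a fixed alpha value. */
    static void
    set_alpha_u8_c (guint8 * ayuv_line, guint8 alpha, gint width)
    {
      gint i;

      for (i = 0; i < width; i++)
        ayuv_line[i * 4] = alpha;
    }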
3166 | | static gboolean |
3167 | | do_convert_to_YUV_lines (GstLineCache * cache, gint idx, gint out_line, |
3168 | | gint in_line, gpointer user_data) |
3169 | 0 | { |
3170 | 0 | GstVideoConverter *convert = user_data; |
3171 | 0 | MatrixData *data = &convert->to_YUV_matrix; |
3172 | 0 | gpointer *lines, destline; |
3173 | |
|
3174 | 0 | lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1); |
3175 | 0 | destline = lines[0]; |
3176 | |
|
3177 | 0 | if (convert->gamma_enc.gamma_func) { |
3178 | 0 | destline = gst_line_cache_alloc_line (cache, out_line); |
3179 | |
|
3180 | 0 | GST_LOG ("gamma encode line %d %p->%p", in_line, lines[0], destline); |
3181 | 0 | convert->gamma_enc.gamma_func (&convert->gamma_enc, destline, lines[0]); |
3182 | 0 | } |
3183 | 0 | if (data->matrix_func) { |
3184 | 0 | GST_LOG ("to YUV line %d %p", in_line, destline); |
3185 | 0 | data->matrix_func (data, destline); |
3186 | 0 | } |
3187 | 0 | gst_line_cache_add_line (cache, in_line, destline); |
3188 | |
|
3189 | 0 | return TRUE; |
3190 | 0 | } |
3191 | | |
3192 | | static gboolean |
3193 | | do_downsample_lines (GstLineCache * cache, gint idx, gint out_line, |
3194 | | gint in_line, gpointer user_data) |
3195 | 0 | { |
3196 | 0 | GstVideoConverter *convert = user_data; |
3197 | 0 | gpointer *lines; |
3198 | 0 | gint i, start_line, n_lines; |
3199 | |
|
3200 | 0 | n_lines = convert->down_n_lines; |
3201 | 0 | start_line = in_line; |
3202 | 0 | if (start_line < n_lines + convert->down_offset) |
3203 | 0 | start_line += convert->down_offset; |
3204 | | |
3205 | | /* get the lines needed for chroma downsample */ |
3206 | 0 | lines = |
3207 | 0 | gst_line_cache_get_lines (cache->prev, idx, out_line, start_line, |
3208 | 0 | n_lines); |
3209 | |
|
3210 | 0 | if (convert->downsample[idx]) { |
3211 | 0 | GST_LOG ("downsample line %d %d-%d %p", in_line, start_line, |
3212 | 0 | start_line + n_lines - 1, lines[0]); |
3213 | 0 | gst_video_chroma_resample (convert->downsample[idx], lines, |
3214 | 0 | convert->out_width); |
3215 | 0 | } |
3216 | |
|
3217 | 0 | for (i = 0; i < n_lines; i++) |
3218 | 0 | gst_line_cache_add_line (cache, start_line + i, lines[i]); |
3219 | |
|
3220 | 0 | return TRUE; |
3221 | 0 | } |
3222 | | |
3223 | | static gboolean |
3224 | | do_dither_lines (GstLineCache * cache, gint idx, gint out_line, gint in_line, |
3225 | | gpointer user_data) |
3226 | 0 | { |
3227 | 0 | GstVideoConverter *convert = user_data; |
3228 | 0 | gpointer *lines, destline; |
3229 | |
|
3230 | 0 | lines = gst_line_cache_get_lines (cache->prev, idx, out_line, in_line, 1); |
3231 | 0 | destline = lines[0]; |
3232 | |
|
3233 | 0 | if (convert->dither[idx]) { |
3234 | 0 | GST_LOG ("Dither line %d %p", in_line, destline); |
3235 | 0 | gst_video_dither_line (convert->dither[idx], destline, 0, out_line, |
3236 | 0 | convert->out_width); |
3237 | 0 | } |
3238 | 0 | gst_line_cache_add_line (cache, in_line, destline); |
3239 | |
|
3240 | 0 | return TRUE; |
3241 | 0 | } |
3242 | | |
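Dithering only runs when the output format keeps fewer significant bits than the 16-bit intermediate line. As a generic illustration of ordered dithering (this is not the GstVideoDither implementation, just the underlying idea), a 4x4 Bayer matrix adds a position-dependent bias smaller than the quantization step before the 16 to 8 bit truncation, which breaks up banding:

    #include <glib.h>

    /* Generic 4x4 ordered-dither illustration, not library code. */
    static const guint16 bayer4x4[4][4] = {
      {  0,  8,  2, 10},
      { 12,  4, 14,  6},
      {  3, 11,  1,  9},
      { 15,  7, 13,  5},
    };

    static void
    dither_line_u16 (guint16 * line, guint x, guint y, guint width)
    {
      guint i;

      for (i = 0; i < width * 4; i++) {     /* 4 components per pixel */
        /* bias is 0..240, below the 256 step of a 16->8 bit truncation */
        guint32 v = line[i] + (bayer4x4[y & 3][(x + i / 4) & 3] << 4);
        line[i] = MIN (v, G_MAXUINT16);
      }
    }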
3243 | | typedef struct |
3244 | | { |
3245 | | GstLineCache *pack_lines; |
3246 | | gint idx; |
3247 | | gint h_0, h_1; |
3248 | | gint pack_lines_count; |
3249 | | gint out_y; |
3250 | | gboolean identity_pack; |
3251 | | gint lb_width, out_maxwidth; |
3252 | | GstVideoFrame *dest; |
3253 | | } ConvertTask; |
3254 | | |
3255 | | static void |
3256 | | convert_generic_task (ConvertTask * task) |
3257 | 0 | { |
3258 | 0 | gint i; |
3259 | |
|
3260 | 0 | for (i = task->h_0; i < task->h_1; i += task->pack_lines_count) { |
3261 | 0 | gpointer *lines; |
3262 | | |
3263 | | /* load the lines needed to pack */ |
3264 | 0 | lines = |
3265 | 0 | gst_line_cache_get_lines (task->pack_lines, task->idx, i + task->out_y, |
3266 | 0 | i, task->pack_lines_count); |
3267 | |
|
3268 | 0 | if (!task->identity_pack) { |
3269 | | /* take away the border */ |
3270 | 0 | guint8 *l = ((guint8 *) lines[0]) - task->lb_width; |
3271 | | /* and pack into destination */ |
3272 | 0 | GST_LOG ("pack line %d %p (%p)", i + task->out_y, lines[0], l); |
3273 | 0 | PACK_FRAME (task->dest, l, i + task->out_y, task->out_maxwidth); |
3274 | 0 | } |
3275 | 0 | } |
3276 | 0 | } |
3277 | | |
3278 | | static void |
3279 | | video_converter_generic (GstVideoConverter * convert, const GstVideoFrame * src, |
3280 | | GstVideoFrame * dest) |
3281 | 0 | { |
3282 | 0 | gint i; |
3283 | 0 | gint out_maxwidth, out_maxheight; |
3284 | 0 | gint out_x, out_y, out_height; |
3285 | 0 | gint pack_lines, pstride; |
3286 | 0 | gint lb_width; |
3287 | 0 | ConvertTask *tasks; |
3288 | 0 | ConvertTask **tasks_p; |
3289 | 0 | gint n_threads; |
3290 | 0 | gint lines_per_thread; |
3291 | |
|
3292 | 0 | out_height = convert->out_height; |
3293 | 0 | out_maxwidth = convert->out_maxwidth; |
3294 | 0 | out_maxheight = convert->out_maxheight; |
3295 | |
|
3296 | 0 | out_x = convert->out_x; |
3297 | 0 | out_y = convert->out_y; |
3298 | |
|
3299 | 0 | convert->src = src; |
3300 | 0 | convert->dest = dest; |
3301 | |
|
3302 | 0 | if (GST_VIDEO_FRAME_IS_INTERLACED (src)) { |
3303 | 0 | GST_LOG ("setup interlaced frame"); |
3304 | 0 | convert->upsample = convert->upsample_i; |
3305 | 0 | convert->downsample = convert->downsample_i; |
3306 | 0 | convert->v_scaler = convert->v_scaler_i; |
3307 | 0 | } else { |
3308 | 0 | GST_LOG ("setup progressive frame"); |
3309 | 0 | convert->upsample = convert->upsample_p; |
3310 | 0 | convert->downsample = convert->downsample_p; |
3311 | 0 | convert->v_scaler = convert->v_scaler_p; |
3312 | 0 | } |
3313 | 0 | if (convert->upsample[0]) { |
3314 | 0 | gst_video_chroma_resample_get_info (convert->upsample[0], |
3315 | 0 | &convert->up_n_lines, &convert->up_offset); |
3316 | 0 | } else { |
3317 | 0 | convert->up_n_lines = 1; |
3318 | 0 | convert->up_offset = 0; |
3319 | 0 | } |
3320 | 0 | if (convert->downsample[0]) { |
3321 | 0 | gst_video_chroma_resample_get_info (convert->downsample[0], |
3322 | 0 | &convert->down_n_lines, &convert->down_offset); |
3323 | 0 | } else { |
3324 | 0 | convert->down_n_lines = 1; |
3325 | 0 | convert->down_offset = 0; |
3326 | 0 | } |
3327 | |
|
3328 | 0 | pack_lines = convert->pack_nlines; /* only 1 for now */ |
3329 | 0 | pstride = convert->pack_pstride; |
3330 | |
|
3331 | 0 | lb_width = out_x * pstride; |
3332 | |
|
3333 | 0 | if (convert->borderline) { |
3334 | | /* FIXME we should try to avoid PACK_FRAME */ |
3335 | 0 | for (i = 0; i < out_y; i++) |
3336 | 0 | PACK_FRAME (dest, convert->borderline, i, out_maxwidth); |
3337 | 0 | } |
3338 | |
|
3339 | 0 | n_threads = convert->conversion_runner->n_threads; |
3340 | 0 | tasks = convert->tasks[0] = |
3341 | 0 | g_renew (ConvertTask, convert->tasks[0], n_threads); |
3342 | 0 | tasks_p = convert->tasks_p[0] = |
3343 | 0 | g_renew (ConvertTask *, convert->tasks_p[0], n_threads); |
3344 | |
|
3345 | 0 | lines_per_thread = |
3346 | 0 | GST_ROUND_UP_N ((out_height + n_threads - 1) / n_threads, pack_lines); |
3347 | |
|
3348 | 0 | for (i = 0; i < n_threads; i++) { |
3349 | 0 | tasks[i].dest = dest; |
3350 | 0 | tasks[i].pack_lines = convert->pack_lines[i]; |
3351 | 0 | tasks[i].idx = i; |
3352 | 0 | tasks[i].pack_lines_count = pack_lines; |
3353 | 0 | tasks[i].out_y = out_y; |
3354 | 0 | tasks[i].identity_pack = convert->identity_pack; |
3355 | 0 | tasks[i].lb_width = lb_width; |
3356 | 0 | tasks[i].out_maxwidth = out_maxwidth; |
3357 | |
|
3358 | 0 | tasks[i].h_0 = i * lines_per_thread; |
3359 | 0 | tasks[i].h_1 = MIN ((i + 1) * lines_per_thread, out_height); |
3360 | |
|
3361 | 0 | tasks_p[i] = &tasks[i]; |
3362 | 0 | } |
3363 | |
|
3364 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
3365 | 0 | (GstParallelizedTaskFunc) convert_generic_task, (gpointer) tasks_p); |
3366 | |
|
3367 | 0 | if (convert->borderline) { |
3368 | 0 | for (i = out_y + out_height; i < out_maxheight; i++) |
3369 | 0 | PACK_FRAME (dest, convert->borderline, i, out_maxwidth); |
3370 | 0 | } |
3371 | 0 | if (convert->pack_pal) { |
3372 | 0 | memcpy (GST_VIDEO_FRAME_PLANE_DATA (dest, 1), convert->pack_pal, |
3373 | 0 | convert->pack_palsize); |
3374 | 0 | } |
3375 | 0 | } |
3376 | | |
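The work split used above is simple static partitioning: the output height is divided by the number of threads, rounded up to a multiple of the pack unit, and the last slice is clamped to the real height. A small self-contained illustration (print_thread_ranges is a hypothetical helper):

    #include <gst/gst.h>

    /* Static line partitioning as done in video_converter_generic(). */
    static void
    print_thread_ranges (gint out_height, gint n_threads, gint pack_lines)
    {
      gint lines_per_thread =
          GST_ROUND_UP_N ((out_height + n_threads - 1) / n_threads, pack_lines);
      gint i;

      for (i = 0; i < n_threads; i++) {
        gint h_0 = i * lines_per_thread;
        gint h_1 = MIN ((i + 1) * lines_per_thread, out_height);

        g_print ("thread %d: lines [%d, %d)\n", i, h_0, h_1);
      }
    }

    /* e.g. print_thread_ranges (1080, 4, 2) gives four 270-line slices;
     * print_thread_ranges (1082, 4, 2) gives 272, 272, 272 and 266 lines. */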
3377 | | static void convert_fill_border (GstVideoConverter * convert, |
3378 | | GstVideoFrame * dest); |
3379 | | |
3380 | | /* Fast paths */ |
3381 | | |
3382 | | #define GET_LINE_OFFSETS(interlaced,line,l1,l2) \ |
3383 | 0 | if (interlaced) { \ |
3384 | 0 | l1 = (line & 2 ? line - 1 : line); \ |
3385 | 0 | l2 = l1 + 2; \ |
3386 | 0 | } else { \ |
3387 | 0 | l1 = line; \ |
3388 | 0 | l2 = l1 + 1; \ |
3389 | 0 | } |
3390 | | |
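GET_LINE_OFFSETS pairs the two lines that share a chroma line. For progressive content that is simply (line, line + 1); for interlaced content the pairing keeps both lines in the same field. The small demo below walks the first eight lines in steps of two, exactly as the fast-path tasks do:

    #include <glib.h>

    /* Demonstrates the interlaced pairing produced by GET_LINE_OFFSETS. */
    int
    main (void)
    {
      gint line, l1, l2;

      for (line = 0; line < 8; line += 2) {
        /* interlaced branch of GET_LINE_OFFSETS */
        l1 = (line & 2) ? line - 1 : line;
        l2 = l1 + 2;
        g_print ("i=%d -> lines (%d,%d)\n", line, l1, l2);
      }
      /* prints (0,2) (1,3) (4,6) (5,7): top-field pairs, then bottom-field pairs */
      return 0;
    }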
3391 | | typedef struct |
3392 | | { |
3393 | | const GstVideoFrame *src; |
3394 | | GstVideoFrame *dest; |
3395 | | gint height_0, height_1; |
3396 | | |
3397 | | /* parameters */ |
3398 | | gboolean interlaced; |
3399 | | gint width; |
3400 | | gint alpha; |
3401 | | MatrixData *data; |
3402 | | gint in_x, in_y; |
3403 | | gint out_x, out_y; |
3404 | | gpointer tmpline; |
3405 | | } FConvertTask; |
3406 | | |
3407 | | static void |
3408 | | convert_I420_YUY2_task (FConvertTask * task) |
3409 | 0 | { |
3410 | 0 | gint i; |
3411 | 0 | gint l1, l2; |
3412 | |
|
3413 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
3414 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
3415 | |
|
3416 | 0 | video_orc_convert_I420_YUY2 (FRAME_GET_LINE (task->dest, l1), |
3417 | 0 | FRAME_GET_LINE (task->dest, l2), |
3418 | 0 | FRAME_GET_Y_LINE (task->src, l1), |
3419 | 0 | FRAME_GET_Y_LINE (task->src, l2), |
3420 | 0 | FRAME_GET_U_LINE (task->src, i >> 1), |
3421 | 0 | FRAME_GET_V_LINE (task->src, i >> 1), (task->width + 1) / 2); |
3422 | 0 | } |
3423 | 0 | } |
3424 | | |
3425 | | static void |
3426 | | convert_I420_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src, |
3427 | | GstVideoFrame * dest) |
3428 | 0 | { |
3429 | 0 | int i; |
3430 | 0 | gint width = convert->in_width; |
3431 | 0 | gint height = convert->in_height; |
3432 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
3433 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
3434 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
3435 | 0 | gint h2; |
3436 | 0 | FConvertTask *tasks; |
3437 | 0 | FConvertTask **tasks_p; |
3438 | 0 | gint n_threads; |
3439 | 0 | gint lines_per_thread; |
3440 | | |
3441 | | /* I420 has half as many chroma lines, so we always have to
3442 | | * process two lines together, sharing one chroma line. For
3443 | | * non-interlaced content these are two adjacent lines; for
3444 | | * interlaced content one line is skipped in between. */
3445 | 0 | if (interlaced) |
3446 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
3447 | 0 | else |
3448 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
3449 | |
|
3450 | 0 | n_threads = convert->conversion_runner->n_threads; |
3451 | 0 | tasks = convert->tasks[0] = |
3452 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
3453 | 0 | tasks_p = convert->tasks_p[0] = |
3454 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
3455 | |
|
3456 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
3457 | |
|
3458 | 0 | for (i = 0; i < n_threads; i++) { |
3459 | 0 | tasks[i].src = src; |
3460 | 0 | tasks[i].dest = dest; |
3461 | |
|
3462 | 0 | tasks[i].interlaced = interlaced; |
3463 | 0 | tasks[i].width = width; |
3464 | |
|
3465 | 0 | tasks[i].height_0 = i * lines_per_thread; |
3466 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
3467 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
3468 | |
|
3469 | 0 | tasks_p[i] = &tasks[i]; |
3470 | 0 | } |
3471 | |
|
3472 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
3473 | 0 | (GstParallelizedTaskFunc) convert_I420_YUY2_task, (gpointer) tasks_p); |
3474 | | |
3475 | | /* now handle last lines. For interlaced these are up to 3 */ |
3476 | 0 | if (h2 != height) { |
3477 | 0 | for (i = h2; i < height; i++) { |
3478 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
3479 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
3480 | 0 | } |
3481 | 0 | } |
3482 | 0 | } |
3483 | | |
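For reference, this fast path is normally reached through the public API: a converter set up for I420 to YUY2 at identical sizes will typically end up here once the fast-path conditions (matching size, no border, default options) are met. A minimal usage sketch follows; convert_i420_to_yuy2 is a hypothetical helper, error handling is omitted and gst_init() must already have been called.

    #include <gst/video/video.h>

    /* Hypothetical helper: convert one mapped I420 buffer to YUY2. */
    static GstBuffer *
    convert_i420_to_yuy2 (GstBuffer * inbuf, gint width, gint height)
    {
      GstVideoInfo in_info, out_info;
      GstVideoFrame in_frame, out_frame;
      GstVideoConverter *converter;
      GstBuffer *outbuf;

      gst_video_info_set_format (&in_info, GST_VIDEO_FORMAT_I420, width, height);
      gst_video_info_set_format (&out_info, GST_VIDEO_FORMAT_YUY2, width, height);

      converter = gst_video_converter_new (&in_info, &out_info, NULL);

      outbuf = gst_buffer_new_allocate (NULL, GST_VIDEO_INFO_SIZE (&out_info), NULL);

      gst_video_frame_map (&in_frame, &in_info, inbuf, GST_MAP_READ);
      gst_video_frame_map (&out_frame, &out_info, outbuf, GST_MAP_WRITE);

      gst_video_converter_frame (converter, &in_frame, &out_frame);

      gst_video_frame_unmap (&out_frame);
      gst_video_frame_unmap (&in_frame);
      gst_video_converter_free (converter);

      return outbuf;
    }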
3484 | | static void |
3485 | | convert_I420_UYVY_task (FConvertTask * task) |
3486 | 0 | { |
3487 | 0 | gint i; |
3488 | 0 | gint l1, l2; |
3489 | |
|
3490 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
3491 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
3492 | |
|
3493 | 0 | video_orc_convert_I420_UYVY (FRAME_GET_LINE (task->dest, l1), |
3494 | 0 | FRAME_GET_LINE (task->dest, l2), |
3495 | 0 | FRAME_GET_Y_LINE (task->src, l1), |
3496 | 0 | FRAME_GET_Y_LINE (task->src, l2), |
3497 | 0 | FRAME_GET_U_LINE (task->src, i >> 1), |
3498 | 0 | FRAME_GET_V_LINE (task->src, i >> 1), (task->width + 1) / 2); |
3499 | 0 | } |
3500 | 0 | } |
3501 | | |
3502 | | static void |
3503 | | convert_I420_UYVY (GstVideoConverter * convert, const GstVideoFrame * src, |
3504 | | GstVideoFrame * dest) |
3505 | 0 | { |
3506 | 0 | int i; |
3507 | 0 | gint width = convert->in_width; |
3508 | 0 | gint height = convert->in_height; |
3509 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
3510 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
3511 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
3512 | 0 | gint h2; |
3513 | 0 | FConvertTask *tasks; |
3514 | 0 | FConvertTask **tasks_p; |
3515 | 0 | gint n_threads; |
3516 | 0 | gint lines_per_thread; |
3517 | | |
3518 | | /* I420 has half as many chroma lines, so we always have to
3519 | | * process two lines together, sharing one chroma line. For
3520 | | * non-interlaced content these are two adjacent lines; for
3521 | | * interlaced content one line is skipped in between. */
3522 | 0 | if (interlaced) |
3523 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
3524 | 0 | else |
3525 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
3526 | |
|
3527 | 0 | n_threads = convert->conversion_runner->n_threads; |
3528 | 0 | tasks = convert->tasks[0] = |
3529 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
3530 | 0 | tasks_p = convert->tasks_p[0] = |
3531 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
3532 | |
|
3533 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
3534 | |
|
3535 | 0 | for (i = 0; i < n_threads; i++) { |
3536 | 0 | tasks[i].src = src; |
3537 | 0 | tasks[i].dest = dest; |
3538 | |
|
3539 | 0 | tasks[i].interlaced = interlaced; |
3540 | 0 | tasks[i].width = width; |
3541 | |
|
3542 | 0 | tasks[i].height_0 = i * lines_per_thread; |
3543 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
3544 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
3545 | |
|
3546 | 0 | tasks_p[i] = &tasks[i]; |
3547 | 0 | } |
3548 | |
|
3549 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
3550 | 0 | (GstParallelizedTaskFunc) convert_I420_UYVY_task, (gpointer) tasks_p); |
3551 | | |
3552 | | /* now handle last lines. For interlaced these are up to 3 */ |
3553 | 0 | if (h2 != height) { |
3554 | 0 | for (i = h2; i < height; i++) { |
3555 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
3556 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
3557 | 0 | } |
3558 | 0 | } |
3559 | 0 | } |
3560 | | |
3561 | | static void |
3562 | | convert_I420_AYUV_task (FConvertTask * task) |
3563 | 0 | { |
3564 | 0 | gint i; |
3565 | 0 | gint l1, l2; |
3566 | |
|
3567 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
3568 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
3569 | |
|
3570 | 0 | video_orc_convert_I420_AYUV (FRAME_GET_LINE (task->dest, l1), |
3571 | 0 | FRAME_GET_LINE (task->dest, l2), |
3572 | 0 | FRAME_GET_Y_LINE (task->src, l1), |
3573 | 0 | FRAME_GET_Y_LINE (task->src, l2), |
3574 | 0 | FRAME_GET_U_LINE (task->src, i >> 1), FRAME_GET_V_LINE (task->src, |
3575 | 0 | i >> 1), task->alpha, task->width); |
3576 | 0 | } |
3577 | 0 | } |
3578 | | |
3579 | | static void |
3580 | | convert_I420_AYUV (GstVideoConverter * convert, const GstVideoFrame * src, |
3581 | | GstVideoFrame * dest) |
3582 | 0 | { |
3583 | 0 | int i; |
3584 | 0 | gint width = convert->in_width; |
3585 | 0 | gint height = convert->in_height; |
3586 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
3587 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
3588 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
3589 | 0 | guint8 alpha = MIN (convert->alpha_value, 255); |
3590 | 0 | gint h2; |
3591 | 0 | FConvertTask *tasks; |
3592 | 0 | FConvertTask **tasks_p; |
3593 | 0 | gint n_threads; |
3594 | 0 | gint lines_per_thread; |
3595 | | |
3596 | | /* I420 has half as many chroma lines, so we always have to
3597 | | * process two lines together, sharing one chroma line. For
3598 | | * non-interlaced content these are two adjacent lines; for
3599 | | * interlaced content one line is skipped in between. */
3600 | 0 | if (interlaced) |
3601 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
3602 | 0 | else |
3603 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
3604 | | |
3605 | |
|
3606 | 0 | n_threads = convert->conversion_runner->n_threads; |
3607 | 0 | tasks = convert->tasks[0] = |
3608 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
3609 | 0 | tasks_p = convert->tasks_p[0] = |
3610 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
3611 | |
|
3612 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
3613 | |
|
3614 | 0 | for (i = 0; i < n_threads; i++) { |
3615 | 0 | tasks[i].src = src; |
3616 | 0 | tasks[i].dest = dest; |
3617 | |
|
3618 | 0 | tasks[i].interlaced = interlaced; |
3619 | 0 | tasks[i].width = width; |
3620 | 0 | tasks[i].alpha = alpha; |
3621 | |
|
3622 | 0 | tasks[i].height_0 = i * lines_per_thread; |
3623 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
3624 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
3625 | |
|
3626 | 0 | tasks_p[i] = &tasks[i]; |
3627 | 0 | } |
3628 | |
|
3629 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
3630 | 0 | (GstParallelizedTaskFunc) convert_I420_AYUV_task, (gpointer) tasks_p); |
3631 | | |
3632 | | /* now handle last lines. For interlaced these are up to 3 */ |
3633 | 0 | if (h2 != height) { |
3634 | 0 | for (i = h2; i < height; i++) { |
3635 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
3636 | 0 | if (alpha != 0xff) |
3637 | 0 | convert_set_alpha_u8 (convert, convert->tmpline[0], width); |
3638 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
3639 | 0 | } |
3640 | 0 | } |
3641 | 0 | } |
3642 | | |
3643 | | static void |
3644 | | convert_I420_v210_task (FConvertTask * task) |
3645 | 0 | { |
3646 | 0 | gint i, j; |
3647 | 0 | gint l1, l2; |
3648 | 0 | const guint8 *s_y1, *s_y2, *s_u, *s_v; |
3649 | 0 | guint8 *d1, *d2; |
3650 | 0 | guint32 a0, a1, a2, a3; |
3651 | 0 | guint8 y0_1, y1_1, y2_1, y3_1, y4_1, y5_1; |
3652 | 0 | guint8 u0_1, u2_1, u4_1; |
3653 | 0 | guint8 v0_1, v2_1, v4_1; |
3654 | 0 | guint8 y0_2, y1_2, y2_2, y3_2, y4_2, y5_2; |
3655 | 0 | guint8 u0_2, u2_2, u4_2; |
3656 | 0 | guint8 v0_2, v2_2, v4_2; |
3657 | |
|
3658 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
3659 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
3660 | |
|
3661 | 0 | s_y1 = FRAME_GET_Y_LINE (task->src, l1); |
3662 | 0 | s_y2 = FRAME_GET_Y_LINE (task->src, l2); |
3663 | 0 | s_u = FRAME_GET_U_LINE (task->src, i >> 1); |
3664 | 0 | s_v = FRAME_GET_V_LINE (task->src, i >> 1); |
3665 | |
|
3666 | 0 | d1 = FRAME_GET_LINE (task->dest, l1); |
3667 | 0 | d2 = FRAME_GET_LINE (task->dest, l2); |
3668 | |
|
3669 | 0 | for (j = 0; j < task->width; j += 6) { |
3670 | 0 | y1_1 = y2_1 = y3_1 = y4_1 = y5_1 = 0; |
3671 | 0 | u2_1 = u4_1 = v2_1 = v4_1 = 0; |
3672 | 0 | y1_2 = y2_2 = y3_2 = y4_2 = y5_2 = 0; |
3673 | 0 | u2_2 = u4_2 = v2_2 = v4_2 = 0; |
3674 | |
|
3675 | 0 | y0_1 = s_y1[j]; |
3676 | 0 | y0_2 = s_y2[j]; |
3677 | |
|
3678 | 0 | u0_1 = u0_2 = s_u[j / 2]; |
3679 | 0 | v0_1 = v0_2 = s_v[j / 2]; |
3680 | |
|
3681 | 0 | if (j < task->width - 1) { |
3682 | 0 | y1_1 = s_y1[j + 1]; |
3683 | 0 | y1_2 = s_y2[j + 1]; |
3684 | 0 | } |
3685 | |
|
3686 | 0 | if (j < task->width - 2) { |
3687 | 0 | y2_1 = s_y1[j + 2]; |
3688 | 0 | y2_2 = s_y2[j + 2]; |
3689 | |
|
3690 | 0 | u2_1 = u2_2 = s_u[j / 2 + 1]; |
3691 | 0 | v2_1 = v2_2 = s_v[j / 2 + 1]; |
3692 | 0 | } |
3693 | |
|
3694 | 0 | if (j < task->width - 3) { |
3695 | 0 | y3_1 = s_y1[j + 3]; |
3696 | 0 | y3_2 = s_y2[j + 3]; |
3697 | 0 | } |
3698 | |
|
3699 | 0 | if (j < task->width - 4) { |
3700 | 0 | y4_1 = s_y1[j + 4]; |
3701 | 0 | y4_2 = s_y2[j + 4]; |
3702 | |
|
3703 | 0 | u4_1 = u4_2 = s_u[j / 2 + 2]; |
3704 | 0 | v4_1 = v4_2 = s_v[j / 2 + 2]; |
3705 | 0 | } |
3706 | |
|
3707 | 0 | if (j < task->width - 5) { |
3708 | 0 | y5_1 = s_y1[j + 5]; |
3709 | 0 | y5_2 = s_y2[j + 5]; |
3710 | 0 | } |
3711 | |
|
3712 | 0 | a0 = u0_1 << 2 | (y0_1 << 12) | (v0_1 << 22); |
3713 | 0 | a1 = y1_1 << 2 | (u2_1 << 12) | (y2_1 << 22); |
3714 | 0 | a2 = v2_1 << 2 | (y3_1 << 12) | (u4_1 << 22); |
3715 | 0 | a3 = y4_1 << 2 | (v4_1 << 12) | (y5_1 << 22); |
3716 | |
|
3717 | 0 | GST_WRITE_UINT32_LE (d1 + (j / 6) * 16 + 0, a0); |
3718 | 0 | GST_WRITE_UINT32_LE (d1 + (j / 6) * 16 + 4, a1); |
3719 | 0 | GST_WRITE_UINT32_LE (d1 + (j / 6) * 16 + 8, a2); |
3720 | 0 | GST_WRITE_UINT32_LE (d1 + (j / 6) * 16 + 12, a3); |
3721 | |
|
3722 | 0 | a0 = u0_2 << 2 | (y0_2 << 12) | (v0_2 << 22); |
3723 | 0 | a1 = y1_2 << 2 | (u2_2 << 12) | (y2_2 << 22); |
3724 | 0 | a2 = v2_2 << 2 | (y3_2 << 12) | (u4_2 << 22); |
3725 | 0 | a3 = y4_2 << 2 | (v4_2 << 12) | (y5_2 << 22); |
3726 | |
|
3727 | 0 | GST_WRITE_UINT32_LE (d2 + (j / 6) * 16 + 0, a0); |
3728 | 0 | GST_WRITE_UINT32_LE (d2 + (j / 6) * 16 + 4, a1); |
3729 | 0 | GST_WRITE_UINT32_LE (d2 + (j / 6) * 16 + 8, a2); |
3730 | 0 | GST_WRITE_UINT32_LE (d2 + (j / 6) * 16 + 12, a3); |
3731 | 0 | } |
3732 | 0 | } |
3733 | 0 | } |
3734 | | |
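v210 stores groups of six pixels in four little-endian 32-bit words, three 10-bit components per word. The task above builds those words by shifting the 8-bit I420 samples left by 2 to widen them to 10 bits (the low two bits stay zero). A standalone sketch of the first word of a group, following the same layout; v210_word0_from_8bit and v210_store_word0 are hypothetical helpers:

    #include <gst/gst.h>

    /* Pack the first word of a v210 group: Cb0 | Y0 | Cr0, 10 bits each,
     * little endian; 8-bit samples are widened with << 2. */
    static guint32
    v210_word0_from_8bit (guint8 u0, guint8 y0, guint8 v0)
    {
      return (u0 << 2) | ((y0 << 2) << 10) | ((v0 << 2) << 20);
    }

    static void
    v210_store_word0 (guint8 * dest, guint8 u0, guint8 y0, guint8 v0)
    {
      GST_WRITE_UINT32_LE (dest, v210_word0_from_8bit (u0, y0, v0));
    }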
3735 | | static void |
3736 | | convert_I420_v210 (GstVideoConverter * convert, const GstVideoFrame * src, |
3737 | | GstVideoFrame * dest) |
3738 | 0 | { |
3739 | 0 | int i; |
3740 | 0 | gint width = convert->in_width; |
3741 | 0 | gint height = convert->in_height; |
3742 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
3743 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
3744 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
3745 | 0 | gint h2; |
3746 | 0 | FConvertTask *tasks; |
3747 | 0 | FConvertTask **tasks_p; |
3748 | 0 | gint n_threads; |
3749 | 0 | gint lines_per_thread; |
3750 | 0 | guint8 *tmpline_8; |
3751 | | |
3752 | | /* I420 has half as many chroma lines, so we always have to
3753 | | * process two lines together, sharing one chroma line. For
3754 | | * non-interlaced content these are two adjacent lines; for
3755 | | * interlaced content one line is skipped in between. */
3756 | 0 | if (interlaced) |
3757 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
3758 | 0 | else |
3759 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
3760 | |
|
3761 | 0 | n_threads = convert->conversion_runner->n_threads; |
3762 | 0 | tasks = convert->tasks[0] = |
3763 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
3764 | 0 | tasks_p = convert->tasks_p[0] = |
3765 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
3766 | |
|
3767 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
3768 | |
|
3769 | 0 | for (i = 0; i < n_threads; i++) { |
3770 | 0 | tasks[i].src = src; |
3771 | 0 | tasks[i].dest = dest; |
3772 | |
|
3773 | 0 | tasks[i].interlaced = interlaced; |
3774 | 0 | tasks[i].width = width; |
3775 | |
|
3776 | 0 | tasks[i].height_0 = i * lines_per_thread; |
3777 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
3778 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
3779 | |
|
3780 | 0 | tasks_p[i] = &tasks[i]; |
3781 | 0 | } |
3782 | |
|
3783 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
3784 | 0 | (GstParallelizedTaskFunc) convert_I420_v210_task, (gpointer) tasks_p); |
3785 | | |
3786 | | /* now handle last lines. For interlaced these are up to 3 */ |
3787 | 0 | if (h2 != height) { |
3788 | 0 | for (i = h2; i < height; i++) { |
3789 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
3790 | |
|
3791 | 0 | tmpline_8 = (guint8 *) convert->tmpline[0]; |
3792 | 0 | for (int j = width * 4 - 1; j >= 0; j--) { |
3793 | 0 | convert->tmpline[0][j] = tmpline_8[j] << 8; |
3794 | 0 | } |
3795 | |
|
3796 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
3797 | 0 | } |
3798 | 0 | } |
3799 | 0 | } |
3800 | | |
3801 | | static void |
3802 | | convert_I420_10_v210_task (FConvertTask * task) |
3803 | 0 | { |
3804 | 0 | gint i, j; |
3805 | 0 | gint l1, l2; |
3806 | 0 | const guint16 *s_y1, *s_y2, *s_u, *s_v; |
3807 | 0 | guint8 *d1, *d2; |
3808 | 0 | guint32 a0, a1, a2, a3; |
3809 | 0 | guint16 y0_1, y1_1, y2_1, y3_1, y4_1, y5_1; |
3810 | 0 | guint16 u0_1, u2_1, u4_1; |
3811 | 0 | guint16 v0_1, v2_1, v4_1; |
3812 | 0 | guint16 y0_2, y1_2, y2_2, y3_2, y4_2, y5_2; |
3813 | 0 | guint16 u0_2, u2_2, u4_2; |
3814 | 0 | guint16 v0_2, v2_2, v4_2; |
3815 | |
|
3816 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
3817 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
3818 | |
|
3819 | 0 | s_y1 = FRAME_GET_Y_LINE (task->src, l1); |
3820 | 0 | s_y2 = FRAME_GET_Y_LINE (task->src, l2); |
3821 | 0 | s_u = FRAME_GET_U_LINE (task->src, i >> 1); |
3822 | 0 | s_v = FRAME_GET_V_LINE (task->src, i >> 1); |
3823 | |
|
3824 | 0 | d1 = FRAME_GET_LINE (task->dest, l1); |
3825 | 0 | d2 = FRAME_GET_LINE (task->dest, l2); |
3826 | |
|
3827 | 0 | for (j = 0; j < task->width; j += 6) { |
3828 | 0 | y1_1 = y2_1 = y3_1 = y4_1 = y5_1 = 0; |
3829 | 0 | u2_1 = u4_1 = v2_1 = v4_1 = 0; |
3830 | 0 | y1_2 = y2_2 = y3_2 = y4_2 = y5_2 = 0; |
3831 | 0 | u2_2 = u4_2 = v2_2 = v4_2 = 0; |
3832 | |
|
3833 | 0 | y0_1 = s_y1[j]; |
3834 | 0 | y0_2 = s_y2[j]; |
3835 | |
|
3836 | 0 | u0_1 = u0_2 = s_u[j / 2]; |
3837 | 0 | v0_1 = v0_2 = s_v[j / 2]; |
3838 | |
|
3839 | 0 | if (j < task->width - 1) { |
3840 | 0 | y1_1 = s_y1[j + 1]; |
3841 | 0 | y1_2 = s_y2[j + 1]; |
3842 | 0 | } |
3843 | |
|
3844 | 0 | if (j < task->width - 2) { |
3845 | 0 | y2_1 = s_y1[j + 2]; |
3846 | 0 | y2_2 = s_y2[j + 2]; |
3847 | |
|
3848 | 0 | u2_1 = u2_2 = s_u[j / 2 + 1]; |
3849 | 0 | v2_1 = v2_2 = s_v[j / 2 + 1]; |
3850 | 0 | } |
3851 | |
|
3852 | 0 | if (j < task->width - 3) { |
3853 | 0 | y3_1 = s_y1[j + 3]; |
3854 | 0 | y3_2 = s_y2[j + 3]; |
3855 | 0 | } |
3856 | |
|
3857 | 0 | if (j < task->width - 4) { |
3858 | 0 | y4_1 = s_y1[j + 4]; |
3859 | 0 | y4_2 = s_y2[j + 4]; |
3860 | |
|
3861 | 0 | u4_1 = u4_2 = s_u[j / 2 + 2]; |
3862 | 0 | v4_1 = v4_2 = s_v[j / 2 + 2]; |
3863 | 0 | } |
3864 | |
|
3865 | 0 | if (j < task->width - 5) { |
3866 | 0 | y5_1 = s_y1[j + 5]; |
3867 | 0 | y5_2 = s_y2[j + 5]; |
3868 | 0 | } |
3869 | |
|
3870 | 0 | a0 = u0_1 | (y0_1 << 10) | (v0_1 << 20); |
3871 | 0 | a1 = y1_1 | (u2_1 << 10) | (y2_1 << 20); |
3872 | 0 | a2 = v2_1 | (y3_1 << 10) | (u4_1 << 20); |
3873 | 0 | a3 = y4_1 | (v4_1 << 10) | (y5_1 << 20); |
3874 | |
|
3875 | 0 | GST_WRITE_UINT32_LE (d1 + (j / 6) * 16 + 0, a0); |
3876 | 0 | GST_WRITE_UINT32_LE (d1 + (j / 6) * 16 + 4, a1); |
3877 | 0 | GST_WRITE_UINT32_LE (d1 + (j / 6) * 16 + 8, a2); |
3878 | 0 | GST_WRITE_UINT32_LE (d1 + (j / 6) * 16 + 12, a3); |
3879 | |
|
3880 | 0 | a0 = u0_2 | (y0_2 << 10) | (v0_2 << 20); |
3881 | 0 | a1 = y1_2 | (u2_2 << 10) | (y2_2 << 20); |
3882 | 0 | a2 = v2_2 | (y3_2 << 10) | (u4_2 << 20); |
3883 | 0 | a3 = y4_2 | (v4_2 << 10) | (y5_2 << 20); |
3884 | |
|
3885 | 0 | GST_WRITE_UINT32_LE (d2 + (j / 6) * 16 + 0, a0); |
3886 | 0 | GST_WRITE_UINT32_LE (d2 + (j / 6) * 16 + 4, a1); |
3887 | 0 | GST_WRITE_UINT32_LE (d2 + (j / 6) * 16 + 8, a2); |
3888 | 0 | GST_WRITE_UINT32_LE (d2 + (j / 6) * 16 + 12, a3); |
3889 | 0 | } |
3890 | 0 | } |
3891 | 0 | } |
3892 | | |
3893 | | static void |
3894 | | convert_I420_10_v210 (GstVideoConverter * convert, const GstVideoFrame * src, |
3895 | | GstVideoFrame * dest) |
3896 | 0 | { |
3897 | 0 | int i; |
3898 | 0 | gint width = convert->in_width; |
3899 | 0 | gint height = convert->in_height; |
3900 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
3901 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
3902 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
3903 | 0 | gint h2; |
3904 | 0 | FConvertTask *tasks; |
3905 | 0 | FConvertTask **tasks_p; |
3906 | 0 | gint n_threads; |
3907 | 0 | gint lines_per_thread; |
3908 | | |
3909 | | /* I420 has half as many chroma lines, so we always have to
3910 | | * process two lines together, sharing one chroma line. For
3911 | | * non-interlaced content these are two adjacent lines; for
3912 | | * interlaced content one line is skipped in between. */
3913 | 0 | if (interlaced) |
3914 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
3915 | 0 | else |
3916 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
3917 | |
|
3918 | 0 | n_threads = convert->conversion_runner->n_threads; |
3919 | 0 | tasks = convert->tasks[0] = |
3920 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
3921 | 0 | tasks_p = convert->tasks_p[0] = |
3922 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
3923 | |
|
3924 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
3925 | |
|
3926 | 0 | for (i = 0; i < n_threads; i++) { |
3927 | 0 | tasks[i].src = src; |
3928 | 0 | tasks[i].dest = dest; |
3929 | |
|
3930 | 0 | tasks[i].interlaced = interlaced; |
3931 | 0 | tasks[i].width = width; |
3932 | |
|
3933 | 0 | tasks[i].height_0 = i * lines_per_thread; |
3934 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
3935 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
3936 | |
|
3937 | 0 | tasks_p[i] = &tasks[i]; |
3938 | 0 | } |
3939 | |
|
3940 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
3941 | 0 | (GstParallelizedTaskFunc) convert_I420_10_v210_task, (gpointer) tasks_p); |
3942 | | |
3943 | | /* now handle last lines. For interlaced these are up to 3 */ |
3944 | 0 | if (h2 != height) { |
3945 | 0 | for (i = h2; i < height; i++) { |
3946 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
3947 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
3948 | 0 | } |
3949 | 0 | } |
3950 | 0 | } |
3951 | | |
3952 | | static void |
3953 | | convert_YUY2_I420_task (FConvertTask * task) |
3954 | 0 | { |
3955 | 0 | gint i; |
3956 | 0 | gint l1, l2; |
3957 | |
|
3958 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
3959 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
3960 | |
|
3961 | 0 | video_orc_convert_YUY2_I420 (FRAME_GET_Y_LINE (task->dest, l1), |
3962 | 0 | FRAME_GET_Y_LINE (task->dest, l2), |
3963 | 0 | FRAME_GET_U_LINE (task->dest, i >> 1), |
3964 | 0 | FRAME_GET_V_LINE (task->dest, i >> 1), |
3965 | 0 | FRAME_GET_LINE (task->src, l1), FRAME_GET_LINE (task->src, l2), |
3966 | 0 | (task->width + 1) / 2); |
3967 | 0 | } |
3968 | 0 | } |
3969 | | |
3970 | | static void |
3971 | | convert_YUY2_I420 (GstVideoConverter * convert, const GstVideoFrame * src, |
3972 | | GstVideoFrame * dest) |
3973 | 0 | { |
3974 | 0 | int i; |
3975 | 0 | gint width = convert->in_width; |
3976 | 0 | gint height = convert->in_height; |
3977 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
3978 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
3979 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
3980 | 0 | gint h2; |
3981 | 0 | FConvertTask *tasks; |
3982 | 0 | FConvertTask **tasks_p; |
3983 | 0 | gint n_threads; |
3984 | 0 | gint lines_per_thread; |
3985 | | |
3986 | | /* I420 has half as many chroma lines, so we always have to
3987 | | * process two lines together, sharing one chroma line. For
3988 | | * non-interlaced content these are two adjacent lines; for
3989 | | * interlaced content one line is skipped in between. */
3990 | 0 | if (interlaced) |
3991 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
3992 | 0 | else |
3993 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
3994 | |
|
3995 | 0 | n_threads = convert->conversion_runner->n_threads; |
3996 | 0 | tasks = convert->tasks[0] = |
3997 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
3998 | 0 | tasks_p = convert->tasks_p[0] = |
3999 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
4000 | |
|
4001 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
4002 | |
|
4003 | 0 | for (i = 0; i < n_threads; i++) { |
4004 | 0 | tasks[i].src = src; |
4005 | 0 | tasks[i].dest = dest; |
4006 | |
|
4007 | 0 | tasks[i].interlaced = interlaced; |
4008 | 0 | tasks[i].width = width; |
4009 | |
|
4010 | 0 | tasks[i].height_0 = i * lines_per_thread; |
4011 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
4012 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
4013 | |
|
4014 | 0 | tasks_p[i] = &tasks[i]; |
4015 | 0 | } |
4016 | |
|
4017 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4018 | 0 | (GstParallelizedTaskFunc) convert_YUY2_I420_task, (gpointer) tasks_p); |
4019 | | |
4020 | | /* now handle last lines. For interlaced these are up to 3 */ |
4021 | 0 | if (h2 != height) { |
4022 | 0 | for (i = h2; i < height; i++) { |
4023 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
4024 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
4025 | 0 | } |
4026 | 0 | } |
4027 | 0 | } |
4028 | | |
4029 | | static void |
4030 | | convert_v210_I420_task (FConvertTask * task) |
4031 | 0 | { |
4032 | 0 | gint i, j; |
4033 | 0 | gint l1, l2; |
4034 | 0 | guint8 *d_y1, *d_y2, *d_u, *d_v; |
4035 | 0 | const guint8 *s1, *s2; |
4036 | 0 | guint32 a0, a1, a2, a3; |
4037 | 0 | guint16 y0_1, y1_1, y2_1, y3_1, y4_1, y5_1; |
4038 | 0 | guint16 u0_1, u2_1, u4_1; |
4039 | 0 | guint16 v0_1, v2_1, v4_1; |
4040 | 0 | guint16 y0_2, y1_2, y2_2, y3_2, y4_2, y5_2; |
4041 | 0 | guint16 u0_2, u2_2, u4_2; |
4042 | 0 | guint16 v0_2, v2_2, v4_2; |
4043 | |
|
4044 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
4045 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
4046 | |
|
4047 | 0 | d_y1 = FRAME_GET_Y_LINE (task->dest, l1); |
4048 | 0 | d_y2 = FRAME_GET_Y_LINE (task->dest, l2); |
4049 | 0 | d_u = FRAME_GET_U_LINE (task->dest, i >> 1); |
4050 | 0 | d_v = FRAME_GET_V_LINE (task->dest, i >> 1); |
4051 | |
|
4052 | 0 | s1 = FRAME_GET_LINE (task->src, l1); |
4053 | 0 | s2 = FRAME_GET_LINE (task->src, l2); |
4054 | |
|
4055 | 0 | for (j = 0; j < task->width; j += 6) { |
4056 | 0 | a0 = GST_READ_UINT32_LE (s1 + (j / 6) * 16 + 0); |
4057 | 0 | a1 = GST_READ_UINT32_LE (s1 + (j / 6) * 16 + 4); |
4058 | 0 | a2 = GST_READ_UINT32_LE (s1 + (j / 6) * 16 + 8); |
4059 | 0 | a3 = GST_READ_UINT32_LE (s1 + (j / 6) * 16 + 12); |
4060 | |
|
4061 | 0 | u0_1 = ((a0 >> 0) & 0x3ff) >> 2; |
4062 | 0 | y0_1 = ((a0 >> 10) & 0x3ff) >> 2; |
4063 | 0 | v0_1 = ((a0 >> 20) & 0x3ff) >> 2; |
4064 | 0 | y1_1 = ((a1 >> 0) & 0x3ff) >> 2; |
4065 | |
|
4066 | 0 | u2_1 = ((a1 >> 10) & 0x3ff) >> 2; |
4067 | 0 | y2_1 = ((a1 >> 20) & 0x3ff) >> 2; |
4068 | 0 | v2_1 = ((a2 >> 0) & 0x3ff) >> 2; |
4069 | 0 | y3_1 = ((a2 >> 10) & 0x3ff) >> 2; |
4070 | |
|
4071 | 0 | u4_1 = ((a2 >> 20) & 0x3ff) >> 2; |
4072 | 0 | y4_1 = ((a3 >> 0) & 0x3ff) >> 2; |
4073 | 0 | v4_1 = ((a3 >> 10) & 0x3ff) >> 2; |
4074 | 0 | y5_1 = ((a3 >> 20) & 0x3ff) >> 2; |
4075 | |
|
4076 | 0 | a0 = GST_READ_UINT32_LE (s2 + (j / 6) * 16 + 0); |
4077 | 0 | a1 = GST_READ_UINT32_LE (s2 + (j / 6) * 16 + 4); |
4078 | 0 | a2 = GST_READ_UINT32_LE (s2 + (j / 6) * 16 + 8); |
4079 | 0 | a3 = GST_READ_UINT32_LE (s2 + (j / 6) * 16 + 12); |
4080 | |
|
4081 | 0 | u0_2 = ((a0 >> 0) & 0x3ff) >> 2; |
4082 | 0 | y0_2 = ((a0 >> 10) & 0x3ff) >> 2; |
4083 | 0 | v0_2 = ((a0 >> 20) & 0x3ff) >> 2; |
4084 | 0 | y1_2 = ((a1 >> 0) & 0x3ff) >> 2; |
4085 | |
|
4086 | 0 | u2_2 = ((a1 >> 10) & 0x3ff) >> 2; |
4087 | 0 | y2_2 = ((a1 >> 20) & 0x3ff) >> 2; |
4088 | 0 | v2_2 = ((a2 >> 0) & 0x3ff) >> 2; |
4089 | 0 | y3_2 = ((a2 >> 10) & 0x3ff) >> 2; |
4090 | |
|
4091 | 0 | u4_2 = ((a2 >> 20) & 0x3ff) >> 2; |
4092 | 0 | y4_2 = ((a3 >> 0) & 0x3ff) >> 2; |
4093 | 0 | v4_2 = ((a3 >> 10) & 0x3ff) >> 2; |
4094 | 0 | y5_2 = ((a3 >> 20) & 0x3ff) >> 2; |
4095 | |
|
4096 | 0 | d_y1[j] = y0_1; |
4097 | 0 | d_y2[j] = y0_2; |
4098 | 0 | d_u[j / 2] = (u0_1 + u0_2) / 2; |
4099 | 0 | d_v[j / 2] = (v0_1 + v0_2) / 2; |
4100 | |
|
4101 | 0 | if (j < task->width - 1) { |
4102 | 0 | d_y1[j + 1] = y1_1; |
4103 | 0 | d_y2[j + 1] = y1_2; |
4104 | 0 | } |
4105 | |
|
4106 | 0 | if (j < task->width - 2) { |
4107 | 0 | d_y1[j + 2] = y2_1; |
4108 | 0 | d_y2[j + 2] = y2_2; |
4109 | 0 | d_u[j / 2 + 1] = (u2_1 + u2_2) / 2; |
4110 | 0 | d_v[j / 2 + 1] = (v2_1 + v2_2) / 2; |
4111 | 0 | } |
4112 | |
|
4113 | 0 | if (j < task->width - 3) { |
4114 | 0 | d_y1[j + 3] = y3_1; |
4115 | 0 | d_y2[j + 3] = y3_2; |
4116 | 0 | } |
4117 | |
|
4118 | 0 | if (j < task->width - 4) { |
4119 | 0 | d_y1[j + 4] = y4_1; |
4120 | 0 | d_y2[j + 4] = y4_2; |
4121 | 0 | d_u[j / 2 + 2] = (u4_1 + u4_2) / 2; |
4122 | 0 | d_v[j / 2 + 2] = (v4_1 + v4_2) / 2; |
4123 | 0 | } |
4124 | |
|
4125 | 0 | if (j < task->width - 5) { |
4126 | 0 | d_y1[j + 5] = y5_1; |
4127 | 0 | d_y2[j + 5] = y5_2; |
4128 | 0 | } |
4129 | 0 | } |
4130 | 0 | } |
4131 | 0 | } |
4132 | | |
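Going the other way, each 10-bit component is masked out of the little-endian words and reduced to 8 bits with a shift right by 2, and the chroma of the two lines that map to one I420 chroma row is averaged. A hypothetical scalar sketch of both steps (v210_word0_to_8bit and average_chroma are not functions from this file):

    #include <gst/gst.h>

    /* Extract the three 10-bit components of one v210 word and reduce
     * them to 8 bits, as the v210 -> I420 task does. */
    static void
    v210_word0_to_8bit (const guint8 * src, guint8 * u0, guint8 * y0,
        guint8 * v0)
    {
      guint32 a0 = GST_READ_UINT32_LE (src);

      *u0 = ((a0 >> 0) & 0x3ff) >> 2;
      *y0 = ((a0 >> 10) & 0x3ff) >> 2;
      *v0 = ((a0 >> 20) & 0x3ff) >> 2;
    }

    /* The I420 chroma sample is the average of the chroma taken from the
     * two lines (l1 and l2 above) that share it. */
    static guint8
    average_chroma (guint8 c_line1, guint8 c_line2)
    {
      return (c_line1 + c_line2) / 2;
    }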
4133 | | static void |
4134 | | convert_v210_I420 (GstVideoConverter * convert, const GstVideoFrame * src, |
4135 | | GstVideoFrame * dest) |
4136 | 0 | { |
4137 | 0 | int i; |
4138 | 0 | gint width = convert->in_width; |
4139 | 0 | gint height = convert->in_height; |
4140 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
4141 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
4142 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
4143 | 0 | gint h2; |
4144 | 0 | FConvertTask *tasks; |
4145 | 0 | FConvertTask **tasks_p; |
4146 | 0 | gint n_threads; |
4147 | 0 | gint lines_per_thread; |
4148 | 0 | guint8 *tmpline_8; |
4149 | | |
4150 | | /* I420 has half as many chroma lines, so we always have to
4151 | | * process two lines together, sharing one chroma line. For
4152 | | * non-interlaced content these are two adjacent lines; for
4153 | | * interlaced content one line is skipped in between. */
4154 | 0 | if (interlaced) |
4155 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
4156 | 0 | else |
4157 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
4158 | |
|
4159 | 0 | n_threads = convert->conversion_runner->n_threads; |
4160 | 0 | tasks = convert->tasks[0] = |
4161 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
4162 | 0 | tasks_p = convert->tasks_p[0] = |
4163 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
4164 | |
|
4165 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
4166 | |
|
4167 | 0 | for (i = 0; i < n_threads; i++) { |
4168 | 0 | tasks[i].src = src; |
4169 | 0 | tasks[i].dest = dest; |
4170 | |
|
4171 | 0 | tasks[i].interlaced = interlaced; |
4172 | 0 | tasks[i].width = width; |
4173 | |
|
4174 | 0 | tasks[i].height_0 = i * lines_per_thread; |
4175 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
4176 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
4177 | |
|
4178 | 0 | tasks_p[i] = &tasks[i]; |
4179 | 0 | } |
4180 | |
|
4181 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4182 | 0 | (GstParallelizedTaskFunc) convert_v210_I420_task, (gpointer) tasks_p); |
4183 | | |
4184 | | /* now handle last lines. For interlaced these are up to 3 */ |
4185 | 0 | if (h2 != height) { |
4186 | 0 | for (i = h2; i < height; i++) { |
4187 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
4188 | |
|
4189 | 0 | tmpline_8 = (guint8 *) convert->tmpline[0]; |
4190 | 0 | for (int j = 0; j < width * 4; j++) { |
4191 | 0 | tmpline_8[j] = convert->tmpline[0][j] >> 8; |
4192 | 0 | } |
4193 | |
|
4194 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
4195 | 0 | } |
4196 | 0 | } |
4197 | 0 | } |
4198 | | |
4199 | | static void |
4200 | | convert_v210_I420_10_task (FConvertTask * task) |
4201 | 0 | { |
4202 | 0 | gint i, j; |
4203 | 0 | gint l1, l2; |
4204 | 0 | guint16 *d_y1, *d_y2, *d_u, *d_v; |
4205 | 0 | const guint8 *s1, *s2; |
4206 | 0 | guint32 a0, a1, a2, a3; |
4207 | 0 | guint16 y0_1, y1_1, y2_1, y3_1, y4_1, y5_1; |
4208 | 0 | guint16 u0_1, u2_1, u4_1; |
4209 | 0 | guint16 v0_1, v2_1, v4_1; |
4210 | 0 | guint16 y0_2, y1_2, y2_2, y3_2, y4_2, y5_2; |
4211 | 0 | guint16 u0_2, u2_2, u4_2; |
4212 | 0 | guint16 v0_2, v2_2, v4_2; |
4213 | |
|
4214 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
4215 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
4216 | |
|
4217 | 0 | d_y1 = FRAME_GET_Y_LINE (task->dest, l1); |
4218 | 0 | d_y2 = FRAME_GET_Y_LINE (task->dest, l2); |
4219 | 0 | d_u = FRAME_GET_U_LINE (task->dest, i >> 1); |
4220 | 0 | d_v = FRAME_GET_V_LINE (task->dest, i >> 1); |
4221 | |
|
4222 | 0 | s1 = FRAME_GET_LINE (task->src, l1); |
4223 | 0 | s2 = FRAME_GET_LINE (task->src, l2); |
4224 | |
|
4225 | 0 | for (j = 0; j < task->width; j += 6) { |
4226 | 0 | a0 = GST_READ_UINT32_LE (s1 + (j / 6) * 16 + 0); |
4227 | 0 | a1 = GST_READ_UINT32_LE (s1 + (j / 6) * 16 + 4); |
4228 | 0 | a2 = GST_READ_UINT32_LE (s1 + (j / 6) * 16 + 8); |
4229 | 0 | a3 = GST_READ_UINT32_LE (s1 + (j / 6) * 16 + 12); |
4230 | |
|
4231 | 0 | u0_1 = ((a0 >> 0) & 0x3ff); |
4232 | 0 | y0_1 = ((a0 >> 10) & 0x3ff); |
4233 | 0 | v0_1 = ((a0 >> 20) & 0x3ff); |
4234 | 0 | y1_1 = ((a1 >> 0) & 0x3ff); |
4235 | |
|
4236 | 0 | u2_1 = ((a1 >> 10) & 0x3ff); |
4237 | 0 | y2_1 = ((a1 >> 20) & 0x3ff); |
4238 | 0 | v2_1 = ((a2 >> 0) & 0x3ff); |
4239 | 0 | y3_1 = ((a2 >> 10) & 0x3ff); |
4240 | |
|
4241 | 0 | u4_1 = ((a2 >> 20) & 0x3ff); |
4242 | 0 | y4_1 = ((a3 >> 0) & 0x3ff); |
4243 | 0 | v4_1 = ((a3 >> 10) & 0x3ff); |
4244 | 0 | y5_1 = ((a3 >> 20) & 0x3ff); |
4245 | |
|
4246 | 0 | a0 = GST_READ_UINT32_LE (s2 + (j / 6) * 16 + 0); |
4247 | 0 | a1 = GST_READ_UINT32_LE (s2 + (j / 6) * 16 + 4); |
4248 | 0 | a2 = GST_READ_UINT32_LE (s2 + (j / 6) * 16 + 8); |
4249 | 0 | a3 = GST_READ_UINT32_LE (s2 + (j / 6) * 16 + 12); |
4250 | |
|
4251 | 0 | u0_2 = ((a0 >> 0) & 0x3ff); |
4252 | 0 | y0_2 = ((a0 >> 10) & 0x3ff); |
4253 | 0 | v0_2 = ((a0 >> 20) & 0x3ff); |
4254 | 0 | y1_2 = ((a1 >> 0) & 0x3ff); |
4255 | |
|
4256 | 0 | u2_2 = ((a1 >> 10) & 0x3ff); |
4257 | 0 | y2_2 = ((a1 >> 20) & 0x3ff); |
4258 | 0 | v2_2 = ((a2 >> 0) & 0x3ff); |
4259 | 0 | y3_2 = ((a2 >> 10) & 0x3ff); |
4260 | |
|
4261 | 0 | u4_2 = ((a2 >> 20) & 0x3ff); |
4262 | 0 | y4_2 = ((a3 >> 0) & 0x3ff); |
4263 | 0 | v4_2 = ((a3 >> 10) & 0x3ff); |
4264 | 0 | y5_2 = ((a3 >> 20) & 0x3ff); |
4265 | |
|
4266 | 0 | d_y1[j] = y0_1; |
4267 | 0 | d_y2[j] = y0_2; |
4268 | 0 | d_u[j / 2] = (u0_1 + u0_2) / 2; |
4269 | 0 | d_v[j / 2] = (v0_1 + v0_2) / 2; |
4270 | |
|
4271 | 0 | if (j < task->width - 1) { |
4272 | 0 | d_y1[j + 1] = y1_1; |
4273 | 0 | d_y2[j + 1] = y1_2; |
4274 | 0 | } |
4275 | |
|
4276 | 0 | if (j < task->width - 2) { |
4277 | 0 | d_y1[j + 2] = y2_1; |
4278 | 0 | d_y2[j + 2] = y2_2; |
4279 | 0 | d_u[j / 2 + 1] = (u2_1 + u2_2) / 2; |
4280 | 0 | d_v[j / 2 + 1] = (v2_1 + v2_2) / 2; |
4281 | 0 | } |
4282 | |
|
4283 | 0 | if (j < task->width - 3) { |
4284 | 0 | d_y1[j + 3] = y3_1; |
4285 | 0 | d_y2[j + 3] = y3_2; |
4286 | 0 | } |
4287 | |
|
4288 | 0 | if (j < task->width - 4) { |
4289 | 0 | d_y1[j + 4] = y4_1; |
4290 | 0 | d_y2[j + 4] = y4_2; |
4291 | 0 | d_u[j / 2 + 2] = (u4_1 + u4_2) / 2; |
4292 | 0 | d_v[j / 2 + 2] = (v4_1 + v4_2) / 2; |
4293 | 0 | } |
4294 | |
|
4295 | 0 | if (j < task->width - 5) { |
4296 | 0 | d_y1[j + 5] = y5_1; |
4297 | 0 | d_y2[j + 5] = y5_2; |
4298 | 0 | } |
4299 | 0 | } |
4300 | 0 | } |
4301 | 0 | } |
4302 | | |
4303 | | static void |
4304 | | convert_v210_I420_10 (GstVideoConverter * convert, const GstVideoFrame * src, |
4305 | | GstVideoFrame * dest) |
4306 | 0 | { |
4307 | 0 | int i; |
4308 | 0 | gint width = convert->in_width; |
4309 | 0 | gint height = convert->in_height; |
4310 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
4311 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
4312 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
4313 | 0 | gint h2; |
4314 | 0 | FConvertTask *tasks; |
4315 | 0 | FConvertTask **tasks_p; |
4316 | 0 | gint n_threads; |
4317 | 0 | gint lines_per_thread; |
4318 | | |
4319 | | /* I420 has half as many chroma lines, so we always have to
4320 | | * process two lines together, sharing one chroma line. For
4321 | | * non-interlaced content these are two adjacent lines; for
4322 | | * interlaced content one line is skipped in between. */
4323 | 0 | if (interlaced) |
4324 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
4325 | 0 | else |
4326 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
4327 | |
|
4328 | 0 | n_threads = convert->conversion_runner->n_threads; |
4329 | 0 | tasks = convert->tasks[0] = |
4330 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
4331 | 0 | tasks_p = convert->tasks_p[0] = |
4332 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
4333 | |
|
4334 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
4335 | |
|
4336 | 0 | for (i = 0; i < n_threads; i++) { |
4337 | 0 | tasks[i].src = src; |
4338 | 0 | tasks[i].dest = dest; |
4339 | |
|
4340 | 0 | tasks[i].interlaced = interlaced; |
4341 | 0 | tasks[i].width = width; |
4342 | |
|
4343 | 0 | tasks[i].height_0 = i * lines_per_thread; |
4344 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
4345 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
4346 | |
|
4347 | 0 | tasks_p[i] = &tasks[i]; |
4348 | 0 | } |
4349 | |
|
4350 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4351 | 0 | (GstParallelizedTaskFunc) convert_v210_I420_10_task, (gpointer) tasks_p); |
4352 | | |
4353 | | /* now handle last lines. For interlaced these are up to 3 */ |
4354 | 0 | if (h2 != height) { |
4355 | 0 | for (i = h2; i < height; i++) { |
4356 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
4357 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
4358 | 0 | } |
4359 | 0 | } |
4360 | 0 | } |
4361 | | |
4362 | | |
4363 | | typedef struct |
4364 | | { |
4365 | | const guint8 *s, *s2, *su, *sv; |
4366 | | guint8 *d, *d2, *du, *dv; |
4367 | | gint sstride, sustride, svstride; |
4368 | | gint dstride, dustride, dvstride; |
4369 | | gint width, height; |
4370 | | gint alpha; |
4371 | | MatrixData *data; |
4372 | | } FConvertPlaneTask; |
4373 | | |
4374 | | static void |
4375 | | convert_YUY2_AYUV_task (FConvertPlaneTask * task) |
4376 | 0 | { |
4377 | 0 | video_orc_convert_YUY2_AYUV (task->d, task->dstride, task->s, |
4378 | 0 | task->sstride, task->alpha, (task->width + 1) / 2, task->height); |
4379 | 0 | } |
4380 | | |
4381 | | static void |
4382 | | convert_YUY2_AYUV (GstVideoConverter * convert, const GstVideoFrame * src, |
4383 | | GstVideoFrame * dest) |
4384 | 0 | { |
4385 | 0 | gint width = convert->in_width; |
4386 | 0 | gint height = convert->in_height; |
4387 | 0 | guint8 *s, *d; |
4388 | 0 | guint8 alpha = MIN (convert->alpha_value, 255); |
4389 | 0 | FConvertPlaneTask *tasks; |
4390 | 0 | FConvertPlaneTask **tasks_p; |
4391 | 0 | gint n_threads; |
4392 | 0 | gint lines_per_thread; |
4393 | 0 | gint i; |
4394 | |
|
4395 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
4396 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
4397 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
4398 | 0 | d += (convert->out_x * 4); |
4399 | |
|
4400 | 0 | n_threads = convert->conversion_runner->n_threads; |
4401 | 0 | tasks = convert->tasks[0] = |
4402 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
4403 | 0 | tasks_p = convert->tasks_p[0] = |
4404 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
4405 | |
|
4406 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
4407 | |
|
4408 | 0 | for (i = 0; i < n_threads; i++) { |
4409 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
4410 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
4411 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
4412 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
4413 | |
|
4414 | 0 | tasks[i].width = width; |
4415 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
4416 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
4417 | 0 | tasks[i].height -= i * lines_per_thread; |
4418 | 0 | tasks[i].alpha = alpha; |
4419 | |
|
4420 | 0 | tasks_p[i] = &tasks[i]; |
4421 | 0 | } |
4422 | |
|
4423 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4424 | 0 | (GstParallelizedTaskFunc) convert_YUY2_AYUV_task, (gpointer) tasks_p); |
4425 | |
|
4426 | 0 | convert_fill_border (convert, dest); |
4427 | 0 | } |
4428 | | |
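Note: the packed-format converters in this file split the work identically: lines_per_thread is the ceiling of height / n_threads, thread i starts at line i * lines_per_thread, and the height assigned to each task is clamped so the last slice never runs past the frame. A standalone sketch of that arithmetic (frame height and thread count are made-up example values):

#include <stdio.h>

#define MIN(a,b) ((a) < (b) ? (a) : (b))

int
main (void)
{
  int height = 1085, n_threads = 8;
  int lines_per_thread = (height + n_threads - 1) / n_threads;  /* ceiling */
  int i;

  for (i = 0; i < n_threads; i++) {
    int start = i * lines_per_thread;
    /* same clamp as the tasks[i].height computation above */
    int h = MIN ((i + 1) * lines_per_thread, height) - start;

    printf ("thread %d: starts at line %d, converts %d lines\n", i, start, h);
  }
  return 0;
}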
4429 | | static void |
4430 | | convert_YUY2_v210_task (FConvertPlaneTask * task) |
4431 | 0 | { |
4432 | 0 | gint i, j; |
4433 | 0 | guint8 *d; |
4434 | 0 | const guint8 *s; |
4435 | 0 | guint32 a0, a1, a2, a3; |
4436 | 0 | guint8 y0, y1, y2, y3, y4, y5; |
4437 | 0 | guint8 u0, u2, u4; |
4438 | 0 | guint8 v0, v2, v4; |
4439 | |
|
4440 | 0 | for (i = 0; i < task->height; i++) { |
4441 | 0 | d = task->d + i * task->dstride; |
4442 | 0 | s = task->s + i * task->sstride; |
4443 | |
|
4444 | 0 | for (j = 0; j < task->width; j += 6) { |
4445 | 0 | y1 = y2 = y3 = y4 = y5 = 0; |
4446 | 0 | u2 = u4 = v2 = v4 = 0; |
4447 | |
|
4448 | 0 | y0 = s[2 * j]; |
4449 | 0 | u0 = s[2 * j + 1]; |
4450 | 0 | v0 = s[2 * j + 3]; |
4451 | |
|
4452 | 0 | if (j < task->width - 1) { |
4453 | 0 | y1 = s[2 * j + 2]; |
4454 | 0 | } |
4455 | |
|
4456 | 0 | if (j < task->width - 2) { |
4457 | 0 | y2 = s[2 * j + 4]; |
4458 | 0 | u2 = s[2 * j + 5]; |
4459 | 0 | v2 = s[2 * j + 7]; |
4460 | 0 | } |
4461 | |
|
4462 | 0 | if (j < task->width - 3) { |
4463 | 0 | y3 = s[2 * j + 6]; |
4464 | 0 | } |
4465 | |
|
4466 | 0 | if (j < task->width - 4) { |
4467 | 0 | y4 = s[2 * j + 8]; |
4468 | 0 | u4 = s[2 * j + 9]; |
4469 | 0 | v4 = s[2 * j + 11]; |
4470 | 0 | } |
4471 | |
|
4472 | 0 | if (j < task->width - 5) { |
4473 | 0 | y5 = s[2 * j + 10]; |
4474 | 0 | } |
4475 | |
|
4476 | 0 | a0 = u0 << 2 | (y0 << 12) | (v0 << 22); |
4477 | 0 | a1 = y1 << 2 | (u2 << 12) | (y2 << 22); |
4478 | 0 | a2 = v2 << 2 | (y3 << 12) | (u4 << 22); |
4479 | 0 | a3 = y4 << 2 | (v4 << 12) | (y5 << 22); |
4480 | |
|
4481 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 0, a0); |
4482 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 4, a1); |
4483 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 8, a2); |
4484 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 12, a3); |
4485 | 0 | } |
4486 | 0 | } |
4487 | 0 | } |
4488 | | |
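Note: convert_YUY2_v210_task above widens each 8-bit sample by two bits and packs three 10-bit samples per little-endian 32-bit word, four words per group of six pixels (U0 Y0 V0, Y1 U2 Y2, V2 Y3 U4, Y4 V4 Y5). A small pack/round-trip sketch of that 10-bit layout, without the GST_WRITE/READ macros:

#include <stdint.h>
#include <stdio.h>

/* Pack three 10-bit samples into one v210-style word: s0 in bits 0-9,
 * s1 in bits 10-19, s2 in bits 20-29; bits 30-31 stay zero. */
static uint32_t
pack10 (uint32_t s0, uint32_t s1, uint32_t s2)
{
  return (s0 & 0x3ff) | ((s1 & 0x3ff) << 10) | ((s2 & 0x3ff) << 20);
}

int
main (void)
{
  /* An 8-bit U/Y/V triple widened to 10 bits, as the task does with << 2 */
  uint32_t u0 = 0x80 << 2, y0 = 0x10 << 2, v0 = 0x80 << 2;
  uint32_t a0 = pack10 (u0, y0, v0);

  printf ("a0 = 0x%08x\n", a0);
  printf ("u0 = 0x%03x  y0 = 0x%03x  v0 = 0x%03x\n",
      a0 & 0x3ff, (a0 >> 10) & 0x3ff, (a0 >> 20) & 0x3ff);
  return 0;
}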
4489 | | static void |
4490 | | convert_YUY2_v210 (GstVideoConverter * convert, const GstVideoFrame * src, |
4491 | | GstVideoFrame * dest) |
4492 | 0 | { |
4493 | 0 | gint width = convert->in_width; |
4494 | 0 | gint height = convert->in_height; |
4495 | 0 | guint8 *s, *d; |
4496 | 0 | FConvertPlaneTask *tasks; |
4497 | 0 | FConvertPlaneTask **tasks_p; |
4498 | 0 | gint n_threads; |
4499 | 0 | gint lines_per_thread; |
4500 | 0 | gint i; |
4501 | |
|
4502 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
4503 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
4504 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
4505 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
4506 | |
|
4507 | 0 | n_threads = convert->conversion_runner->n_threads; |
4508 | 0 | tasks = convert->tasks[0] = |
4509 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
4510 | 0 | tasks_p = convert->tasks_p[0] = |
4511 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
4512 | |
|
4513 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
4514 | |
|
4515 | 0 | for (i = 0; i < n_threads; i++) { |
4516 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
4517 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
4518 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
4519 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
4520 | |
|
4521 | 0 | tasks[i].width = width; |
4522 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
4523 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
4524 | 0 | tasks[i].height -= i * lines_per_thread; |
4525 | |
|
4526 | 0 | tasks_p[i] = &tasks[i]; |
4527 | 0 | } |
4528 | |
|
4529 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4530 | 0 | (GstParallelizedTaskFunc) convert_YUY2_v210_task, (gpointer) tasks_p); |
4531 | |
|
4532 | 0 | convert_fill_border (convert, dest); |
4533 | 0 | } |
4534 | | |
4535 | | static void |
4536 | | convert_YUY2_Y42B_task (FConvertPlaneTask * task) |
4537 | 0 | { |
4538 | 0 | video_orc_convert_YUY2_Y42B (task->d, task->dstride, task->du, |
4539 | 0 | task->dustride, task->dv, task->dvstride, |
4540 | 0 | task->s, task->sstride, (task->width + 1) / 2, task->height); |
4541 | 0 | } |
4542 | | |
4543 | | static void |
4544 | | convert_YUY2_Y42B (GstVideoConverter * convert, const GstVideoFrame * src, |
4545 | | GstVideoFrame * dest) |
4546 | 0 | { |
4547 | 0 | gint width = convert->in_width; |
4548 | 0 | gint height = convert->in_height; |
4549 | 0 | guint8 *s, *dy, *du, *dv; |
4550 | 0 | FConvertPlaneTask *tasks; |
4551 | 0 | FConvertPlaneTask **tasks_p; |
4552 | 0 | gint n_threads; |
4553 | 0 | gint lines_per_thread; |
4554 | 0 | gint i; |
4555 | |
|
4556 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
4557 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
4558 | |
|
4559 | 0 | dy = FRAME_GET_Y_LINE (dest, convert->out_y); |
4560 | 0 | dy += convert->out_x; |
4561 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y); |
4562 | 0 | du += convert->out_x >> 1; |
4563 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y); |
4564 | 0 | dv += convert->out_x >> 1; |
4565 | |
|
4566 | 0 | n_threads = convert->conversion_runner->n_threads; |
4567 | 0 | tasks = convert->tasks[0] = |
4568 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
4569 | 0 | tasks_p = convert->tasks_p[0] = |
4570 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
4571 | |
|
4572 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
4573 | |
|
4574 | 0 | for (i = 0; i < n_threads; i++) { |
4575 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
4576 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
4577 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
4578 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
4579 | 0 | tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride; |
4580 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride; |
4581 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride; |
4582 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
4583 | |
|
4584 | 0 | tasks[i].width = width; |
4585 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
4586 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
4587 | 0 | tasks[i].height -= i * lines_per_thread; |
4588 | |
|
4589 | 0 | tasks_p[i] = &tasks[i]; |
4590 | 0 | } |
4591 | |
|
4592 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4593 | 0 | (GstParallelizedTaskFunc) convert_YUY2_Y42B_task, (gpointer) tasks_p); |
4594 | |
|
4595 | 0 | convert_fill_border (convert, dest); |
4596 | 0 | } |
4597 | | |
4598 | | static void |
4599 | | convert_YUY2_Y444_task (FConvertPlaneTask * task) |
4600 | 0 | { |
4601 | 0 | video_orc_convert_YUY2_Y444 (task->d, |
4602 | 0 | task->dstride, task->du, |
4603 | 0 | task->dustride, task->dv, |
4604 | 0 | task->dvstride, task->s, |
4605 | 0 | task->sstride, (task->width + 1) / 2, task->height); |
4606 | 0 | } |
4607 | | |
4608 | | static void |
4609 | | convert_YUY2_Y444 (GstVideoConverter * convert, const GstVideoFrame * src, |
4610 | | GstVideoFrame * dest) |
4611 | 0 | { |
4612 | 0 | gint width = convert->in_width; |
4613 | 0 | gint height = convert->in_height; |
4614 | 0 | guint8 *s, *dy, *du, *dv; |
4615 | 0 | FConvertPlaneTask *tasks; |
4616 | 0 | FConvertPlaneTask **tasks_p; |
4617 | 0 | gint n_threads; |
4618 | 0 | gint lines_per_thread; |
4619 | 0 | gint i; |
4620 | |
|
4621 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
4622 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
4623 | |
|
4624 | 0 | dy = FRAME_GET_Y_LINE (dest, convert->out_y); |
4625 | 0 | dy += convert->out_x; |
4626 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y); |
4627 | 0 | du += convert->out_x; |
4628 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y); |
4629 | 0 | dv += convert->out_x; |
4630 | |
|
4631 | 0 | n_threads = convert->conversion_runner->n_threads; |
4632 | 0 | tasks = convert->tasks[0] = |
4633 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
4634 | 0 | tasks_p = convert->tasks_p[0] = |
4635 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
4636 | |
|
4637 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
4638 | |
|
4639 | 0 | for (i = 0; i < n_threads; i++) { |
4640 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
4641 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
4642 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
4643 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
4644 | 0 | tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride; |
4645 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride; |
4646 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride; |
4647 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
4648 | |
|
4649 | 0 | tasks[i].width = width; |
4650 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
4651 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
4652 | 0 | tasks[i].height -= i * lines_per_thread; |
4653 | |
|
4654 | 0 | tasks_p[i] = &tasks[i]; |
4655 | 0 | } |
4656 | |
|
4657 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4658 | 0 | (GstParallelizedTaskFunc) convert_YUY2_Y444_task, (gpointer) tasks_p); |
4659 | |
|
4660 | 0 | convert_fill_border (convert, dest); |
4661 | 0 | } |
4662 | | |
4663 | | static void |
4664 | | convert_v210_Y42B_task (FConvertPlaneTask * task) |
4665 | 0 | { |
4666 | 0 | gint i, j; |
4667 | 0 | guint8 *d_y, *d_u, *d_v; |
4668 | 0 | const guint8 *s; |
4669 | 0 | guint32 a0, a1, a2, a3; |
4670 | 0 | guint16 y0, y1, y2, y3, y4, y5; |
4671 | 0 | guint16 u0, u2, u4; |
4672 | 0 | guint16 v0, v2, v4; |
4673 | |
|
4674 | 0 | for (i = 0; i < task->height; i++) { |
4675 | 0 | d_y = task->d + i * task->dstride; |
4676 | 0 | d_u = task->du + i * task->dustride; |
4677 | 0 | d_v = task->dv + i * task->dvstride; |
4678 | 0 | s = task->s + i * task->sstride; |
4679 | |
|
4680 | 0 | for (j = 0; j < task->width; j += 6) { |
4681 | 0 | a0 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 0); |
4682 | 0 | a1 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 4); |
4683 | 0 | a2 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 8); |
4684 | 0 | a3 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 12); |
4685 | |
|
4686 | 0 | u0 = ((a0 >> 0) & 0x3ff) >> 2; |
4687 | 0 | y0 = ((a0 >> 10) & 0x3ff) >> 2; |
4688 | 0 | v0 = ((a0 >> 20) & 0x3ff) >> 2; |
4689 | 0 | y1 = ((a1 >> 0) & 0x3ff) >> 2; |
4690 | |
|
4691 | 0 | u2 = ((a1 >> 10) & 0x3ff) >> 2; |
4692 | 0 | y2 = ((a1 >> 20) & 0x3ff) >> 2; |
4693 | 0 | v2 = ((a2 >> 0) & 0x3ff) >> 2; |
4694 | 0 | y3 = ((a2 >> 10) & 0x3ff) >> 2; |
4695 | |
|
4696 | 0 | u4 = ((a2 >> 20) & 0x3ff) >> 2; |
4697 | 0 | y4 = ((a3 >> 0) & 0x3ff) >> 2; |
4698 | 0 | v4 = ((a3 >> 10) & 0x3ff) >> 2; |
4699 | 0 | y5 = ((a3 >> 20) & 0x3ff) >> 2; |
4700 | |
|
4701 | 0 | d_y[j] = y0; |
4702 | 0 | d_u[j / 2] = u0; |
4703 | 0 | d_v[j / 2] = v0; |
4704 | |
|
4705 | 0 | if (j < task->width - 1) { |
4706 | 0 | d_y[j + 1] = y1; |
4707 | 0 | } |
4708 | |
|
4709 | 0 | if (j < task->width - 2) { |
4710 | 0 | d_y[j + 2] = y2; |
4711 | 0 | d_u[j / 2 + 1] = u2; |
4712 | 0 | d_v[j / 2 + 1] = v2; |
4713 | 0 | } |
4714 | |
|
4715 | 0 | if (j < task->width - 3) { |
4716 | 0 | d_y[j + 3] = y3; |
4717 | 0 | } |
4718 | |
|
4719 | 0 | if (j < task->width - 4) { |
4720 | 0 | d_y[j + 4] = y4; |
4721 | 0 | d_u[j / 2 + 2] = u4; |
4722 | 0 | d_v[j / 2 + 2] = v4; |
4723 | 0 | } |
4724 | |
|
4725 | 0 | if (j < task->width - 5) { |
4726 | 0 | d_y[j + 5] = y5; |
4727 | 0 | } |
4728 | 0 | } |
4729 | 0 | } |
4730 | 0 | } |
4731 | | |
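Note: convert_v210_Y42B_task above reduces each 10-bit sample to 8 bits by simply dropping the two least-significant bits (>> 2); there is no rounding or dithering, so neighbouring 10-bit codes collapse onto the same 8-bit value. For example:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Two 10-bit samples either side of a rounding boundary: both map to
   * the same 8-bit value because the low two bits are simply dropped. */
  uint16_t a = 0x201;           /* 513 */
  uint16_t b = 0x203;           /* 515 */

  printf ("0x%03x -> 0x%02x\n", a, a >> 2);   /* 0x80 */
  printf ("0x%03x -> 0x%02x\n", b, b >> 2);   /* 0x80 */
  return 0;
}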
4732 | | static void |
4733 | | convert_v210_Y42B (GstVideoConverter * convert, const GstVideoFrame * src, |
4734 | | GstVideoFrame * dest) |
4735 | 0 | { |
4736 | 0 | gint width = convert->in_width; |
4737 | 0 | gint height = convert->in_height; |
4738 | 0 | guint8 *s, *dy, *du, *dv; |
4739 | 0 | FConvertPlaneTask *tasks; |
4740 | 0 | FConvertPlaneTask **tasks_p; |
4741 | 0 | gint n_threads; |
4742 | 0 | gint lines_per_thread; |
4743 | 0 | gint i; |
4744 | |
|
4745 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
4746 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
4747 | |
|
4748 | 0 | dy = FRAME_GET_Y_LINE (dest, convert->out_y); |
4749 | 0 | dy += convert->out_x; |
4750 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y); |
4751 | 0 | du += convert->out_x >> 1; |
4752 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y); |
4753 | 0 | dv += convert->out_x >> 1; |
4754 | |
|
4755 | 0 | n_threads = convert->conversion_runner->n_threads; |
4756 | 0 | tasks = convert->tasks[0] = |
4757 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
4758 | 0 | tasks_p = convert->tasks_p[0] = |
4759 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
4760 | |
|
4761 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
4762 | |
|
4763 | 0 | for (i = 0; i < n_threads; i++) { |
4764 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
4765 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
4766 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
4767 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
4768 | 0 | tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride; |
4769 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride; |
4770 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride; |
4771 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
4772 | |
|
4773 | 0 | tasks[i].width = width; |
4774 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
4775 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
4776 | 0 | tasks[i].height -= i * lines_per_thread; |
4777 | |
|
4778 | 0 | tasks_p[i] = &tasks[i]; |
4779 | 0 | } |
4780 | |
|
4781 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4782 | 0 | (GstParallelizedTaskFunc) convert_v210_Y42B_task, (gpointer) tasks_p); |
4783 | |
|
4784 | 0 | convert_fill_border (convert, dest); |
4785 | 0 | } |
4786 | | |
4787 | | static void |
4788 | | convert_v210_I422_10_task (FConvertPlaneTask * task) |
4789 | 0 | { |
4790 | 0 | gint i, j; |
4791 | 0 | guint16 *d_y, *d_u, *d_v; |
4792 | 0 | const guint8 *s; |
4793 | 0 | guint32 a0, a1, a2, a3; |
4794 | 0 | guint16 y0, y1, y2, y3, y4, y5; |
4795 | 0 | guint16 u0, u2, u4; |
4796 | 0 | guint16 v0, v2, v4; |
4797 | |
|
4798 | 0 | for (i = 0; i < task->height; i++) { |
4799 | 0 | d_y = (guint16 *) (task->d + i * task->dstride); |
4800 | 0 | d_u = (guint16 *) (task->du + i * task->dustride); |
4801 | 0 | d_v = (guint16 *) (task->dv + i * task->dvstride); |
4802 | 0 | s = task->s + i * task->sstride; |
4803 | |
|
4804 | 0 | for (j = 0; j < task->width; j += 6) { |
4805 | 0 | a0 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 0); |
4806 | 0 | a1 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 4); |
4807 | 0 | a2 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 8); |
4808 | 0 | a3 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 12); |
4809 | |
|
4810 | 0 | u0 = ((a0 >> 0) & 0x3ff); |
4811 | 0 | y0 = ((a0 >> 10) & 0x3ff); |
4812 | 0 | v0 = ((a0 >> 20) & 0x3ff); |
4813 | 0 | y1 = ((a1 >> 0) & 0x3ff); |
4814 | |
|
4815 | 0 | u2 = ((a1 >> 10) & 0x3ff); |
4816 | 0 | y2 = ((a1 >> 20) & 0x3ff); |
4817 | 0 | v2 = ((a2 >> 0) & 0x3ff); |
4818 | 0 | y3 = ((a2 >> 10) & 0x3ff); |
4819 | |
|
4820 | 0 | u4 = ((a2 >> 20) & 0x3ff); |
4821 | 0 | y4 = ((a3 >> 0) & 0x3ff); |
4822 | 0 | v4 = ((a3 >> 10) & 0x3ff); |
4823 | 0 | y5 = ((a3 >> 20) & 0x3ff); |
4824 | |
|
4825 | 0 | d_y[j] = y0; |
4826 | 0 | d_u[j / 2] = u0; |
4827 | 0 | d_v[j / 2] = v0; |
4828 | |
|
4829 | 0 | if (j < task->width - 1) { |
4830 | 0 | d_y[j + 1] = y1; |
4831 | 0 | } |
4832 | |
|
4833 | 0 | if (j < task->width - 2) { |
4834 | 0 | d_y[j + 2] = y2; |
4835 | 0 | d_u[j / 2 + 1] = u2; |
4836 | 0 | d_v[j / 2 + 1] = v2; |
4837 | 0 | } |
4838 | |
|
4839 | 0 | if (j < task->width - 3) { |
4840 | 0 | d_y[j + 3] = y3; |
4841 | 0 | } |
4842 | |
|
4843 | 0 | if (j < task->width - 4) { |
4844 | 0 | d_y[j + 4] = y4; |
4845 | 0 | d_u[j / 2 + 2] = u4; |
4846 | 0 | d_v[j / 2 + 2] = v4; |
4847 | 0 | } |
4848 | |
|
4849 | 0 | if (j < task->width - 5) { |
4850 | 0 | d_y[j + 5] = y5; |
4851 | 0 | } |
4852 | 0 | } |
4853 | 0 | } |
4854 | 0 | } |
4855 | | |
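Note: in convert_v210_I422_10_task above the destination strides stay in bytes, so each row pointer is advanced in bytes first and only then cast to guint16 *; the j indices afterwards count 16-bit samples. A tiny standalone illustration of that addressing (buffer sizes are arbitrary):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* A 4-sample-wide, 2-row 16-bit plane with padding: the stride is a
   * byte count, as in GstVideoFrame. */
  enum { WIDTH = 4, STRIDE_BYTES = 16, ROWS = 2 };
  uint16_t storage[(STRIDE_BYTES / 2) * ROWS] = { 0 };
  uint8_t *plane = (uint8_t *) storage;
  int i, j;

  for (i = 0; i < ROWS; i++) {
    uint16_t *row = (uint16_t *) (plane + i * STRIDE_BYTES);  /* bytes, then cast */

    for (j = 0; j < WIDTH; j++)
      row[j] = (uint16_t) (i * 100 + j);                      /* 16-bit samples */
  }

  printf ("row 1, sample 2 = %u\n",
      (unsigned) ((uint16_t *) (plane + 1 * STRIDE_BYTES))[2]);   /* prints 102 */
  return 0;
}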
4856 | | static void |
4857 | | convert_v210_I422_10 (GstVideoConverter * convert, const GstVideoFrame * src, |
4858 | | GstVideoFrame * dest) |
4859 | 0 | { |
4860 | 0 | gint width = convert->in_width; |
4861 | 0 | gint height = convert->in_height; |
4862 | 0 | guint8 *s, *dy, *du, *dv; |
4863 | 0 | FConvertPlaneTask *tasks; |
4864 | 0 | FConvertPlaneTask **tasks_p; |
4865 | 0 | gint n_threads; |
4866 | 0 | gint lines_per_thread; |
4867 | 0 | gint i; |
4868 | |
|
4869 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
4870 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
4871 | |
|
4872 | 0 | dy = FRAME_GET_Y_LINE (dest, convert->out_y); |
4873 | 0 | dy += convert->out_x; |
4874 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y); |
4875 | 0 | du += convert->out_x >> 1; |
4876 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y); |
4877 | 0 | dv += convert->out_x >> 1; |
4878 | |
|
4879 | 0 | n_threads = convert->conversion_runner->n_threads; |
4880 | 0 | tasks = convert->tasks[0] = |
4881 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
4882 | 0 | tasks_p = convert->tasks_p[0] = |
4883 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
4884 | |
|
4885 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
4886 | |
|
4887 | 0 | for (i = 0; i < n_threads; i++) { |
4888 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
4889 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
4890 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
4891 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
4892 | 0 | tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride; |
4893 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride; |
4894 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride; |
4895 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
4896 | |
|
4897 | 0 | tasks[i].width = width; |
4898 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
4899 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
4900 | 0 | tasks[i].height -= i * lines_per_thread; |
4901 | |
|
4902 | 0 | tasks_p[i] = &tasks[i]; |
4903 | 0 | } |
4904 | |
|
4905 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4906 | 0 | (GstParallelizedTaskFunc) convert_v210_I422_10_task, (gpointer) tasks_p); |
4907 | |
|
4908 | 0 | convert_fill_border (convert, dest); |
4909 | 0 | } |
4910 | | |
4911 | | static void |
4912 | | convert_UYVY_I420_task (FConvertTask * task) |
4913 | 0 | { |
4914 | 0 | gint i; |
4915 | 0 | gint l1, l2; |
4916 | |
|
4917 | 0 | for (i = task->height_0; i < task->height_1; i += 2) { |
4918 | 0 | GET_LINE_OFFSETS (task->interlaced, i, l1, l2); |
4919 | |
|
4920 | 0 | video_orc_convert_UYVY_I420 (FRAME_GET_COMP_LINE (task->dest, 0, l1), |
4921 | 0 | FRAME_GET_COMP_LINE (task->dest, 0, l2), |
4922 | 0 | FRAME_GET_COMP_LINE (task->dest, 1, i >> 1), |
4923 | 0 | FRAME_GET_COMP_LINE (task->dest, 2, i >> 1), |
4924 | 0 | FRAME_GET_LINE (task->src, l1), FRAME_GET_LINE (task->src, l2), |
4925 | 0 | (task->width + 1) / 2); |
4926 | 0 | } |
4927 | 0 | } |
4928 | | |
4929 | | static void |
4930 | | convert_UYVY_I420 (GstVideoConverter * convert, const GstVideoFrame * src, |
4931 | | GstVideoFrame * dest) |
4932 | 0 | { |
4933 | 0 | int i; |
4934 | 0 | gint width = convert->in_width; |
4935 | 0 | gint height = convert->in_height; |
4936 | 0 | gboolean interlaced = GST_VIDEO_FRAME_IS_INTERLACED (src) |
4937 | 0 | && (GST_VIDEO_INFO_INTERLACE_MODE (&src->info) != |
4938 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE); |
4939 | 0 | gint h2; |
4940 | 0 | FConvertTask *tasks; |
4941 | 0 | FConvertTask **tasks_p; |
4942 | 0 | gint n_threads; |
4943 | 0 | gint lines_per_thread; |
4944 | | |
4945 | | /* I420 has half as many chroma lines, so we always have to
4946 | | * merge two into one. For non-interlaced content these are the
4947 | | * two adjacent lines; for interlaced content one line is skipped
4948 | | * in between. */
4949 | 0 | if (interlaced) |
4950 | 0 | h2 = GST_ROUND_DOWN_4 (height); |
4951 | 0 | else |
4952 | 0 | h2 = GST_ROUND_DOWN_2 (height); |
4953 | |
|
4954 | 0 | n_threads = convert->conversion_runner->n_threads; |
4955 | 0 | tasks = convert->tasks[0] = |
4956 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
4957 | 0 | tasks_p = convert->tasks_p[0] = |
4958 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
4959 | |
|
4960 | 0 | lines_per_thread = GST_ROUND_UP_2 ((h2 + n_threads - 1) / n_threads); |
4961 | |
|
4962 | 0 | for (i = 0; i < n_threads; i++) { |
4963 | 0 | tasks[i].src = src; |
4964 | 0 | tasks[i].dest = dest; |
4965 | |
|
4966 | 0 | tasks[i].interlaced = interlaced; |
4967 | 0 | tasks[i].width = width; |
4968 | |
|
4969 | 0 | tasks[i].height_0 = i * lines_per_thread; |
4970 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
4971 | 0 | tasks[i].height_1 = MIN (h2, tasks[i].height_1); |
4972 | |
|
4973 | 0 | tasks_p[i] = &tasks[i]; |
4974 | 0 | } |
4975 | |
|
4976 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
4977 | 0 | (GstParallelizedTaskFunc) convert_UYVY_I420_task, (gpointer) tasks_p); |
4978 | | |
4979 | | /* now handle the last lines. For interlaced content there can be up to 3 */
4980 | 0 | if (h2 != height) { |
4981 | 0 | for (i = h2; i < height; i++) { |
4982 | 0 | UNPACK_FRAME (src, convert->tmpline[0], i, convert->in_x, width); |
4983 | 0 | PACK_FRAME (dest, convert->tmpline[0], i, width); |
4984 | 0 | } |
4985 | 0 | } |
4986 | 0 | } |
4987 | | |
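Note: the tail loop in convert_UYVY_I420 (and in convert_v210_I420_10 earlier) covers the lines the pairing cannot reach: progressive heights are rounded down to a multiple of 2, leaving at most one line, while interlaced heights are rounded down to a multiple of 4, leaving up to three. A quick check of those remainders:

#include <stdio.h>

#define ROUND_DOWN_2(x) ((x) & ~1)
#define ROUND_DOWN_4(x) ((x) & ~3)

int
main (void)
{
  int heights[] = { 480, 481, 482, 483 };
  int k;

  for (k = 0; k < 4; k++) {
    int h = heights[k];
    printf ("height %d: progressive leftover %d, interlaced leftover %d\n",
        h, h - ROUND_DOWN_2 (h), h - ROUND_DOWN_4 (h));
  }
  return 0;
}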
4988 | | static void |
4989 | | convert_UYVY_AYUV_task (FConvertPlaneTask * task) |
4990 | 0 | { |
4991 | 0 | video_orc_convert_UYVY_AYUV (task->d, task->dstride, task->s, |
4992 | 0 | task->sstride, task->alpha, (task->width + 1) / 2, task->height); |
4993 | 0 | } |
4994 | | |
4995 | | static void |
4996 | | convert_UYVY_AYUV (GstVideoConverter * convert, const GstVideoFrame * src, |
4997 | | GstVideoFrame * dest) |
4998 | 0 | { |
4999 | 0 | gint width = convert->in_width; |
5000 | 0 | gint height = convert->in_height; |
5001 | 0 | guint8 *s, *d; |
5002 | 0 | guint8 alpha = MIN (convert->alpha_value, 255); |
5003 | 0 | FConvertPlaneTask *tasks; |
5004 | 0 | FConvertPlaneTask **tasks_p; |
5005 | 0 | gint n_threads; |
5006 | 0 | gint lines_per_thread; |
5007 | 0 | gint i; |
5008 | |
|
5009 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5010 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
5011 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
5012 | 0 | d += (convert->out_x * 4); |
5013 | |
|
5014 | 0 | n_threads = convert->conversion_runner->n_threads; |
5015 | 0 | tasks = convert->tasks[0] = |
5016 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5017 | 0 | tasks_p = convert->tasks_p[0] = |
5018 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5019 | |
|
5020 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5021 | |
|
5022 | 0 | for (i = 0; i < n_threads; i++) { |
5023 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5024 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5025 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5026 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5027 | |
|
5028 | 0 | tasks[i].width = width; |
5029 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5030 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5031 | 0 | tasks[i].height -= i * lines_per_thread; |
5032 | 0 | tasks[i].alpha = alpha; |
5033 | |
|
5034 | 0 | tasks_p[i] = &tasks[i]; |
5035 | 0 | } |
5036 | |
|
5037 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5038 | 0 | (GstParallelizedTaskFunc) convert_UYVY_AYUV_task, (gpointer) tasks_p); |
5039 | |
|
5040 | 0 | convert_fill_border (convert, dest); |
5041 | 0 | } |
5042 | | |
5043 | | static void |
5044 | | convert_UYVY_v210_task (FConvertPlaneTask * task) |
5045 | 0 | { |
5046 | 0 | gint i, j; |
5047 | 0 | guint8 *d; |
5048 | 0 | const guint8 *s; |
5049 | 0 | guint32 a0, a1, a2, a3; |
5050 | 0 | guint8 y0, y1, y2, y3, y4, y5; |
5051 | 0 | guint8 u0, u2, u4; |
5052 | 0 | guint8 v0, v2, v4; |
5053 | |
|
5054 | 0 | for (i = 0; i < task->height; i++) { |
5055 | 0 | d = task->d + i * task->dstride; |
5056 | 0 | s = task->s + i * task->sstride; |
5057 | |
|
5058 | 0 | for (j = 0; j < task->width; j += 6) { |
5059 | 0 | y1 = y2 = y3 = y4 = y5 = 0; |
5060 | 0 | u2 = u4 = v2 = v4 = 0; |
5061 | |
|
5062 | 0 | y0 = s[2 * j + 1]; |
5063 | 0 | u0 = s[2 * j]; |
5064 | 0 | v0 = s[2 * j + 2]; |
5065 | |
|
5066 | 0 | if (j < task->width - 1) { |
5067 | 0 | y1 = s[2 * j + 3]; |
5068 | 0 | } |
5069 | |
|
5070 | 0 | if (j < task->width - 2) { |
5071 | 0 | y2 = s[2 * j + 5]; |
5072 | 0 | u2 = s[2 * j + 4]; |
5073 | 0 | v2 = s[2 * j + 6]; |
5074 | 0 | } |
5075 | |
|
5076 | 0 | if (j < task->width - 3) { |
5077 | 0 | y3 = s[2 * j + 7]; |
5078 | 0 | } |
5079 | |
|
5080 | 0 | if (j < task->width - 4) { |
5081 | 0 | y4 = s[2 * j + 9]; |
5082 | 0 | u4 = s[2 * j + 8]; |
5083 | 0 | v4 = s[2 * j + 10]; |
5084 | 0 | } |
5085 | |
|
5086 | 0 | if (j < task->width - 5) { |
5087 | 0 | y5 = s[2 * j + 11]; |
5088 | 0 | } |
5089 | |
|
5090 | 0 | a0 = u0 << 2 | (y0 << 12) | (v0 << 22); |
5091 | 0 | a1 = y1 << 2 | (u2 << 12) | (y2 << 22); |
5092 | 0 | a2 = v2 << 2 | (y3 << 12) | (u4 << 22); |
5093 | 0 | a3 = y4 << 2 | (v4 << 12) | (y5 << 22); |
5094 | |
|
5095 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 0, a0); |
5096 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 4, a1); |
5097 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 8, a2); |
5098 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 12, a3); |
5099 | 0 | } |
5100 | 0 | } |
5101 | 0 | } |
5102 | | |
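Note: convert_UYVY_v210_task above differs from the YUY2 variant only in where it reads each component: YUY2 stores Y0 U0 Y1 V0 per macropixel while UYVY stores U0 Y0 V0 Y1, which is why the luma reads moved from even to odd byte offsets. A trivial reference of the two layouts:

#include <stdio.h>

int
main (void)
{
  /* Byte order inside one two-pixel macropixel of each packed 4:2:2 format */
  const char *yuy2[4] = { "Y0", "U0", "Y1", "V0" };
  const char *uyvy[4] = { "U0", "Y0", "V0", "Y1" };
  int i;

  for (i = 0; i < 4; i++)
    printf ("byte %d: YUY2 = %s  UYVY = %s\n", i, yuy2[i], uyvy[i]);
  return 0;
}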
5103 | | static void |
5104 | | convert_UYVY_v210 (GstVideoConverter * convert, const GstVideoFrame * src, |
5105 | | GstVideoFrame * dest) |
5106 | 0 | { |
5107 | 0 | gint width = convert->in_width; |
5108 | 0 | gint height = convert->in_height; |
5109 | 0 | guint8 *s, *d; |
5110 | 0 | FConvertPlaneTask *tasks; |
5111 | 0 | FConvertPlaneTask **tasks_p; |
5112 | 0 | gint n_threads; |
5113 | 0 | gint lines_per_thread; |
5114 | 0 | gint i; |
5115 | |
|
5116 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5117 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
5118 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
5119 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
5120 | |
|
5121 | 0 | n_threads = convert->conversion_runner->n_threads; |
5122 | 0 | tasks = convert->tasks[0] = |
5123 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5124 | 0 | tasks_p = convert->tasks_p[0] = |
5125 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5126 | |
|
5127 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5128 | |
|
5129 | 0 | for (i = 0; i < n_threads; i++) { |
5130 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5131 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5132 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5133 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5134 | |
|
5135 | 0 | tasks[i].width = width; |
5136 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5137 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5138 | 0 | tasks[i].height -= i * lines_per_thread; |
5139 | |
|
5140 | 0 | tasks_p[i] = &tasks[i]; |
5141 | 0 | } |
5142 | |
|
5143 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5144 | 0 | (GstParallelizedTaskFunc) convert_UYVY_v210_task, (gpointer) tasks_p); |
5145 | |
|
5146 | 0 | convert_fill_border (convert, dest); |
5147 | 0 | } |
5148 | | |
5149 | | static void |
5150 | | convert_UYVY_YUY2_task (FConvertPlaneTask * task) |
5151 | 0 | { |
5152 | 0 | video_orc_convert_UYVY_YUY2 (task->d, task->dstride, task->s, |
5153 | 0 | task->sstride, (task->width + 1) / 2, task->height); |
5154 | 0 | } |
5155 | | |
5156 | | static void |
5157 | | convert_UYVY_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src, |
5158 | | GstVideoFrame * dest) |
5159 | 0 | { |
5160 | 0 | gint width = convert->in_width; |
5161 | 0 | gint height = convert->in_height; |
5162 | 0 | guint8 *s, *d; |
5163 | 0 | FConvertPlaneTask *tasks; |
5164 | 0 | FConvertPlaneTask **tasks_p; |
5165 | 0 | gint n_threads; |
5166 | 0 | gint lines_per_thread; |
5167 | 0 | gint i; |
5168 | |
|
5169 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5170 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
5171 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
5172 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
5173 | |
|
5174 | 0 | n_threads = convert->conversion_runner->n_threads; |
5175 | 0 | tasks = convert->tasks[0] = |
5176 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5177 | 0 | tasks_p = convert->tasks_p[0] = |
5178 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5179 | |
|
5180 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5181 | |
|
5182 | 0 | for (i = 0; i < n_threads; i++) { |
5183 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5184 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5185 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5186 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5187 | |
|
5188 | 0 | tasks[i].width = width; |
5189 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5190 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5191 | 0 | tasks[i].height -= i * lines_per_thread; |
5192 | |
|
5193 | 0 | tasks_p[i] = &tasks[i]; |
5194 | 0 | } |
5195 | |
|
5196 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5197 | 0 | (GstParallelizedTaskFunc) convert_UYVY_YUY2_task, (gpointer) tasks_p); |
5198 | |
|
5199 | 0 | convert_fill_border (convert, dest); |
5200 | 0 | } |
5201 | | |
5202 | | static void |
5203 | | convert_v210_UYVY_task (FConvertPlaneTask * task) |
5204 | 0 | { |
5205 | 0 | gint i, j; |
5206 | 0 | guint8 *d; |
5207 | 0 | const guint8 *s; |
5208 | 0 | guint32 a0, a1, a2, a3; |
5209 | 0 | guint16 y0, y1, y2, y3, y4, y5; |
5210 | 0 | guint16 u0, u2, u4; |
5211 | 0 | guint16 v0, v2, v4; |
5212 | |
|
5213 | 0 | for (i = 0; i < task->height; i++) { |
5214 | 0 | d = task->d + i * task->dstride; |
5215 | 0 | s = task->s + i * task->sstride; |
5216 | |
|
5217 | 0 | for (j = 0; j < task->width; j += 6) { |
5218 | 0 | a0 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 0); |
5219 | 0 | a1 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 4); |
5220 | 0 | a2 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 8); |
5221 | 0 | a3 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 12); |
5222 | |
|
5223 | 0 | u0 = ((a0 >> 0) & 0x3ff) >> 2; |
5224 | 0 | y0 = ((a0 >> 10) & 0x3ff) >> 2; |
5225 | 0 | v0 = ((a0 >> 20) & 0x3ff) >> 2; |
5226 | 0 | y1 = ((a1 >> 0) & 0x3ff) >> 2; |
5227 | |
|
5228 | 0 | u2 = ((a1 >> 10) & 0x3ff) >> 2; |
5229 | 0 | y2 = ((a1 >> 20) & 0x3ff) >> 2; |
5230 | 0 | v2 = ((a2 >> 0) & 0x3ff) >> 2; |
5231 | 0 | y3 = ((a2 >> 10) & 0x3ff) >> 2; |
5232 | |
|
5233 | 0 | u4 = ((a2 >> 20) & 0x3ff) >> 2; |
5234 | 0 | y4 = ((a3 >> 0) & 0x3ff) >> 2; |
5235 | 0 | v4 = ((a3 >> 10) & 0x3ff) >> 2; |
5236 | 0 | y5 = ((a3 >> 20) & 0x3ff) >> 2; |
5237 | |
|
5238 | 0 | d[2 * j + 1] = y0; |
5239 | 0 | d[2 * j] = u0; |
5240 | 0 | d[2 * j + 2] = v0; |
5241 | |
|
5242 | 0 | if (j < task->width - 1) { |
5243 | 0 | d[2 * j + 3] = y1; |
5244 | 0 | } |
5245 | |
|
5246 | 0 | if (j < task->width - 2) { |
5247 | 0 | d[2 * j + 5] = y2; |
5248 | 0 | d[2 * j + 4] = u2; |
5249 | 0 | d[2 * j + 6] = v2; |
5250 | 0 | } |
5251 | |
|
5252 | 0 | if (j < task->width - 3) { |
5253 | 0 | d[2 * j + 7] = y3; |
5254 | 0 | } |
5255 | |
|
5256 | 0 | if (j < task->width - 4) { |
5257 | 0 | d[2 * j + 9] = y4; |
5258 | 0 | d[2 * j + 8] = u4; |
5259 | 0 | d[2 * j + 10] = v4; |
5260 | 0 | } |
5261 | |
|
5262 | 0 | if (j < task->width - 5) { |
5263 | 0 | d[2 * j + 11] = y5; |
5264 | 0 | } |
5265 | 0 | } |
5266 | 0 | } |
5267 | 0 | } |
5268 | | |
5269 | | static void |
5270 | | convert_v210_UYVY (GstVideoConverter * convert, const GstVideoFrame * src, |
5271 | | GstVideoFrame * dest) |
5272 | 0 | { |
5273 | 0 | gint width = convert->in_width; |
5274 | 0 | gint height = convert->in_height; |
5275 | 0 | guint8 *s, *d; |
5276 | 0 | FConvertPlaneTask *tasks; |
5277 | 0 | FConvertPlaneTask **tasks_p; |
5278 | 0 | gint n_threads; |
5279 | 0 | gint lines_per_thread; |
5280 | 0 | gint i; |
5281 | |
|
5282 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5283 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
5284 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
5285 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
5286 | |
|
5287 | 0 | n_threads = convert->conversion_runner->n_threads; |
5288 | 0 | tasks = convert->tasks[0] = |
5289 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5290 | 0 | tasks_p = convert->tasks_p[0] = |
5291 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5292 | |
|
5293 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5294 | |
|
5295 | 0 | for (i = 0; i < n_threads; i++) { |
5296 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5297 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5298 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5299 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5300 | |
|
5301 | 0 | tasks[i].width = width; |
5302 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5303 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5304 | 0 | tasks[i].height -= i * lines_per_thread; |
5305 | |
|
5306 | 0 | tasks_p[i] = &tasks[i]; |
5307 | 0 | } |
5308 | |
|
5309 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5310 | 0 | (GstParallelizedTaskFunc) convert_v210_UYVY_task, (gpointer) tasks_p); |
5311 | |
|
5312 | 0 | convert_fill_border (convert, dest); |
5313 | 0 | } |
5314 | | |
5315 | | static void |
5316 | | convert_v210_YUY2_task (FConvertPlaneTask * task) |
5317 | 0 | { |
5318 | 0 | gint i, j; |
5319 | 0 | guint8 *d; |
5320 | 0 | const guint8 *s; |
5321 | 0 | guint32 a0, a1, a2, a3; |
5322 | 0 | guint16 y0, y1, y2, y3, y4, y5; |
5323 | 0 | guint16 u0, u2, u4; |
5324 | 0 | guint16 v0, v2, v4; |
5325 | |
|
5326 | 0 | for (i = 0; i < task->height; i++) { |
5327 | 0 | d = task->d + i * task->dstride; |
5328 | 0 | s = task->s + i * task->sstride; |
5329 | |
|
5330 | 0 | for (j = 0; j < task->width; j += 6) { |
5331 | 0 | a0 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 0); |
5332 | 0 | a1 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 4); |
5333 | 0 | a2 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 8); |
5334 | 0 | a3 = GST_READ_UINT32_LE (s + (j / 6) * 16 + 12); |
5335 | |
|
5336 | 0 | u0 = ((a0 >> 0) & 0x3ff) >> 2; |
5337 | 0 | y0 = ((a0 >> 10) & 0x3ff) >> 2; |
5338 | 0 | v0 = ((a0 >> 20) & 0x3ff) >> 2; |
5339 | 0 | y1 = ((a1 >> 0) & 0x3ff) >> 2; |
5340 | |
|
5341 | 0 | u2 = ((a1 >> 10) & 0x3ff) >> 2; |
5342 | 0 | y2 = ((a1 >> 20) & 0x3ff) >> 2; |
5343 | 0 | v2 = ((a2 >> 0) & 0x3ff) >> 2; |
5344 | 0 | y3 = ((a2 >> 10) & 0x3ff) >> 2; |
5345 | |
|
5346 | 0 | u4 = ((a2 >> 20) & 0x3ff) >> 2; |
5347 | 0 | y4 = ((a3 >> 0) & 0x3ff) >> 2; |
5348 | 0 | v4 = ((a3 >> 10) & 0x3ff) >> 2; |
5349 | 0 | y5 = ((a3 >> 20) & 0x3ff) >> 2; |
5350 | |
|
5351 | 0 | d[2 * j] = y0; |
5352 | 0 | d[2 * j + 1] = u0; |
5353 | 0 | d[2 * j + 3] = v0; |
5354 | |
|
5355 | 0 | if (j < task->width - 1) { |
5356 | 0 | d[2 * j + 2] = y1; |
5357 | 0 | } |
5358 | |
|
5359 | 0 | if (j < task->width - 2) { |
5360 | 0 | d[2 * j + 4] = y2; |
5361 | 0 | d[2 * j + 5] = u2; |
5362 | 0 | d[2 * j + 7] = v2; |
5363 | 0 | } |
5364 | |
|
5365 | 0 | if (j < task->width - 3) { |
5366 | 0 | d[2 * j + 6] = y3; |
5367 | 0 | } |
5368 | |
|
5369 | 0 | if (j < task->width - 4) { |
5370 | 0 | d[2 * j + 8] = y4; |
5371 | 0 | d[2 * j + 9] = u4; |
5372 | 0 | d[2 * j + 11] = v4; |
5373 | 0 | } |
5374 | |
|
5375 | 0 | if (j < task->width - 5) { |
5376 | 0 | d[2 * j + 10] = y5; |
5377 | 0 | } |
5378 | 0 | } |
5379 | 0 | } |
5380 | 0 | } |
5381 | | |
5382 | | static void |
5383 | | convert_v210_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src, |
5384 | | GstVideoFrame * dest) |
5385 | 0 | { |
5386 | 0 | gint width = convert->in_width; |
5387 | 0 | gint height = convert->in_height; |
5388 | 0 | guint8 *s, *d; |
5389 | 0 | FConvertPlaneTask *tasks; |
5390 | 0 | FConvertPlaneTask **tasks_p; |
5391 | 0 | gint n_threads; |
5392 | 0 | gint lines_per_thread; |
5393 | 0 | gint i; |
5394 | |
|
5395 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5396 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
5397 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
5398 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
5399 | |
|
5400 | 0 | n_threads = convert->conversion_runner->n_threads; |
5401 | 0 | tasks = convert->tasks[0] = |
5402 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5403 | 0 | tasks_p = convert->tasks_p[0] = |
5404 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5405 | |
|
5406 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5407 | |
|
5408 | 0 | for (i = 0; i < n_threads; i++) { |
5409 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5410 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5411 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5412 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5413 | |
|
5414 | 0 | tasks[i].width = width; |
5415 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5416 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5417 | 0 | tasks[i].height -= i * lines_per_thread; |
5418 | |
|
5419 | 0 | tasks_p[i] = &tasks[i]; |
5420 | 0 | } |
5421 | |
|
5422 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5423 | 0 | (GstParallelizedTaskFunc) convert_v210_YUY2_task, (gpointer) tasks_p); |
5424 | |
|
5425 | 0 | convert_fill_border (convert, dest); |
5426 | 0 | } |
5427 | | |
5428 | | static void |
5429 | | convert_UYVY_Y42B_task (FConvertPlaneTask * task) |
5430 | 0 | { |
5431 | 0 | video_orc_convert_UYVY_Y42B (task->d, task->dstride, task->du, |
5432 | 0 | task->dustride, task->dv, task->dvstride, |
5433 | 0 | task->s, task->sstride, (task->width + 1) / 2, task->height); |
5434 | 0 | } |
5435 | | |
5436 | | static void |
5437 | | convert_UYVY_Y42B (GstVideoConverter * convert, const GstVideoFrame * src, |
5438 | | GstVideoFrame * dest) |
5439 | 0 | { |
5440 | 0 | gint width = convert->in_width; |
5441 | 0 | gint height = convert->in_height; |
5442 | 0 | guint8 *s, *dy, *du, *dv; |
5443 | 0 | FConvertPlaneTask *tasks; |
5444 | 0 | FConvertPlaneTask **tasks_p; |
5445 | 0 | gint n_threads; |
5446 | 0 | gint lines_per_thread; |
5447 | 0 | gint i; |
5448 | |
|
5449 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5450 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
5451 | |
|
5452 | 0 | dy = FRAME_GET_Y_LINE (dest, convert->out_y); |
5453 | 0 | dy += convert->out_x; |
5454 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y); |
5455 | 0 | du += convert->out_x >> 1; |
5456 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y); |
5457 | 0 | dv += convert->out_x >> 1; |
5458 | |
|
5459 | 0 | n_threads = convert->conversion_runner->n_threads; |
5460 | 0 | tasks = convert->tasks[0] = |
5461 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5462 | 0 | tasks_p = convert->tasks_p[0] = |
5463 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5464 | |
|
5465 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5466 | |
|
5467 | 0 | for (i = 0; i < n_threads; i++) { |
5468 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
5469 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
5470 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
5471 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5472 | 0 | tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride; |
5473 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride; |
5474 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride; |
5475 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5476 | |
|
5477 | 0 | tasks[i].width = width; |
5478 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5479 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5480 | 0 | tasks[i].height -= i * lines_per_thread; |
5481 | |
|
5482 | 0 | tasks_p[i] = &tasks[i]; |
5483 | 0 | } |
5484 | |
|
5485 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5486 | 0 | (GstParallelizedTaskFunc) convert_UYVY_Y42B_task, (gpointer) tasks_p); |
5487 | |
|
5488 | 0 | convert_fill_border (convert, dest); |
5489 | 0 | } |
5490 | | |
5491 | | static void |
5492 | | convert_UYVY_Y444_task (FConvertPlaneTask * task) |
5493 | 0 | { |
5494 | 0 | video_orc_convert_UYVY_Y444 (task->d, |
5495 | 0 | task->dstride, task->du, |
5496 | 0 | task->dustride, task->dv, |
5497 | 0 | task->dvstride, task->s, |
5498 | 0 | task->sstride, (task->width + 1) / 2, task->height); |
5499 | 0 | } |
5500 | | |
5501 | | static void |
5502 | | convert_UYVY_Y444 (GstVideoConverter * convert, const GstVideoFrame * src, |
5503 | | GstVideoFrame * dest) |
5504 | 0 | { |
5505 | 0 | gint width = convert->in_width; |
5506 | 0 | gint height = convert->in_height; |
5507 | 0 | guint8 *s, *dy, *du, *dv; |
5508 | 0 | FConvertPlaneTask *tasks; |
5509 | 0 | FConvertPlaneTask **tasks_p; |
5510 | 0 | gint n_threads; |
5511 | 0 | gint lines_per_thread; |
5512 | 0 | gint i; |
5513 | |
|
5514 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5515 | 0 | s += (GST_ROUND_UP_2 (convert->in_x) * 2); |
5516 | |
|
5517 | 0 | dy = FRAME_GET_Y_LINE (dest, convert->out_y); |
5518 | 0 | dy += convert->out_x; |
5519 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y); |
5520 | 0 | du += convert->out_x; |
5521 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y); |
5522 | 0 | dv += convert->out_x; |
5523 | |
|
5524 | 0 | n_threads = convert->conversion_runner->n_threads; |
5525 | 0 | tasks = convert->tasks[0] = |
5526 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5527 | 0 | tasks_p = convert->tasks_p[0] = |
5528 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5529 | |
|
5530 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5531 | |
|
5532 | 0 | for (i = 0; i < n_threads; i++) { |
5533 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
5534 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
5535 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
5536 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5537 | 0 | tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride; |
5538 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride; |
5539 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride; |
5540 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5541 | |
|
5542 | 0 | tasks[i].width = width; |
5543 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5544 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5545 | 0 | tasks[i].height -= i * lines_per_thread; |
5546 | |
|
5547 | 0 | tasks_p[i] = &tasks[i]; |
5548 | 0 | } |
5549 | |
|
5550 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5551 | 0 | (GstParallelizedTaskFunc) convert_UYVY_Y444_task, (gpointer) tasks_p); |
5552 | |
|
5553 | 0 | convert_fill_border (convert, dest); |
5554 | 0 | } |
5555 | | |
5556 | | static void |
5557 | | convert_UYVY_GRAY8_task (FConvertPlaneTask * task) |
5558 | 0 | { |
5559 | 0 | video_orc_convert_UYVY_GRAY8 (task->d, task->dstride, (guint16 *) task->s, |
5560 | 0 | task->sstride, task->width, task->height); |
5561 | 0 | } |
5562 | | |
5563 | | static void |
5564 | | convert_UYVY_GRAY8 (GstVideoConverter * convert, const GstVideoFrame * src, |
5565 | | GstVideoFrame * dest) |
5566 | 0 | { |
5567 | 0 | gint width = convert->in_width; |
5568 | 0 | gint height = convert->in_height; |
5569 | 0 | guint8 *s; |
5570 | 0 | guint8 *d; |
5571 | 0 | FConvertPlaneTask *tasks; |
5572 | 0 | FConvertPlaneTask **tasks_p; |
5573 | 0 | gint n_threads; |
5574 | 0 | gint lines_per_thread; |
5575 | 0 | gint i; |
5576 | |
|
5577 | 0 | s = GST_VIDEO_FRAME_PLANE_DATA (src, 0); |
5578 | 0 | d = GST_VIDEO_FRAME_PLANE_DATA (dest, 0); |
5579 | |
|
5580 | 0 | n_threads = convert->conversion_runner->n_threads; |
5581 | 0 | tasks = convert->tasks[0] = |
5582 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5583 | 0 | tasks_p = convert->tasks_p[0] = |
5584 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5585 | |
|
5586 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5587 | |
|
5588 | 0 | for (i = 0; i < n_threads; i++) { |
5589 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5590 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5591 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5592 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5593 | |
|
5594 | 0 | tasks[i].width = width; |
5595 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5596 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5597 | 0 | tasks[i].height -= i * lines_per_thread; |
5598 | |
|
5599 | 0 | tasks_p[i] = &tasks[i]; |
5600 | 0 | } |
5601 | |
|
5602 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5603 | 0 | (GstParallelizedTaskFunc) convert_UYVY_GRAY8_task, (gpointer) tasks_p); |
5604 | |
|
5605 | 0 | convert_fill_border (convert, dest); |
5606 | 0 | } |
5607 | | |
5608 | | static void |
5609 | | convert_AYUV_I420_task (FConvertPlaneTask * task) |
5610 | 0 | { |
5611 | 0 | video_orc_convert_AYUV_I420 (task->d, |
5612 | 0 | 2 * task->dstride, task->d2, |
5613 | 0 | 2 * task->dstride, task->du, |
5614 | 0 | task->dustride, task->dv, |
5615 | 0 | task->dvstride, task->s, |
5616 | 0 | 2 * task->sstride, task->s2, |
5617 | 0 | 2 * task->sstride, task->width / 2, task->height / 2); |
5618 | 0 | } |
5619 | | |
5620 | | static void |
5621 | | convert_AYUV_I420 (GstVideoConverter * convert, const GstVideoFrame * src, |
5622 | | GstVideoFrame * dest) |
5623 | 0 | { |
5624 | 0 | gint width = convert->in_width; |
5625 | 0 | gint height = convert->in_height; |
5626 | 0 | guint8 *s1, *s2, *dy1, *dy2, *du, *dv; |
5627 | 0 | FConvertPlaneTask *tasks; |
5628 | 0 | FConvertPlaneTask **tasks_p; |
5629 | 0 | gint n_threads; |
5630 | 0 | gint lines_per_thread; |
5631 | 0 | gint i; |
5632 | |
|
5633 | 0 | s1 = FRAME_GET_LINE (src, convert->in_y + 0); |
5634 | 0 | s1 += convert->in_x * 4; |
5635 | 0 | s2 = FRAME_GET_LINE (src, convert->in_y + 1); |
5636 | 0 | s2 += convert->in_x * 4; |
5637 | |
|
5638 | 0 | dy1 = FRAME_GET_Y_LINE (dest, convert->out_y + 0); |
5639 | 0 | dy1 += convert->out_x; |
5640 | 0 | dy2 = FRAME_GET_Y_LINE (dest, convert->out_y + 1); |
5641 | 0 | dy2 += convert->out_x; |
5642 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y >> 1); |
5643 | 0 | du += convert->out_x >> 1; |
5644 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y >> 1); |
5645 | 0 | dv += convert->out_x >> 1; |
5646 | | |
5647 | | /* only for even width/height */ |
5648 | |
|
5649 | 0 | n_threads = convert->conversion_runner->n_threads; |
5650 | 0 | tasks = convert->tasks[0] = |
5651 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5652 | 0 | tasks_p = convert->tasks_p[0] = |
5653 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5654 | |
|
5655 | 0 | lines_per_thread = GST_ROUND_UP_2 ((height + n_threads - 1) / n_threads); |
5656 | |
|
5657 | 0 | for (i = 0; i < n_threads; i++) { |
5658 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
5659 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
5660 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
5661 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5662 | 0 | tasks[i].d = dy1 + i * lines_per_thread * tasks[i].dstride; |
5663 | 0 | tasks[i].d2 = dy2 + i * lines_per_thread * tasks[i].dstride; |
5664 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride / 2; |
5665 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride / 2; |
5666 | 0 | tasks[i].s = s1 + i * lines_per_thread * tasks[i].sstride; |
5667 | 0 | tasks[i].s2 = s2 + i * lines_per_thread * tasks[i].sstride; |
5668 | |
|
5669 | 0 | tasks[i].width = width; |
5670 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5671 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5672 | 0 | tasks[i].height -= i * lines_per_thread; |
5673 | |
|
5674 | 0 | tasks_p[i] = &tasks[i]; |
5675 | 0 | } |
5676 | |
|
5677 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5678 | 0 | (GstParallelizedTaskFunc) convert_AYUV_I420_task, (gpointer) tasks_p); |
5679 | |
|
5680 | 0 | convert_fill_border (convert, dest); |
5681 | 0 | } |
5682 | | |
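Note: the "only for even width/height" remark in convert_AYUV_I420 reflects that the path consumes two AYUV rows per pass and produces one chroma sample per 2x2 block. A standalone sketch of that kind of 4:4:4 to 4:2:0 chroma reduction, assuming plain averaging (the video_orc_convert_AYUV_I420 kernel may round differently):

#include <stdint.h>
#include <stdio.h>

/* Average the U (or V) samples of a 2x2 block of 4:4:4 chroma down to a
 * single 4:2:0 sample. Illustration only; not the orc kernel's exact math. */
static uint8_t
avg_2x2 (uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
  return (uint8_t) ((a + b + c + d + 2) / 4);
}

int
main (void)
{
  printf ("%u\n", (unsigned) avg_2x2 (100, 102, 104, 106));   /* prints 103 */
  return 0;
}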
5683 | | static void |
5684 | | convert_AYUV_YUY2_task (FConvertPlaneTask * task) |
5685 | 0 | { |
5686 | 0 | video_orc_convert_AYUV_YUY2 (task->d, task->dstride, task->s, |
5687 | 0 | task->sstride, task->width / 2, task->height); |
5688 | 0 | } |
5689 | | |
5690 | | static void |
5691 | | convert_AYUV_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src, |
5692 | | GstVideoFrame * dest) |
5693 | 0 | { |
5694 | 0 | gint width = convert->in_width; |
5695 | 0 | gint height = convert->in_height; |
5696 | 0 | guint8 *s, *d; |
5697 | 0 | FConvertPlaneTask *tasks; |
5698 | 0 | FConvertPlaneTask **tasks_p; |
5699 | 0 | gint n_threads; |
5700 | 0 | gint lines_per_thread; |
5701 | 0 | gint i; |
5702 | |
|
5703 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5704 | 0 | s += convert->in_x * 4; |
5705 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
5706 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
5707 | | |
5708 | | /* only for even width */ |
5709 | 0 | n_threads = convert->conversion_runner->n_threads; |
5710 | 0 | tasks = convert->tasks[0] = |
5711 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5712 | 0 | tasks_p = convert->tasks_p[0] = |
5713 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5714 | |
|
5715 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5716 | |
|
5717 | 0 | for (i = 0; i < n_threads; i++) { |
5718 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5719 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5720 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5721 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5722 | |
|
5723 | 0 | tasks[i].width = width; |
5724 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5725 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5726 | 0 | tasks[i].height -= i * lines_per_thread; |
5727 | |
|
5728 | 0 | tasks_p[i] = &tasks[i]; |
5729 | 0 | } |
5730 | |
|
5731 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5732 | 0 | (GstParallelizedTaskFunc) convert_AYUV_YUY2_task, (gpointer) tasks_p); |
5733 | |
|
5734 | 0 | convert_fill_border (convert, dest); |
5735 | 0 | } |
5736 | | |
5737 | | static void |
5738 | | convert_AYUV_UYVY_task (FConvertPlaneTask * task) |
5739 | 0 | { |
5740 | 0 | video_orc_convert_AYUV_UYVY (task->d, task->dstride, task->s, |
5741 | 0 | task->sstride, task->width / 2, task->height); |
5742 | 0 | } |
5743 | | |
5744 | | static void |
5745 | | convert_AYUV_UYVY (GstVideoConverter * convert, const GstVideoFrame * src, |
5746 | | GstVideoFrame * dest) |
5747 | 0 | { |
5748 | 0 | gint width = convert->in_width; |
5749 | 0 | gint height = convert->in_height; |
5750 | 0 | guint8 *s, *d; |
5751 | 0 | FConvertPlaneTask *tasks; |
5752 | 0 | FConvertPlaneTask **tasks_p; |
5753 | 0 | gint n_threads; |
5754 | 0 | gint lines_per_thread; |
5755 | 0 | gint i; |
5756 | |
|
5757 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5758 | 0 | s += convert->in_x * 4; |
5759 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
5760 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
5761 | | |
5762 | | /* only for even width */ |
5763 | 0 | n_threads = convert->conversion_runner->n_threads; |
5764 | 0 | tasks = convert->tasks[0] = |
5765 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5766 | 0 | tasks_p = convert->tasks_p[0] = |
5767 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5768 | |
|
5769 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5770 | |
|
5771 | 0 | for (i = 0; i < n_threads; i++) { |
5772 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5773 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5774 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5775 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5776 | |
|
5777 | 0 | tasks[i].width = width; |
5778 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5779 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5780 | 0 | tasks[i].height -= i * lines_per_thread; |
5781 | |
|
5782 | 0 | tasks_p[i] = &tasks[i]; |
5783 | 0 | } |
5784 | |
|
5785 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5786 | 0 | (GstParallelizedTaskFunc) convert_AYUV_UYVY_task, (gpointer) tasks_p); |
5787 | |
|
5788 | 0 | convert_fill_border (convert, dest); |
5789 | 0 | } |
5790 | | |
5791 | | static void |
5792 | | convert_AYUV_Y42B_task (FConvertPlaneTask * task) |
5793 | 0 | { |
5794 | 0 | video_orc_convert_AYUV_Y42B (task->d, task->dstride, task->du, |
5795 | 0 | task->dustride, task->dv, task->dvstride, |
5796 | 0 | task->s, task->sstride, task->width / 2, task->height); |
5797 | 0 | } |
5798 | | |
5799 | | static void |
5800 | | convert_AYUV_Y42B (GstVideoConverter * convert, const GstVideoFrame * src, |
5801 | | GstVideoFrame * dest) |
5802 | 0 | { |
5803 | 0 | gint width = convert->in_width; |
5804 | 0 | gint height = convert->in_height; |
5805 | 0 | guint8 *s, *dy, *du, *dv; |
5806 | 0 | FConvertPlaneTask *tasks; |
5807 | 0 | FConvertPlaneTask **tasks_p; |
5808 | 0 | gint n_threads; |
5809 | 0 | gint lines_per_thread; |
5810 | 0 | gint i; |
5811 | |
|
5812 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5813 | 0 | s += convert->in_x * 4; |
5814 | |
|
5815 | 0 | dy = FRAME_GET_Y_LINE (dest, convert->out_y); |
5816 | 0 | dy += convert->out_x; |
5817 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y); |
5818 | 0 | du += convert->out_x >> 1; |
5819 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y); |
5820 | 0 | dv += convert->out_x >> 1; |
5821 | | |
5822 | | /* only works for even width */ |
5823 | 0 | n_threads = convert->conversion_runner->n_threads; |
5824 | 0 | tasks = convert->tasks[0] = |
5825 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5826 | 0 | tasks_p = convert->tasks_p[0] = |
5827 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5828 | |
|
5829 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5830 | |
|
5831 | 0 | for (i = 0; i < n_threads; i++) { |
5832 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
5833 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
5834 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
5835 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5836 | 0 | tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride; |
5837 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride; |
5838 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride; |
5839 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5840 | |
|
5841 | 0 | tasks[i].width = width; |
5842 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5843 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5844 | 0 | tasks[i].height -= i * lines_per_thread; |
5845 | |
|
5846 | 0 | tasks_p[i] = &tasks[i]; |
5847 | 0 | } |
5848 | |
|
5849 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5850 | 0 | (GstParallelizedTaskFunc) convert_AYUV_Y42B_task, (gpointer) tasks_p); |
5851 | |
|
5852 | 0 | convert_fill_border (convert, dest); |
5853 | 0 | } |
5854 | | |
5855 | | static void |
5856 | | convert_AYUV_Y444_task (FConvertPlaneTask * task) |
5857 | 0 | { |
5858 | 0 | video_orc_convert_AYUV_Y444 (task->d, task->dstride, task->du, |
5859 | 0 | task->dustride, task->dv, task->dvstride, |
5860 | 0 | task->s, task->sstride, task->width, task->height); |
5861 | 0 | } |
5862 | | |
5863 | | static void |
5864 | | convert_AYUV_Y444 (GstVideoConverter * convert, const GstVideoFrame * src, |
5865 | | GstVideoFrame * dest) |
5866 | 0 | { |
5867 | 0 | gint width = convert->in_width; |
5868 | 0 | gint height = convert->in_height; |
5869 | 0 | guint8 *s, *dy, *du, *dv; |
5870 | 0 | FConvertPlaneTask *tasks; |
5871 | 0 | FConvertPlaneTask **tasks_p; |
5872 | 0 | gint n_threads; |
5873 | 0 | gint lines_per_thread; |
5874 | 0 | gint i; |
5875 | |
|
5876 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
5877 | 0 | s += convert->in_x * 4; |
5878 | |
|
5879 | 0 | dy = FRAME_GET_Y_LINE (dest, convert->out_y); |
5880 | 0 | dy += convert->out_x; |
5881 | 0 | du = FRAME_GET_U_LINE (dest, convert->out_y); |
5882 | 0 | du += convert->out_x; |
5883 | 0 | dv = FRAME_GET_V_LINE (dest, convert->out_y); |
5884 | 0 | dv += convert->out_x; |
5885 | |
|
5886 | 0 | n_threads = convert->conversion_runner->n_threads; |
5887 | 0 | tasks = convert->tasks[0] = |
5888 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5889 | 0 | tasks_p = convert->tasks_p[0] = |
5890 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5891 | |
|
5892 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5893 | |
|
5894 | 0 | for (i = 0; i < n_threads; i++) { |
5895 | 0 | tasks[i].dstride = FRAME_GET_Y_STRIDE (dest); |
5896 | 0 | tasks[i].dustride = FRAME_GET_U_STRIDE (dest); |
5897 | 0 | tasks[i].dvstride = FRAME_GET_V_STRIDE (dest); |
5898 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
5899 | 0 | tasks[i].d = dy + i * lines_per_thread * tasks[i].dstride; |
5900 | 0 | tasks[i].du = du + i * lines_per_thread * tasks[i].dustride; |
5901 | 0 | tasks[i].dv = dv + i * lines_per_thread * tasks[i].dvstride; |
5902 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
5903 | |
|
5904 | 0 | tasks[i].width = width; |
5905 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5906 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5907 | 0 | tasks[i].height -= i * lines_per_thread; |
5908 | |
|
5909 | 0 | tasks_p[i] = &tasks[i]; |
5910 | 0 | } |
5911 | |
|
5912 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5913 | 0 | (GstParallelizedTaskFunc) convert_AYUV_Y444_task, (gpointer) tasks_p); |
5914 | 0 | convert_fill_border (convert, dest); |
5915 | 0 | } |
5916 | | |
5917 | | static void |
5918 | | convert_Y42B_YUY2_task (FConvertPlaneTask * task) |
5919 | 0 | { |
5920 | 0 | video_orc_convert_Y42B_YUY2 (task->d, task->dstride, |
5921 | 0 | task->s, task->sstride, |
5922 | 0 | task->su, task->sustride, |
5923 | 0 | task->sv, task->svstride, (task->width + 1) / 2, task->height); |
5924 | 0 | } |
5925 | | |
5926 | | static void |
5927 | | convert_Y42B_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src, |
5928 | | GstVideoFrame * dest) |
5929 | 0 | { |
5930 | 0 | gint width = convert->in_width; |
5931 | 0 | gint height = convert->in_height; |
5932 | 0 | guint8 *sy, *su, *sv, *d; |
5933 | 0 | FConvertPlaneTask *tasks; |
5934 | 0 | FConvertPlaneTask **tasks_p; |
5935 | 0 | gint n_threads; |
5936 | 0 | gint lines_per_thread; |
5937 | 0 | gint i; |
5938 | |
|
5939 | 0 | sy = FRAME_GET_Y_LINE (src, convert->in_y); |
5940 | 0 | sy += convert->in_x; |
5941 | 0 | su = FRAME_GET_U_LINE (src, convert->in_y); |
5942 | 0 | su += convert->in_x >> 1; |
5943 | 0 | sv = FRAME_GET_V_LINE (src, convert->in_y); |
5944 | 0 | sv += convert->in_x >> 1; |
5945 | |
|
5946 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
5947 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
5948 | |
|
5949 | 0 | n_threads = convert->conversion_runner->n_threads; |
5950 | 0 | tasks = convert->tasks[0] = |
5951 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
5952 | 0 | tasks_p = convert->tasks_p[0] = |
5953 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
5954 | |
|
5955 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
5956 | |
|
5957 | 0 | for (i = 0; i < n_threads; i++) { |
5958 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
5959 | 0 | tasks[i].sstride = FRAME_GET_Y_STRIDE (src); |
5960 | 0 | tasks[i].sustride = FRAME_GET_U_STRIDE (src); |
5961 | 0 | tasks[i].svstride = FRAME_GET_V_STRIDE (src); |
5962 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
5963 | 0 | tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride; |
5964 | 0 | tasks[i].su = su + i * lines_per_thread * tasks[i].sustride; |
5965 | 0 | tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride; |
5966 | |
|
5967 | 0 | tasks[i].width = width; |
5968 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
5969 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
5970 | 0 | tasks[i].height -= i * lines_per_thread; |
5971 | |
|
5972 | 0 | tasks_p[i] = &tasks[i]; |
5973 | 0 | } |
5974 | |
|
5975 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
5976 | 0 | (GstParallelizedTaskFunc) convert_Y42B_YUY2_task, (gpointer) tasks_p); |
5977 | |
|
5978 | 0 | convert_fill_border (convert, dest); |
5979 | 0 | } |
5980 | | |
5981 | | static void |
5982 | | convert_Y42B_UYVY_task (FConvertPlaneTask * task) |
5983 | 0 | { |
5984 | 0 | video_orc_convert_Y42B_UYVY (task->d, task->dstride, |
5985 | 0 | task->s, task->sstride, |
5986 | 0 | task->su, task->sustride, |
5987 | 0 | task->sv, task->svstride, (task->width + 1) / 2, task->height); |
5988 | 0 | } |
5989 | | |
5990 | | static void |
5991 | | convert_Y42B_UYVY (GstVideoConverter * convert, const GstVideoFrame * src, |
5992 | | GstVideoFrame * dest) |
5993 | 0 | { |
5994 | 0 | gint width = convert->in_width; |
5995 | 0 | gint height = convert->in_height; |
5996 | 0 | guint8 *sy, *su, *sv, *d; |
5997 | 0 | FConvertPlaneTask *tasks; |
5998 | 0 | FConvertPlaneTask **tasks_p; |
5999 | 0 | gint n_threads; |
6000 | 0 | gint lines_per_thread; |
6001 | 0 | gint i; |
6002 | |
|
6003 | 0 | sy = FRAME_GET_Y_LINE (src, convert->in_y); |
6004 | 0 | sy += convert->in_x; |
6005 | 0 | su = FRAME_GET_U_LINE (src, convert->in_y); |
6006 | 0 | su += convert->in_x >> 1; |
6007 | 0 | sv = FRAME_GET_V_LINE (src, convert->in_y); |
6008 | 0 | sv += convert->in_x >> 1; |
6009 | |
|
6010 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6011 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
6012 | |
|
6013 | 0 | n_threads = convert->conversion_runner->n_threads; |
6014 | 0 | tasks = convert->tasks[0] = |
6015 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6016 | 0 | tasks_p = convert->tasks_p[0] = |
6017 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6018 | |
|
6019 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6020 | |
|
6021 | 0 | for (i = 0; i < n_threads; i++) { |
6022 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6023 | 0 | tasks[i].sstride = FRAME_GET_Y_STRIDE (src); |
6024 | 0 | tasks[i].sustride = FRAME_GET_U_STRIDE (src); |
6025 | 0 | tasks[i].svstride = FRAME_GET_V_STRIDE (src); |
6026 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6027 | 0 | tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride; |
6028 | 0 | tasks[i].su = su + i * lines_per_thread * tasks[i].sustride; |
6029 | 0 | tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride; |
6030 | |
|
6031 | 0 | tasks[i].width = width; |
6032 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6033 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6034 | 0 | tasks[i].height -= i * lines_per_thread; |
6035 | |
|
6036 | 0 | tasks_p[i] = &tasks[i]; |
6037 | 0 | } |
6038 | |
|
6039 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6040 | 0 | (GstParallelizedTaskFunc) convert_Y42B_UYVY_task, (gpointer) tasks_p); |
6041 | |
|
6042 | 0 | convert_fill_border (convert, dest); |
6043 | 0 | } |
6044 | | |
6045 | | static void |
6046 | | convert_Y42B_AYUV_task (FConvertPlaneTask * task) |
6047 | 0 | { |
6048 | 0 | video_orc_convert_Y42B_AYUV (task->d, task->dstride, task->s, |
6049 | 0 | task->sstride, |
6050 | 0 | task->su, |
6051 | 0 | task->sustride, |
6052 | 0 | task->sv, task->svstride, task->alpha, task->width / 2, task->height); |
6053 | 0 | } |
6054 | | |
6055 | | static void |
6056 | | convert_Y42B_AYUV (GstVideoConverter * convert, const GstVideoFrame * src, |
6057 | | GstVideoFrame * dest) |
6058 | 0 | { |
6059 | 0 | gint width = convert->in_width; |
6060 | 0 | gint height = convert->in_height; |
6061 | 0 | guint8 *sy, *su, *sv, *d; |
6062 | 0 | guint8 alpha = MIN (convert->alpha_value, 255); |
6063 | 0 | FConvertPlaneTask *tasks; |
6064 | 0 | FConvertPlaneTask **tasks_p; |
6065 | 0 | gint n_threads; |
6066 | 0 | gint lines_per_thread; |
6067 | 0 | gint i; |
6068 | |
|
6069 | 0 | sy = FRAME_GET_Y_LINE (src, convert->in_y); |
6070 | 0 | sy += convert->in_x; |
6071 | 0 | su = FRAME_GET_U_LINE (src, convert->in_y); |
6072 | 0 | su += convert->in_x >> 1; |
6073 | 0 | sv = FRAME_GET_V_LINE (src, convert->in_y); |
6074 | 0 | sv += convert->in_x >> 1; |
6075 | |
|
6076 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6077 | 0 | d += convert->out_x * 4; |
6078 | | |
6079 | |   /* only even widths are handled: the kernel processes 2 pixels per iteration */
6080 | 0 | n_threads = convert->conversion_runner->n_threads; |
6081 | 0 | tasks = convert->tasks[0] = |
6082 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6083 | 0 | tasks_p = convert->tasks_p[0] = |
6084 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6085 | |
|
6086 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6087 | |
|
6088 | 0 | for (i = 0; i < n_threads; i++) { |
6089 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6090 | 0 | tasks[i].sstride = FRAME_GET_Y_STRIDE (src); |
6091 | 0 | tasks[i].sustride = FRAME_GET_U_STRIDE (src); |
6092 | 0 | tasks[i].svstride = FRAME_GET_V_STRIDE (src); |
6093 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6094 | 0 | tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride; |
6095 | 0 | tasks[i].su = su + i * lines_per_thread * tasks[i].sustride; |
6096 | 0 | tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride; |
6097 | |
|
6098 | 0 | tasks[i].width = width; |
6099 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6100 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6101 | 0 | tasks[i].height -= i * lines_per_thread; |
6102 | 0 | tasks[i].alpha = alpha; |
6103 | |
|
6104 | 0 | tasks_p[i] = &tasks[i]; |
6105 | 0 | } |
6106 | |
|
6107 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6108 | 0 | (GstParallelizedTaskFunc) convert_Y42B_AYUV_task, (gpointer) tasks_p); |
6109 | |
|
6110 | 0 | convert_fill_border (convert, dest); |
6111 | 0 | } |
6112 | | |
6113 | | static void |
6114 | | convert_Y42B_v210_task (FConvertPlaneTask * task) |
6115 | 0 | { |
6116 | 0 | gint i, j; |
6117 | 0 | guint8 *d; |
6118 | 0 | const guint8 *s_y, *s_u, *s_v; |
6119 | 0 | guint32 a0, a1, a2, a3; |
6120 | 0 | guint8 y0, y1, y2, y3, y4, y5; |
6121 | 0 | guint8 u0, u2, u4; |
6122 | 0 | guint8 v0, v2, v4; |
6123 | |
|
6124 | 0 | for (i = 0; i < task->height; i++) { |
6125 | 0 | d = task->d + i * task->dstride; |
6126 | 0 | s_y = task->s + i * task->sstride; |
6127 | 0 | s_u = task->su + i * task->sustride; |
6128 | 0 | s_v = task->sv + i * task->svstride; |
6129 | |
|
6130 | 0 | for (j = 0; j < task->width; j += 6) { |
6131 | 0 | y1 = y2 = y3 = y4 = y5 = 0; |
6132 | 0 | u2 = u4 = v2 = v4 = 0; |
6133 | |
|
6134 | 0 | y0 = s_y[j]; |
6135 | 0 | u0 = s_u[j / 2]; |
6136 | 0 | v0 = s_v[j / 2]; |
6137 | |
|
6138 | 0 | if (j < task->width - 1) { |
6139 | 0 | y1 = s_y[j + 1]; |
6140 | 0 | } |
6141 | |
|
6142 | 0 | if (j < task->width - 2) { |
6143 | 0 | y2 = s_y[j + 2]; |
6144 | 0 | u2 = s_u[j / 2 + 1]; |
6145 | 0 | v2 = s_v[j / 2 + 1]; |
6146 | 0 | } |
6147 | |
|
6148 | 0 | if (j < task->width - 3) { |
6149 | 0 | y3 = s_y[j + 3]; |
6150 | 0 | } |
6151 | |
|
6152 | 0 | if (j < task->width - 4) { |
6153 | 0 | y4 = s_y[j + 4]; |
6154 | 0 | u4 = s_u[j / 2 + 2]; |
6155 | 0 | v4 = s_v[j / 2 + 2]; |
6156 | 0 | } |
6157 | |
|
6158 | 0 | if (j < task->width - 5) { |
6159 | 0 | y5 = s_y[j + 5]; |
6160 | 0 | } |
6161 | |
|
6162 | 0 | a0 = u0 << 2 | (y0 << 12) | (v0 << 22); |
6163 | 0 | a1 = y1 << 2 | (u2 << 12) | (y2 << 22); |
6164 | 0 | a2 = v2 << 2 | (y3 << 12) | (u4 << 22); |
6165 | 0 | a3 = y4 << 2 | (v4 << 12) | (y5 << 22); |
6166 | |
|
6167 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 0, a0); |
6168 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 4, a1); |
6169 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 8, a2); |
6170 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 12, a3); |
6171 | 0 | } |
6172 | 0 | } |
6173 | 0 | } |
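/* Illustrative note on the packing above: v210 stores 6 pixels in 16 bytes as
 * four little-endian 32-bit words, each holding three 10-bit fields at bit
 * offsets 0, 10 and 20:
 *
 *   a0: (u0, y0, v0)   a1: (y1, u2, y2)   a2: (v2, y3, u4)   a3: (y4, v4, y5)
 *
 * The 8-bit Y42B samples are shifted left by 2 (hence the << 2 / << 12 / << 22
 * above) so they occupy the top of each 10-bit field; samples past the right
 * edge of a partial group are left at 0. */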
6174 | | |
6175 | | static void |
6176 | | convert_Y42B_v210 (GstVideoConverter * convert, const GstVideoFrame * src, |
6177 | | GstVideoFrame * dest) |
6178 | 0 | { |
6179 | 0 | gint width = convert->in_width; |
6180 | 0 | gint height = convert->in_height; |
6181 | 0 | guint8 *d, *sy, *su, *sv; |
6182 | 0 | FConvertPlaneTask *tasks; |
6183 | 0 | FConvertPlaneTask **tasks_p; |
6184 | 0 | gint n_threads; |
6185 | 0 | gint lines_per_thread; |
6186 | 0 | gint i; |
6187 | |
|
6188 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6189 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
6190 | |
|
6191 | 0 | sy = FRAME_GET_Y_LINE (src, convert->in_y); |
6192 | 0 | sy += convert->in_x; |
6193 | 0 | su = FRAME_GET_U_LINE (src, convert->in_y); |
6194 | 0 | su += convert->in_x >> 1; |
6195 | 0 | sv = FRAME_GET_V_LINE (src, convert->in_y); |
6196 | 0 | sv += convert->in_x >> 1; |
6197 | |
|
6198 | 0 | n_threads = convert->conversion_runner->n_threads; |
6199 | 0 | tasks = convert->tasks[0] = |
6200 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6201 | 0 | tasks_p = convert->tasks_p[0] = |
6202 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6203 | |
|
6204 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6205 | |
|
6206 | 0 | for (i = 0; i < n_threads; i++) { |
6207 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6208 | 0 | tasks[i].sstride = FRAME_GET_Y_STRIDE (src); |
6209 | 0 | tasks[i].sustride = FRAME_GET_U_STRIDE (src); |
6210 | 0 | tasks[i].svstride = FRAME_GET_V_STRIDE (src); |
6211 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6212 | 0 | tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride; |
6213 | 0 | tasks[i].su = su + i * lines_per_thread * tasks[i].sustride; |
6214 | 0 | tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride; |
6215 | |
|
6216 | 0 | tasks[i].width = width; |
6217 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6218 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6219 | 0 | tasks[i].height -= i * lines_per_thread; |
6220 | |
|
6221 | 0 | tasks_p[i] = &tasks[i]; |
6222 | 0 | } |
6223 | |
|
6224 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6225 | 0 | (GstParallelizedTaskFunc) convert_Y42B_v210_task, (gpointer) tasks_p); |
6226 | |
|
6227 | 0 | convert_fill_border (convert, dest); |
6228 | 0 | } |
6229 | | |
6230 | | static void |
6231 | | convert_I422_10_v210_task (FConvertPlaneTask * task) |
6232 | 0 | { |
6233 | 0 | gint i, j; |
6234 | 0 | guint8 *d; |
6235 | 0 | const guint16 *s_y, *s_u, *s_v; |
6236 | 0 | guint32 a0, a1, a2, a3; |
6237 | 0 | guint16 y0, y1, y2, y3, y4, y5; |
6238 | 0 | guint16 u0, u2, u4; |
6239 | 0 | guint16 v0, v2, v4; |
6240 | |
|
6241 | 0 | for (i = 0; i < task->height; i++) { |
6242 | 0 | d = task->d + i * task->dstride; |
6243 | 0 | s_y = (const guint16 *) (task->s + i * task->sstride); |
6244 | 0 | s_u = (const guint16 *) (task->su + i * task->sustride); |
6245 | 0 | s_v = (const guint16 *) (task->sv + i * task->svstride); |
6246 | |
|
6247 | 0 | for (j = 0; j < task->width; j += 6) { |
6248 | 0 | y1 = y2 = y3 = y4 = y5 = 0; |
6249 | 0 | u2 = u4 = v2 = v4 = 0; |
6250 | |
|
6251 | 0 | y0 = s_y[j]; |
6252 | 0 | u0 = s_u[j / 2]; |
6253 | 0 | v0 = s_v[j / 2]; |
6254 | |
|
6255 | 0 | if (j < task->width - 1) { |
6256 | 0 | y1 = s_y[j + 1]; |
6257 | 0 | } |
6258 | |
|
6259 | 0 | if (j < task->width - 2) { |
6260 | 0 | y2 = s_y[j + 2]; |
6261 | 0 | u2 = s_u[j / 2 + 1]; |
6262 | 0 | v2 = s_v[j / 2 + 1]; |
6263 | 0 | } |
6264 | |
|
6265 | 0 | if (j < task->width - 3) { |
6266 | 0 | y3 = s_y[j + 3]; |
6267 | 0 | } |
6268 | |
|
6269 | 0 | if (j < task->width - 4) { |
6270 | 0 | y4 = s_y[j + 4]; |
6271 | 0 | u4 = s_u[j / 2 + 2]; |
6272 | 0 | v4 = s_v[j / 2 + 2]; |
6273 | 0 | } |
6274 | |
|
6275 | 0 | if (j < task->width - 5) { |
6276 | 0 | y5 = s_y[j + 5]; |
6277 | 0 | } |
6278 | |
|
6279 | 0 | a0 = u0 | (y0 << 10) | (v0 << 20); |
6280 | 0 | a1 = y1 | (u2 << 10) | (y2 << 20); |
6281 | 0 | a2 = v2 | (y3 << 10) | (u4 << 20); |
6282 | 0 | a3 = y4 | (v4 << 10) | (y5 << 20); |
6283 | |
|
6284 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 0, a0); |
6285 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 4, a1); |
6286 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 8, a2); |
6287 | 0 | GST_WRITE_UINT32_LE (d + (j / 6) * 16 + 12, a3); |
6288 | 0 | } |
6289 | 0 | } |
6290 | 0 | } |
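/* Same v210 packing as the Y42B path above, but the samples are read as
 * 16-bit words that are assumed to already hold 10-bit values, so they are
 * placed into the 10-bit fields without any shift. */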
6291 | | |
6292 | | static void |
6293 | | convert_I422_10_v210 (GstVideoConverter * convert, const GstVideoFrame * src, |
6294 | | GstVideoFrame * dest) |
6295 | 0 | { |
6296 | 0 | gint width = convert->in_width; |
6297 | 0 | gint height = convert->in_height; |
6298 | 0 | guint8 *d, *sy, *su, *sv; |
6299 | 0 | FConvertPlaneTask *tasks; |
6300 | 0 | FConvertPlaneTask **tasks_p; |
6301 | 0 | gint n_threads; |
6302 | 0 | gint lines_per_thread; |
6303 | 0 | gint i; |
6304 | |
|
6305 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6306 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
6307 | |
|
6308 | 0 | sy = FRAME_GET_Y_LINE (src, convert->in_y); |
6309 | 0 | sy += convert->in_x; |
6310 | 0 | su = FRAME_GET_U_LINE (src, convert->in_y); |
6311 | 0 | su += convert->in_x >> 1; |
6312 | 0 | sv = FRAME_GET_V_LINE (src, convert->in_y); |
6313 | 0 | sv += convert->in_x >> 1; |
6314 | |
|
6315 | 0 | n_threads = convert->conversion_runner->n_threads; |
6316 | 0 | tasks = convert->tasks[0] = |
6317 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6318 | 0 | tasks_p = convert->tasks_p[0] = |
6319 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6320 | |
|
6321 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6322 | |
|
6323 | 0 | for (i = 0; i < n_threads; i++) { |
6324 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6325 | 0 | tasks[i].sstride = FRAME_GET_Y_STRIDE (src); |
6326 | 0 | tasks[i].sustride = FRAME_GET_U_STRIDE (src); |
6327 | 0 | tasks[i].svstride = FRAME_GET_V_STRIDE (src); |
6328 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6329 | 0 | tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride; |
6330 | 0 | tasks[i].su = su + i * lines_per_thread * tasks[i].sustride; |
6331 | 0 | tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride; |
6332 | |
|
6333 | 0 | tasks[i].width = width; |
6334 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6335 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6336 | 0 | tasks[i].height -= i * lines_per_thread; |
6337 | |
|
6338 | 0 | tasks_p[i] = &tasks[i]; |
6339 | 0 | } |
6340 | |
|
6341 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6342 | 0 | (GstParallelizedTaskFunc) convert_I422_10_v210_task, (gpointer) tasks_p); |
6343 | |
|
6344 | 0 | convert_fill_border (convert, dest); |
6345 | 0 | } |
6346 | | |
6347 | | static void |
6348 | | convert_Y444_YUY2_task (FConvertPlaneTask * task) |
6349 | 0 | { |
6350 | 0 | video_orc_convert_Y444_YUY2 (task->d, task->dstride, task->s, |
6351 | 0 | task->sstride, |
6352 | 0 | task->su, |
6353 | 0 | task->sustride, task->sv, task->svstride, task->width / 2, task->height); |
6354 | 0 | } |
6355 | | |
6356 | | static void |
6357 | | convert_Y444_YUY2 (GstVideoConverter * convert, const GstVideoFrame * src, |
6358 | | GstVideoFrame * dest) |
6359 | 0 | { |
6360 | 0 | gint width = convert->in_width; |
6361 | 0 | gint height = convert->in_height; |
6362 | 0 | guint8 *sy, *su, *sv, *d; |
6363 | 0 | FConvertPlaneTask *tasks; |
6364 | 0 | FConvertPlaneTask **tasks_p; |
6365 | 0 | gint n_threads; |
6366 | 0 | gint lines_per_thread; |
6367 | 0 | gint i; |
6368 | |
|
6369 | 0 | sy = FRAME_GET_Y_LINE (src, convert->in_y); |
6370 | 0 | sy += convert->in_x; |
6371 | 0 | su = FRAME_GET_U_LINE (src, convert->in_y); |
6372 | 0 | su += convert->in_x; |
6373 | 0 | sv = FRAME_GET_V_LINE (src, convert->in_y); |
6374 | 0 | sv += convert->in_x; |
6375 | |
|
6376 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6377 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
6378 | |
|
6379 | 0 | n_threads = convert->conversion_runner->n_threads; |
6380 | 0 | tasks = convert->tasks[0] = |
6381 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6382 | 0 | tasks_p = convert->tasks_p[0] = |
6383 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6384 | |
|
6385 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6386 | |
|
6387 | 0 | for (i = 0; i < n_threads; i++) { |
6388 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6389 | 0 | tasks[i].sstride = FRAME_GET_Y_STRIDE (src); |
6390 | 0 | tasks[i].sustride = FRAME_GET_U_STRIDE (src); |
6391 | 0 | tasks[i].svstride = FRAME_GET_V_STRIDE (src); |
6392 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6393 | 0 | tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride; |
6394 | 0 | tasks[i].su = su + i * lines_per_thread * tasks[i].sustride; |
6395 | 0 | tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride; |
6396 | |
|
6397 | 0 | tasks[i].width = width; |
6398 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6399 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6400 | 0 | tasks[i].height -= i * lines_per_thread; |
6401 | |
|
6402 | 0 | tasks_p[i] = &tasks[i]; |
6403 | 0 | } |
6404 | |
|
6405 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6406 | 0 | (GstParallelizedTaskFunc) convert_Y444_YUY2_task, (gpointer) tasks_p); |
6407 | |
|
6408 | 0 | convert_fill_border (convert, dest); |
6409 | 0 | } |
6410 | | |
6411 | | static void |
6412 | | convert_Y444_UYVY_task (FConvertPlaneTask * task) |
6413 | 0 | { |
6414 | 0 | video_orc_convert_Y444_UYVY (task->d, task->dstride, task->s, |
6415 | 0 | task->sstride, |
6416 | 0 | task->su, |
6417 | 0 | task->sustride, task->sv, task->svstride, task->width / 2, task->height); |
6418 | 0 | } |
6419 | | |
6420 | | static void |
6421 | | convert_Y444_UYVY (GstVideoConverter * convert, const GstVideoFrame * src, |
6422 | | GstVideoFrame * dest) |
6423 | 0 | { |
6424 | 0 | gint width = convert->in_width; |
6425 | 0 | gint height = convert->in_height; |
6426 | 0 | guint8 *sy, *su, *sv, *d; |
6427 | 0 | FConvertPlaneTask *tasks; |
6428 | 0 | FConvertPlaneTask **tasks_p; |
6429 | 0 | gint n_threads; |
6430 | 0 | gint lines_per_thread; |
6431 | 0 | gint i; |
6432 | |
|
6433 | 0 | sy = FRAME_GET_Y_LINE (src, convert->in_y); |
6434 | 0 | sy += convert->in_x; |
6435 | 0 | su = FRAME_GET_U_LINE (src, convert->in_y); |
6436 | 0 | su += convert->in_x; |
6437 | 0 | sv = FRAME_GET_V_LINE (src, convert->in_y); |
6438 | 0 | sv += convert->in_x; |
6439 | |
|
6440 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6441 | 0 | d += (GST_ROUND_UP_2 (convert->out_x) * 2); |
6442 | |
|
6443 | 0 | n_threads = convert->conversion_runner->n_threads; |
6444 | 0 | tasks = convert->tasks[0] = |
6445 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6446 | 0 | tasks_p = convert->tasks_p[0] = |
6447 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6448 | |
|
6449 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6450 | |
|
6451 | 0 | for (i = 0; i < n_threads; i++) { |
6452 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6453 | 0 | tasks[i].sstride = FRAME_GET_Y_STRIDE (src); |
6454 | 0 | tasks[i].sustride = FRAME_GET_U_STRIDE (src); |
6455 | 0 | tasks[i].svstride = FRAME_GET_V_STRIDE (src); |
6456 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6457 | 0 | tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride; |
6458 | 0 | tasks[i].su = su + i * lines_per_thread * tasks[i].sustride; |
6459 | 0 | tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride; |
6460 | |
|
6461 | 0 | tasks[i].width = width; |
6462 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6463 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6464 | 0 | tasks[i].height -= i * lines_per_thread; |
6465 | |
|
6466 | 0 | tasks_p[i] = &tasks[i]; |
6467 | 0 | } |
6468 | |
|
6469 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6470 | 0 | (GstParallelizedTaskFunc) convert_Y444_UYVY_task, (gpointer) tasks_p); |
6471 | |
|
6472 | 0 | convert_fill_border (convert, dest); |
6473 | 0 | } |
6474 | | |
6475 | | static void |
6476 | | convert_Y444_AYUV_task (FConvertPlaneTask * task) |
6477 | 0 | { |
6478 | 0 | video_orc_convert_Y444_AYUV (task->d, task->dstride, task->s, |
6479 | 0 | task->sstride, |
6480 | 0 | task->su, |
6481 | 0 | task->sustride, |
6482 | 0 | task->sv, task->svstride, task->alpha, task->width, task->height); |
6483 | 0 | } |
6484 | | |
6485 | | static void |
6486 | | convert_Y444_AYUV (GstVideoConverter * convert, const GstVideoFrame * src, |
6487 | | GstVideoFrame * dest) |
6488 | 0 | { |
6489 | 0 | gint width = convert->in_width; |
6490 | 0 | gint height = convert->in_height; |
6491 | 0 | guint8 *sy, *su, *sv, *d; |
6492 | 0 | guint8 alpha = MIN (convert->alpha_value, 255); |
6493 | 0 | FConvertPlaneTask *tasks; |
6494 | 0 | FConvertPlaneTask **tasks_p; |
6495 | 0 | gint n_threads; |
6496 | 0 | gint lines_per_thread; |
6497 | 0 | gint i; |
6498 | |
|
6499 | 0 | sy = FRAME_GET_Y_LINE (src, convert->in_y); |
6500 | 0 | sy += convert->in_x; |
6501 | 0 | su = FRAME_GET_U_LINE (src, convert->in_y); |
6502 | 0 | su += convert->in_x; |
6503 | 0 | sv = FRAME_GET_V_LINE (src, convert->in_y); |
6504 | 0 | sv += convert->in_x; |
6505 | |
|
6506 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6507 | 0 | d += convert->out_x * 4; |
6508 | |
|
6509 | 0 | n_threads = convert->conversion_runner->n_threads; |
6510 | 0 | tasks = convert->tasks[0] = |
6511 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6512 | 0 | tasks_p = convert->tasks_p[0] = |
6513 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6514 | |
|
6515 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6516 | |
|
6517 | 0 | for (i = 0; i < n_threads; i++) { |
6518 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6519 | 0 | tasks[i].sstride = FRAME_GET_Y_STRIDE (src); |
6520 | 0 | tasks[i].sustride = FRAME_GET_U_STRIDE (src); |
6521 | 0 | tasks[i].svstride = FRAME_GET_V_STRIDE (src); |
6522 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6523 | 0 | tasks[i].s = sy + i * lines_per_thread * tasks[i].sstride; |
6524 | 0 | tasks[i].su = su + i * lines_per_thread * tasks[i].sustride; |
6525 | 0 | tasks[i].sv = sv + i * lines_per_thread * tasks[i].svstride; |
6526 | |
|
6527 | 0 | tasks[i].width = width; |
6528 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6529 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6530 | 0 | tasks[i].height -= i * lines_per_thread; |
6531 | 0 | tasks[i].alpha = alpha; |
6532 | |
|
6533 | 0 | tasks_p[i] = &tasks[i]; |
6534 | 0 | } |
6535 | |
|
6536 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6537 | 0 | (GstParallelizedTaskFunc) convert_Y444_AYUV_task, (gpointer) tasks_p); |
6538 | |
|
6539 | 0 | convert_fill_border (convert, dest); |
6540 | 0 | } |
6541 | | |
6542 | | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
6543 | | static void |
6544 | | convert_AYUV_ARGB_task (FConvertPlaneTask * task) |
6545 | 0 | { |
6546 | 0 | video_orc_convert_AYUV_ARGB (task->d, task->dstride, task->s, |
6547 | 0 | task->sstride, task->data->im[0][0], task->data->im[0][2], |
6548 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6549 | 0 | task->width, task->height); |
6550 | 0 | } |
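/* Note: the im[][] values passed to the ORC kernel here (and in the BGRA,
 * ABGR and RGBA variants below) appear to be the fixed-point Y'CbCr->R'G'B'
 * coefficients prepared in convert->convert_matrix; only the five
 * non-trivial matrix entries are needed by the kernel. */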
6551 | | |
6552 | | static void |
6553 | | convert_AYUV_ARGB (GstVideoConverter * convert, const GstVideoFrame * src, |
6554 | | GstVideoFrame * dest) |
6555 | 0 | { |
6556 | 0 | gint width = convert->in_width; |
6557 | 0 | gint height = convert->in_height; |
6558 | 0 | MatrixData *data = &convert->convert_matrix; |
6559 | 0 | guint8 *s, *d; |
6560 | 0 | FConvertPlaneTask *tasks; |
6561 | 0 | FConvertPlaneTask **tasks_p; |
6562 | 0 | gint n_threads; |
6563 | 0 | gint lines_per_thread; |
6564 | 0 | gint i; |
6565 | |
|
6566 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
6567 | 0 | s += (convert->in_x * 4); |
6568 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6569 | 0 | d += (convert->out_x * 4); |
6570 | |
|
6571 | 0 | n_threads = convert->conversion_runner->n_threads; |
6572 | 0 | tasks = convert->tasks[0] = |
6573 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6574 | 0 | tasks_p = convert->tasks_p[0] = |
6575 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6576 | |
|
6577 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6578 | |
|
6579 | 0 | for (i = 0; i < n_threads; i++) { |
6580 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6581 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
6582 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6583 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
6584 | |
|
6585 | 0 | tasks[i].width = width; |
6586 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6587 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6588 | 0 | tasks[i].height -= i * lines_per_thread; |
6589 | 0 | tasks[i].data = data; |
6590 | |
|
6591 | 0 | tasks_p[i] = &tasks[i]; |
6592 | 0 | } |
6593 | |
|
6594 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6595 | 0 | (GstParallelizedTaskFunc) convert_AYUV_ARGB_task, (gpointer) tasks_p); |
6596 | |
|
6597 | 0 | convert_fill_border (convert, dest); |
6598 | 0 | } |
6599 | | |
6600 | | static void |
6601 | | convert_AYUV_BGRA_task (FConvertPlaneTask * task) |
6602 | 0 | { |
6603 | 0 | video_orc_convert_AYUV_BGRA (task->d, task->dstride, task->s, |
6604 | 0 | task->sstride, task->data->im[0][0], task->data->im[0][2], |
6605 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6606 | 0 | task->width, task->height); |
6607 | 0 | } |
6608 | | |
6609 | | static void |
6610 | | convert_AYUV_BGRA (GstVideoConverter * convert, const GstVideoFrame * src, |
6611 | | GstVideoFrame * dest) |
6612 | 0 | { |
6613 | 0 | gint width = convert->in_width; |
6614 | 0 | gint height = convert->in_height; |
6615 | 0 | MatrixData *data = &convert->convert_matrix; |
6616 | 0 | guint8 *s, *d; |
6617 | 0 | FConvertPlaneTask *tasks; |
6618 | 0 | FConvertPlaneTask **tasks_p; |
6619 | 0 | gint n_threads; |
6620 | 0 | gint lines_per_thread; |
6621 | 0 | gint i; |
6622 | |
|
6623 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
6624 | 0 | s += (convert->in_x * 4); |
6625 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6626 | 0 | d += (convert->out_x * 4); |
6627 | |
|
6628 | 0 | n_threads = convert->conversion_runner->n_threads; |
6629 | 0 | tasks = convert->tasks[0] = |
6630 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6631 | 0 | tasks_p = convert->tasks_p[0] = |
6632 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6633 | |
|
6634 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6635 | |
|
6636 | 0 | for (i = 0; i < n_threads; i++) { |
6637 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6638 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
6639 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6640 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
6641 | |
|
6642 | 0 | tasks[i].width = width; |
6643 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6644 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6645 | 0 | tasks[i].height -= i * lines_per_thread; |
6646 | 0 | tasks[i].data = data; |
6647 | |
|
6648 | 0 | tasks_p[i] = &tasks[i]; |
6649 | 0 | } |
6650 | |
|
6651 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6652 | 0 | (GstParallelizedTaskFunc) convert_AYUV_BGRA_task, (gpointer) tasks_p); |
6653 | |
|
6654 | 0 | convert_fill_border (convert, dest); |
6655 | 0 | } |
6656 | | |
6657 | | static void |
6658 | | convert_AYUV_ABGR_task (FConvertPlaneTask * task) |
6659 | 0 | { |
6660 | 0 | video_orc_convert_AYUV_ABGR (task->d, task->dstride, task->s, |
6661 | 0 | task->sstride, task->data->im[0][0], task->data->im[0][2], |
6662 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6663 | 0 | task->width, task->height); |
6664 | 0 | } |
6665 | | |
6666 | | static void |
6667 | | convert_AYUV_ABGR (GstVideoConverter * convert, const GstVideoFrame * src, |
6668 | | GstVideoFrame * dest) |
6669 | 0 | { |
6670 | 0 | gint width = convert->in_width; |
6671 | 0 | gint height = convert->in_height; |
6672 | 0 | MatrixData *data = &convert->convert_matrix; |
6673 | 0 | guint8 *s, *d; |
6674 | 0 | FConvertPlaneTask *tasks; |
6675 | 0 | FConvertPlaneTask **tasks_p; |
6676 | 0 | gint n_threads; |
6677 | 0 | gint lines_per_thread; |
6678 | 0 | gint i; |
6679 | |
|
6680 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
6681 | 0 | s += (convert->in_x * 4); |
6682 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6683 | 0 | d += (convert->out_x * 4); |
6684 | |
|
6685 | 0 | n_threads = convert->conversion_runner->n_threads; |
6686 | 0 | tasks = convert->tasks[0] = |
6687 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6688 | 0 | tasks_p = convert->tasks_p[0] = |
6689 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6690 | |
|
6691 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6692 | |
|
6693 | 0 | for (i = 0; i < n_threads; i++) { |
6694 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6695 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
6696 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6697 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
6698 | |
|
6699 | 0 | tasks[i].width = width; |
6700 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6701 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6702 | 0 | tasks[i].height -= i * lines_per_thread; |
6703 | 0 | tasks[i].data = data; |
6704 | |
|
6705 | 0 | tasks_p[i] = &tasks[i]; |
6706 | 0 | } |
6707 | |
|
6708 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6709 | 0 | (GstParallelizedTaskFunc) convert_AYUV_ABGR_task, (gpointer) tasks_p); |
6710 | |
|
6711 | 0 | convert_fill_border (convert, dest); |
6712 | 0 | } |
6713 | | |
6714 | | static void |
6715 | | convert_AYUV_RGBA_task (FConvertPlaneTask * task) |
6716 | 0 | { |
6717 | 0 | video_orc_convert_AYUV_RGBA (task->d, task->dstride, task->s, |
6718 | 0 | task->sstride, task->data->im[0][0], task->data->im[0][2], |
6719 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6720 | 0 | task->width, task->height); |
6721 | 0 | } |
6722 | | |
6723 | | static void |
6724 | | convert_AYUV_RGBA (GstVideoConverter * convert, const GstVideoFrame * src, |
6725 | | GstVideoFrame * dest) |
6726 | 0 | { |
6727 | 0 | gint width = convert->in_width; |
6728 | 0 | gint height = convert->in_height; |
6729 | 0 | MatrixData *data = &convert->convert_matrix; |
6730 | 0 | guint8 *s, *d; |
6731 | 0 | FConvertPlaneTask *tasks; |
6732 | 0 | FConvertPlaneTask **tasks_p; |
6733 | 0 | gint n_threads; |
6734 | 0 | gint lines_per_thread; |
6735 | 0 | gint i; |
6736 | |
|
6737 | 0 | s = FRAME_GET_LINE (src, convert->in_y); |
6738 | 0 | s += (convert->in_x * 4); |
6739 | 0 | d = FRAME_GET_LINE (dest, convert->out_y); |
6740 | 0 | d += (convert->out_x * 4); |
6741 | |
|
6742 | 0 | n_threads = convert->conversion_runner->n_threads; |
6743 | 0 | tasks = convert->tasks[0] = |
6744 | 0 | g_renew (FConvertPlaneTask, convert->tasks[0], n_threads); |
6745 | 0 | tasks_p = convert->tasks_p[0] = |
6746 | 0 | g_renew (FConvertPlaneTask *, convert->tasks_p[0], n_threads); |
6747 | |
|
6748 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6749 | |
|
6750 | 0 | for (i = 0; i < n_threads; i++) { |
6751 | 0 | tasks[i].dstride = FRAME_GET_STRIDE (dest); |
6752 | 0 | tasks[i].sstride = FRAME_GET_STRIDE (src); |
6753 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
6754 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
6755 | |
|
6756 | 0 | tasks[i].width = width; |
6757 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
6758 | 0 | tasks[i].height = MIN (tasks[i].height, height); |
6759 | 0 | tasks[i].height -= i * lines_per_thread; |
6760 | 0 | tasks[i].data = data; |
6761 | |
|
6762 | 0 | tasks_p[i] = &tasks[i]; |
6763 | 0 | } |
6764 | |
|
6765 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6766 | 0 | (GstParallelizedTaskFunc) convert_AYUV_RGBA_task, (gpointer) tasks_p); |
6767 | |
|
6768 | 0 | convert_fill_border (convert, dest); |
6769 | 0 | } |
6770 | | #endif |
6771 | | |
6772 | | static void |
6773 | | convert_I420_BGRA_task (FConvertTask * task) |
6774 | 0 | { |
6775 | 0 | gint i; |
6776 | |
|
6777 | 0 | for (i = task->height_0; i < task->height_1; i++) { |
6778 | 0 | guint8 *sy, *su, *sv, *d; |
6779 | |
|
6780 | 0 | d = FRAME_GET_LINE (task->dest, i + task->out_y); |
6781 | 0 | d += (task->out_x * 4); |
6782 | 0 | sy = FRAME_GET_Y_LINE (task->src, i + task->in_y); |
6783 | 0 | sy += task->in_x; |
6784 | 0 | su = FRAME_GET_U_LINE (task->src, (i + task->in_y) >> 1); |
6785 | 0 | su += (task->in_x >> 1); |
6786 | 0 | sv = FRAME_GET_V_LINE (task->src, (i + task->in_y) >> 1); |
6787 | 0 | sv += (task->in_x >> 1); |
6788 | |
|
6789 | 0 | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
6790 | 0 | video_orc_convert_I420_BGRA (d, sy, su, sv, |
6791 | 0 | task->data->im[0][0], task->data->im[0][2], |
6792 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6793 | 0 | task->width); |
6794 | | #else |
6795 | | video_orc_convert_I420_ARGB (d, sy, su, sv, |
6796 | | task->data->im[0][0], task->data->im[0][2], |
6797 | | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6798 | | task->width); |
6799 | | #endif |
6800 | 0 | } |
6801 | 0 | } |
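/* Note: the byte-order #if above presumably reflects that the ORC kernels
 * assemble each output pixel as a 32-bit word, so the opposite-named kernel
 * has to be picked on big-endian hosts for the bytes to land in memory in
 * BGRA order either way. */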
6802 | | |
6803 | | static void |
6804 | | convert_I420_BGRA (GstVideoConverter * convert, const GstVideoFrame * src, |
6805 | | GstVideoFrame * dest) |
6806 | 0 | { |
6807 | 0 | int i; |
6808 | 0 | gint width = convert->in_width; |
6809 | 0 | gint height = convert->in_height; |
6810 | 0 | MatrixData *data = &convert->convert_matrix; |
6811 | 0 | FConvertTask *tasks; |
6812 | 0 | FConvertTask **tasks_p; |
6813 | 0 | gint n_threads; |
6814 | 0 | gint lines_per_thread; |
6815 | |
|
6816 | 0 | n_threads = convert->conversion_runner->n_threads; |
6817 | 0 | tasks = convert->tasks[0] = |
6818 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
6819 | 0 | tasks_p = convert->tasks_p[0] = |
6820 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
6821 | |
|
6822 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6823 | |
|
6824 | 0 | for (i = 0; i < n_threads; i++) { |
6825 | 0 | tasks[i].src = src; |
6826 | 0 | tasks[i].dest = dest; |
6827 | |
|
6828 | 0 | tasks[i].width = width; |
6829 | 0 | tasks[i].data = data; |
6830 | 0 | tasks[i].in_x = convert->in_x; |
6831 | 0 | tasks[i].in_y = convert->in_y; |
6832 | 0 | tasks[i].out_x = convert->out_x; |
6833 | 0 | tasks[i].out_y = convert->out_y; |
6834 | |
|
6835 | 0 | tasks[i].height_0 = i * lines_per_thread; |
6836 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
6837 | 0 | tasks[i].height_1 = MIN (height, tasks[i].height_1); |
6838 | |
|
6839 | 0 | tasks_p[i] = &tasks[i]; |
6840 | 0 | } |
6841 | |
|
6842 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6843 | 0 | (GstParallelizedTaskFunc) convert_I420_BGRA_task, (gpointer) tasks_p); |
6844 | |
|
6845 | 0 | convert_fill_border (convert, dest); |
6846 | 0 | } |
6847 | | |
6848 | | static void |
6849 | | convert_I420_ARGB_task (FConvertTask * task) |
6850 | 0 | { |
6851 | 0 | gint i; |
6852 | |
|
6853 | 0 | for (i = task->height_0; i < task->height_1; i++) { |
6854 | 0 | guint8 *sy, *su, *sv, *d; |
6855 | |
|
6856 | 0 | d = FRAME_GET_LINE (task->dest, i + task->out_y); |
6857 | 0 | d += (task->out_x * 4); |
6858 | 0 | sy = FRAME_GET_Y_LINE (task->src, i + task->in_y); |
6859 | 0 | sy += task->in_x; |
6860 | 0 | su = FRAME_GET_U_LINE (task->src, (i + task->in_y) >> 1); |
6861 | 0 | su += (task->in_x >> 1); |
6862 | 0 | sv = FRAME_GET_V_LINE (task->src, (i + task->in_y) >> 1); |
6863 | 0 | sv += (task->in_x >> 1); |
6864 | |
|
6865 | 0 | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
6866 | 0 | video_orc_convert_I420_ARGB (d, sy, su, sv, |
6867 | 0 | task->data->im[0][0], task->data->im[0][2], |
6868 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6869 | 0 | task->width); |
6870 | | #else |
6871 | | video_orc_convert_I420_BGRA (d, sy, su, sv, |
6872 | | task->data->im[0][0], task->data->im[0][2], |
6873 | | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6874 | | task->width); |
6875 | | #endif |
6876 | 0 | } |
6877 | 0 | } |
6878 | | |
6879 | | static void |
6880 | | convert_I420_ARGB (GstVideoConverter * convert, const GstVideoFrame * src, |
6881 | | GstVideoFrame * dest) |
6882 | 0 | { |
6883 | 0 | int i; |
6884 | 0 | gint width = convert->in_width; |
6885 | 0 | gint height = convert->in_height; |
6886 | 0 | MatrixData *data = &convert->convert_matrix; |
6887 | 0 | FConvertTask *tasks; |
6888 | 0 | FConvertTask **tasks_p; |
6889 | 0 | gint n_threads; |
6890 | 0 | gint lines_per_thread; |
6891 | |
|
6892 | 0 | n_threads = convert->conversion_runner->n_threads; |
6893 | 0 | tasks = convert->tasks[0] = |
6894 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
6895 | 0 | tasks_p = convert->tasks_p[0] = |
6896 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
6897 | |
|
6898 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6899 | |
|
6900 | 0 | for (i = 0; i < n_threads; i++) { |
6901 | 0 | tasks[i].src = src; |
6902 | 0 | tasks[i].dest = dest; |
6903 | |
|
6904 | 0 | tasks[i].width = width; |
6905 | 0 | tasks[i].data = data; |
6906 | 0 | tasks[i].in_x = convert->in_x; |
6907 | 0 | tasks[i].in_y = convert->in_y; |
6908 | 0 | tasks[i].out_x = convert->out_x; |
6909 | 0 | tasks[i].out_y = convert->out_y; |
6910 | |
|
6911 | 0 | tasks[i].height_0 = i * lines_per_thread; |
6912 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
6913 | 0 | tasks[i].height_1 = MIN (height, tasks[i].height_1); |
6914 | |
|
6915 | 0 | tasks_p[i] = &tasks[i]; |
6916 | 0 | } |
6917 | |
|
6918 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
6919 | 0 | (GstParallelizedTaskFunc) convert_I420_ARGB_task, (gpointer) tasks_p); |
6920 | |
|
6921 | 0 | convert_fill_border (convert, dest); |
6922 | 0 | } |
6923 | | |
6924 | | static void |
6925 | | convert_I420_pack_ARGB_task (FConvertTask * task) |
6926 | 0 | { |
6927 | 0 | gint i; |
6928 | 0 | gpointer d[GST_VIDEO_MAX_PLANES]; |
6929 | |
|
6930 | 0 | d[0] = FRAME_GET_LINE (task->dest, 0); |
6931 | 0 | d[0] = |
6932 | 0 | (guint8 *) d[0] + |
6933 | 0 | task->out_x * GST_VIDEO_FORMAT_INFO_PSTRIDE (task->dest->info.finfo, 0); |
6934 | |
|
6935 | 0 | for (i = task->height_0; i < task->height_1; i++) { |
6936 | 0 | guint8 *sy, *su, *sv; |
6937 | |
|
6938 | 0 | sy = FRAME_GET_Y_LINE (task->src, i + task->in_y); |
6939 | 0 | sy += task->in_x; |
6940 | 0 | su = FRAME_GET_U_LINE (task->src, (i + task->in_y) >> 1); |
6941 | 0 | su += (task->in_x >> 1); |
6942 | 0 | sv = FRAME_GET_V_LINE (task->src, (i + task->in_y) >> 1); |
6943 | 0 | sv += (task->in_x >> 1); |
6944 | |
|
6945 | 0 | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
6946 | 0 | video_orc_convert_I420_ARGB (task->tmpline, sy, su, sv, |
6947 | 0 | task->data->im[0][0], task->data->im[0][2], |
6948 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6949 | 0 | task->width); |
6950 | | #else |
6951 | | video_orc_convert_I420_BGRA (task->tmpline, sy, su, sv, |
6952 | | task->data->im[0][0], task->data->im[0][2], |
6953 | | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
6954 | | task->width); |
6955 | | #endif |
6956 | 0 | task->dest->info.finfo->pack_func (task->dest->info.finfo, |
6957 | 0 | (GST_VIDEO_FRAME_IS_INTERLACED (task->dest) ? |
6958 | 0 | GST_VIDEO_PACK_FLAG_INTERLACED : |
6959 | 0 | GST_VIDEO_PACK_FLAG_NONE), |
6960 | 0 | task->tmpline, 0, d, task->dest->info.stride, |
6961 | 0 | task->dest->info.chroma_site, i + task->out_y, task->width); |
6962 | 0 | } |
6963 | 0 | } |
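/* Note: the pack task above renders each line into task->tmpline using the
 * I420->ARGB kernel (the #if apparently picks whichever kernel yields the
 * same byte layout on either endianness) and then hands that line to the
 * destination format's pack_func, so any packed RGB output can be produced
 * from I420 without a dedicated per-format ORC kernel. */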
6964 | | |
6965 | | static void |
6966 | | convert_I420_pack_ARGB (GstVideoConverter * convert, const GstVideoFrame * src, |
6967 | | GstVideoFrame * dest) |
6968 | 0 | { |
6969 | 0 | int i; |
6970 | 0 | gint width = convert->in_width; |
6971 | 0 | gint height = convert->in_height; |
6972 | 0 | MatrixData *data = &convert->convert_matrix; |
6973 | 0 | FConvertTask *tasks; |
6974 | 0 | FConvertTask **tasks_p; |
6975 | 0 | gint n_threads; |
6976 | 0 | gint lines_per_thread; |
6977 | |
|
6978 | 0 | n_threads = convert->conversion_runner->n_threads; |
6979 | 0 | tasks = convert->tasks[0] = |
6980 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
6981 | 0 | tasks_p = convert->tasks_p[0] = |
6982 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
6983 | |
|
6984 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
6985 | |
|
6986 | 0 | for (i = 0; i < n_threads; i++) { |
6987 | 0 | tasks[i].src = src; |
6988 | 0 | tasks[i].dest = dest; |
6989 | |
|
6990 | 0 | tasks[i].width = width; |
6991 | 0 | tasks[i].data = data; |
6992 | 0 | tasks[i].in_x = convert->in_x; |
6993 | 0 | tasks[i].in_y = convert->in_y; |
6994 | 0 | tasks[i].out_x = convert->out_x; |
6995 | 0 | tasks[i].out_y = convert->out_y; |
6996 | 0 | tasks[i].tmpline = convert->tmpline[i]; |
6997 | |
|
6998 | 0 | tasks[i].height_0 = i * lines_per_thread; |
6999 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
7000 | 0 | tasks[i].height_1 = MIN (height, tasks[i].height_1); |
7001 | |
|
7002 | 0 | tasks_p[i] = &tasks[i]; |
7003 | 0 | } |
7004 | |
|
7005 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7006 | 0 | (GstParallelizedTaskFunc) convert_I420_pack_ARGB_task, |
7007 | 0 | (gpointer) tasks_p); |
7008 | |
|
7009 | 0 | convert_fill_border (convert, dest); |
7010 | 0 | } |
7011 | | |
7012 | | static void |
7013 | | convert_A420_pack_ARGB_task (FConvertTask * task) |
7014 | 0 | { |
7015 | 0 | gint i; |
7016 | 0 | gpointer d[GST_VIDEO_MAX_PLANES]; |
7017 | |
|
7018 | 0 | d[0] = FRAME_GET_LINE (task->dest, 0); |
7019 | 0 | d[0] = |
7020 | 0 | (guint8 *) d[0] + |
7021 | 0 | task->out_x * GST_VIDEO_FORMAT_INFO_PSTRIDE (task->dest->info.finfo, 0); |
7022 | |
|
7023 | 0 | for (i = task->height_0; i < task->height_1; i++) { |
7024 | 0 | guint8 *sy, *su, *sv, *sa; |
7025 | |
|
7026 | 0 | sy = FRAME_GET_Y_LINE (task->src, i + task->in_y); |
7027 | 0 | sy += task->in_x; |
7028 | 0 | su = FRAME_GET_U_LINE (task->src, (i + task->in_y) >> 1); |
7029 | 0 | su += (task->in_x >> 1); |
7030 | 0 | sv = FRAME_GET_V_LINE (task->src, (i + task->in_y) >> 1); |
7031 | 0 | sv += (task->in_x >> 1); |
7032 | 0 | sa = FRAME_GET_A_LINE (task->src, i + task->in_y); |
7033 | 0 | sa += task->in_x; |
7034 | |
|
7035 | 0 | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
7036 | 0 | video_orc_convert_A420_ARGB (task->tmpline, sy, su, sv, sa, |
7037 | 0 | task->data->im[0][0], task->data->im[0][2], |
7038 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
7039 | 0 | task->width); |
7040 | | #else |
7041 | | video_orc_convert_A420_BGRA (task->tmpline, sy, su, sv, sa, |
7042 | | task->data->im[0][0], task->data->im[0][2], |
7043 | | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
7044 | | task->width); |
7045 | | #endif |
7046 | |
|
7047 | 0 | task->dest->info.finfo->pack_func (task->dest->info.finfo, |
7048 | 0 | (GST_VIDEO_FRAME_IS_INTERLACED (task->dest) ? |
7049 | 0 | GST_VIDEO_PACK_FLAG_INTERLACED : |
7050 | 0 | GST_VIDEO_PACK_FLAG_NONE), |
7051 | 0 | task->tmpline, 0, d, task->dest->info.stride, |
7052 | 0 | task->dest->info.chroma_site, i + task->out_y, task->width); |
7053 | 0 | } |
7054 | 0 | } |
7055 | | |
7056 | | static void |
7057 | | convert_A420_pack_ARGB (GstVideoConverter * convert, const GstVideoFrame * src, |
7058 | | GstVideoFrame * dest) |
7059 | 0 | { |
7060 | 0 | int i; |
7061 | 0 | gint width = convert->in_width; |
7062 | 0 | gint height = convert->in_height; |
7063 | 0 | MatrixData *data = &convert->convert_matrix; |
7064 | 0 | FConvertTask *tasks; |
7065 | 0 | FConvertTask **tasks_p; |
7066 | 0 | gint n_threads; |
7067 | 0 | gint lines_per_thread; |
7068 | |
|
7069 | 0 | n_threads = convert->conversion_runner->n_threads; |
7070 | 0 | tasks = convert->tasks[0] = |
7071 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
7072 | 0 | tasks_p = convert->tasks_p[0] = |
7073 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
7074 | |
|
7075 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
7076 | |
|
7077 | 0 | for (i = 0; i < n_threads; i++) { |
7078 | 0 | tasks[i].src = src; |
7079 | 0 | tasks[i].dest = dest; |
7080 | |
|
7081 | 0 | tasks[i].width = width; |
7082 | 0 | tasks[i].data = data; |
7083 | 0 | tasks[i].in_x = convert->in_x; |
7084 | 0 | tasks[i].in_y = convert->in_y; |
7085 | 0 | tasks[i].out_x = convert->out_x; |
7086 | 0 | tasks[i].out_y = convert->out_y; |
7087 | 0 | tasks[i].tmpline = convert->tmpline[i]; |
7088 | |
|
7089 | 0 | tasks[i].height_0 = i * lines_per_thread; |
7090 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
7091 | 0 | tasks[i].height_1 = MIN (height, tasks[i].height_1); |
7092 | |
|
7093 | 0 | tasks_p[i] = &tasks[i]; |
7094 | 0 | } |
7095 | |
|
7096 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7097 | 0 | (GstParallelizedTaskFunc) convert_A420_pack_ARGB_task, |
7098 | 0 | (gpointer) tasks_p); |
7099 | |
|
7100 | 0 | convert_fill_border (convert, dest); |
7101 | 0 | } |
7102 | | |
7103 | | static void |
7104 | | convert_A420_BGRA_task (FConvertTask * task) |
7105 | 0 | { |
7106 | 0 | gint i; |
7107 | |
|
7108 | 0 | for (i = task->height_0; i < task->height_1; i++) { |
7109 | 0 | guint8 *sy, *su, *sv, *sa, *d; |
7110 | |
|
7111 | 0 | d = FRAME_GET_LINE (task->dest, i + task->out_y); |
7112 | 0 | d += (task->out_x * 4); |
7113 | 0 | sy = FRAME_GET_Y_LINE (task->src, i + task->in_y); |
7114 | 0 | sy += task->in_x; |
7115 | 0 | su = FRAME_GET_U_LINE (task->src, (i + task->in_y) >> 1); |
7116 | 0 | su += (task->in_x >> 1); |
7117 | 0 | sv = FRAME_GET_V_LINE (task->src, (i + task->in_y) >> 1); |
7118 | 0 | sv += (task->in_x >> 1); |
7119 | 0 | sa = FRAME_GET_A_LINE (task->src, i + task->in_y); |
7120 | 0 | sa += task->in_x; |
7121 | |
|
7122 | 0 | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
7123 | 0 | video_orc_convert_A420_BGRA (d, sy, su, sv, sa, |
7124 | 0 | task->data->im[0][0], task->data->im[0][2], |
7125 | 0 | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
7126 | 0 | task->width); |
7127 | | #else |
7128 | | video_orc_convert_A420_ARGB (d, sy, su, sv, sa, |
7129 | | task->data->im[0][0], task->data->im[0][2], |
7130 | | task->data->im[2][1], task->data->im[1][1], task->data->im[1][2], |
7131 | | task->width); |
7132 | | #endif |
7133 | 0 | } |
7134 | 0 | } |
7135 | | |
7136 | | static void |
7137 | | convert_A420_BGRA (GstVideoConverter * convert, const GstVideoFrame * src, |
7138 | | GstVideoFrame * dest) |
7139 | 0 | { |
7140 | 0 | int i; |
7141 | 0 | gint width = convert->in_width; |
7142 | 0 | gint height = convert->in_height; |
7143 | 0 | MatrixData *data = &convert->convert_matrix; |
7144 | 0 | FConvertTask *tasks; |
7145 | 0 | FConvertTask **tasks_p; |
7146 | 0 | gint n_threads; |
7147 | 0 | gint lines_per_thread; |
7148 | |
|
7149 | 0 | n_threads = convert->conversion_runner->n_threads; |
7150 | 0 | tasks = convert->tasks[0] = |
7151 | 0 | g_renew (FConvertTask, convert->tasks[0], n_threads); |
7152 | 0 | tasks_p = convert->tasks_p[0] = |
7153 | 0 | g_renew (FConvertTask *, convert->tasks_p[0], n_threads); |
7154 | |
|
7155 | 0 | lines_per_thread = (height + n_threads - 1) / n_threads; |
7156 | |
|
7157 | 0 | for (i = 0; i < n_threads; i++) { |
7158 | 0 | tasks[i].src = src; |
7159 | 0 | tasks[i].dest = dest; |
7160 | |
|
7161 | 0 | tasks[i].width = width; |
7162 | 0 | tasks[i].data = data; |
7163 | 0 | tasks[i].in_x = convert->in_x; |
7164 | 0 | tasks[i].in_y = convert->in_y; |
7165 | 0 | tasks[i].out_x = convert->out_x; |
7166 | 0 | tasks[i].out_y = convert->out_y; |
7167 | |
|
7168 | 0 | tasks[i].height_0 = i * lines_per_thread; |
7169 | 0 | tasks[i].height_1 = tasks[i].height_0 + lines_per_thread; |
7170 | 0 | tasks[i].height_1 = MIN (height, tasks[i].height_1); |
7171 | |
|
7172 | 0 | tasks_p[i] = &tasks[i]; |
7173 | 0 | } |
7174 | |
|
7175 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7176 | 0 | (GstParallelizedTaskFunc) convert_A420_BGRA_task, (gpointer) tasks_p); |
7177 | |
|
7178 | 0 | convert_fill_border (convert, dest); |
7179 | 0 | } |
7180 | | |
7181 | | static void |
7182 | | memset_u24 (guint8 * data, guint8 col[3], unsigned int n) |
7183 | 0 | { |
7184 | 0 | unsigned int i; |
7185 | |
|
7186 | 0 | for (i = 0; i < n; i++) { |
7187 | 0 | data[0] = col[0]; |
7188 | 0 | data[1] = col[1]; |
7189 | 0 | data[2] = col[2]; |
7190 | 0 | data += 3; |
7191 | 0 | } |
7192 | 0 | } |
7193 | | |
7194 | | static void |
7195 | | memset_u32_16 (guint8 * data, guint8 col[4], unsigned int n) |
7196 | 0 | { |
7197 | 0 | unsigned int i; |
7198 | |
|
7199 | 0 | for (i = 0; i < n; i += 2) { |
7200 | 0 | data[0] = col[0]; |
7201 | 0 | data[1] = col[1]; |
7202 | 0 | if (i + 1 < n) { |
7203 | 0 | data[2] = col[2]; |
7204 | 0 | data[3] = col[3]; |
7205 | 0 | } |
7206 | 0 | data += 4; |
7207 | 0 | } |
7208 | 0 | } |
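/* Note: memset_u24 and memset_u32_16 above fill border pixels that the plain
 * memset / video_orc_splat_* helpers cannot handle: memset_u24 writes a
 * 3-byte (24-bit packed) colour per pixel, and memset_u32_16 writes a 4-byte
 * group covering two pixels, truncating to the first 2 bytes when an odd
 * trailing pixel remains. */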
7209 | | |
7210 | | #define MAKE_BORDER_FUNC(func) \ |
7211 | 0 | for (i = 0; i < out_y; i++) \ |
7212 | 0 | func (FRAME_GET_PLANE_LINE (dest, k, i), col, out_maxwidth); \ |
7213 | 0 | if (rb_width || lb_width) { \ |
7214 | 0 | for (i = 0; i < out_height; i++) { \ |
7215 | 0 | guint8 *d = FRAME_GET_PLANE_LINE (dest, k, i + out_y); \ |
7216 | 0 | if (lb_width) \ |
7217 | 0 | func (d, col, lb_width); \ |
7218 | 0 | if (rb_width) \ |
7219 | 0 | func (d + (pstride * r_border), col, rb_width); \ |
7220 | 0 | } \ |
7221 | 0 | } \ |
7222 | 0 | for (i = out_y + out_height; i < out_maxheight; i++) \ |
7223 | 0 | func (FRAME_GET_PLANE_LINE (dest, k, i), col, out_maxwidth); \ |
7224 | | |
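/* Note: MAKE_BORDER_FUNC expands to three fills with the given splat
 * function: the rows above the output rectangle, the left and right columns
 * next to it, and the rows below it, all painted with the precomputed border
 * colour of the current plane. */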
7225 | | static void |
7226 | | convert_fill_border (GstVideoConverter * convert, GstVideoFrame * dest) |
7227 | 0 | { |
7228 | 0 | int k, n_planes; |
7229 | 0 | const GstVideoFormatInfo *out_finfo; |
7230 | |
|
7231 | 0 | if (!convert->fill_border || !convert->borderline) |
7232 | 0 | return; |
7233 | | |
7234 | 0 | out_finfo = convert->out_info.finfo; |
7235 | |
|
7236 | 0 | n_planes = GST_VIDEO_FRAME_N_PLANES (dest); |
7237 | |
|
7238 | 0 | for (k = 0; k < n_planes; k++) { |
7239 | 0 | gint comp[GST_VIDEO_MAX_COMPONENTS]; |
7240 | 0 | gint i, out_x, out_y, out_width, out_height, pstride, pgroup; |
7241 | 0 | gint r_border, lb_width, rb_width; |
7242 | 0 | gint out_maxwidth, out_maxheight; |
7243 | 0 | gpointer borders; |
7244 | |
|
7245 | 0 | gst_video_format_info_component (out_finfo, k, comp); |
7246 | 0 | out_x = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, comp[0], |
7247 | 0 | convert->out_x); |
7248 | 0 | out_y = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, comp[0], |
7249 | 0 | convert->out_y); |
7250 | 0 | out_width = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, comp[0], |
7251 | 0 | convert->out_width); |
7252 | 0 | out_height = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, comp[0], |
7253 | 0 | convert->out_height); |
7254 | 0 | out_maxwidth = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, comp[0], |
7255 | 0 | convert->out_maxwidth); |
7256 | 0 | out_maxheight = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, comp[0], |
7257 | 0 | convert->out_maxheight); |
7258 | |
|
7259 | 0 | pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (out_finfo, comp[0]); |
7260 | |
|
7261 | 0 | switch (GST_VIDEO_FORMAT_INFO_FORMAT (out_finfo)) { |
7262 | 0 | case GST_VIDEO_FORMAT_YUY2: |
7263 | 0 | case GST_VIDEO_FORMAT_YVYU: |
7264 | 0 | case GST_VIDEO_FORMAT_UYVY: |
7265 | 0 | pgroup = 42; |
7266 | 0 | out_maxwidth = GST_ROUND_UP_2 (out_maxwidth); |
7267 | 0 | break; |
7268 | 0 | default: |
7269 | 0 | pgroup = pstride; |
7270 | 0 | break; |
7271 | 0 | } |
7272 | | |
7273 | 0 | r_border = out_x + out_width; |
7274 | 0 | rb_width = out_maxwidth - r_border; |
7275 | 0 | lb_width = out_x; |
7276 | |
|
7277 | 0 | borders = &convert->borders[k]; |
7278 | |
|
7279 | 0 | switch (pgroup) { |
7280 | 0 | case 1: |
7281 | 0 | { |
7282 | 0 | guint8 col = ((guint8 *) borders)[0]; |
7283 | 0 | MAKE_BORDER_FUNC (memset); |
7284 | 0 | break; |
7285 | 0 | } |
7286 | 0 | case 2: |
7287 | 0 | { |
7288 | 0 | guint16 col = ((guint16 *) borders)[0]; |
7289 | 0 | MAKE_BORDER_FUNC (video_orc_splat_u16); |
7290 | 0 | break; |
7291 | 0 | } |
7292 | 0 | case 3: |
7293 | 0 | { |
7294 | 0 | guint8 col[3]; |
7295 | 0 | col[0] = ((guint8 *) borders)[0]; |
7296 | 0 | col[1] = ((guint8 *) borders)[1]; |
7297 | 0 | col[2] = ((guint8 *) borders)[2]; |
7298 | 0 | MAKE_BORDER_FUNC (memset_u24); |
7299 | 0 | break; |
7300 | 0 | } |
7301 | 0 | case 4: |
7302 | 0 | { |
7303 | 0 | guint32 col = ((guint32 *) borders)[0]; |
7304 | 0 | MAKE_BORDER_FUNC (video_orc_splat_u32); |
7305 | 0 | break; |
7306 | 0 | } |
7307 | 0 | case 8: |
7308 | 0 | { |
7309 | 0 | guint64 col = ((guint64 *) borders)[0]; |
7310 | 0 | MAKE_BORDER_FUNC (video_orc_splat_u64); |
7311 | 0 | break; |
7312 | 0 | } |
7313 | 0 | case 42: |
7314 | 0 | { |
7315 | 0 | guint8 col[4]; |
7316 | 0 | col[0] = ((guint8 *) borders)[0]; |
7317 | 0 | col[2] = ((guint8 *) borders)[2]; |
7318 | 0 | col[1] = ((guint8 *) borders)[r_border & 1 ? 3 : 1]; |
7319 | 0 | col[3] = ((guint8 *) borders)[r_border & 1 ? 1 : 3]; |
7320 | 0 | MAKE_BORDER_FUNC (memset_u32_16); |
7321 | 0 | break; |
7322 | 0 | } |
7323 | 0 | default: |
7324 | 0 | break; |
7325 | 0 | } |
7326 | 0 | } |
7327 | 0 | } |
7328 | | |
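/* convert_fill_border() picks the splat function per plane from the size of
 * one pixel group (pgroup): 1, 2, 4 and 8 byte groups use memset or the ORC
 * splat kernels, 3 byte groups use memset_u24(), and the value 42 marks
 * packed 4:2:2 (YUY2/YVYU/UYVY), where one 4-byte group spans two pixels.
 * For that case the bytes at indices 1 and 3 of the border color are swapped
 * when the right border starts on an odd pixel, so the repeated group keeps
 * the image's two-pixel phase:
 *
 *   col[1] = border[r_border & 1 ? 3 : 1];
 *   col[3] = border[r_border & 1 ? 1 : 3];
 */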
7329 | | typedef struct |
7330 | | { |
7331 | | const guint8 *s, *s2; |
7332 | | guint8 *d, *d2; |
7333 | | gint sstride, dstride; |
7334 | | gint width, height; |
7335 | | gint fill; |
7336 | | } FSimpleScaleTask; |
7337 | | |
7338 | | static void |
7339 | | convert_plane_fill_task (FSimpleScaleTask * task) |
7340 | 0 | { |
7341 | 0 | video_orc_memset_2d (task->d, task->dstride, |
7342 | 0 | task->fill, task->width, task->height); |
7343 | 0 | } |
7344 | | |
7345 | | static void |
7346 | | convert_plane_fill (GstVideoConverter * convert, |
7347 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane) |
7348 | 0 | { |
7349 | 0 | guint8 *d; |
7350 | 0 | FSimpleScaleTask *tasks; |
7351 | 0 | FSimpleScaleTask **tasks_p; |
7352 | 0 | gint n_threads; |
7353 | 0 | gint lines_per_thread; |
7354 | 0 | gint i; |
7355 | |
|
7356 | 0 | d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]); |
7357 | 0 | d += convert->fout_x[plane]; |
7358 | |
|
7359 | 0 | n_threads = convert->conversion_runner->n_threads; |
7360 | 0 | tasks = convert->tasks[plane] = |
7361 | 0 | g_renew (FSimpleScaleTask, convert->tasks[plane], n_threads); |
7362 | 0 | tasks_p = convert->tasks_p[plane] = |
7363 | 0 | g_renew (FSimpleScaleTask *, convert->tasks_p[plane], n_threads); |
7364 | 0 | lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads; |
7365 | |
|
7366 | 0 | for (i = 0; i < n_threads; i++) { |
7367 | 0 | tasks[i].d = d + i * lines_per_thread * convert->fout_width[plane]; |
7368 | |
|
7369 | 0 | tasks[i].fill = convert->ffill[plane]; |
7370 | 0 | tasks[i].width = convert->fout_width[plane]; |
7371 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
7372 | 0 | tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]); |
7373 | 0 | tasks[i].height -= i * lines_per_thread; |
7374 | 0 | tasks[i].dstride = FRAME_GET_PLANE_STRIDE (dest, plane); |
7375 | |
|
7376 | 0 | tasks_p[i] = &tasks[i]; |
7377 | 0 | } |
7378 | |
|
7379 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7380 | 0 | (GstParallelizedTaskFunc) convert_plane_fill_task, (gpointer) tasks_p); |
7381 | 0 | } |
7382 | | |
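/* All convert_plane_*() helpers in this file split a plane into horizontal
 * bands, one band per thread of the conversion runner.  The band size is the
 * ceiling of height / n_threads (rounded further up to an even count for the
 * vertical-doubling variants) and each band is clamped to the plane height:
 *
 *   lines_per_thread = (height + n_threads - 1) / n_threads;
 *   start    = i * lines_per_thread;
 *   height_i = MIN ((i + 1) * lines_per_thread, height) - start;
 *
 * e.g. a 10-line plane on 4 threads is processed as bands of 3, 3, 3 and 1
 * lines.
 */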
7383 | | static void |
7384 | | convert_plane_h_double_task (FSimpleScaleTask * task) |
7385 | 0 | { |
7386 | 0 | video_orc_planar_chroma_422_444 (task->d, |
7387 | 0 | task->dstride, task->s, task->sstride, task->width / 2, task->height); |
7388 | 0 | } |
7389 | | |
7390 | | static void |
7391 | | convert_plane_h_double (GstVideoConverter * convert, |
7392 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane) |
7393 | 0 | { |
7394 | 0 | guint8 *s, *d; |
7395 | 0 | gint splane = convert->fsplane[plane]; |
7396 | 0 | FSimpleScaleTask *tasks; |
7397 | 0 | FSimpleScaleTask **tasks_p; |
7398 | 0 | gint n_threads; |
7399 | 0 | gint lines_per_thread; |
7400 | 0 | gint i; |
7401 | |
|
7402 | 0 | s = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]); |
7403 | 0 | s += convert->fin_x[splane]; |
7404 | 0 | d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]); |
7405 | 0 | d += convert->fout_x[plane]; |
7406 | |
|
7407 | 0 | n_threads = convert->conversion_runner->n_threads; |
7408 | 0 | tasks = convert->tasks[plane] = |
7409 | 0 | g_renew (FSimpleScaleTask, convert->tasks[plane], n_threads); |
7410 | 0 | tasks_p = convert->tasks_p[plane] = |
7411 | 0 | g_renew (FSimpleScaleTask *, convert->tasks_p[plane], n_threads); |
7412 | 0 | lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads; |
7413 | |
|
7414 | 0 | for (i = 0; i < n_threads; i++) { |
7415 | 0 | tasks[i].dstride = FRAME_GET_PLANE_STRIDE (dest, plane); |
7416 | 0 | tasks[i].sstride = FRAME_GET_PLANE_STRIDE (src, splane); |
7417 | |
|
7418 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
7419 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
7420 | |
|
7421 | 0 | tasks[i].width = convert->fout_width[plane]; |
7422 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
7423 | 0 | tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]); |
7424 | 0 | tasks[i].height -= i * lines_per_thread; |
7425 | |
|
7426 | 0 | tasks_p[i] = &tasks[i]; |
7427 | 0 | } |
7428 | |
|
7429 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7430 | 0 | (GstParallelizedTaskFunc) convert_plane_h_double_task, |
7431 | 0 | (gpointer) tasks_p); |
7432 | 0 | } |
7433 | | |
7434 | | static void |
7435 | | convert_plane_h_halve_task (FSimpleScaleTask * task) |
7436 | 0 | { |
7437 | 0 | video_orc_planar_chroma_444_422 (task->d, |
7438 | 0 | task->dstride, task->s, task->sstride, task->width, task->height); |
7439 | 0 | } |
7440 | | |
7441 | | static void |
7442 | | convert_plane_h_halve (GstVideoConverter * convert, |
7443 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane) |
7444 | 0 | { |
7445 | 0 | guint8 *s, *d; |
7446 | 0 | gint splane = convert->fsplane[plane]; |
7447 | 0 | FSimpleScaleTask *tasks; |
7448 | 0 | FSimpleScaleTask **tasks_p; |
7449 | 0 | gint n_threads; |
7450 | 0 | gint lines_per_thread; |
7451 | 0 | gint i; |
7452 | |
|
7453 | 0 | s = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]); |
7454 | 0 | s += convert->fin_x[splane]; |
7455 | 0 | d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]); |
7456 | 0 | d += convert->fout_x[plane]; |
7457 | |
|
7458 | 0 | n_threads = convert->conversion_runner->n_threads; |
7459 | 0 | tasks = convert->tasks[plane] = |
7460 | 0 | g_renew (FSimpleScaleTask, convert->tasks[plane], n_threads); |
7461 | 0 | tasks_p = convert->tasks_p[plane] = |
7462 | 0 | g_renew (FSimpleScaleTask *, convert->tasks_p[plane], n_threads); |
7463 | 0 | lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads; |
7464 | |
|
7465 | 0 | for (i = 0; i < n_threads; i++) { |
7466 | 0 | tasks[i].dstride = FRAME_GET_PLANE_STRIDE (dest, plane); |
7467 | 0 | tasks[i].sstride = FRAME_GET_PLANE_STRIDE (src, splane); |
7468 | |
|
7469 | 0 | tasks[i].d = d + i * lines_per_thread * tasks[i].dstride; |
7470 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride; |
7471 | |
|
7472 | 0 | tasks[i].width = convert->fout_width[plane]; |
7473 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
7474 | 0 | tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]); |
7475 | 0 | tasks[i].height -= i * lines_per_thread; |
7476 | |
|
7477 | 0 | tasks_p[i] = &tasks[i]; |
7478 | 0 | } |
7479 | |
|
7480 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7481 | 0 | (GstParallelizedTaskFunc) convert_plane_h_halve_task, (gpointer) tasks_p); |
7482 | 0 | } |
7483 | | |
7484 | | static void |
7485 | | convert_plane_v_double_task (FSimpleScaleTask * task) |
7486 | 0 | { |
7487 | 0 | video_orc_planar_chroma_420_422 (task->d, 2 * task->dstride, task->d2, |
7488 | 0 | 2 * task->dstride, task->s, task->sstride, task->width, task->height / 2); |
7489 | 0 | } |
7490 | | |
7491 | | static void |
7492 | | convert_plane_v_double (GstVideoConverter * convert, |
7493 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane) |
7494 | 0 | { |
7495 | 0 | guint8 *s, *d1, *d2; |
7496 | 0 | gint ds, splane = convert->fsplane[plane]; |
7497 | 0 | FSimpleScaleTask *tasks; |
7498 | 0 | FSimpleScaleTask **tasks_p; |
7499 | 0 | gint n_threads; |
7500 | 0 | gint lines_per_thread; |
7501 | 0 | gint i; |
7502 | |
|
7503 | 0 | s = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]); |
7504 | 0 | s += convert->fin_x[splane]; |
7505 | 0 | d1 = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]); |
7506 | 0 | d1 += convert->fout_x[plane]; |
7507 | 0 | d2 = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane] + 1); |
7508 | 0 | d2 += convert->fout_x[plane]; |
7509 | 0 | ds = FRAME_GET_PLANE_STRIDE (dest, plane); |
7510 | |
|
7511 | 0 | n_threads = convert->conversion_runner->n_threads; |
7512 | 0 | tasks = convert->tasks[plane] = |
7513 | 0 | g_renew (FSimpleScaleTask, convert->tasks[plane], n_threads); |
7514 | 0 | tasks_p = convert->tasks_p[plane] = |
7515 | 0 | g_renew (FSimpleScaleTask *, convert->tasks_p[plane], n_threads); |
7516 | 0 | lines_per_thread = |
7517 | 0 | GST_ROUND_UP_2 ((convert->fout_height[plane] + n_threads - |
7518 | 0 | 1) / n_threads); |
7519 | |
|
7520 | 0 | for (i = 0; i < n_threads; i++) { |
7521 | 0 | tasks[i].d = d1 + i * lines_per_thread * ds; |
7522 | 0 | tasks[i].d2 = d2 + i * lines_per_thread * ds; |
7523 | 0 | tasks[i].dstride = ds; |
7524 | 0 | tasks[i].sstride = FRAME_GET_PLANE_STRIDE (src, splane); |
7525 | 0 | tasks[i].s = s + i * lines_per_thread * tasks[i].sstride / 2; |
7526 | |
|
7527 | 0 | tasks[i].width = convert->fout_width[plane]; |
7528 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
7529 | 0 | tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]); |
7530 | 0 | tasks[i].height -= i * lines_per_thread; |
7531 | |
|
7532 | 0 | tasks_p[i] = &tasks[i]; |
7533 | 0 | } |
7534 | |
|
7535 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7536 | 0 | (GstParallelizedTaskFunc) convert_plane_v_double_task, |
7537 | 0 | (gpointer) tasks_p); |
7538 | 0 | } |
7539 | | |
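/* For vertical doubling (and the combined h/v doubling below) every source
 * line produces two destination lines, written through d and d2 with a
 * stride of 2 * dstride, so the per-thread band is rounded up to an even
 * number of output lines with GST_ROUND_UP_2 ().  That keeps every band
 * boundary on a pair of output lines that come from the same source line.
 */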
7540 | | static void |
7541 | | convert_plane_v_halve_task (FSimpleScaleTask * task) |
7542 | 0 | { |
7543 | 0 | video_orc_planar_chroma_422_420 (task->d, task->dstride, task->s, |
7544 | 0 | 2 * task->sstride, task->s2, 2 * task->sstride, task->width, |
7545 | 0 | task->height); |
7546 | 0 | } |
7547 | | |
7548 | | static void |
7549 | | convert_plane_v_halve (GstVideoConverter * convert, |
7550 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane) |
7551 | 0 | { |
7552 | 0 | guint8 *s1, *s2, *d; |
7553 | 0 | gint ss, ds, splane = convert->fsplane[plane]; |
7554 | 0 | FSimpleScaleTask *tasks; |
7555 | 0 | FSimpleScaleTask **tasks_p; |
7556 | 0 | gint n_threads; |
7557 | 0 | gint lines_per_thread; |
7558 | 0 | gint i; |
7559 | |
|
7560 | 0 | s1 = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]); |
7561 | 0 | s1 += convert->fin_x[splane]; |
7562 | 0 | s2 = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane] + 1); |
7563 | 0 | s2 += convert->fin_x[splane]; |
7564 | 0 | d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]); |
7565 | 0 | d += convert->fout_x[plane]; |
7566 | |
|
7567 | 0 | ss = FRAME_GET_PLANE_STRIDE (src, splane); |
7568 | 0 | ds = FRAME_GET_PLANE_STRIDE (dest, plane); |
7569 | |
|
7570 | 0 | n_threads = convert->conversion_runner->n_threads; |
7571 | 0 | tasks = convert->tasks[plane] = |
7572 | 0 | g_renew (FSimpleScaleTask, convert->tasks[plane], n_threads); |
7573 | 0 | tasks_p = convert->tasks_p[plane] = |
7574 | 0 | g_renew (FSimpleScaleTask *, convert->tasks_p[plane], n_threads); |
7575 | 0 | lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads; |
7576 | |
|
7577 | 0 | for (i = 0; i < n_threads; i++) { |
7578 | 0 | tasks[i].d = d + i * lines_per_thread * ds; |
7579 | 0 | tasks[i].dstride = ds; |
7580 | 0 | tasks[i].s = s1 + i * lines_per_thread * ss * 2; |
7581 | 0 | tasks[i].s2 = s2 + i * lines_per_thread * ss * 2; |
7582 | 0 | tasks[i].sstride = ss; |
7583 | |
|
7584 | 0 | tasks[i].width = convert->fout_width[plane]; |
7585 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
7586 | 0 | tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]); |
7587 | 0 | tasks[i].height -= i * lines_per_thread; |
7588 | |
|
7589 | 0 | tasks_p[i] = &tasks[i]; |
7590 | 0 | } |
7591 | |
|
7592 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7593 | 0 | (GstParallelizedTaskFunc) convert_plane_v_halve_task, (gpointer) tasks_p); |
7594 | 0 | } |
7595 | | |
7596 | | static void |
7597 | | convert_plane_hv_double_task (FSimpleScaleTask * task) |
7598 | 0 | { |
7599 | 0 | video_orc_planar_chroma_420_444 (task->d, 2 * task->dstride, task->d2, |
7600 | 0 | 2 * task->dstride, task->s, task->sstride, (task->width + 1) / 2, |
7601 | 0 | task->height / 2); |
7602 | 0 | } |
7603 | | |
7604 | | static void |
7605 | | convert_plane_hv_double (GstVideoConverter * convert, |
7606 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane) |
7607 | 0 | { |
7608 | 0 | guint8 *s, *d1, *d2; |
7609 | 0 | gint ss, ds, splane = convert->fsplane[plane]; |
7610 | 0 | FSimpleScaleTask *tasks; |
7611 | 0 | FSimpleScaleTask **tasks_p; |
7612 | 0 | gint n_threads; |
7613 | 0 | gint lines_per_thread; |
7614 | 0 | gint i; |
7615 | |
|
7616 | 0 | s = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]); |
7617 | 0 | s += convert->fin_x[splane]; |
7618 | 0 | d1 = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]); |
7619 | 0 | d1 += convert->fout_x[plane]; |
7620 | 0 | d2 = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane] + 1); |
7621 | 0 | d2 += convert->fout_x[plane]; |
7622 | 0 | ss = FRAME_GET_PLANE_STRIDE (src, splane); |
7623 | 0 | ds = FRAME_GET_PLANE_STRIDE (dest, plane); |
7624 | |
|
7625 | 0 | n_threads = convert->conversion_runner->n_threads; |
7626 | 0 | tasks = convert->tasks[plane] = |
7627 | 0 | g_renew (FSimpleScaleTask, convert->tasks[plane], n_threads); |
7628 | 0 | tasks_p = convert->tasks_p[plane] = |
7629 | 0 | g_renew (FSimpleScaleTask *, convert->tasks_p[plane], n_threads); |
7630 | 0 | lines_per_thread = |
7631 | 0 | GST_ROUND_UP_2 ((convert->fout_height[plane] + n_threads - |
7632 | 0 | 1) / n_threads); |
7633 | |
|
7634 | 0 | for (i = 0; i < n_threads; i++) { |
7635 | 0 | tasks[i].d = d1 + i * lines_per_thread * ds; |
7636 | 0 | tasks[i].d2 = d2 + i * lines_per_thread * ds; |
7637 | 0 | tasks[i].dstride = ds; |
7638 | 0 | tasks[i].sstride = ss; |
7639 | 0 | tasks[i].s = s + i * lines_per_thread * ss / 2; |
7640 | |
|
7641 | 0 | tasks[i].width = convert->fout_width[plane]; |
7642 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
7643 | 0 | tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]); |
7644 | 0 | tasks[i].height -= i * lines_per_thread; |
7645 | |
|
7646 | 0 | tasks_p[i] = &tasks[i]; |
7647 | 0 | } |
7648 | |
|
7649 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7650 | 0 | (GstParallelizedTaskFunc) convert_plane_hv_double_task, |
7651 | 0 | (gpointer) tasks_p); |
7652 | 0 | } |
7653 | | |
7654 | | static void |
7655 | | convert_plane_hv_halve_task (FSimpleScaleTask * task) |
7656 | 0 | { |
7657 | 0 | video_orc_planar_chroma_444_420 (task->d, task->dstride, task->s, |
7658 | 0 | 2 * task->sstride, task->s2, 2 * task->sstride, task->width, |
7659 | 0 | task->height); |
7660 | 0 | } |
7661 | | |
7662 | | static void |
7663 | | convert_plane_hv_halve (GstVideoConverter * convert, |
7664 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane) |
7665 | 0 | { |
7666 | 0 | guint8 *s1, *s2, *d; |
7667 | 0 | gint ss, ds, splane = convert->fsplane[plane]; |
7668 | 0 | FSimpleScaleTask *tasks; |
7669 | 0 | FSimpleScaleTask **tasks_p; |
7670 | 0 | gint n_threads; |
7671 | 0 | gint lines_per_thread; |
7672 | 0 | gint i; |
7673 | |
|
7674 | 0 | s1 = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane]); |
7675 | 0 | s1 += convert->fin_x[splane]; |
7676 | 0 | s2 = FRAME_GET_PLANE_LINE (src, splane, convert->fin_y[splane] + 1); |
7677 | 0 | s2 += convert->fin_x[splane]; |
7678 | 0 | d = FRAME_GET_PLANE_LINE (dest, plane, convert->fout_y[plane]); |
7679 | 0 | d += convert->fout_x[plane]; |
7680 | 0 | ss = FRAME_GET_PLANE_STRIDE (src, splane); |
7681 | 0 | ds = FRAME_GET_PLANE_STRIDE (dest, plane); |
7682 | |
|
7683 | 0 | n_threads = convert->conversion_runner->n_threads; |
7684 | 0 | tasks = convert->tasks[plane] = |
7685 | 0 | g_renew (FSimpleScaleTask, convert->tasks[plane], n_threads); |
7686 | 0 | tasks_p = convert->tasks_p[plane] = |
7687 | 0 | g_renew (FSimpleScaleTask *, convert->tasks_p[plane], n_threads); |
7688 | 0 | lines_per_thread = (convert->fout_height[plane] + n_threads - 1) / n_threads; |
7689 | |
|
7690 | 0 | for (i = 0; i < n_threads; i++) { |
7691 | 0 | tasks[i].d = d + i * lines_per_thread * ds; |
7692 | 0 | tasks[i].dstride = ds; |
7693 | 0 | tasks[i].s = s1 + i * lines_per_thread * ss * 2; |
7694 | 0 | tasks[i].s2 = s2 + i * lines_per_thread * ss * 2; |
7695 | 0 | tasks[i].sstride = ss; |
7696 | |
|
7697 | 0 | tasks[i].width = convert->fout_width[plane]; |
7698 | 0 | tasks[i].height = (i + 1) * lines_per_thread; |
7699 | 0 | tasks[i].height = MIN (tasks[i].height, convert->fout_height[plane]); |
7700 | 0 | tasks[i].height -= i * lines_per_thread; |
7701 | |
|
7702 | 0 | tasks_p[i] = &tasks[i]; |
7703 | 0 | } |
7704 | |
|
7705 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7706 | 0 | (GstParallelizedTaskFunc) convert_plane_hv_halve_task, |
7707 | 0 | (gpointer) tasks_p); |
7708 | 0 | } |
7709 | | |
7710 | | typedef struct |
7711 | | { |
7712 | | GstVideoScaler *h_scaler, *v_scaler; |
7713 | | GstVideoFormat format; |
7714 | | const guint8 *s; |
7715 | | guint8 *d; |
7716 | | gint sstride, dstride; |
7717 | | guint x, y, w, h; |
7718 | | } FScaleTask; |
7719 | | |
7720 | | static void |
7721 | | convert_plane_hv_task (FScaleTask * task) |
7722 | 0 | { |
7723 | 0 | gst_video_scaler_2d (task->h_scaler, task->v_scaler, task->format, |
7724 | 0 | (guint8 *) task->s, task->sstride, |
7725 | 0 | task->d, task->dstride, task->x, task->y, task->w, task->h); |
7726 | 0 | } |
7727 | | |
7728 | | static void |
7729 | | convert_plane_hv (GstVideoConverter * convert, |
7730 | | const GstVideoFrame * src, GstVideoFrame * dest, gint plane) |
7731 | 0 | { |
7732 | 0 | gint in_x, in_y, out_x, out_y, out_width, out_height; |
7733 | 0 | GstVideoFormat format; |
7734 | 0 | gint splane = convert->fsplane[plane]; |
7735 | 0 | guint8 *s, *d; |
7736 | 0 | gint sstride, dstride; |
7737 | 0 | FScaleTask *tasks; |
7738 | 0 | FScaleTask **tasks_p; |
7739 | 0 | gint i, n_threads, lines_per_thread; |
7740 | |
|
7741 | 0 | in_x = convert->fin_x[splane]; |
7742 | 0 | in_y = convert->fin_y[splane]; |
7743 | 0 | out_x = convert->fout_x[plane]; |
7744 | 0 | out_y = convert->fout_y[plane]; |
7745 | 0 | out_width = convert->fout_width[plane]; |
7746 | 0 | out_height = convert->fout_height[plane]; |
7747 | 0 | format = convert->fformat[plane]; |
7748 | |
|
7749 | 0 | s = FRAME_GET_PLANE_LINE (src, splane, in_y); |
7750 | 0 | s += in_x; |
7751 | 0 | d = FRAME_GET_PLANE_LINE (dest, plane, out_y); |
7752 | 0 | d += out_x; |
7753 | |
|
7754 | 0 | sstride = FRAME_GET_PLANE_STRIDE (src, splane); |
7755 | 0 | dstride = FRAME_GET_PLANE_STRIDE (dest, plane); |
7756 | |
|
7757 | 0 | n_threads = convert->conversion_runner->n_threads; |
7758 | 0 | tasks = convert->tasks[plane] = |
7759 | 0 | g_renew (FScaleTask, convert->tasks[plane], n_threads); |
7760 | 0 | tasks_p = convert->tasks_p[plane] = |
7761 | 0 | g_renew (FScaleTask *, convert->tasks_p[plane], n_threads); |
7762 | |
|
7763 | 0 | lines_per_thread = (out_height + n_threads - 1) / n_threads; |
7764 | |
|
7765 | 0 | for (i = 0; i < n_threads; i++) { |
7766 | 0 | tasks[i].h_scaler = |
7767 | 0 | convert->fh_scaler[plane].scaler ? convert-> |
7768 | 0 | fh_scaler[plane].scaler[i] : NULL; |
7769 | 0 | tasks[i].v_scaler = |
7770 | 0 | convert->fv_scaler[plane].scaler ? convert-> |
7771 | 0 | fv_scaler[plane].scaler[i] : NULL; |
7772 | 0 | tasks[i].format = format; |
7773 | 0 | tasks[i].s = s; |
7774 | 0 | tasks[i].d = d; |
7775 | 0 | tasks[i].sstride = sstride; |
7776 | 0 | tasks[i].dstride = dstride; |
7777 | |
|
7778 | 0 | tasks[i].x = 0; |
7779 | 0 | tasks[i].w = out_width; |
7780 | |
|
7781 | 0 | tasks[i].y = i * lines_per_thread; |
7782 | 0 | tasks[i].h = tasks[i].y + lines_per_thread; |
7783 | 0 | tasks[i].h = MIN (out_height, tasks[i].h); |
7784 | |
|
7785 | 0 | tasks_p[i] = &tasks[i]; |
7786 | 0 | } |
7787 | |
|
7788 | 0 | gst_parallelized_task_runner_run (convert->conversion_runner, |
7789 | 0 | (GstParallelizedTaskFunc) convert_plane_hv_task, (gpointer) tasks_p); |
7790 | 0 | } |
7791 | | |
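/* convert_plane_hv() is the generic path: each band goes through
 * gst_video_scaler_2d() with the per-thread horizontal/vertical scalers that
 * setup_scale() created (a NULL scaler means that direction is just copied).
 * A single-threaded sketch of the same call for one GRAY8 plane, with
 * made-up sizes and 4 taps, would be:
 *
 *   GstVideoScaler *h = gst_video_scaler_new (GST_VIDEO_RESAMPLER_METHOD_LINEAR,
 *       GST_VIDEO_SCALER_FLAG_NONE, 4, in_w, out_w, NULL);
 *   GstVideoScaler *v = gst_video_scaler_new (GST_VIDEO_RESAMPLER_METHOD_LINEAR,
 *       GST_VIDEO_SCALER_FLAG_NONE, 4, in_h, out_h, NULL);
 *   gst_video_scaler_2d (h, v, GST_VIDEO_FORMAT_GRAY8,
 *       src_line, sstride, dst_line, dstride, 0, 0, out_w, out_h);
 *   gst_video_scaler_free (h);
 *   gst_video_scaler_free (v);
 */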
7792 | | static void |
7793 | | convert_scale_planes (GstVideoConverter * convert, |
7794 | | const GstVideoFrame * src, GstVideoFrame * dest) |
7795 | 0 | { |
7796 | 0 | int i, n_planes; |
7797 | |
|
7798 | 0 | n_planes = GST_VIDEO_FRAME_N_PLANES (dest); |
7799 | 0 | for (i = 0; i < n_planes; i++) { |
7800 | 0 | if (convert->fconvert[i]) |
7801 | 0 | convert->fconvert[i] (convert, src, dest, i); |
7802 | 0 | } |
7803 | 0 | convert_fill_border (convert, dest); |
7804 | 0 | } |
7805 | | |
7806 | | static GstVideoFormat |
7807 | | get_scale_format (GstVideoFormat format, gint plane) |
7808 | 0 | { |
7809 | 0 | GstVideoFormat res = GST_VIDEO_FORMAT_UNKNOWN; |
7810 | |
|
7811 | 0 | switch (format) { |
7812 | 0 | case GST_VIDEO_FORMAT_I420: |
7813 | 0 | case GST_VIDEO_FORMAT_YV12: |
7814 | 0 | case GST_VIDEO_FORMAT_Y41B: |
7815 | 0 | case GST_VIDEO_FORMAT_Y42B: |
7816 | 0 | case GST_VIDEO_FORMAT_Y444: |
7817 | 0 | case GST_VIDEO_FORMAT_GRAY8: |
7818 | 0 | case GST_VIDEO_FORMAT_A420: |
7819 | 0 | case GST_VIDEO_FORMAT_A422: |
7820 | 0 | case GST_VIDEO_FORMAT_A444: |
7821 | 0 | case GST_VIDEO_FORMAT_YUV9: |
7822 | 0 | case GST_VIDEO_FORMAT_YVU9: |
7823 | 0 | case GST_VIDEO_FORMAT_GBR: |
7824 | 0 | case GST_VIDEO_FORMAT_GBRA: |
7825 | 0 | case GST_VIDEO_FORMAT_RGBP: |
7826 | 0 | case GST_VIDEO_FORMAT_BGRP: |
7827 | 0 | res = GST_VIDEO_FORMAT_GRAY8; |
7828 | 0 | break; |
7829 | 0 | case GST_VIDEO_FORMAT_GRAY16_BE: |
7830 | 0 | case GST_VIDEO_FORMAT_GRAY16_LE: |
7831 | 0 | res = GST_VIDEO_FORMAT_GRAY16_BE; |
7832 | 0 | break; |
7833 | 0 | case GST_VIDEO_FORMAT_YUY2: |
7834 | 0 | case GST_VIDEO_FORMAT_UYVY: |
7835 | 0 | case GST_VIDEO_FORMAT_VYUY: |
7836 | 0 | case GST_VIDEO_FORMAT_YVYU: |
7837 | 0 | case GST_VIDEO_FORMAT_AYUV: |
7838 | 0 | case GST_VIDEO_FORMAT_VUYA: |
7839 | 0 | case GST_VIDEO_FORMAT_RGBx: |
7840 | 0 | case GST_VIDEO_FORMAT_BGRx: |
7841 | 0 | case GST_VIDEO_FORMAT_xRGB: |
7842 | 0 | case GST_VIDEO_FORMAT_xBGR: |
7843 | 0 | case GST_VIDEO_FORMAT_RGBA: |
7844 | 0 | case GST_VIDEO_FORMAT_BGRA: |
7845 | 0 | case GST_VIDEO_FORMAT_ARGB: |
7846 | 0 | case GST_VIDEO_FORMAT_ABGR: |
7847 | 0 | case GST_VIDEO_FORMAT_RGB: |
7848 | 0 | case GST_VIDEO_FORMAT_BGR: |
7849 | 0 | case GST_VIDEO_FORMAT_v308: |
7850 | 0 | case GST_VIDEO_FORMAT_IYU2: |
7851 | 0 | case GST_VIDEO_FORMAT_ARGB64: |
7852 | 0 | case GST_VIDEO_FORMAT_ARGB64_LE: |
7853 | 0 | case GST_VIDEO_FORMAT_ARGB64_BE: |
7854 | 0 | case GST_VIDEO_FORMAT_RGBA64_BE: |
7855 | 0 | case GST_VIDEO_FORMAT_RGBA64_LE: |
7856 | 0 | case GST_VIDEO_FORMAT_BGRA64_BE: |
7857 | 0 | case GST_VIDEO_FORMAT_BGRA64_LE: |
7858 | 0 | case GST_VIDEO_FORMAT_ABGR64_BE: |
7859 | 0 | case GST_VIDEO_FORMAT_ABGR64_LE: |
7860 | 0 | case GST_VIDEO_FORMAT_AYUV64: |
7861 | 0 | case GST_VIDEO_FORMAT_RBGA: |
7862 | 0 | res = format; |
7863 | 0 | break; |
7864 | 0 | case GST_VIDEO_FORMAT_RGB15: |
7865 | 0 | case GST_VIDEO_FORMAT_BGR15: |
7866 | 0 | case GST_VIDEO_FORMAT_RGB16: |
7867 | 0 | case GST_VIDEO_FORMAT_BGR16: |
7868 | 0 | res = GST_VIDEO_FORMAT_NV12; |
7869 | 0 | break; |
7870 | 0 | case GST_VIDEO_FORMAT_NV12: |
7871 | 0 | case GST_VIDEO_FORMAT_NV21: |
7872 | 0 | case GST_VIDEO_FORMAT_NV16: |
7873 | 0 | case GST_VIDEO_FORMAT_NV61: |
7874 | 0 | case GST_VIDEO_FORMAT_NV24: |
7875 | 0 | res = plane == 0 ? GST_VIDEO_FORMAT_GRAY8 : GST_VIDEO_FORMAT_NV12; |
7876 | 0 | break; |
7877 | 0 | case GST_VIDEO_FORMAT_AV12: |
7878 | 0 | res = (plane == 0 |
7879 | 0 | || plane == 2) ? GST_VIDEO_FORMAT_GRAY8 : GST_VIDEO_FORMAT_NV12; |
7880 | 0 | break; |
7881 | 0 | case GST_VIDEO_FORMAT_UNKNOWN: |
7882 | 0 | case GST_VIDEO_FORMAT_ENCODED: |
7883 | 0 | case GST_VIDEO_FORMAT_v210: |
7884 | 0 | case GST_VIDEO_FORMAT_v216: |
7885 | 0 | case GST_VIDEO_FORMAT_Y210: |
7886 | 0 | case GST_VIDEO_FORMAT_Y410: |
7887 | 0 | case GST_VIDEO_FORMAT_UYVP: |
7888 | 0 | case GST_VIDEO_FORMAT_RGB8P: |
7889 | 0 | case GST_VIDEO_FORMAT_IYU1: |
7890 | 0 | case GST_VIDEO_FORMAT_r210: |
7891 | 0 | case GST_VIDEO_FORMAT_I420_10BE: |
7892 | 0 | case GST_VIDEO_FORMAT_I420_10LE: |
7893 | 0 | case GST_VIDEO_FORMAT_I422_10BE: |
7894 | 0 | case GST_VIDEO_FORMAT_I422_10LE: |
7895 | 0 | case GST_VIDEO_FORMAT_Y444_10BE: |
7896 | 0 | case GST_VIDEO_FORMAT_Y444_10LE: |
7897 | 0 | case GST_VIDEO_FORMAT_I420_12BE: |
7898 | 0 | case GST_VIDEO_FORMAT_I420_12LE: |
7899 | 0 | case GST_VIDEO_FORMAT_I422_12BE: |
7900 | 0 | case GST_VIDEO_FORMAT_I422_12LE: |
7901 | 0 | case GST_VIDEO_FORMAT_Y444_12BE: |
7902 | 0 | case GST_VIDEO_FORMAT_Y444_12LE: |
7903 | 0 | case GST_VIDEO_FORMAT_GBR_10BE: |
7904 | 0 | case GST_VIDEO_FORMAT_GBR_10LE: |
7905 | 0 | case GST_VIDEO_FORMAT_GBRA_10BE: |
7906 | 0 | case GST_VIDEO_FORMAT_GBRA_10LE: |
7907 | 0 | case GST_VIDEO_FORMAT_GBR_12BE: |
7908 | 0 | case GST_VIDEO_FORMAT_GBR_12LE: |
7909 | 0 | case GST_VIDEO_FORMAT_GBRA_12BE: |
7910 | 0 | case GST_VIDEO_FORMAT_GBRA_12LE: |
7911 | 0 | case GST_VIDEO_FORMAT_GBR_16BE: |
7912 | 0 | case GST_VIDEO_FORMAT_GBR_16LE: |
7913 | 0 | case GST_VIDEO_FORMAT_NV12_64Z32: |
7914 | 0 | case GST_VIDEO_FORMAT_NV12_4L4: |
7915 | 0 | case GST_VIDEO_FORMAT_NV12_32L32: |
7916 | 0 | case GST_VIDEO_FORMAT_NV12_16L32S: |
7917 | 0 | case GST_VIDEO_FORMAT_A420_10BE: |
7918 | 0 | case GST_VIDEO_FORMAT_A420_10LE: |
7919 | 0 | case GST_VIDEO_FORMAT_A422_10BE: |
7920 | 0 | case GST_VIDEO_FORMAT_A422_10LE: |
7921 | 0 | case GST_VIDEO_FORMAT_A444_10BE: |
7922 | 0 | case GST_VIDEO_FORMAT_A444_10LE: |
7923 | 0 | case GST_VIDEO_FORMAT_A444_12BE: |
7924 | 0 | case GST_VIDEO_FORMAT_A444_12LE: |
7925 | 0 | case GST_VIDEO_FORMAT_A422_12BE: |
7926 | 0 | case GST_VIDEO_FORMAT_A422_12LE: |
7927 | 0 | case GST_VIDEO_FORMAT_A420_12BE: |
7928 | 0 | case GST_VIDEO_FORMAT_A420_12LE: |
7929 | 0 | case GST_VIDEO_FORMAT_A444_16BE: |
7930 | 0 | case GST_VIDEO_FORMAT_A444_16LE: |
7931 | 0 | case GST_VIDEO_FORMAT_A422_16BE: |
7932 | 0 | case GST_VIDEO_FORMAT_A422_16LE: |
7933 | 0 | case GST_VIDEO_FORMAT_A420_16BE: |
7934 | 0 | case GST_VIDEO_FORMAT_A420_16LE: |
7935 | 0 | case GST_VIDEO_FORMAT_P010_10BE: |
7936 | 0 | case GST_VIDEO_FORMAT_P010_10LE: |
7937 | 0 | case GST_VIDEO_FORMAT_GRAY10_LE16: |
7938 | 0 | case GST_VIDEO_FORMAT_GRAY10_LE32: |
7939 | 0 | case GST_VIDEO_FORMAT_NV12_10LE32: |
7940 | 0 | case GST_VIDEO_FORMAT_NV16_10LE32: |
7941 | 0 | case GST_VIDEO_FORMAT_NV12_10LE40: |
7942 | 0 | case GST_VIDEO_FORMAT_NV16_10LE40: |
7943 | 0 | case GST_VIDEO_FORMAT_BGR10A2_LE: |
7944 | 0 | case GST_VIDEO_FORMAT_RGB10A2_LE: |
7945 | 0 | case GST_VIDEO_FORMAT_BGR10x2_LE: |
7946 | 0 | case GST_VIDEO_FORMAT_RGB10x2_LE: |
7947 | 0 | case GST_VIDEO_FORMAT_Y444_16BE: |
7948 | 0 | case GST_VIDEO_FORMAT_Y444_16LE: |
7949 | 0 | case GST_VIDEO_FORMAT_P016_BE: |
7950 | 0 | case GST_VIDEO_FORMAT_P016_LE: |
7951 | 0 | case GST_VIDEO_FORMAT_P012_BE: |
7952 | 0 | case GST_VIDEO_FORMAT_P012_LE: |
7953 | 0 | case GST_VIDEO_FORMAT_Y212_BE: |
7954 | 0 | case GST_VIDEO_FORMAT_Y212_LE: |
7955 | 0 | case GST_VIDEO_FORMAT_Y216_BE: |
7956 | 0 | case GST_VIDEO_FORMAT_Y216_LE: |
7957 | 0 | case GST_VIDEO_FORMAT_Y412_BE: |
7958 | 0 | case GST_VIDEO_FORMAT_Y412_LE: |
7959 | 0 | case GST_VIDEO_FORMAT_Y416_BE: |
7960 | 0 | case GST_VIDEO_FORMAT_Y416_LE: |
7961 | 0 | case GST_VIDEO_FORMAT_NV12_8L128: |
7962 | 0 | case GST_VIDEO_FORMAT_NV12_10BE_8L128: |
7963 | 0 | case GST_VIDEO_FORMAT_NV12_10LE40_4L4: |
7964 | 0 | case GST_VIDEO_FORMAT_DMA_DRM: |
7965 | 0 | case GST_VIDEO_FORMAT_MT2110T: |
7966 | 0 | case GST_VIDEO_FORMAT_MT2110R: |
7967 | 0 | res = format; |
7968 | 0 | g_assert_not_reached (); |
7969 | 0 | break; |
7970 | 0 | } |
7971 | 0 | return res; |
7972 | 0 | } |
7973 | | |
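/* get_scale_format() maps each plane of the real format onto a format the
 * scaler kernels understand: 8-bit planar planes scale as GRAY8, 16-bit gray
 * as GRAY16_BE, packed formats as themselves, and planes made of 2-byte
 * groups (the NV12-style UV planes and the 15/16-bit packed RGB formats,
 * which setup_scale() only accepts with nearest resampling) as NV12.  For
 * example NV16 scales plane 0 as GRAY8 and its interleaved UV plane as NV12.
 * The formats in the last case group are never expected on this path and
 * trigger g_assert_not_reached().
 */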
7974 | | static gboolean |
7975 | | is_merge_yuv (GstVideoInfo * info) |
7976 | 0 | { |
7977 | 0 | switch (GST_VIDEO_INFO_FORMAT (info)) { |
7978 | 0 | case GST_VIDEO_FORMAT_YUY2: |
7979 | 0 | case GST_VIDEO_FORMAT_YVYU: |
7980 | 0 | case GST_VIDEO_FORMAT_UYVY: |
7981 | 0 | case GST_VIDEO_FORMAT_VYUY: |
7982 | 0 | return TRUE; |
7983 | 0 | default: |
7984 | 0 | return FALSE; |
7985 | 0 | } |
7986 | 0 | } |
7987 | | |
7988 | | static gboolean |
7989 | | setup_scale (GstVideoConverter * convert) |
7990 | 0 | { |
7991 | 0 | int i, n_planes; |
7992 | 0 | gint method, cr_method, in_width, in_height, out_width, out_height; |
7993 | 0 | guint taps; |
7994 | 0 | GstVideoInfo *in_info, *out_info; |
7995 | 0 | const GstVideoFormatInfo *in_finfo, *out_finfo; |
7996 | 0 | GstVideoFormat in_format, out_format; |
7997 | 0 | gboolean interlaced; |
7998 | 0 | guint n_threads = convert->conversion_runner->n_threads; |
7999 | |
|
8000 | 0 | in_info = &convert->in_info; |
8001 | 0 | out_info = &convert->out_info; |
8002 | |
|
8003 | 0 | in_finfo = in_info->finfo; |
8004 | 0 | out_finfo = out_info->finfo; |
8005 | |
|
8006 | 0 | n_planes = GST_VIDEO_INFO_N_PLANES (out_info); |
8007 | |
|
8008 | 0 | interlaced = GST_VIDEO_INFO_IS_INTERLACED (&convert->in_info) |
8009 | 0 | && GST_VIDEO_INFO_INTERLACE_MODE (&convert->in_info) != |
8010 | 0 | GST_VIDEO_INTERLACE_MODE_ALTERNATE; |
8011 | |
|
8012 | 0 | method = GET_OPT_RESAMPLER_METHOD (convert); |
8013 | 0 | if (method == GST_VIDEO_RESAMPLER_METHOD_NEAREST) |
8014 | 0 | cr_method = method; |
8015 | 0 | else |
8016 | 0 | cr_method = GET_OPT_CHROMA_RESAMPLER_METHOD (convert); |
8017 | 0 | taps = GET_OPT_RESAMPLER_TAPS (convert); |
8018 | |
|
8019 | 0 | in_format = GST_VIDEO_INFO_FORMAT (in_info); |
8020 | 0 | out_format = GST_VIDEO_INFO_FORMAT (out_info); |
8021 | |
|
8022 | 0 | switch (in_format) { |
8023 | 0 | case GST_VIDEO_FORMAT_RGB15: |
8024 | 0 | case GST_VIDEO_FORMAT_RGB16: |
8025 | 0 | case GST_VIDEO_FORMAT_BGR15: |
8026 | 0 | case GST_VIDEO_FORMAT_BGR16: |
8027 | 0 | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
8028 | 0 | case GST_VIDEO_FORMAT_GRAY16_BE: |
8029 | | #else |
8030 | | case GST_VIDEO_FORMAT_GRAY16_LE: |
8031 | | #endif |
8032 | 0 | if (method != GST_VIDEO_RESAMPLER_METHOD_NEAREST) { |
8033 | 0 | GST_LOG ("%s only with nearest resampling", |
8034 | 0 | gst_video_format_to_string (in_format)); |
8035 | 0 | return FALSE; |
8036 | 0 | } |
8037 | 0 | break; |
8038 | 0 | default: |
8039 | 0 | break; |
8040 | 0 | } |
8041 | | |
8042 | 0 | in_width = convert->in_width; |
8043 | 0 | in_height = convert->in_height; |
8044 | 0 | out_width = convert->out_width; |
8045 | 0 | out_height = convert->out_height; |
8046 | |
|
8047 | 0 | if (n_planes == 1 && !GST_VIDEO_FORMAT_INFO_IS_GRAY (out_finfo)) { |
8048 | 0 | gint pstride; |
8049 | 0 | guint j; |
8050 | |
|
8051 | 0 | if (is_merge_yuv (in_info)) { |
8052 | 0 | GstVideoScaler *y_scaler, *uv_scaler; |
8053 | |
|
8054 | 0 | if (in_width != out_width) { |
8055 | 0 | convert->fh_scaler[0].scaler = g_new (GstVideoScaler *, n_threads); |
8056 | 0 | for (j = 0; j < n_threads; j++) { |
8057 | 0 | y_scaler = |
8058 | 0 | gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_NONE, taps, |
8059 | 0 | GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (in_finfo, GST_VIDEO_COMP_Y, |
8060 | 0 | in_width), GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, |
8061 | 0 | GST_VIDEO_COMP_Y, out_width), convert->config); |
8062 | 0 | uv_scaler = |
8063 | 0 | gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_NONE, |
8064 | 0 | gst_video_scaler_get_max_taps (y_scaler), |
8065 | 0 | GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (in_finfo, GST_VIDEO_COMP_U, |
8066 | 0 | in_width), GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, |
8067 | 0 | GST_VIDEO_COMP_U, out_width), convert->config); |
8068 | |
|
8069 | 0 | convert->fh_scaler[0].scaler[j] = |
8070 | 0 | gst_video_scaler_combine_packed_YUV (y_scaler, uv_scaler, |
8071 | 0 | in_format, out_format); |
8072 | |
|
8073 | 0 | gst_video_scaler_free (y_scaler); |
8074 | 0 | gst_video_scaler_free (uv_scaler); |
8075 | 0 | } |
8076 | 0 | } else { |
8077 | 0 | convert->fh_scaler[0].scaler = NULL; |
8078 | 0 | } |
8079 | |
|
8080 | 0 | pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (out_finfo, GST_VIDEO_COMP_Y); |
8081 | 0 | convert->fin_x[0] = GST_ROUND_UP_2 (convert->in_x) * pstride; |
8082 | 0 | convert->fout_x[0] = GST_ROUND_UP_2 (convert->out_x) * pstride; |
8083 | |
|
8084 | 0 | } else { |
8085 | 0 | if (in_width != out_width && in_width != 0 && out_width != 0) { |
8086 | 0 | convert->fh_scaler[0].scaler = g_new (GstVideoScaler *, n_threads); |
8087 | 0 | for (j = 0; j < n_threads; j++) { |
8088 | 0 | convert->fh_scaler[0].scaler[j] = |
8089 | 0 | gst_video_scaler_new (method, GST_VIDEO_SCALER_FLAG_NONE, taps, |
8090 | 0 | in_width, out_width, convert->config); |
8091 | 0 | } |
8092 | 0 | } else { |
8093 | 0 | convert->fh_scaler[0].scaler = NULL; |
8094 | 0 | } |
8095 | |
|
8096 | 0 | pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (out_finfo, GST_VIDEO_COMP_R); |
8097 | 0 | convert->fin_x[0] = convert->in_x * pstride; |
8098 | 0 | convert->fout_x[0] = convert->out_x * pstride; |
8099 | 0 | } |
8100 | |
|
8101 | 0 | if (in_height != out_height && in_height != 0 && out_height != 0) { |
8102 | 0 | convert->fv_scaler[0].scaler = g_new (GstVideoScaler *, n_threads); |
8103 | |
|
8104 | 0 | for (j = 0; j < n_threads; j++) { |
8105 | 0 | convert->fv_scaler[0].scaler[j] = |
8106 | 0 | gst_video_scaler_new (method, |
8107 | 0 | interlaced ? |
8108 | 0 | GST_VIDEO_SCALER_FLAG_INTERLACED : GST_VIDEO_SCALER_FLAG_NONE, taps, |
8109 | 0 | in_height, out_height, convert->config); |
8110 | 0 | } |
8111 | 0 | } else { |
8112 | 0 | convert->fv_scaler[0].scaler = NULL; |
8113 | 0 | } |
8114 | |
|
8115 | 0 | convert->fin_y[0] = convert->in_y; |
8116 | 0 | convert->fout_y[0] = convert->out_y; |
8117 | 0 | convert->fout_width[0] = out_width; |
8118 | 0 | convert->fout_height[0] = out_height; |
8119 | 0 | convert->fconvert[0] = convert_plane_hv; |
8120 | 0 | convert->fformat[0] = get_scale_format (in_format, 0); |
8121 | 0 | convert->fsplane[0] = 0; |
8122 | 0 | } else { |
8123 | 0 | for (i = 0; i < n_planes; i++) { |
8124 | 0 | gint out_comp[GST_VIDEO_MAX_COMPONENTS]; |
8125 | 0 | gint comp, j, iw, ih, ow, oh, pstride; |
8126 | 0 | gboolean need_v_scaler, need_h_scaler; |
8127 | 0 | GstStructure *config; |
8128 | 0 | gint resample_method; |
8129 | |
|
8130 | 0 | gst_video_format_info_component (out_finfo, i, out_comp); |
8131 | 0 | ow = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, out_comp[0], |
8132 | 0 | out_width); |
8133 | 0 | oh = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, out_comp[0], |
8134 | 0 | out_height); |
8135 | 0 | pstride = GST_VIDEO_FORMAT_INFO_PSTRIDE (out_finfo, out_comp[0]); |
8136 | | |
8137 | | /* find the component in this plane and map it to the plane of |
8138 | | * the source */ |
8139 | 0 | if (out_comp[0] < GST_VIDEO_FORMAT_INFO_N_COMPONENTS (in_finfo)) { |
8140 | 0 | comp = out_comp[0]; |
8141 | 0 | iw = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (in_finfo, comp, in_width); |
8142 | 0 | ih = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (in_finfo, comp, in_height); |
8143 | 0 | convert->fin_x[i] = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (in_finfo, comp, |
8144 | 0 | convert->in_x); |
8145 | 0 | convert->fin_x[i] *= pstride; |
8146 | 0 | convert->fin_y[i] = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (in_finfo, comp, |
8147 | 0 | convert->in_y); |
8148 | 0 | } else { |
8150 | | /* the input has no matching component; fill the plane instead and
8151 | | * set the source parameters to invalid values to make that obvious */
8151 | 0 | comp = -1; |
8152 | 0 | iw = ih = -1; |
8153 | 0 | convert->fin_x[i] = -1; |
8154 | 0 | convert->fin_y[i] = -1; |
8155 | 0 | } |
8156 | |
|
8157 | 0 | convert->fout_width[i] = ow; |
8158 | 0 | convert->fout_height[i] = oh; |
8159 | |
|
8160 | 0 | convert->fout_x[i] = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (out_finfo, |
8161 | 0 | out_comp[0], convert->out_x); |
8162 | 0 | convert->fout_x[i] *= pstride; |
8163 | 0 | convert->fout_y[i] = GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (out_finfo, |
8164 | 0 | out_comp[0], convert->out_y); |
8165 | |
|
8166 | 0 | GST_LOG ("plane %d: %dx%d -> %dx%d", i, iw, ih, ow, oh); |
8167 | 0 | GST_LOG ("plane %d: pstride %d", i, pstride); |
8168 | 0 | GST_LOG ("plane %d: in_x %d, in_y %d", i, convert->fin_x[i], |
8169 | 0 | convert->fin_y[i]); |
8170 | 0 | GST_LOG ("plane %d: out_x %d, out_y %d", i, convert->fout_x[i], |
8171 | 0 | convert->fout_y[i]); |
8172 | |
|
8173 | 0 | if (comp == -1) { |
8174 | 0 | convert->fconvert[i] = convert_plane_fill; |
8175 | 0 | if (GST_VIDEO_INFO_IS_YUV (out_info)) { |
8176 | 0 | if (i == 3) |
8177 | 0 | convert->ffill[i] = convert->alpha_value; |
8179 | 0 | else if (i == 0)
8179 | 0 | convert->ffill[i] = 0x00; |
8180 | 0 | else |
8181 | 0 | convert->ffill[i] = 0x80; |
8182 | 0 | } else { |
8183 | 0 | if (i == 3) |
8184 | 0 | convert->ffill[i] = convert->alpha_value; |
8185 | 0 | else |
8186 | 0 | convert->ffill[i] = 0x00; |
8187 | 0 | } |
8188 | 0 | GST_LOG ("plane %d fill %02x", i, convert->ffill[i]); |
8189 | 0 | continue; |
8190 | 0 | } else { |
8191 | 0 | convert->fsplane[i] = GST_VIDEO_FORMAT_INFO_PLANE (in_finfo, comp); |
8192 | 0 | GST_LOG ("plane %d -> %d (comp %d)", i, convert->fsplane[i], comp); |
8193 | 0 | } |
8194 | | |
8195 | 0 | config = gst_structure_copy (convert->config); |
8196 | |
|
8197 | 0 | resample_method = (i == 0 ? method : cr_method); |
8198 | |
|
8199 | 0 | need_v_scaler = FALSE; |
8200 | 0 | need_h_scaler = FALSE; |
8201 | 0 | if (iw == ow) { |
8202 | 0 | if (!interlaced && ih == oh) { |
8203 | 0 | convert->fconvert[i] = convert_plane_hv; |
8204 | 0 | GST_LOG ("plane %d: copy", i); |
8205 | 0 | } else if (!interlaced && ih == 2 * oh && pstride == 1 |
8206 | 0 | && resample_method == GST_VIDEO_RESAMPLER_METHOD_LINEAR) { |
8207 | 0 | convert->fconvert[i] = convert_plane_v_halve; |
8208 | 0 | GST_LOG ("plane %d: vertical halve", i); |
8209 | 0 | } else if (!interlaced && 2 * ih == oh && pstride == 1 |
8210 | 0 | && resample_method == GST_VIDEO_RESAMPLER_METHOD_NEAREST) { |
8211 | 0 | convert->fconvert[i] = convert_plane_v_double; |
8212 | 0 | GST_LOG ("plane %d: vertical double", i); |
8213 | 0 | } else { |
8214 | 0 | convert->fconvert[i] = convert_plane_hv; |
8215 | 0 | GST_LOG ("plane %d: vertical scale", i); |
8216 | 0 | need_v_scaler = TRUE; |
8217 | 0 | } |
8218 | 0 | } else if (ih == oh) { |
8219 | 0 | if (!interlaced && iw == 2 * ow && pstride == 1 |
8220 | 0 | && resample_method == GST_VIDEO_RESAMPLER_METHOD_LINEAR) { |
8221 | 0 | convert->fconvert[i] = convert_plane_h_halve; |
8222 | 0 | GST_LOG ("plane %d: horizontal halve", i); |
8223 | 0 | } else if (!interlaced && 2 * iw == ow && pstride == 1 |
8224 | 0 | && resample_method == GST_VIDEO_RESAMPLER_METHOD_NEAREST) { |
8225 | 0 | convert->fconvert[i] = convert_plane_h_double; |
8226 | 0 | GST_LOG ("plane %d: horizontal double", i); |
8227 | 0 | } else { |
8228 | 0 | convert->fconvert[i] = convert_plane_hv; |
8229 | 0 | GST_LOG ("plane %d: horizontal scale", i); |
8230 | 0 | need_h_scaler = TRUE; |
8231 | 0 | } |
8232 | 0 | } else { |
8233 | 0 | if (!interlaced && iw == 2 * ow && ih == 2 * oh && pstride == 1 |
8234 | 0 | && resample_method == GST_VIDEO_RESAMPLER_METHOD_LINEAR) { |
8235 | 0 | convert->fconvert[i] = convert_plane_hv_halve; |
8236 | 0 | GST_LOG ("plane %d: horizontal/vertical halve", i); |
8237 | 0 | } else if (!interlaced && 2 * iw == ow && 2 * ih == oh && pstride == 1 |
8238 | 0 | && resample_method == GST_VIDEO_RESAMPLER_METHOD_NEAREST) { |
8239 | 0 | convert->fconvert[i] = convert_plane_hv_double; |
8240 | 0 | GST_LOG ("plane %d: horizontal/vertical double", i); |
8241 | 0 | } else { |
8242 | 0 | convert->fconvert[i] = convert_plane_hv; |
8243 | 0 | GST_LOG ("plane %d: horizontal/vertical scale", i); |
8244 | 0 | need_v_scaler = TRUE; |
8245 | 0 | need_h_scaler = TRUE; |
8246 | 0 | } |
8247 | 0 | } |
8248 | |
|
8249 | 0 | if (need_h_scaler && iw != 0 && ow != 0) { |
8250 | 0 | convert->fh_scaler[i].scaler = g_new (GstVideoScaler *, n_threads); |
8251 | |
|
8252 | 0 | for (j = 0; j < n_threads; j++) { |
8253 | 0 | convert->fh_scaler[i].scaler[j] = |
8254 | 0 | gst_video_scaler_new (resample_method, GST_VIDEO_SCALER_FLAG_NONE, |
8255 | 0 | taps, iw, ow, config); |
8256 | 0 | } |
8257 | 0 | } else { |
8258 | 0 | convert->fh_scaler[i].scaler = NULL; |
8259 | 0 | } |
8260 | |
|
8261 | 0 | if (need_v_scaler && ih != 0 && oh != 0) { |
8262 | 0 | convert->fv_scaler[i].scaler = g_new (GstVideoScaler *, n_threads); |
8263 | |
|
8264 | 0 | for (j = 0; j < n_threads; j++) { |
8265 | 0 | convert->fv_scaler[i].scaler[j] = |
8266 | 0 | gst_video_scaler_new (resample_method, |
8267 | 0 | interlaced ? |
8268 | 0 | GST_VIDEO_SCALER_FLAG_INTERLACED : GST_VIDEO_SCALER_FLAG_NONE, |
8269 | 0 | taps, ih, oh, config); |
8270 | 0 | } |
8271 | 0 | } else { |
8272 | 0 | convert->fv_scaler[i].scaler = NULL; |
8273 | 0 | } |
8274 | |
|
8275 | 0 | gst_structure_free (config); |
8276 | 0 | convert->fformat[i] = get_scale_format (in_format, i); |
8277 | 0 | } |
8278 | 0 | } |
8279 | |
|
8280 | 0 | return TRUE; |
8281 | 0 | } |
8282 | | |
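/* setup_scale() decides per output plane which of the helpers above to use.
 * Exact 1:1, 1:2 and 2:1 integer ratios with a packed stride of 1 and a
 * matching resampler method take the dedicated copy/double/halve paths,
 * everything else falls back to convert_plane_hv() with per-thread
 * GstVideoScaler instances.  Roughly:
 *
 *   iw == ow && ih == oh                  -> convert_plane_hv (plain copy)
 *   ih == 2 * oh  (linear resampling)     -> convert_plane_v_halve
 *   2 * ih == oh  (nearest resampling)    -> convert_plane_v_double
 *   ... the same pattern horizontally and in both directions ...
 *   anything else                         -> convert_plane_hv + scalers
 */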
8283 | | /* Fast paths */ |
8284 | | |
8285 | | typedef struct |
8286 | | { |
8287 | | GstVideoFormat in_format; |
8288 | | GstVideoFormat out_format; |
8289 | | gboolean keeps_interlaced; |
8290 | | gboolean needs_color_matrix; |
8291 | | gboolean keeps_size; |
8292 | | gboolean do_crop; |
8293 | | gboolean do_border; |
8294 | | gboolean alpha_copy; |
8295 | | gboolean alpha_set; |
8296 | | gboolean alpha_mult; |
8297 | | gint width_align, height_align; |
8298 | | void (*convert) (GstVideoConverter * convert, const GstVideoFrame * src, |
8299 | | GstVideoFrame * dest); |
8300 | | } VideoTransform; |
8301 | | |
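/* Each row of transforms[] below matches the VideoTransform fields in order:
 * in/out format, keeps_interlaced, needs_color_matrix, keeps_size, do_crop,
 * do_border, alpha_copy, alpha_set, alpha_mult, width/height alignment and
 * the conversion function.  The first row therefore reads as: I420 to YUY2
 * keeps interlacing and size, needs no color matrix, cannot crop or add a
 * border, ignores alpha, has no alignment requirement and is handled by
 * convert_I420_YUY2().
 */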
8302 | | static const VideoTransform transforms[] = { |
8303 | | /* planar -> packed */ |
8304 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, FALSE, |
8305 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_YUY2}, |
8306 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, FALSE, |
8307 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_UYVY}, |
8308 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, FALSE, |
8309 | | FALSE, FALSE, TRUE, FALSE, 0, 0, convert_I420_AYUV}, |
8310 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, FALSE, |
8311 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_v210}, |
8312 | | |
8313 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, FALSE, |
8314 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_YUY2}, |
8315 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, FALSE, |
8316 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_UYVY}, |
8317 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, FALSE, |
8318 | | FALSE, FALSE, TRUE, FALSE, 0, 0, convert_I420_AYUV}, |
8319 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, FALSE, |
8320 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_v210}, |
8321 | | |
8322 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, TRUE, |
8323 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_Y42B_YUY2}, |
8324 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, TRUE, |
8325 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_Y42B_UYVY}, |
8326 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, TRUE, |
8327 | | TRUE, FALSE, TRUE, FALSE, 1, 0, convert_Y42B_AYUV}, |
8328 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, TRUE, |
8329 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_Y42B_v210}, |
8330 | | |
8331 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, TRUE, |
8332 | | TRUE, FALSE, FALSE, FALSE, 1, 0, convert_Y444_YUY2}, |
8333 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, TRUE, |
8334 | | TRUE, FALSE, FALSE, FALSE, 1, 0, convert_Y444_UYVY}, |
8335 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, TRUE, |
8336 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_Y444_AYUV}, |
8337 | | |
8338 | | /* packed -> packed */ |
8339 | | {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, FALSE, TRUE, |
8340 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8341 | | {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, TRUE, |
8342 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_YUY2}, /* alias */ |
8343 | | {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, TRUE, |
8344 | | TRUE, FALSE, TRUE, FALSE, 1, 0, convert_YUY2_AYUV}, |
8345 | | {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, FALSE, |
8346 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_v210}, |
8347 | | |
8348 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, FALSE, TRUE, |
8349 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8350 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, TRUE, |
8351 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_YUY2}, |
8352 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, TRUE, TRUE, |
8353 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_UYVY_AYUV}, |
8354 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, FALSE, |
8355 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_v210}, |
8356 | | |
8357 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_AYUV, TRUE, FALSE, FALSE, TRUE, TRUE, |
8358 | | TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8359 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, TRUE, |
8360 | | TRUE, FALSE, FALSE, FALSE, 1, 0, convert_AYUV_YUY2}, |
8361 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, TRUE, |
8362 | | TRUE, FALSE, FALSE, FALSE, 1, 0, convert_AYUV_UYVY}, |
8363 | | |
8364 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_UYVY, TRUE, FALSE, TRUE, FALSE, |
8365 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_v210_UYVY}, |
8366 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_YUY2, TRUE, FALSE, TRUE, FALSE, |
8367 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_v210_YUY2}, |
8368 | | |
8369 | | /* packed -> planar */ |
8370 | | {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_I420, TRUE, FALSE, TRUE, FALSE, |
8371 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_I420}, |
8372 | | {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, TRUE, FALSE, |
8373 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_I420}, |
8374 | | {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, TRUE, TRUE, |
8375 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_Y42B}, |
8376 | | {GST_VIDEO_FORMAT_YUY2, GST_VIDEO_FORMAT_Y444, TRUE, FALSE, TRUE, TRUE, |
8377 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_YUY2_Y444}, |
8378 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_GRAY8, TRUE, TRUE, TRUE, TRUE, |
8379 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_GRAY8}, |
8380 | | |
8381 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_I420, TRUE, FALSE, TRUE, FALSE, |
8382 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_I420}, |
8383 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, TRUE, FALSE, |
8384 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_I420}, |
8385 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, TRUE, TRUE, |
8386 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_Y42B}, |
8387 | | {GST_VIDEO_FORMAT_UYVY, GST_VIDEO_FORMAT_Y444, TRUE, FALSE, TRUE, TRUE, |
8388 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_UYVY_Y444}, |
8389 | | |
8390 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_I420, FALSE, FALSE, TRUE, TRUE, |
8391 | | TRUE, FALSE, FALSE, FALSE, 1, 1, convert_AYUV_I420}, |
8392 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, TRUE, TRUE, |
8393 | | TRUE, FALSE, FALSE, FALSE, 1, 1, convert_AYUV_I420}, |
8394 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, TRUE, TRUE, |
8395 | | TRUE, FALSE, FALSE, FALSE, 1, 0, convert_AYUV_Y42B}, |
8396 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_Y444, TRUE, FALSE, TRUE, TRUE, |
8397 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_AYUV_Y444}, |
8398 | | |
8399 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_I420, TRUE, FALSE, TRUE, FALSE, |
8400 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_v210_I420}, |
8401 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, TRUE, FALSE, |
8402 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_v210_I420}, |
8403 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, TRUE, FALSE, |
8404 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_v210_Y42B}, |
8405 | | |
8406 | | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
8407 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_I420_10LE, TRUE, FALSE, TRUE, FALSE, |
8408 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_v210_I420_10}, |
8409 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_I422_10LE, TRUE, FALSE, TRUE, FALSE, |
8410 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_v210_I422_10}, |
8411 | | {GST_VIDEO_FORMAT_I420_10LE, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, FALSE, |
8412 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_10_v210}, |
8413 | | {GST_VIDEO_FORMAT_I422_10LE, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, TRUE, |
8414 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I422_10_v210}, |
8415 | | #else |
8416 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_I420_10BE, TRUE, FALSE, TRUE, FALSE, |
8417 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_v210_I420_10}, |
8418 | | {GST_VIDEO_FORMAT_v210, GST_VIDEO_FORMAT_I422_10BE, TRUE, FALSE, TRUE, FALSE, |
8419 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_v210_I422_10}, |
8420 | | {GST_VIDEO_FORMAT_I420_10BE, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, FALSE, |
8421 | | FALSE, FALSE, FALSE, FALSE, 0, 0, convert_I420_10_v210}, |
8422 | | {GST_VIDEO_FORMAT_I422_10BE, GST_VIDEO_FORMAT_v210, TRUE, FALSE, TRUE, TRUE, |
8423 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I422_10_v210}, |
8424 | | #endif |
8425 | | |
8426 | | /* planar -> planar */ |
8427 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_I420, TRUE, FALSE, FALSE, TRUE, |
8428 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8429 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, FALSE, TRUE, |
8430 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8431 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE, |
8432 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8433 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE, |
8434 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8435 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE, |
8436 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8437 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE, |
8438 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8439 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE, |
8440 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes}, |
8441 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE, |
8442 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8443 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE, |
8444 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8445 | | |
8446 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_I420, TRUE, FALSE, FALSE, TRUE, |
8447 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8448 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YV12, TRUE, FALSE, FALSE, TRUE, |
8449 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8450 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE, |
8451 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8452 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE, |
8453 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8454 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE, |
8455 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8456 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE, |
8457 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8458 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE, |
8459 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes}, |
8460 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE, |
8461 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8462 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE, |
8463 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8464 | | |
8465 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE, |
8466 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8467 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE, |
8468 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8469 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_Y41B, TRUE, FALSE, FALSE, TRUE, |
8470 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8471 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE, |
8472 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8473 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE, |
8474 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8475 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE, |
8476 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8477 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE, |
8478 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes}, |
8479 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE, |
8480 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8481 | | {GST_VIDEO_FORMAT_Y41B, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE, |
8482 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8483 | | |
8484 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE, |
8485 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8486 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE, |
8487 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8488 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE, |
8489 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8490 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_Y42B, TRUE, FALSE, FALSE, TRUE, |
8491 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8492 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE, |
8493 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8494 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE, |
8495 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8496 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE, |
8497 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes}, |
8498 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE, |
8499 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8500 | | {GST_VIDEO_FORMAT_Y42B, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE, |
8501 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8502 | | |
8503 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE, |
8504 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8505 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE, |
8506 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8507 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE, |
8508 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8509 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE, |
8510 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8511 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_Y444, TRUE, FALSE, FALSE, TRUE, |
8512 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8513 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE, |
8514 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8515 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE, |
8516 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes}, |
8517 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE, |
8518 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8519 | | {GST_VIDEO_FORMAT_Y444, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE, |
8520 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8521 | | |
8522 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE, |
8523 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8524 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE, |
8525 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8526 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE, |
8527 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8528 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE, |
8529 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8530 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE, |
8531 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8532 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_GRAY8, TRUE, FALSE, FALSE, TRUE, |
8533 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8534 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE, |
8535 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes}, |
8536 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE, |
8537 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8538 | | {GST_VIDEO_FORMAT_GRAY8, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE, |
8539 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8540 | | |
8541 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE, |
8542 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8543 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE, |
8544 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8545 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE, |
8546 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8547 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE, |
8548 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8549 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE, |
8550 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8551 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE, |
8552 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8553 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_A420, TRUE, FALSE, FALSE, TRUE, |
8554 | | TRUE, TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8555 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_YUV9, FALSE, FALSE, FALSE, TRUE, |
8556 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8557 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_YVU9, FALSE, FALSE, FALSE, TRUE, |
8558 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8559 | | |
8560 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE, |
8561 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8562 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE, |
8563 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8564 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE, |
8565 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8566 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE, |
8567 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8568 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE, |
8569 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8570 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE, |
8571 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8572 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE, |
8573 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes}, |
8574 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_YUV9, TRUE, FALSE, FALSE, TRUE, |
8575 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8576 | | {GST_VIDEO_FORMAT_YUV9, GST_VIDEO_FORMAT_YVU9, TRUE, FALSE, FALSE, TRUE, |
8577 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8578 | | |
8579 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_I420, FALSE, FALSE, FALSE, TRUE, |
8580 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8581 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_YV12, FALSE, FALSE, FALSE, TRUE, |
8582 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8583 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_Y41B, FALSE, FALSE, FALSE, TRUE, |
8584 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8585 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_Y42B, FALSE, FALSE, FALSE, TRUE, |
8586 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8587 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_Y444, FALSE, FALSE, FALSE, TRUE, |
8588 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8589 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_GRAY8, FALSE, FALSE, FALSE, TRUE, |
8590 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8591 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_A420, FALSE, FALSE, FALSE, TRUE, |
8592 | | TRUE, FALSE, TRUE, FALSE, 0, 0, convert_scale_planes}, |
8593 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_YUV9, TRUE, FALSE, FALSE, TRUE, |
8594 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8595 | | {GST_VIDEO_FORMAT_YVU9, GST_VIDEO_FORMAT_YVU9, TRUE, FALSE, FALSE, TRUE, |
8596 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8597 | | |
8598 | | /* semiplanar -> semiplanar */
8599 | | {GST_VIDEO_FORMAT_NV12, GST_VIDEO_FORMAT_NV12, TRUE, FALSE, FALSE, TRUE, |
8600 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8601 | | {GST_VIDEO_FORMAT_NV12, GST_VIDEO_FORMAT_NV16, TRUE, FALSE, FALSE, TRUE, |
8602 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8603 | | {GST_VIDEO_FORMAT_NV12, GST_VIDEO_FORMAT_NV24, TRUE, FALSE, FALSE, TRUE, |
8604 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8605 | | |
8606 | | {GST_VIDEO_FORMAT_NV21, GST_VIDEO_FORMAT_NV21, TRUE, FALSE, FALSE, TRUE, |
8607 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8608 | | |
8609 | | {GST_VIDEO_FORMAT_NV16, GST_VIDEO_FORMAT_NV12, TRUE, FALSE, FALSE, TRUE, |
8610 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8611 | | {GST_VIDEO_FORMAT_NV16, GST_VIDEO_FORMAT_NV16, TRUE, FALSE, FALSE, TRUE, |
8612 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8613 | | {GST_VIDEO_FORMAT_NV16, GST_VIDEO_FORMAT_NV24, TRUE, FALSE, FALSE, TRUE, |
8614 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8615 | | |
8616 | | {GST_VIDEO_FORMAT_NV61, GST_VIDEO_FORMAT_NV61, TRUE, FALSE, FALSE, TRUE, |
8617 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8618 | | |
8619 | | {GST_VIDEO_FORMAT_NV24, GST_VIDEO_FORMAT_NV12, TRUE, FALSE, FALSE, TRUE, |
8620 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8621 | | {GST_VIDEO_FORMAT_NV24, GST_VIDEO_FORMAT_NV16, TRUE, FALSE, FALSE, TRUE, |
8622 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8623 | | {GST_VIDEO_FORMAT_NV24, GST_VIDEO_FORMAT_NV24, TRUE, FALSE, FALSE, TRUE, |
8624 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8625 | | |
8626 | | #if G_BYTE_ORDER == G_LITTLE_ENDIAN |
8627 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_ARGB, TRUE, TRUE, TRUE, TRUE, TRUE, |
8628 | | TRUE, FALSE, FALSE, 0, 0, convert_AYUV_ARGB}, |
8629 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_BGRA, TRUE, TRUE, TRUE, TRUE, TRUE, |
8630 | | TRUE, FALSE, FALSE, 0, 0, convert_AYUV_BGRA}, |
8631 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_xRGB, TRUE, TRUE, TRUE, TRUE, TRUE, |
8632 | | FALSE, FALSE, FALSE, 0, 0, convert_AYUV_ARGB}, /* alias */ |
8633 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_BGRx, TRUE, TRUE, TRUE, TRUE, TRUE, |
8634 | | FALSE, FALSE, FALSE, 0, 0, convert_AYUV_BGRA}, /* alias */ |
8635 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_ABGR, TRUE, TRUE, TRUE, TRUE, TRUE, |
8636 | | TRUE, FALSE, FALSE, 0, 0, convert_AYUV_ABGR}, |
8637 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_RGBA, TRUE, TRUE, TRUE, TRUE, TRUE, |
8638 | | TRUE, FALSE, FALSE, 0, 0, convert_AYUV_RGBA}, |
8639 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_xBGR, TRUE, TRUE, TRUE, TRUE, TRUE, |
8640 | | FALSE, FALSE, FALSE, 0, 0, convert_AYUV_ABGR}, /* alias */ |
8641 | | {GST_VIDEO_FORMAT_AYUV, GST_VIDEO_FORMAT_RGBx, TRUE, TRUE, TRUE, TRUE, TRUE, |
8642 | | FALSE, FALSE, FALSE, 0, 0, convert_AYUV_RGBA}, /* alias */ |
8643 | | #endif |
8644 | | |
8645 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGRA, FALSE, TRUE, TRUE, TRUE, |
8646 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA}, |
8647 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGRx, FALSE, TRUE, TRUE, TRUE, |
8648 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA}, |
8649 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGRA, FALSE, TRUE, TRUE, TRUE, |
8650 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA}, |
8651 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGRx, FALSE, TRUE, TRUE, TRUE, |
8652 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA}, |
8653 | | |
8654 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_ARGB, FALSE, TRUE, TRUE, TRUE, |
8655 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_ARGB}, |
8656 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_xRGB, FALSE, TRUE, TRUE, TRUE, |
8657 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_ARGB}, |
8658 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_ARGB, FALSE, TRUE, TRUE, TRUE, |
8659 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_ARGB}, |
8660 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_xRGB, FALSE, TRUE, TRUE, TRUE, |
8661 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_ARGB}, |
8662 | | |
8663 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_ABGR, FALSE, TRUE, TRUE, TRUE, |
8664 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8665 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_xBGR, FALSE, TRUE, TRUE, TRUE, |
8666 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8667 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGBA, FALSE, TRUE, TRUE, TRUE, |
8668 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8669 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGBx, FALSE, TRUE, TRUE, TRUE, |
8670 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8671 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGB, FALSE, TRUE, TRUE, TRUE, |
8672 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8673 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGR, FALSE, TRUE, TRUE, TRUE, |
8674 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8675 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGB15, FALSE, TRUE, TRUE, TRUE, |
8676 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8677 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGR15, FALSE, TRUE, TRUE, TRUE, |
8678 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8679 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_RGB16, FALSE, TRUE, TRUE, TRUE, |
8680 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8681 | | {GST_VIDEO_FORMAT_I420, GST_VIDEO_FORMAT_BGR16, FALSE, TRUE, TRUE, TRUE, |
8682 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8683 | | |
8684 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_ABGR, FALSE, TRUE, TRUE, TRUE, |
8685 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8686 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_xBGR, FALSE, TRUE, TRUE, TRUE, |
8687 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8688 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGBA, FALSE, TRUE, TRUE, TRUE, |
8689 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8690 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGBx, FALSE, TRUE, TRUE, TRUE, |
8691 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8692 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGB, FALSE, TRUE, TRUE, TRUE, |
8693 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8694 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGR, FALSE, TRUE, TRUE, TRUE, |
8695 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8696 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGB15, FALSE, TRUE, TRUE, TRUE, |
8697 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8698 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGR15, FALSE, TRUE, TRUE, TRUE, |
8699 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8700 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_RGB16, FALSE, TRUE, TRUE, TRUE, |
8701 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8702 | | {GST_VIDEO_FORMAT_YV12, GST_VIDEO_FORMAT_BGR16, FALSE, TRUE, TRUE, TRUE, |
8703 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8704 | | |
8705 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_ABGR, FALSE, TRUE, TRUE, TRUE, |
8706 | | TRUE, TRUE, FALSE, FALSE, 0, 0, convert_A420_pack_ARGB}, |
8707 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_RGBA, FALSE, TRUE, TRUE, TRUE, |
8708 | | TRUE, TRUE, FALSE, FALSE, 0, 0, convert_A420_pack_ARGB}, |
8709 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_BGRA, FALSE, TRUE, TRUE, TRUE, |
8710 | | TRUE, TRUE, FALSE, FALSE, 0, 0, convert_A420_BGRA}, |
8711 | | /* A420 to non-alpha RGB formats, reuse I420_* method */ |
8712 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_BGRx, FALSE, TRUE, TRUE, TRUE, |
8713 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_BGRA}, |
8714 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_xBGR, FALSE, TRUE, TRUE, TRUE, |
8715 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8716 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_RGBx, FALSE, TRUE, TRUE, TRUE, |
8717 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8718 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_RGB, FALSE, TRUE, TRUE, TRUE, |
8719 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8720 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_BGR, FALSE, TRUE, TRUE, TRUE, |
8721 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8722 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_RGB15, FALSE, TRUE, TRUE, TRUE, |
8723 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8724 | | {GST_VIDEO_FORMAT_A420, GST_VIDEO_FORMAT_BGR16, FALSE, TRUE, TRUE, TRUE, |
8725 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_I420_pack_ARGB}, |
8726 | | |
8727 | | /* scalers */ |
8728 | | {GST_VIDEO_FORMAT_GBR, GST_VIDEO_FORMAT_GBR, TRUE, FALSE, FALSE, TRUE, |
8729 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8730 | | {GST_VIDEO_FORMAT_GBRA, GST_VIDEO_FORMAT_GBRA, TRUE, FALSE, FALSE, TRUE, |
8731 | | TRUE, TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8732 | | {GST_VIDEO_FORMAT_RGBP, GST_VIDEO_FORMAT_RGBP, TRUE, FALSE, FALSE, TRUE, |
8733 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8734 | | {GST_VIDEO_FORMAT_BGRP, GST_VIDEO_FORMAT_BGRP, TRUE, FALSE, FALSE, TRUE, |
8735 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8736 | | |
8737 | | {GST_VIDEO_FORMAT_YVYU, GST_VIDEO_FORMAT_YVYU, TRUE, FALSE, FALSE, TRUE, |
8738 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8739 | | |
8740 | | {GST_VIDEO_FORMAT_RGB15, GST_VIDEO_FORMAT_RGB15, TRUE, FALSE, FALSE, TRUE, |
8741 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8742 | | {GST_VIDEO_FORMAT_RGB16, GST_VIDEO_FORMAT_RGB16, TRUE, FALSE, FALSE, TRUE, |
8743 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8744 | | {GST_VIDEO_FORMAT_BGR15, GST_VIDEO_FORMAT_BGR15, TRUE, FALSE, FALSE, TRUE, |
8745 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8746 | | {GST_VIDEO_FORMAT_BGR16, GST_VIDEO_FORMAT_BGR16, TRUE, FALSE, FALSE, TRUE, |
8747 | | TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8748 | | |
8749 | | {GST_VIDEO_FORMAT_RGB, GST_VIDEO_FORMAT_RGB, TRUE, FALSE, FALSE, TRUE, TRUE, |
8750 | | FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8751 | | {GST_VIDEO_FORMAT_BGR, GST_VIDEO_FORMAT_BGR, TRUE, FALSE, FALSE, TRUE, TRUE, |
8752 | | FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8753 | | {GST_VIDEO_FORMAT_v308, GST_VIDEO_FORMAT_v308, TRUE, FALSE, FALSE, TRUE, TRUE, |
8754 | | FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8755 | | {GST_VIDEO_FORMAT_IYU2, GST_VIDEO_FORMAT_IYU2, TRUE, FALSE, FALSE, TRUE, TRUE, |
8756 | | FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8757 | | |
8758 | | {GST_VIDEO_FORMAT_ARGB, GST_VIDEO_FORMAT_ARGB, TRUE, FALSE, FALSE, TRUE, TRUE, |
8759 | | TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8760 | | {GST_VIDEO_FORMAT_xRGB, GST_VIDEO_FORMAT_xRGB, TRUE, FALSE, FALSE, TRUE, TRUE, |
8761 | | FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8762 | | {GST_VIDEO_FORMAT_ABGR, GST_VIDEO_FORMAT_ABGR, TRUE, FALSE, FALSE, TRUE, TRUE, |
8763 | | TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8764 | | {GST_VIDEO_FORMAT_xBGR, GST_VIDEO_FORMAT_xBGR, TRUE, FALSE, FALSE, TRUE, TRUE, |
8765 | | FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8766 | | {GST_VIDEO_FORMAT_RGBA, GST_VIDEO_FORMAT_RGBA, TRUE, FALSE, FALSE, TRUE, TRUE, |
8767 | | TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8768 | | {GST_VIDEO_FORMAT_RGBx, GST_VIDEO_FORMAT_RGBx, TRUE, FALSE, FALSE, TRUE, TRUE, |
8769 | | FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8770 | | {GST_VIDEO_FORMAT_BGRA, GST_VIDEO_FORMAT_BGRA, TRUE, FALSE, FALSE, TRUE, TRUE, |
8771 | | TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8772 | | {GST_VIDEO_FORMAT_BGRx, GST_VIDEO_FORMAT_BGRx, TRUE, FALSE, FALSE, TRUE, TRUE, |
8773 | | FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8774 | | |
8775 | | {GST_VIDEO_FORMAT_ARGB64, GST_VIDEO_FORMAT_ARGB64, TRUE, FALSE, FALSE, TRUE, |
8776 | | TRUE, TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8777 | | {GST_VIDEO_FORMAT_AYUV64, GST_VIDEO_FORMAT_AYUV64, TRUE, FALSE, FALSE, TRUE, |
8778 | | TRUE, TRUE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8779 | | |
8780 | | {GST_VIDEO_FORMAT_GRAY16_LE, GST_VIDEO_FORMAT_GRAY16_LE, TRUE, FALSE, FALSE, |
8781 | | TRUE, TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8782 | | {GST_VIDEO_FORMAT_GRAY16_BE, GST_VIDEO_FORMAT_GRAY16_BE, TRUE, FALSE, FALSE, |
8783 | | TRUE, TRUE, FALSE, FALSE, FALSE, 0, 0, convert_scale_planes}, |
8784 | | }; |
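/* Each initializer above follows the field order of the VideoTransform
 * struct declared earlier in this file; a sketch of that order, for
 * readability:
 *
 *   { in_format, out_format,
 *     keeps_interlaced, needs_color_matrix, keeps_size, do_crop, do_border,
 *     alpha_copy, alpha_set, alpha_mult,
 *     width_align, height_align, convert_func }
 *
 * The booleans describe what the fastpath can preserve or handle, the two
 * integers are bitmask alignment requirements on the frame size (0 means
 * none), and the final member is the function implementing the conversion. */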
8785 | | |
8786 | | static gboolean |
8787 | | video_converter_lookup_fastpath (GstVideoConverter * convert) |
8788 | 0 | { |
8789 | 0 | int i; |
8790 | 0 | GstVideoFormat in_format, out_format; |
8791 | 0 | GstVideoTransferFunction in_transf, out_transf; |
8792 | 0 | gboolean interlaced, same_matrix, same_primaries, same_size, crop, border; |
8793 | 0 | gboolean need_copy, need_set, need_mult; |
8794 | 0 | gint width, height; |
8795 | 0 | guint in_bpp, out_bpp; |
8796 | |
8797 | 0 | width = GST_VIDEO_INFO_WIDTH (&convert->in_info); |
8798 | 0 | height = GST_VIDEO_INFO_FIELD_HEIGHT (&convert->in_info); |
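  /* Fastpaths never apply quantization, so bail out if a dither
   * quantization other than 1 (i.e. no quantization) was requested. */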
8799 | |
8800 | 0 | if (GET_OPT_DITHER_QUANTIZATION (convert) != 1) |
8801 | 0 | return FALSE; |
8802 | | |
8803 | 0 | in_bpp = convert->in_info.finfo->bits; |
8804 | 0 | out_bpp = convert->out_info.finfo->bits; |
8805 | | |
8806 | | /* we don't do gamma conversion in fastpath */ |
8807 | 0 | in_transf = convert->in_info.colorimetry.transfer; |
8808 | 0 | out_transf = convert->out_info.colorimetry.transfer; |
8809 | |
8810 | 0 | same_size = (width == convert->out_width && height == convert->out_height); |
8811 | | |
8812 | | /* fastpaths don't do gamma */ |
8813 | 0 | if (CHECK_GAMMA_REMAP (convert) && (!same_size |
8814 | 0 | || !gst_video_transfer_function_is_equivalent (in_transf, in_bpp, |
8815 | 0 | out_transf, out_bpp))) |
8816 | 0 | return FALSE; |
8817 | | |
8818 | 0 | need_copy = (convert->alpha_mode & ALPHA_MODE_COPY) == ALPHA_MODE_COPY; |
8819 | 0 | need_set = (convert->alpha_mode & ALPHA_MODE_SET) == ALPHA_MODE_SET; |
8820 | 0 | need_mult = (convert->alpha_mode & ALPHA_MODE_MULT) == ALPHA_MODE_MULT; |
8821 | 0 | GST_LOG ("alpha copy %d, set %d, mult %d", need_copy, need_set, need_mult); |
8822 | |
8823 | 0 | in_format = GST_VIDEO_INFO_FORMAT (&convert->in_info); |
8824 | 0 | out_format = GST_VIDEO_INFO_FORMAT (&convert->out_info); |
8825 | |
8826 | 0 | if (CHECK_MATRIX_NONE (convert)) { |
8827 | 0 | same_matrix = TRUE; |
8828 | 0 | } else { |
8829 | 0 | GstVideoColorMatrix in_matrix, out_matrix; |
8830 | |
8831 | 0 | in_matrix = convert->in_info.colorimetry.matrix; |
8832 | 0 | out_matrix = convert->out_info.colorimetry.matrix; |
8833 | 0 | same_matrix = in_matrix == out_matrix; |
8834 | 0 | } |
8835 | |
8836 | 0 | if (CHECK_PRIMARIES_NONE (convert)) { |
8837 | 0 | same_primaries = TRUE; |
8838 | 0 | } else { |
8839 | 0 | GstVideoColorPrimaries in_primaries, out_primaries; |
8840 | |
8841 | 0 | in_primaries = convert->in_info.colorimetry.primaries; |
8842 | 0 | out_primaries = convert->out_info.colorimetry.primaries; |
8843 | 0 | same_primaries = gst_video_color_primaries_is_equivalent (in_primaries, |
8844 | 0 | out_primaries); |
8845 | 0 | } |
8846 | |
8847 | 0 | interlaced = GST_VIDEO_INFO_IS_INTERLACED (&convert->in_info); |
8848 | 0 | interlaced |= GST_VIDEO_INFO_IS_INTERLACED (&convert->out_info); |
8849 | |
8850 | 0 | crop = convert->in_x || convert->in_y |
8851 | 0 | || convert->in_width < convert->in_maxwidth |
8852 | 0 | || convert->in_height < convert->in_maxheight; |
8853 | 0 | border = convert->out_x || convert->out_y |
8854 | 0 | || convert->out_width < convert->out_maxwidth |
8855 | 0 | || convert->out_height < convert->out_maxheight; |
8856 | |
8857 | 0 | for (i = 0; i < G_N_ELEMENTS (transforms); i++) { |
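    /* A table row matches when the formats are identical and the row can
     * handle everything this conversion needs: interlaced content, a
     * color matrix/primaries change, scaling, cropping (converting only a
     * sub-rectangle of the input), borders (not filling the whole output)
     * and the requested alpha handling.  width_align/height_align are
     * bitmasks of low bits that must be clear in the frame size, e.g. a
     * value of 1 requires an even width. */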
8858 | 0 | if (transforms[i].in_format == in_format && |
8859 | 0 | transforms[i].out_format == out_format && |
8860 | 0 | (transforms[i].keeps_interlaced || !interlaced) && |
8861 | 0 | (transforms[i].needs_color_matrix || (same_matrix && same_primaries)) |
8862 | 0 | && (!transforms[i].keeps_size || same_size) |
8863 | 0 | && (transforms[i].width_align & width) == 0 |
8864 | 0 | && (transforms[i].height_align & height) == 0 |
8865 | 0 | && (transforms[i].do_crop || !crop) |
8866 | 0 | && (transforms[i].do_border || !border) |
8867 | 0 | && (transforms[i].alpha_copy || !need_copy) |
8868 | 0 | && (transforms[i].alpha_set || !need_set) |
8869 | 0 | && (transforms[i].alpha_mult || !need_mult)) { |
8870 | 0 | guint j; |
8871 | |
8872 | 0 | GST_LOG ("using fastpath"); |
8873 | 0 | if (transforms[i].needs_color_matrix) |
8874 | 0 | video_converter_compute_matrix (convert); |
8875 | 0 | convert->convert = transforms[i].convert; |
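      /* Below: per-thread temporary scanlines of 16-bit values, four
       * components per pixel plus 8 pixels of headroom (presumably slack
       * so the orc kernels can read/write slightly past the row). */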
8876 | |
8877 | 0 | convert->tmpline = |
8878 | 0 | g_new (guint16 *, convert->conversion_runner->n_threads); |
8879 | 0 | for (j = 0; j < convert->conversion_runner->n_threads; j++) |
8880 | 0 | convert->tmpline[j] = g_malloc0 (sizeof (guint16) * (width + 8) * 4); |
8881 | |
8882 | 0 | if (!transforms[i].keeps_size) |
8883 | 0 | if (!setup_scale (convert)) |
8884 | 0 | return FALSE; |
8885 | 0 | if (border) |
8886 | 0 | setup_borderline (convert); |
8887 | 0 | return TRUE; |
8888 | 0 | } |
8889 | 0 | } |
8890 | 0 | GST_LOG ("no fastpath found"); |
8891 | 0 | return FALSE; |
8892 | 0 | } |
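/* A FALSE return is not an error; it merely means the converter will use
 * the generic per-line pipeline (cf. video_converter_generic earlier in
 * this file) rather than one of the specialised fastpaths above. */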
8893 | | |
8894 | | /** |
8895 | | * gst_video_converter_get_in_info: |
8896 | | * @convert: a #GstVideoConverter |
8897 | | * |
8898 | | * Retrieve the input format of @convert. |
8899 | | * |
8900 | | * Returns: (transfer none): a #GstVideoInfo |
8901 | | * |
8902 | | * Since: 1.22 |
8903 | | */ |
8904 | | const GstVideoInfo * |
8905 | | gst_video_converter_get_in_info (GstVideoConverter * convert) |
8906 | 0 | { |
8907 | 0 | return &convert->in_info; |
8908 | 0 | } |
8909 | | |
8910 | | /** |
8911 | | * gst_video_converter_get_out_info: |
8912 | | * @convert: a #GstVideoConverter |
8913 | | * |
8914 | | * Retrieve the output format of @convert. |
8915 | | * |
8916 | | * Returns: (transfer none): a #GstVideoInfo |
8917 | | * |
8918 | | * Since: 1.22 |
8919 | | */ |
8920 | | const GstVideoInfo * |
8921 | | gst_video_converter_get_out_info (GstVideoConverter * convert) |
8922 | 0 | { |
8923 | 0 | return &convert->out_info; |
8924 | 0 | } |
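/* Usage sketch for the accessors above -- a minimal, illustrative caller
 * (function name, formats and sizes are arbitrary examples, not taken from
 * this file) that converts a same-size I420 frame to BGRA: */
static void
example_convert_frame (GstVideoFrame * src, GstVideoFrame * dest)
{
  GstVideoInfo in_info, out_info;
  GstVideoConverter *convert;

  gst_video_info_set_format (&in_info, GST_VIDEO_FORMAT_I420, 320, 240);
  gst_video_info_set_format (&out_info, GST_VIDEO_FORMAT_BGRA, 320, 240);

  /* NULL config selects the default conversion options */
  convert = gst_video_converter_new (&in_info, &out_info, NULL);

  /* the converter keeps its own copies of both infos; the getters expose
   * them, e.g. to double-check the negotiated formats */
  g_assert (GST_VIDEO_INFO_FORMAT (gst_video_converter_get_in_info (convert))
      == GST_VIDEO_FORMAT_I420);
  g_assert (GST_VIDEO_INFO_FORMAT (gst_video_converter_get_out_info (convert))
      == GST_VIDEO_FORMAT_BGRA);

  /* src and dest are assumed to be GstVideoFrames mapped with in_info and
   * out_info respectively */
  gst_video_converter_frame (convert, src, dest);
  gst_video_converter_free (convert);
}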