/src/cairo/subprojects/pixman-0.44.2/pixman/pixman-fast-path.c
Line | Count | Source |
1 | | /* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */ |
2 | | /* |
3 | | * Copyright © 2000 SuSE, Inc. |
4 | | * Copyright © 2007 Red Hat, Inc. |
5 | | * |
6 | | * Permission to use, copy, modify, distribute, and sell this software and its |
7 | | * documentation for any purpose is hereby granted without fee, provided that |
8 | | * the above copyright notice appear in all copies and that both that |
9 | | * copyright notice and this permission notice appear in supporting |
10 | | * documentation, and that the name of SuSE not be used in advertising or |
11 | | * publicity pertaining to distribution of the software without specific, |
12 | | * written prior permission. SuSE makes no representations about the |
13 | | * suitability of this software for any purpose. It is provided "as is" |
14 | | * without express or implied warranty. |
15 | | * |
16 | | * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL |
17 | | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE |
18 | | * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
19 | | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION |
20 | | * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN |
21 | | * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
22 | | * |
23 | | * Author: Keith Packard, SuSE, Inc. |
24 | | */ |
25 | | |
26 | | #ifdef HAVE_CONFIG_H |
27 | | #include <pixman-config.h> |
28 | | #endif |
29 | | #include <string.h> |
30 | | #include <stdlib.h> |
31 | | #include "pixman-private.h" |
32 | | #include "pixman-combine32.h" |
33 | | #include "pixman-inlines.h" |
34 | | |
35 | | static force_inline uint32_t |
36 | | fetch_24 (uint8_t *a) |
37 | 0 | { |
38 | 0 | if (((uintptr_t)a) & 1) |
39 | 0 | { |
40 | | #ifdef WORDS_BIGENDIAN |
41 | | return (*a << 16) | (*(uint16_t *)(a + 1)); |
42 | | #else |
43 | 0 | return *a | (*(uint16_t *)(a + 1) << 8); |
44 | 0 | #endif |
45 | 0 | } |
46 | 0 | else |
47 | 0 | { |
48 | | #ifdef WORDS_BIGENDIAN |
49 | | return (*(uint16_t *)a << 8) | *(a + 2); |
50 | | #else |
51 | 0 | return *(uint16_t *)a | (*(a + 2) << 16); |
52 | 0 | #endif |
53 | 0 | } |
54 | 0 | } |
55 | | |
56 | | static force_inline void |
57 | | store_24 (uint8_t *a, |
58 | | uint32_t v) |
59 | 0 | { |
60 | 0 | if (((uintptr_t)a) & 1) |
61 | 0 | { |
62 | | #ifdef WORDS_BIGENDIAN |
63 | | *a = (uint8_t) (v >> 16); |
64 | | *(uint16_t *)(a + 1) = (uint16_t) (v); |
65 | | #else |
66 | 0 | *a = (uint8_t) (v); |
67 | 0 | *(uint16_t *)(a + 1) = (uint16_t) (v >> 8); |
68 | 0 | #endif |
69 | 0 | } |
70 | 0 | else |
71 | 0 | { |
72 | | #ifdef WORDS_BIGENDIAN |
73 | | *(uint16_t *)a = (uint16_t)(v >> 8); |
74 | | *(a + 2) = (uint8_t)v; |
75 | | #else |
76 | 0 | *(uint16_t *)a = (uint16_t)v; |
77 | 0 | *(a + 2) = (uint8_t)(v >> 16); |
78 | 0 | #endif |
79 | 0 | } |
80 | 0 | } |
81 | | |
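/*
 * Illustrative sketch (not part of the original file): fetch_24 () and
 * store_24 () above access one 3-byte pixel with a 16-bit plus an 8-bit
 * memory access, choosing the split so that the 16-bit access is always
 * 2-byte aligned.  On little-endian builds the result is the same as this
 * simpler byte-by-byte version (the *_bytewise names are hypothetical and
 * only used here for illustration):
 */
static uint32_t
fetch_24_bytewise (const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
}

static void
store_24_bytewise (uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)v;
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)(v >> 16);
}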
82 | | static force_inline uint32_t |
83 | | over (uint32_t src, |
84 | | uint32_t dest) |
85 | 0 | { |
86 | 0 | uint32_t a = ~src >> 24; |
87 | |
|
88 | 0 | UN8x4_MUL_UN8_ADD_UN8x4 (dest, a, src); |
89 | |
|
90 | 0 | return dest; |
91 | 0 | } |
92 | | |
93 | | static force_inline uint32_t |
94 | | in (uint32_t x, |
95 | | uint8_t y) |
96 | 0 | { |
97 | 0 | uint16_t a = y; |
98 | |
|
99 | 0 | UN8x4_MUL_UN8 (x, a); |
100 | |
|
101 | 0 | return x; |
102 | 0 | } |
103 | | |
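/*
 * Illustrative sketch (not part of the original file): over () and in ()
 * are the scalar Porter-Duff helpers used by the fast paths below.  They
 * operate on premultiplied 8:8:8:8 pixels, four channels at a time, through
 * the UN8x4_* macros from pixman-combine32.h.  Written out per channel,
 * using the usual approximate divide-by-255 rounding, the arithmetic is
 * (helper names here are hypothetical):
 */
static uint8_t
div_255_approx (uint32_t x)
{
    x += 0x80;
    return (uint8_t)((x + (x >> 8)) >> 8);
}

/* OVER for one premultiplied channel: dst = src + dst * (255 - src_alpha) / 255 */
static uint8_t
over_channel (uint8_t s, uint8_t d, uint8_t src_alpha)
{
    return (uint8_t)(s + div_255_approx ((uint32_t)d * (255 - src_alpha)));
}

/* IN for one channel: the value scaled by an 8-bit mask */
static uint8_t
in_channel (uint8_t x, uint8_t m)
{
    return div_255_approx ((uint32_t)x * m);
}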
104 | | /* |
105 | | * Naming convention: |
106 | | * |
107 | | * op_src_mask_dest |
108 | | */ |
109 | | static void |
110 | | fast_composite_over_x888_8_8888 (pixman_implementation_t *imp, |
111 | | pixman_composite_info_t *info) |
112 | 0 | { |
113 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
114 | 0 | uint32_t *src, *src_line; |
115 | 0 | uint32_t *dst, *dst_line; |
116 | 0 | uint8_t *mask, *mask_line; |
117 | 0 | int src_stride, mask_stride, dst_stride; |
118 | 0 | uint8_t m; |
119 | 0 | uint32_t s, d; |
120 | 0 | int32_t w; |
121 | |
|
122 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); |
123 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); |
124 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); |
125 | |
|
126 | 0 | while (height--) |
127 | 0 | { |
128 | 0 | src = src_line; |
129 | 0 | src_line += src_stride; |
130 | 0 | dst = dst_line; |
131 | 0 | dst_line += dst_stride; |
132 | 0 | mask = mask_line; |
133 | 0 | mask_line += mask_stride; |
134 | |
|
135 | 0 | w = width; |
136 | 0 | while (w--) |
137 | 0 | { |
138 | 0 | m = *mask++; |
139 | 0 | if (m) |
140 | 0 | { |
141 | 0 | s = *src | 0xff000000; |
142 | |
|
143 | 0 | if (m == 0xff) |
144 | 0 | { |
145 | 0 | *dst = s; |
146 | 0 | } |
147 | 0 | else |
148 | 0 | { |
149 | 0 | d = in (s, m); |
150 | 0 | *dst = over (d, *dst); |
151 | 0 | } |
152 | 0 | } |
153 | 0 | src++; |
154 | 0 | dst++; |
155 | 0 | } |
156 | 0 | } |
157 | 0 | } |
158 | | |
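/*
 * Illustrative sketch (not part of the original file): these helpers are
 * never called directly; the c_fast_paths table at the end of this file
 * maps (operator, source format, mask format, dest format) patterns to
 * them, and the generic composite code picks one when a request matches.
 * For example, an OVER composite of an x8r8g8b8 source through an a8 mask
 * onto a8r8g8b8 via the public API looks like the code below (buffer sizes
 * and names are hypothetical; whether this C path or a SIMD one actually
 * runs depends on the build and on runtime CPU detection):
 */
#include <pixman.h>

static uint32_t src_bits[64 * 64];
static uint32_t mask_bits[64 * 64 / 4];   /* a8 mask, one byte per pixel */
static uint32_t dst_bits[64 * 64];

static void
composite_example (void)
{
    pixman_image_t *src = pixman_image_create_bits (PIXMAN_x8r8g8b8, 64, 64,
                                                    src_bits, 64 * 4);
    pixman_image_t *mask = pixman_image_create_bits (PIXMAN_a8, 64, 64,
                                                     mask_bits, 64);
    pixman_image_t *dst = pixman_image_create_bits (PIXMAN_a8r8g8b8, 64, 64,
                                                    dst_bits, 64 * 4);

    pixman_image_composite32 (PIXMAN_OP_OVER, src, mask, dst,
                              0, 0,    /* src origin  */
                              0, 0,    /* mask origin */
                              0, 0,    /* dest origin */
                              64, 64); /* width, height */

    pixman_image_unref (src);
    pixman_image_unref (mask);
    pixman_image_unref (dst);
}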
159 | | static void |
160 | | fast_composite_in_n_8_8 (pixman_implementation_t *imp, |
161 | | pixman_composite_info_t *info) |
162 | 0 | { |
163 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
164 | 0 | uint32_t src, srca; |
165 | 0 | uint8_t *dst_line, *dst; |
166 | 0 | uint8_t *mask_line, *mask, m; |
167 | 0 | int dst_stride, mask_stride; |
168 | 0 | int32_t w; |
169 | 0 | uint16_t t; |
170 | |
|
171 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
172 | |
|
173 | 0 | srca = src >> 24; |
174 | |
|
175 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); |
176 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); |
177 | |
|
178 | 0 | if (srca == 0xff) |
179 | 0 | { |
180 | 0 | while (height--) |
181 | 0 | { |
182 | 0 | dst = dst_line; |
183 | 0 | dst_line += dst_stride; |
184 | 0 | mask = mask_line; |
185 | 0 | mask_line += mask_stride; |
186 | 0 | w = width; |
187 | |
|
188 | 0 | while (w--) |
189 | 0 | { |
190 | 0 | m = *mask++; |
191 | |
|
192 | 0 | if (m == 0) |
193 | 0 | *dst = 0; |
194 | 0 | else if (m != 0xff) |
195 | 0 | *dst = MUL_UN8 (m, *dst, t); |
196 | |
|
197 | 0 | dst++; |
198 | 0 | } |
199 | 0 | } |
200 | 0 | } |
201 | 0 | else |
202 | 0 | { |
203 | 0 | while (height--) |
204 | 0 | { |
205 | 0 | dst = dst_line; |
206 | 0 | dst_line += dst_stride; |
207 | 0 | mask = mask_line; |
208 | 0 | mask_line += mask_stride; |
209 | 0 | w = width; |
210 | |
|
211 | 0 | while (w--) |
212 | 0 | { |
213 | 0 | m = *mask++; |
214 | 0 | m = MUL_UN8 (m, srca, t); |
215 | |
|
216 | 0 | if (m == 0) |
217 | 0 | *dst = 0; |
218 | 0 | else if (m != 0xff) |
219 | 0 | *dst = MUL_UN8 (m, *dst, t); |
220 | |
|
221 | 0 | dst++; |
222 | 0 | } |
223 | 0 | } |
224 | 0 | } |
225 | 0 | } |
226 | | |
227 | | static void |
228 | | fast_composite_in_8_8 (pixman_implementation_t *imp, |
229 | | pixman_composite_info_t *info) |
230 | 0 | { |
231 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
232 | 0 | uint8_t *dst_line, *dst; |
233 | 0 | uint8_t *src_line, *src; |
234 | 0 | int dst_stride, src_stride; |
235 | 0 | int32_t w; |
236 | 0 | uint8_t s; |
237 | 0 | uint16_t t; |
238 | |
|
239 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); |
240 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); |
241 | |
|
242 | 0 | while (height--) |
243 | 0 | { |
244 | 0 | dst = dst_line; |
245 | 0 | dst_line += dst_stride; |
246 | 0 | src = src_line; |
247 | 0 | src_line += src_stride; |
248 | 0 | w = width; |
249 | |
|
250 | 0 | while (w--) |
251 | 0 | { |
252 | 0 | s = *src++; |
253 | |
|
254 | 0 | if (s == 0) |
255 | 0 | *dst = 0; |
256 | 0 | else if (s != 0xff) |
257 | 0 | *dst = MUL_UN8 (s, *dst, t); |
258 | |
|
259 | 0 | dst++; |
260 | 0 | } |
261 | 0 | } |
262 | 0 | } |
263 | | |
264 | | static void |
265 | | fast_composite_over_n_8_8888 (pixman_implementation_t *imp, |
266 | | pixman_composite_info_t *info) |
267 | 0 | { |
268 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
269 | 0 | uint32_t src, srca; |
270 | 0 | uint32_t *dst_line, *dst, d; |
271 | 0 | uint8_t *mask_line, *mask, m; |
272 | 0 | int dst_stride, mask_stride; |
273 | 0 | int32_t w; |
274 | |
|
275 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
276 | |
|
277 | 0 | srca = src >> 24; |
278 | 0 | if (src == 0) |
279 | 0 | return; |
280 | | |
281 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); |
282 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); |
283 | |
|
284 | 0 | while (height--) |
285 | 0 | { |
286 | 0 | dst = dst_line; |
287 | 0 | dst_line += dst_stride; |
288 | 0 | mask = mask_line; |
289 | 0 | mask_line += mask_stride; |
290 | 0 | w = width; |
291 | |
|
292 | 0 | while (w--) |
293 | 0 | { |
294 | 0 | m = *mask++; |
295 | 0 | if (m == 0xff) |
296 | 0 | { |
297 | 0 | if (srca == 0xff) |
298 | 0 | *dst = src; |
299 | 0 | else |
300 | 0 | *dst = over (src, *dst); |
301 | 0 | } |
302 | 0 | else if (m) |
303 | 0 | { |
304 | 0 | d = in (src, m); |
305 | 0 | *dst = over (d, *dst); |
306 | 0 | } |
307 | 0 | dst++; |
308 | 0 | } |
309 | 0 | } |
310 | 0 | } |
311 | | |
312 | | static void |
313 | | fast_composite_add_n_8888_8888_ca (pixman_implementation_t *imp, |
314 | | pixman_composite_info_t *info) |
315 | 0 | { |
316 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
317 | 0 | uint32_t src, s; |
318 | 0 | uint32_t *dst_line, *dst, d; |
319 | 0 | uint32_t *mask_line, *mask, ma; |
320 | 0 | int dst_stride, mask_stride; |
321 | 0 | int32_t w; |
322 | |
|
323 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
324 | |
|
325 | 0 | if (src == 0) |
326 | 0 | return; |
327 | | |
328 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); |
329 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); |
330 | |
|
331 | 0 | while (height--) |
332 | 0 | { |
333 | 0 | dst = dst_line; |
334 | 0 | dst_line += dst_stride; |
335 | 0 | mask = mask_line; |
336 | 0 | mask_line += mask_stride; |
337 | 0 | w = width; |
338 | |
|
339 | 0 | while (w--) |
340 | 0 | { |
341 | 0 | ma = *mask++; |
342 | |
|
343 | 0 | if (ma) |
344 | 0 | { |
345 | 0 | d = *dst; |
346 | 0 | s = src; |
347 | |
|
348 | 0 | UN8x4_MUL_UN8x4_ADD_UN8x4 (s, ma, d); |
349 | |
|
350 | 0 | *dst = s; |
351 | 0 | } |
352 | |
|
353 | 0 | dst++; |
354 | 0 | } |
355 | 0 | } |
356 | 0 | } |
357 | | |
358 | | static void |
359 | | fast_composite_over_n_8888_8888_ca (pixman_implementation_t *imp, |
360 | | pixman_composite_info_t *info) |
361 | 0 | { |
362 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
363 | 0 | uint32_t src, srca, s; |
364 | 0 | uint32_t *dst_line, *dst, d; |
365 | 0 | uint32_t *mask_line, *mask, ma; |
366 | 0 | int dst_stride, mask_stride; |
367 | 0 | int32_t w; |
368 | |
|
369 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
370 | |
|
371 | 0 | srca = src >> 24; |
372 | 0 | if (src == 0) |
373 | 0 | return; |
374 | | |
375 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); |
376 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); |
377 | |
|
378 | 0 | while (height--) |
379 | 0 | { |
380 | 0 | dst = dst_line; |
381 | 0 | dst_line += dst_stride; |
382 | 0 | mask = mask_line; |
383 | 0 | mask_line += mask_stride; |
384 | 0 | w = width; |
385 | |
|
386 | 0 | while (w--) |
387 | 0 | { |
388 | 0 | ma = *mask++; |
389 | 0 | if (ma == 0xffffffff) |
390 | 0 | { |
391 | 0 | if (srca == 0xff) |
392 | 0 | *dst = src; |
393 | 0 | else |
394 | 0 | *dst = over (src, *dst); |
395 | 0 | } |
396 | 0 | else if (ma) |
397 | 0 | { |
398 | 0 | d = *dst; |
399 | 0 | s = src; |
400 | |
|
401 | 0 | UN8x4_MUL_UN8x4 (s, ma); |
402 | 0 | UN8x4_MUL_UN8 (ma, srca); |
403 | 0 | ma = ~ma; |
404 | 0 | UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s); |
405 | |
|
406 | 0 | *dst = d; |
407 | 0 | } |
408 | |
|
409 | 0 | dst++; |
410 | 0 | } |
411 | 0 | } |
412 | 0 | } |
413 | | |
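/*
 * Illustrative note (not part of the original file): in the component-alpha
 * loop above, "ma" holds one mask value per colour channel, so each
 * destination channel c is updated as
 *
 *     s_c = src_c * ma_c / 255
 *     d_c = d_c * (255 - ma_c * srca / 255) / 255 + s_c
 *
 * which is exactly what the UN8x4_MUL_UN8x4 / UN8x4_MUL_UN8 / ~ma /
 * UN8x4_MUL_UN8x4_ADD_UN8x4 sequence computes four channels at a time.
 */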
414 | | static void |
415 | | fast_composite_over_n_8_0888 (pixman_implementation_t *imp, |
416 | | pixman_composite_info_t *info) |
417 | 0 | { |
418 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
419 | 0 | uint32_t src, srca; |
420 | 0 | uint8_t *dst_line, *dst; |
421 | 0 | uint32_t d; |
422 | 0 | uint8_t *mask_line, *mask, m; |
423 | 0 | int dst_stride, mask_stride; |
424 | 0 | int32_t w; |
425 | |
|
426 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
427 | |
|
428 | 0 | srca = src >> 24; |
429 | 0 | if (src == 0) |
430 | 0 | return; |
431 | | |
432 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3); |
433 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); |
434 | |
|
435 | 0 | while (height--) |
436 | 0 | { |
437 | 0 | dst = dst_line; |
438 | 0 | dst_line += dst_stride; |
439 | 0 | mask = mask_line; |
440 | 0 | mask_line += mask_stride; |
441 | 0 | w = width; |
442 | |
|
443 | 0 | while (w--) |
444 | 0 | { |
445 | 0 | m = *mask++; |
446 | 0 | if (m == 0xff) |
447 | 0 | { |
448 | 0 | if (srca == 0xff) |
449 | 0 | { |
450 | 0 | d = src; |
451 | 0 | } |
452 | 0 | else |
453 | 0 | { |
454 | 0 | d = fetch_24 (dst); |
455 | 0 | d = over (src, d); |
456 | 0 | } |
457 | 0 | store_24 (dst, d); |
458 | 0 | } |
459 | 0 | else if (m) |
460 | 0 | { |
461 | 0 | d = over (in (src, m), fetch_24 (dst)); |
462 | 0 | store_24 (dst, d); |
463 | 0 | } |
464 | 0 | dst += 3; |
465 | 0 | } |
466 | 0 | } |
467 | 0 | } |
468 | | |
469 | | static void |
470 | | fast_composite_over_n_8_0565 (pixman_implementation_t *imp, |
471 | | pixman_composite_info_t *info) |
472 | 0 | { |
473 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
474 | 0 | uint32_t src, srca; |
475 | 0 | uint16_t *dst_line, *dst; |
476 | 0 | uint32_t d; |
477 | 0 | uint8_t *mask_line, *mask, m; |
478 | 0 | int dst_stride, mask_stride; |
479 | 0 | int32_t w; |
480 | |
|
481 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
482 | |
|
483 | 0 | srca = src >> 24; |
484 | 0 | if (src == 0) |
485 | 0 | return; |
486 | | |
487 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); |
488 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); |
489 | |
|
490 | 0 | while (height--) |
491 | 0 | { |
492 | 0 | dst = dst_line; |
493 | 0 | dst_line += dst_stride; |
494 | 0 | mask = mask_line; |
495 | 0 | mask_line += mask_stride; |
496 | 0 | w = width; |
497 | |
|
498 | 0 | while (w--) |
499 | 0 | { |
500 | 0 | m = *mask++; |
501 | 0 | if (m == 0xff) |
502 | 0 | { |
503 | 0 | if (srca == 0xff) |
504 | 0 | { |
505 | 0 | d = src; |
506 | 0 | } |
507 | 0 | else |
508 | 0 | { |
509 | 0 | d = *dst; |
510 | 0 | d = over (src, convert_0565_to_0888 (d)); |
511 | 0 | } |
512 | 0 | *dst = convert_8888_to_0565 (d); |
513 | 0 | } |
514 | 0 | else if (m) |
515 | 0 | { |
516 | 0 | d = *dst; |
517 | 0 | d = over (in (src, m), convert_0565_to_0888 (d)); |
518 | 0 | *dst = convert_8888_to_0565 (d); |
519 | 0 | } |
520 | 0 | dst++; |
521 | 0 | } |
522 | 0 | } |
523 | 0 | } |
524 | | |
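/*
 * Illustrative sketch (not part of the original file): the r5g6b5 paths
 * widen each destination pixel to 8:8:8, composite, and narrow the result
 * again.  The usual form of those conversions is shown below; the actual
 * convert_0565_to_0888 () / convert_8888_to_0565 () helpers live in
 * pixman-inlines.h (the names pack_0565/unpack_0565 are hypothetical).
 */
static uint16_t
pack_0565 (uint32_t p)               /* 0x00RRGGBB -> r5g6b5 */
{
    return (uint16_t)(((p >> 8) & 0xf800) |
                      ((p >> 5) & 0x07e0) |
                      ((p >> 3) & 0x001f));
}

static uint32_t
unpack_0565 (uint16_t p)             /* r5g6b5 -> 0x00RRGGBB, replicating high bits */
{
    uint32_t r = (p >> 11) & 0x1f;
    uint32_t g = (p >> 5)  & 0x3f;
    uint32_t b = p & 0x1f;

    r = (r << 3) | (r >> 2);
    g = (g << 2) | (g >> 4);
    b = (b << 3) | (b >> 2);

    return (r << 16) | (g << 8) | b;
}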
525 | | static void |
526 | | fast_composite_over_n_8888_0565_ca (pixman_implementation_t *imp, |
527 | | pixman_composite_info_t *info) |
528 | 0 | { |
529 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
530 | 0 | uint32_t src, srca, s; |
531 | 0 | uint16_t src16; |
532 | 0 | uint16_t *dst_line, *dst; |
533 | 0 | uint32_t d; |
534 | 0 | uint32_t *mask_line, *mask, ma; |
535 | 0 | int dst_stride, mask_stride; |
536 | 0 | int32_t w; |
537 | |
|
538 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
539 | |
|
540 | 0 | srca = src >> 24; |
541 | 0 | if (src == 0) |
542 | 0 | return; |
543 | | |
544 | 0 | src16 = convert_8888_to_0565 (src); |
545 | |
|
546 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); |
547 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint32_t, mask_stride, mask_line, 1); |
548 | |
|
549 | 0 | while (height--) |
550 | 0 | { |
551 | 0 | dst = dst_line; |
552 | 0 | dst_line += dst_stride; |
553 | 0 | mask = mask_line; |
554 | 0 | mask_line += mask_stride; |
555 | 0 | w = width; |
556 | |
|
557 | 0 | while (w--) |
558 | 0 | { |
559 | 0 | ma = *mask++; |
560 | 0 | if (ma == 0xffffffff) |
561 | 0 | { |
562 | 0 | if (srca == 0xff) |
563 | 0 | { |
564 | 0 | *dst = src16; |
565 | 0 | } |
566 | 0 | else |
567 | 0 | { |
568 | 0 | d = *dst; |
569 | 0 | d = over (src, convert_0565_to_0888 (d)); |
570 | 0 | *dst = convert_8888_to_0565 (d); |
571 | 0 | } |
572 | 0 | } |
573 | 0 | else if (ma) |
574 | 0 | { |
575 | 0 | d = *dst; |
576 | 0 | d = convert_0565_to_0888 (d); |
577 | |
|
578 | 0 | s = src; |
579 | |
|
580 | 0 | UN8x4_MUL_UN8x4 (s, ma); |
581 | 0 | UN8x4_MUL_UN8 (ma, srca); |
582 | 0 | ma = ~ma; |
583 | 0 | UN8x4_MUL_UN8x4_ADD_UN8x4 (d, ma, s); |
584 | |
|
585 | 0 | *dst = convert_8888_to_0565 (d); |
586 | 0 | } |
587 | 0 | dst++; |
588 | 0 | } |
589 | 0 | } |
590 | 0 | } |
591 | | |
592 | | static void |
593 | | fast_composite_over_8888_8888 (pixman_implementation_t *imp, |
594 | | pixman_composite_info_t *info) |
595 | 0 | { |
596 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
597 | 0 | uint32_t *dst_line, *dst; |
598 | 0 | uint32_t *src_line, *src, s; |
599 | 0 | int dst_stride, src_stride; |
600 | 0 | uint8_t a; |
601 | 0 | int32_t w; |
602 | |
|
603 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); |
604 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); |
605 | |
|
606 | 0 | while (height--) |
607 | 0 | { |
608 | 0 | dst = dst_line; |
609 | 0 | dst_line += dst_stride; |
610 | 0 | src = src_line; |
611 | 0 | src_line += src_stride; |
612 | 0 | w = width; |
613 | |
|
614 | 0 | while (w--) |
615 | 0 | { |
616 | 0 | s = *src++; |
617 | 0 | a = s >> 24; |
618 | 0 | if (a == 0xff) |
619 | 0 | *dst = s; |
620 | 0 | else if (s) |
621 | 0 | *dst = over (s, *dst); |
622 | 0 | dst++; |
623 | 0 | } |
624 | 0 | } |
625 | 0 | } |
626 | | |
627 | | static void |
628 | | fast_composite_src_x888_8888 (pixman_implementation_t *imp, |
629 | | pixman_composite_info_t *info) |
630 | 0 | { |
631 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
632 | 0 | uint32_t *dst_line, *dst; |
633 | 0 | uint32_t *src_line, *src; |
634 | 0 | int dst_stride, src_stride; |
635 | 0 | int32_t w; |
636 | |
|
637 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); |
638 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); |
639 | |
|
640 | 0 | while (height--) |
641 | 0 | { |
642 | 0 | dst = dst_line; |
643 | 0 | dst_line += dst_stride; |
644 | 0 | src = src_line; |
645 | 0 | src_line += src_stride; |
646 | 0 | w = width; |
647 | |
|
648 | 0 | while (w--) |
649 | 0 | *dst++ = (*src++) | 0xff000000; |
650 | 0 | } |
651 | 0 | } |
652 | | |
653 | | #if 0 |
654 | | static void |
655 | | fast_composite_over_8888_0888 (pixman_implementation_t *imp, |
656 | | pixman_composite_info_t *info) |
657 | | { |
658 | | PIXMAN_COMPOSITE_ARGS (info); |
659 | | uint8_t *dst_line, *dst; |
660 | | uint32_t d; |
661 | | uint32_t *src_line, *src, s; |
662 | | uint8_t a; |
663 | | int dst_stride, src_stride; |
664 | | int32_t w; |
665 | | |
666 | | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 3); |
667 | | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); |
668 | | |
669 | | while (height--) |
670 | | { |
671 | | dst = dst_line; |
672 | | dst_line += dst_stride; |
673 | | src = src_line; |
674 | | src_line += src_stride; |
675 | | w = width; |
676 | | |
677 | | while (w--) |
678 | | { |
679 | | s = *src++; |
680 | | a = s >> 24; |
681 | | if (a) |
682 | | { |
683 | | if (a == 0xff) |
684 | | d = s; |
685 | | else |
686 | | d = over (s, fetch_24 (dst)); |
687 | | |
688 | | store_24 (dst, d); |
689 | | } |
690 | | dst += 3; |
691 | | } |
692 | | } |
693 | | } |
694 | | #endif |
695 | | |
696 | | static void |
697 | | fast_composite_over_8888_0565 (pixman_implementation_t *imp, |
698 | | pixman_composite_info_t *info) |
699 | 0 | { |
700 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
701 | 0 | uint16_t *dst_line, *dst; |
702 | 0 | uint32_t d; |
703 | 0 | uint32_t *src_line, *src, s; |
704 | 0 | uint8_t a; |
705 | 0 | int dst_stride, src_stride; |
706 | 0 | int32_t w; |
707 | |
|
708 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); |
709 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); |
710 | |
|
711 | 0 | while (height--) |
712 | 0 | { |
713 | 0 | dst = dst_line; |
714 | 0 | dst_line += dst_stride; |
715 | 0 | src = src_line; |
716 | 0 | src_line += src_stride; |
717 | 0 | w = width; |
718 | |
|
719 | 0 | while (w--) |
720 | 0 | { |
721 | 0 | s = *src++; |
722 | 0 | a = s >> 24; |
723 | 0 | if (s) |
724 | 0 | { |
725 | 0 | if (a == 0xff) |
726 | 0 | { |
727 | 0 | d = s; |
728 | 0 | } |
729 | 0 | else |
730 | 0 | { |
731 | 0 | d = *dst; |
732 | 0 | d = over (s, convert_0565_to_0888 (d)); |
733 | 0 | } |
734 | 0 | *dst = convert_8888_to_0565 (d); |
735 | 0 | } |
736 | 0 | dst++; |
737 | 0 | } |
738 | 0 | } |
739 | 0 | } |
740 | | |
741 | | static void |
742 | | fast_composite_add_8_8 (pixman_implementation_t *imp, |
743 | | pixman_composite_info_t *info) |
744 | 0 | { |
745 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
746 | 0 | uint8_t *dst_line, *dst; |
747 | 0 | uint8_t *src_line, *src; |
748 | 0 | int dst_stride, src_stride; |
749 | 0 | int32_t w; |
750 | 0 | uint8_t s, d; |
751 | 0 | uint16_t t; |
752 | |
|
753 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint8_t, src_stride, src_line, 1); |
754 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); |
755 | |
|
756 | 0 | while (height--) |
757 | 0 | { |
758 | 0 | dst = dst_line; |
759 | 0 | dst_line += dst_stride; |
760 | 0 | src = src_line; |
761 | 0 | src_line += src_stride; |
762 | 0 | w = width; |
763 | |
|
764 | 0 | while (w--) |
765 | 0 | { |
766 | 0 | s = *src++; |
767 | 0 | if (s) |
768 | 0 | { |
769 | 0 | if (s != 0xff) |
770 | 0 | { |
771 | 0 | d = *dst; |
772 | 0 | t = d + s; |
773 | 0 | s = t | (0 - (t >> 8)); |
774 | 0 | } |
775 | 0 | *dst = s; |
776 | 0 | } |
777 | 0 | dst++; |
778 | 0 | } |
779 | 0 | } |
780 | 0 | } |
781 | | |
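/*
 * Illustrative note (not part of the original file): the branch-free clamp
 * in fast_composite_add_8_8 works because t is a 16-bit sum.  When d + s
 * exceeds 255, t >> 8 is 1, so 0 - (t >> 8) is all ones and the OR forces
 * the low byte to 0xff; otherwise the OR changes nothing.  As a standalone
 * helper (hypothetical name):
 */
static uint8_t
saturating_add_u8 (uint8_t d, uint8_t s)
{
    uint16_t t = (uint16_t)(d + s);

    return (uint8_t)(t | (0 - (t >> 8)));
}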
782 | | static void |
783 | | fast_composite_add_0565_0565 (pixman_implementation_t *imp, |
784 | | pixman_composite_info_t *info) |
785 | 0 | { |
786 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
787 | 0 | uint16_t *dst_line, *dst; |
788 | 0 | uint32_t d; |
789 | 0 | uint16_t *src_line, *src; |
790 | 0 | uint32_t s; |
791 | 0 | int dst_stride, src_stride; |
792 | 0 | int32_t w; |
793 | |
|
794 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1); |
795 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1); |
796 | |
|
797 | 0 | while (height--) |
798 | 0 | { |
799 | 0 | dst = dst_line; |
800 | 0 | dst_line += dst_stride; |
801 | 0 | src = src_line; |
802 | 0 | src_line += src_stride; |
803 | 0 | w = width; |
804 | |
|
805 | 0 | while (w--) |
806 | 0 | { |
807 | 0 | s = *src++; |
808 | 0 | if (s) |
809 | 0 | { |
810 | 0 | d = *dst; |
811 | 0 | s = convert_0565_to_8888 (s); |
812 | 0 | if (d) |
813 | 0 | { |
814 | 0 | d = convert_0565_to_8888 (d); |
815 | 0 | UN8x4_ADD_UN8x4 (s, d); |
816 | 0 | } |
817 | 0 | *dst = convert_8888_to_0565 (s); |
818 | 0 | } |
819 | 0 | dst++; |
820 | 0 | } |
821 | 0 | } |
822 | 0 | } |
823 | | |
824 | | static void |
825 | | fast_composite_add_8888_8888 (pixman_implementation_t *imp, |
826 | | pixman_composite_info_t *info) |
827 | 0 | { |
828 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
829 | 0 | uint32_t *dst_line, *dst; |
830 | 0 | uint32_t *src_line, *src; |
831 | 0 | int dst_stride, src_stride; |
832 | 0 | int32_t w; |
833 | 0 | uint32_t s, d; |
834 | |
|
835 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1); |
836 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); |
837 | |
|
838 | 0 | while (height--) |
839 | 0 | { |
840 | 0 | dst = dst_line; |
841 | 0 | dst_line += dst_stride; |
842 | 0 | src = src_line; |
843 | 0 | src_line += src_stride; |
844 | 0 | w = width; |
845 | |
|
846 | 0 | while (w--) |
847 | 0 | { |
848 | 0 | s = *src++; |
849 | 0 | if (s) |
850 | 0 | { |
851 | 0 | if (s != 0xffffffff) |
852 | 0 | { |
853 | 0 | d = *dst; |
854 | 0 | if (d) |
855 | 0 | UN8x4_ADD_UN8x4 (s, d); |
856 | 0 | } |
857 | 0 | *dst = s; |
858 | 0 | } |
859 | 0 | dst++; |
860 | 0 | } |
861 | 0 | } |
862 | 0 | } |
863 | | |
864 | | static void |
865 | | fast_composite_add_n_8_8 (pixman_implementation_t *imp, |
866 | | pixman_composite_info_t *info) |
867 | 0 | { |
868 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
869 | 0 | uint8_t *dst_line, *dst; |
870 | 0 | uint8_t *mask_line, *mask; |
871 | 0 | int dst_stride, mask_stride; |
872 | 0 | int32_t w; |
873 | 0 | uint32_t src; |
874 | 0 | uint8_t sa; |
875 | |
|
876 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint8_t, dst_stride, dst_line, 1); |
877 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, uint8_t, mask_stride, mask_line, 1); |
878 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
879 | 0 | sa = (src >> 24); |
880 | |
|
881 | 0 | while (height--) |
882 | 0 | { |
883 | 0 | dst = dst_line; |
884 | 0 | dst_line += dst_stride; |
885 | 0 | mask = mask_line; |
886 | 0 | mask_line += mask_stride; |
887 | 0 | w = width; |
888 | |
|
889 | 0 | while (w--) |
890 | 0 | { |
891 | 0 | uint16_t tmp; |
892 | 0 | uint16_t a; |
893 | 0 | uint32_t m, d; |
894 | 0 | uint32_t r; |
895 | |
|
896 | 0 | a = *mask++; |
897 | 0 | d = *dst; |
898 | |
|
899 | 0 | m = MUL_UN8 (sa, a, tmp); |
900 | 0 | r = ADD_UN8 (m, d, tmp); |
901 | |
|
902 | 0 | *dst++ = r; |
903 | 0 | } |
904 | 0 | } |
905 | 0 | } |
906 | | |
907 | | #ifdef WORDS_BIGENDIAN |
908 | | #define CREATE_BITMASK(n) (0x80000000 >> (n)) |
909 | | #define UPDATE_BITMASK(n) ((n) >> 1) |
910 | | #else |
911 | 0 | #define CREATE_BITMASK(n) (1U << (n)) |
912 | 0 | #define UPDATE_BITMASK(n) ((n) << 1) |
913 | | #endif |
914 | | |
915 | | #define TEST_BIT(p, n) \ |
916 | 0 | (*((p) + ((n) >> 5)) & CREATE_BITMASK ((n) & 31)) |
917 | | #define SET_BIT(p, n) \ |
918 | 0 | do { *((p) + ((n) >> 5)) |= CREATE_BITMASK ((n) & 31); } while (0); |
919 | | |
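/*
 * Illustrative note (not part of the original file): a1 images store one
 * pixel per bit, packed into 32-bit words.  TEST_BIT and SET_BIT select
 * word n >> 5 and bit n & 31 within it, and CREATE_BITMASK makes that an
 * LSB-first mask on little-endian builds or an MSB-first mask on big-endian
 * ones.  For example, on a little-endian build pixel n = 37 lives in word 1
 * (37 >> 5) under the mask 1u << 5 == 0x20.
 */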
920 | | static void |
921 | | fast_composite_add_1_1 (pixman_implementation_t *imp, |
922 | | pixman_composite_info_t *info) |
923 | 0 | { |
924 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
925 | 0 | uint32_t *dst_line, *dst; |
926 | 0 | uint32_t *src_line, *src; |
927 | 0 | int dst_stride, src_stride; |
928 | 0 | int32_t w; |
929 | |
|
930 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, 0, src_y, uint32_t, |
931 | 0 | src_stride, src_line, 1); |
932 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, 0, dest_y, uint32_t, |
933 | 0 | dst_stride, dst_line, 1); |
934 | |
|
935 | 0 | while (height--) |
936 | 0 | { |
937 | 0 | dst = dst_line; |
938 | 0 | dst_line += dst_stride; |
939 | 0 | src = src_line; |
940 | 0 | src_line += src_stride; |
941 | 0 | w = width; |
942 | |
|
943 | 0 | while (w--) |
944 | 0 | { |
945 | | /* |
946 | | * TODO: improve performance by processing uint32_t data instead |
947 | | * of individual bits |
948 | | */ |
949 | 0 | if (TEST_BIT (src, src_x + w)) |
950 | 0 | SET_BIT (dst, dest_x + w); |
951 | 0 | } |
952 | 0 | } |
953 | 0 | } |
954 | | |
955 | | static void |
956 | | fast_composite_over_n_1_8888 (pixman_implementation_t *imp, |
957 | | pixman_composite_info_t *info) |
958 | 0 | { |
959 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
960 | 0 | uint32_t src, srca; |
961 | 0 | uint32_t *dst, *dst_line; |
962 | 0 | uint32_t *mask, *mask_line; |
963 | 0 | int mask_stride, dst_stride; |
964 | 0 | uint32_t bitcache, bitmask; |
965 | 0 | int32_t w; |
966 | |
|
967 | 0 | if (width <= 0) |
968 | 0 | return; |
969 | | |
970 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
971 | 0 | srca = src >> 24; |
972 | 0 | if (src == 0) |
973 | 0 | return; |
974 | | |
975 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, |
976 | 0 | dst_stride, dst_line, 1); |
977 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t, |
978 | 0 | mask_stride, mask_line, 1); |
979 | 0 | mask_line += mask_x >> 5; |
980 | |
|
981 | 0 | if (srca == 0xff) |
982 | 0 | { |
983 | 0 | while (height--) |
984 | 0 | { |
985 | 0 | dst = dst_line; |
986 | 0 | dst_line += dst_stride; |
987 | 0 | mask = mask_line; |
988 | 0 | mask_line += mask_stride; |
989 | 0 | w = width; |
990 | |
|
991 | 0 | bitcache = *mask++; |
992 | 0 | bitmask = CREATE_BITMASK (mask_x & 31); |
993 | |
|
994 | 0 | while (w--) |
995 | 0 | { |
996 | 0 | if (bitmask == 0) |
997 | 0 | { |
998 | 0 | bitcache = *mask++; |
999 | 0 | bitmask = CREATE_BITMASK (0); |
1000 | 0 | } |
1001 | 0 | if (bitcache & bitmask) |
1002 | 0 | *dst = src; |
1003 | 0 | bitmask = UPDATE_BITMASK (bitmask); |
1004 | 0 | dst++; |
1005 | 0 | } |
1006 | 0 | } |
1007 | 0 | } |
1008 | 0 | else |
1009 | 0 | { |
1010 | 0 | while (height--) |
1011 | 0 | { |
1012 | 0 | dst = dst_line; |
1013 | 0 | dst_line += dst_stride; |
1014 | 0 | mask = mask_line; |
1015 | 0 | mask_line += mask_stride; |
1016 | 0 | w = width; |
1017 | |
|
1018 | 0 | bitcache = *mask++; |
1019 | 0 | bitmask = CREATE_BITMASK (mask_x & 31); |
1020 | |
|
1021 | 0 | while (w--) |
1022 | 0 | { |
1023 | 0 | if (bitmask == 0) |
1024 | 0 | { |
1025 | 0 | bitcache = *mask++; |
1026 | 0 | bitmask = CREATE_BITMASK (0); |
1027 | 0 | } |
1028 | 0 | if (bitcache & bitmask) |
1029 | 0 | *dst = over (src, *dst); |
1030 | 0 | bitmask = UPDATE_BITMASK (bitmask); |
1031 | 0 | dst++; |
1032 | 0 | } |
1033 | 0 | } |
1034 | 0 | } |
1035 | 0 | } |
1036 | | |
1037 | | static void |
1038 | | fast_composite_over_n_1_0565 (pixman_implementation_t *imp, |
1039 | | pixman_composite_info_t *info) |
1040 | 0 | { |
1041 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
1042 | 0 | uint32_t src, srca; |
1043 | 0 | uint16_t *dst, *dst_line; |
1044 | 0 | uint32_t *mask, *mask_line; |
1045 | 0 | int mask_stride, dst_stride; |
1046 | 0 | uint32_t bitcache, bitmask; |
1047 | 0 | int32_t w; |
1048 | 0 | uint32_t d; |
1049 | 0 | uint16_t src565; |
1050 | |
|
1051 | 0 | if (width <= 0) |
1052 | 0 | return; |
1053 | | |
1054 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
1055 | 0 | srca = src >> 24; |
1056 | 0 | if (src == 0) |
1057 | 0 | return; |
1058 | | |
1059 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, |
1060 | 0 | dst_stride, dst_line, 1); |
1061 | 0 | PIXMAN_IMAGE_GET_LINE (mask_image, 0, mask_y, uint32_t, |
1062 | 0 | mask_stride, mask_line, 1); |
1063 | 0 | mask_line += mask_x >> 5; |
1064 | |
|
1065 | 0 | if (srca == 0xff) |
1066 | 0 | { |
1067 | 0 | src565 = convert_8888_to_0565 (src); |
1068 | 0 | while (height--) |
1069 | 0 | { |
1070 | 0 | dst = dst_line; |
1071 | 0 | dst_line += dst_stride; |
1072 | 0 | mask = mask_line; |
1073 | 0 | mask_line += mask_stride; |
1074 | 0 | w = width; |
1075 | |
|
1076 | 0 | bitcache = *mask++; |
1077 | 0 | bitmask = CREATE_BITMASK (mask_x & 31); |
1078 | |
|
1079 | 0 | while (w--) |
1080 | 0 | { |
1081 | 0 | if (bitmask == 0) |
1082 | 0 | { |
1083 | 0 | bitcache = *mask++; |
1084 | 0 | bitmask = CREATE_BITMASK (0); |
1085 | 0 | } |
1086 | 0 | if (bitcache & bitmask) |
1087 | 0 | *dst = src565; |
1088 | 0 | bitmask = UPDATE_BITMASK (bitmask); |
1089 | 0 | dst++; |
1090 | 0 | } |
1091 | 0 | } |
1092 | 0 | } |
1093 | 0 | else |
1094 | 0 | { |
1095 | 0 | while (height--) |
1096 | 0 | { |
1097 | 0 | dst = dst_line; |
1098 | 0 | dst_line += dst_stride; |
1099 | 0 | mask = mask_line; |
1100 | 0 | mask_line += mask_stride; |
1101 | 0 | w = width; |
1102 | |
|
1103 | 0 | bitcache = *mask++; |
1104 | 0 | bitmask = CREATE_BITMASK (mask_x & 31); |
1105 | |
|
1106 | 0 | while (w--) |
1107 | 0 | { |
1108 | 0 | if (bitmask == 0) |
1109 | 0 | { |
1110 | 0 | bitcache = *mask++; |
1111 | 0 | bitmask = CREATE_BITMASK (0); |
1112 | 0 | } |
1113 | 0 | if (bitcache & bitmask) |
1114 | 0 | { |
1115 | 0 | d = over (src, convert_0565_to_0888 (*dst)); |
1116 | 0 | *dst = convert_8888_to_0565 (d); |
1117 | 0 | } |
1118 | 0 | bitmask = UPDATE_BITMASK (bitmask); |
1119 | 0 | dst++; |
1120 | 0 | } |
1121 | 0 | } |
1122 | 0 | } |
1123 | 0 | } |
1124 | | |
1125 | | /* |
1126 | | * Simple bitblt |
1127 | | */ |
1128 | | |
1129 | | static void |
1130 | | fast_composite_solid_fill (pixman_implementation_t *imp, |
1131 | | pixman_composite_info_t *info) |
1132 | 0 | { |
1133 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
1134 | 0 | uint32_t src; |
1135 | |
|
1136 | 0 | src = _pixman_image_get_solid (imp, src_image, dest_image->bits.format); |
1137 | |
|
1138 | 0 | if (dest_image->bits.format == PIXMAN_a1) |
1139 | 0 | { |
1140 | 0 | src = src >> 31; |
1141 | 0 | } |
1142 | 0 | else if (dest_image->bits.format == PIXMAN_a8) |
1143 | 0 | { |
1144 | 0 | src = src >> 24; |
1145 | 0 | } |
1146 | 0 | else if (dest_image->bits.format == PIXMAN_r5g6b5 || |
1147 | 0 | dest_image->bits.format == PIXMAN_b5g6r5) |
1148 | 0 | { |
1149 | 0 | src = convert_8888_to_0565 (src); |
1150 | 0 | } |
1151 | |
|
1152 | 0 | pixman_fill (dest_image->bits.bits, dest_image->bits.rowstride, |
1153 | 0 | PIXMAN_FORMAT_BPP (dest_image->bits.format), |
1154 | 0 | dest_x, dest_y, |
1155 | 0 | width, height, |
1156 | 0 | src); |
1157 | 0 | } |
1158 | | |
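/*
 * Illustrative sketch (not part of the original file): fast_composite_solid_fill
 * converts the solid colour to the destination's bit depth and hands the
 * whole rectangle to pixman_fill ().  That public entry point (declared in
 * pixman.h) can also be used directly; for example, filling a 32x32 region
 * of an a8r8g8b8 buffer with opaque red (buffer and sizes are hypothetical):
 */
static uint32_t fill_buf[128 * 128];

static void
fill_example (void)
{
    pixman_fill (fill_buf,
                 128,          /* rowstride in uint32_t units */
                 32,           /* bits per pixel */
                 8, 8,         /* x, y */
                 32, 32,       /* width, height */
                 0xffff0000);  /* fill value: opaque red in a8r8g8b8 */
}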
1159 | | static void |
1160 | | fast_composite_src_memcpy (pixman_implementation_t *imp, |
1161 | | pixman_composite_info_t *info) |
1162 | 0 | { |
1163 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
1164 | 0 | int bpp = PIXMAN_FORMAT_BPP (dest_image->bits.format) / 8; |
1165 | 0 | uint32_t n_bytes = width * bpp; |
1166 | 0 | int dst_stride, src_stride; |
1167 | 0 | uint8_t *dst; |
1168 | 0 | uint8_t *src; |
1169 | |
|
1170 | 0 | src_stride = src_image->bits.rowstride * 4; |
1171 | 0 | dst_stride = dest_image->bits.rowstride * 4; |
1172 | |
|
1173 | 0 | src = (uint8_t *)src_image->bits.bits + src_y * src_stride + src_x * bpp; |
1174 | 0 | dst = (uint8_t *)dest_image->bits.bits + dest_y * dst_stride + dest_x * bpp; |
1175 | |
|
1176 | 0 | while (height--) |
1177 | 0 | { |
1178 | 0 | memcpy (dst, src, n_bytes); |
1179 | |
|
1180 | 0 | dst += dst_stride; |
1181 | 0 | src += src_stride; |
1182 | 0 | } |
1183 | 0 | } |
1184 | | |
1185 | | FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, SRC, COVER) |
1186 | | FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, SRC, NONE) |
1187 | | FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, SRC, PAD) |
1188 | | FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, SRC, NORMAL) |
1189 | | FAST_NEAREST (x888_8888_cover, x888, 8888, uint32_t, uint32_t, SRC, COVER) |
1190 | | FAST_NEAREST (x888_8888_pad, x888, 8888, uint32_t, uint32_t, SRC, PAD) |
1191 | | FAST_NEAREST (x888_8888_normal, x888, 8888, uint32_t, uint32_t, SRC, NORMAL) |
1192 | | FAST_NEAREST (8888_8888_cover, 8888, 8888, uint32_t, uint32_t, OVER, COVER) |
1193 | | FAST_NEAREST (8888_8888_none, 8888, 8888, uint32_t, uint32_t, OVER, NONE) |
1194 | | FAST_NEAREST (8888_8888_pad, 8888, 8888, uint32_t, uint32_t, OVER, PAD) |
1195 | | FAST_NEAREST (8888_8888_normal, 8888, 8888, uint32_t, uint32_t, OVER, NORMAL) |
1196 | | FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, SRC, COVER) |
1197 | | FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, SRC, NONE) |
1198 | | FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, SRC, PAD) |
1199 | | FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, SRC, NORMAL) |
1200 | | FAST_NEAREST (565_565_normal, 0565, 0565, uint16_t, uint16_t, SRC, NORMAL) |
1201 | | FAST_NEAREST (8888_565_cover, 8888, 0565, uint32_t, uint16_t, OVER, COVER) |
1202 | | FAST_NEAREST (8888_565_none, 8888, 0565, uint32_t, uint16_t, OVER, NONE) |
1203 | | FAST_NEAREST (8888_565_pad, 8888, 0565, uint32_t, uint16_t, OVER, PAD) |
1204 | | FAST_NEAREST (8888_565_normal, 8888, 0565, uint32_t, uint16_t, OVER, NORMAL) |
1205 | | |
1206 | 0 | #define REPEAT_MIN_WIDTH 32 |
1207 | | |
1208 | | static void |
1209 | | fast_composite_tiled_repeat (pixman_implementation_t *imp, |
1210 | | pixman_composite_info_t *info) |
1211 | 0 | { |
1212 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
1213 | 0 | pixman_composite_func_t func; |
1214 | 0 | pixman_format_code_t mask_format; |
1215 | 0 | uint32_t src_flags, mask_flags; |
1216 | 0 | int32_t sx, sy; |
1217 | 0 | int32_t width_remain; |
1218 | 0 | int32_t num_pixels; |
1219 | 0 | int32_t src_width; |
1220 | 0 | int32_t i, j; |
1221 | 0 | pixman_image_t extended_src_image; |
1222 | 0 | uint32_t extended_src[REPEAT_MIN_WIDTH * 2]; |
1223 | 0 | pixman_bool_t need_src_extension; |
1224 | 0 | uint32_t *src_line; |
1225 | 0 | int32_t src_stride; |
1226 | 0 | int32_t src_bpp; |
1227 | 0 | pixman_composite_info_t info2 = *info; |
1228 | |
|
1229 | 0 | src_flags = (info->src_flags & ~FAST_PATH_NORMAL_REPEAT) | |
1230 | 0 | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST; |
1231 | |
|
1232 | 0 | if (mask_image) |
1233 | 0 | { |
1234 | 0 | mask_format = mask_image->common.extended_format_code; |
1235 | 0 | mask_flags = info->mask_flags; |
1236 | 0 | } |
1237 | 0 | else |
1238 | 0 | { |
1239 | 0 | mask_format = PIXMAN_null; |
1240 | 0 | mask_flags = FAST_PATH_IS_OPAQUE; |
1241 | 0 | } |
1242 | |
|
1243 | 0 | _pixman_implementation_lookup_composite ( |
1244 | 0 | imp->toplevel, info->op, |
1245 | 0 | src_image->common.extended_format_code, src_flags, |
1246 | 0 | mask_format, mask_flags, |
1247 | 0 | dest_image->common.extended_format_code, info->dest_flags, |
1248 | 0 | &imp, &func); |
1249 | |
|
1250 | 0 | src_bpp = PIXMAN_FORMAT_BPP (src_image->bits.format); |
1251 | |
|
1252 | 0 | if (src_image->bits.width < REPEAT_MIN_WIDTH && |
1253 | 0 | (src_bpp == 32 || src_bpp == 16 || src_bpp == 8) && |
1254 | 0 | !src_image->bits.indexed) |
1255 | 0 | { |
1256 | 0 | sx = src_x; |
1257 | 0 | sx = MOD (sx, src_image->bits.width); |
1258 | 0 | sx += width; |
1259 | 0 | src_width = 0; |
1260 | |
|
1261 | 0 | while (src_width < REPEAT_MIN_WIDTH && src_width <= sx) |
1262 | 0 | src_width += src_image->bits.width; |
1263 | |
|
1264 | 0 | src_stride = (src_width * (src_bpp >> 3) + 3) / (int) sizeof (uint32_t); |
1265 | | |
1266 | | /* Initialize/validate stack-allocated temporary image */ |
1267 | 0 | _pixman_bits_image_init (&extended_src_image, src_image->bits.format, |
1268 | 0 | src_width, 1, &extended_src[0], src_stride, |
1269 | 0 | FALSE); |
1270 | 0 | _pixman_image_validate (&extended_src_image); |
1271 | |
|
1272 | 0 | info2.src_image = &extended_src_image; |
1273 | 0 | need_src_extension = TRUE; |
1274 | 0 | } |
1275 | 0 | else |
1276 | 0 | { |
1277 | 0 | src_width = src_image->bits.width; |
1278 | 0 | need_src_extension = FALSE; |
1279 | 0 | } |
1280 | |
|
1281 | 0 | sx = src_x; |
1282 | 0 | sy = src_y; |
1283 | |
|
1284 | 0 | while (--height >= 0) |
1285 | 0 | { |
1286 | 0 | sx = MOD (sx, src_width); |
1287 | 0 | sy = MOD (sy, src_image->bits.height); |
1288 | |
|
1289 | 0 | if (need_src_extension) |
1290 | 0 | { |
1291 | 0 | if (src_bpp == 32) |
1292 | 0 | { |
1293 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint32_t, src_stride, src_line, 1); |
1294 | |
|
1295 | 0 | for (i = 0; i < src_width; ) |
1296 | 0 | { |
1297 | 0 | for (j = 0; j < src_image->bits.width; j++, i++) |
1298 | 0 | extended_src[i] = src_line[j]; |
1299 | 0 | } |
1300 | 0 | } |
1301 | 0 | else if (src_bpp == 16) |
1302 | 0 | { |
1303 | 0 | uint16_t *src_line_16; |
1304 | |
|
1305 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint16_t, src_stride, |
1306 | 0 | src_line_16, 1); |
1307 | 0 | src_line = (uint32_t*)src_line_16; |
1308 | |
|
1309 | 0 | for (i = 0; i < src_width; ) |
1310 | 0 | { |
1311 | 0 | for (j = 0; j < src_image->bits.width; j++, i++) |
1312 | 0 | ((uint16_t*)extended_src)[i] = ((uint16_t*)src_line)[j]; |
1313 | 0 | } |
1314 | 0 | } |
1315 | 0 | else if (src_bpp == 8) |
1316 | 0 | { |
1317 | 0 | uint8_t *src_line_8; |
1318 | |
|
1319 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, 0, sy, uint8_t, src_stride, |
1320 | 0 | src_line_8, 1); |
1321 | 0 | src_line = (uint32_t*)src_line_8; |
1322 | |
|
1323 | 0 | for (i = 0; i < src_width; ) |
1324 | 0 | { |
1325 | 0 | for (j = 0; j < src_image->bits.width; j++, i++) |
1326 | 0 | ((uint8_t*)extended_src)[i] = ((uint8_t*)src_line)[j]; |
1327 | 0 | } |
1328 | 0 | } |
1329 | |
|
1330 | 0 | info2.src_y = 0; |
1331 | 0 | } |
1332 | 0 | else |
1333 | 0 | { |
1334 | 0 | info2.src_y = sy; |
1335 | 0 | } |
1336 | |
|
1337 | 0 | width_remain = width; |
1338 | |
|
1339 | 0 | while (width_remain > 0) |
1340 | 0 | { |
1341 | 0 | num_pixels = src_width - sx; |
1342 | |
|
1343 | 0 | if (num_pixels > width_remain) |
1344 | 0 | num_pixels = width_remain; |
1345 | |
|
1346 | 0 | info2.src_x = sx; |
1347 | 0 | info2.width = num_pixels; |
1348 | 0 | info2.height = 1; |
1349 | |
|
1350 | 0 | func (imp, &info2); |
1351 | |
|
1352 | 0 | width_remain -= num_pixels; |
1353 | 0 | info2.mask_x += num_pixels; |
1354 | 0 | info2.dest_x += num_pixels; |
1355 | 0 | sx = 0; |
1356 | 0 | } |
1357 | |
|
1358 | 0 | sx = src_x; |
1359 | 0 | sy++; |
1360 | 0 | info2.mask_x = info->mask_x; |
1361 | 0 | info2.mask_y++; |
1362 | 0 | info2.dest_x = info->dest_x; |
1363 | 0 | info2.dest_y++; |
1364 | 0 | } |
1365 | |
|
1366 | 0 | if (need_src_extension) |
1367 | 0 | _pixman_image_fini (&extended_src_image); |
1368 | 0 | } |
1369 | | |
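/*
 * Illustrative note (not part of the original file): fast_composite_tiled_repeat
 * handles NORMAL (tiling) repeat by looking up the equivalent non-repeating
 * fast path and calling it once per horizontal span, wrapping src_x and
 * src_y with MOD.  Sources narrower than REPEAT_MIN_WIDTH pixels are first
 * replicated row by row into the stack buffer extended_src, so the inner
 * calls never degenerate into long runs of one- or two-pixel composites.
 */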
1370 | | /* Use more unrolling for src_0565_0565 because it is typically CPU bound */ |
1371 | | static force_inline void |
1372 | | scaled_nearest_scanline_565_565_SRC (uint16_t * dst, |
1373 | | const uint16_t * src, |
1374 | | int32_t w, |
1375 | | pixman_fixed_t vx, |
1376 | | pixman_fixed_t unit_x, |
1377 | | pixman_fixed_t max_vx, |
1378 | | pixman_bool_t fully_transparent_src) |
1379 | 0 | { |
1380 | 0 | uint16_t tmp1, tmp2, tmp3, tmp4; |
1381 | 0 | while ((w -= 4) >= 0) |
1382 | 0 | { |
1383 | 0 | tmp1 = *(src + pixman_fixed_to_int (vx)); |
1384 | 0 | vx += unit_x; |
1385 | 0 | tmp2 = *(src + pixman_fixed_to_int (vx)); |
1386 | 0 | vx += unit_x; |
1387 | 0 | tmp3 = *(src + pixman_fixed_to_int (vx)); |
1388 | 0 | vx += unit_x; |
1389 | 0 | tmp4 = *(src + pixman_fixed_to_int (vx)); |
1390 | 0 | vx += unit_x; |
1391 | 0 | *dst++ = tmp1; |
1392 | 0 | *dst++ = tmp2; |
1393 | 0 | *dst++ = tmp3; |
1394 | 0 | *dst++ = tmp4; |
1395 | 0 | } |
1396 | 0 | if (w & 2) |
1397 | 0 | { |
1398 | 0 | tmp1 = *(src + pixman_fixed_to_int (vx)); |
1399 | 0 | vx += unit_x; |
1400 | 0 | tmp2 = *(src + pixman_fixed_to_int (vx)); |
1401 | 0 | vx += unit_x; |
1402 | 0 | *dst++ = tmp1; |
1403 | 0 | *dst++ = tmp2; |
1404 | 0 | } |
1405 | 0 | if (w & 1) |
1406 | 0 | *dst = *(src + pixman_fixed_to_int (vx)); |
1407 | 0 | } |
1408 | | |
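/*
 * Illustrative sketch (not part of the original file): the nearest-neighbour
 * scanline walkers step a 16.16 fixed-point source coordinate by unit_x for
 * every destination pixel.  Stripped of the 4x unrolling above, the loop is
 * just (hypothetical reference version):
 */
static void
nearest_scanline_reference (uint16_t       *dst,
                            const uint16_t *src,
                            int32_t         w,
                            pixman_fixed_t  vx,
                            pixman_fixed_t  unit_x)
{
    while (w--)
    {
        *dst++ = src[pixman_fixed_to_int (vx)];   /* vx >> 16 */
        vx += unit_x;
    }
}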
1409 | | FAST_NEAREST_MAINLOOP (565_565_cover_SRC, |
1410 | | scaled_nearest_scanline_565_565_SRC, |
1411 | | uint16_t, uint16_t, COVER) |
1412 | | FAST_NEAREST_MAINLOOP (565_565_none_SRC, |
1413 | | scaled_nearest_scanline_565_565_SRC, |
1414 | | uint16_t, uint16_t, NONE) |
1415 | | FAST_NEAREST_MAINLOOP (565_565_pad_SRC, |
1416 | | scaled_nearest_scanline_565_565_SRC, |
1417 | | uint16_t, uint16_t, PAD) |
1418 | | |
1419 | | static force_inline uint32_t |
1420 | | fetch_nearest (pixman_repeat_t src_repeat, |
1421 | | pixman_format_code_t format, |
1422 | | uint32_t *src, int x, int src_width) |
1423 | 0 | { |
1424 | 0 | if (repeat (src_repeat, &x, src_width)) |
1425 | 0 | { |
1426 | 0 | if (format == PIXMAN_x8r8g8b8 || format == PIXMAN_x8b8g8r8) |
1427 | 0 | return *(src + x) | 0xff000000; |
1428 | 0 | else |
1429 | 0 | return *(src + x); |
1430 | 0 | } |
1431 | 0 | else |
1432 | 0 | { |
1433 | 0 | return 0; |
1434 | 0 | } |
1435 | 0 | } |
1436 | | |
1437 | | static force_inline void |
1438 | | combine_over (uint32_t s, uint32_t *dst) |
1439 | 0 | { |
1440 | 0 | if (s) |
1441 | 0 | { |
1442 | 0 | uint8_t ia = 0xff - (s >> 24); |
1443 | |
|
1444 | 0 | if (ia) |
1445 | 0 | UN8x4_MUL_UN8_ADD_UN8x4 (*dst, ia, s); |
1446 | 0 | else |
1447 | 0 | *dst = s; |
1448 | 0 | } |
1449 | 0 | } |
1450 | | |
1451 | | static force_inline void |
1452 | | combine_src (uint32_t s, uint32_t *dst) |
1453 | 0 | { |
1454 | 0 | *dst = s; |
1455 | 0 | } |
1456 | | |
1457 | | static void |
1458 | | fast_composite_scaled_nearest (pixman_implementation_t *imp, |
1459 | | pixman_composite_info_t *info) |
1460 | 0 | { |
1461 | 0 | PIXMAN_COMPOSITE_ARGS (info); |
1462 | 0 | uint32_t *dst_line; |
1463 | 0 | uint32_t *src_line; |
1464 | 0 | int dst_stride, src_stride; |
1465 | 0 | int src_width, src_height; |
1466 | 0 | pixman_repeat_t src_repeat; |
1467 | 0 | pixman_fixed_t unit_x, unit_y; |
1468 | 0 | pixman_format_code_t src_format; |
1469 | 0 | pixman_vector_t v; |
1470 | 0 | pixman_fixed_t vy; |
1471 | |
|
1472 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint32_t, dst_stride, dst_line, 1); |
1473 | | /* pass in 0 instead of src_x and src_y because src_x and src_y need to be |
1474 | | * transformed from destination space to source space |
1475 | | */ |
1476 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, uint32_t, src_stride, src_line, 1); |
1477 | | |
1478 | | /* reference point is the center of the pixel */ |
1479 | 0 | v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; |
1480 | 0 | v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; |
1481 | 0 | v.vector[2] = pixman_fixed_1; |
1482 | |
|
1483 | 0 | if (!pixman_transform_point_3d (src_image->common.transform, &v)) |
1484 | 0 | return; |
1485 | | |
1486 | 0 | unit_x = src_image->common.transform->matrix[0][0]; |
1487 | 0 | unit_y = src_image->common.transform->matrix[1][1]; |
1488 | | |
1489 | | /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */ |
1490 | 0 | v.vector[0] -= pixman_fixed_e; |
1491 | 0 | v.vector[1] -= pixman_fixed_e; |
1492 | |
|
1493 | 0 | src_height = src_image->bits.height; |
1494 | 0 | src_width = src_image->bits.width; |
1495 | 0 | src_repeat = src_image->common.repeat; |
1496 | 0 | src_format = src_image->bits.format; |
1497 | |
|
1498 | 0 | vy = v.vector[1]; |
1499 | 0 | while (height--) |
1500 | 0 | { |
1501 | 0 | pixman_fixed_t vx = v.vector[0]; |
1502 | 0 | int y = pixman_fixed_to_int (vy); |
1503 | 0 | uint32_t *dst = dst_line; |
1504 | |
|
1505 | 0 | dst_line += dst_stride; |
1506 | | |
1507 | | /* adjust the y location by a unit vector in the y direction |
1508 | | * this is equivalent to transforming y+1 of the destination point to source space */ |
1509 | 0 | vy += unit_y; |
1510 | |
|
1511 | 0 | if (!repeat (src_repeat, &y, src_height)) |
1512 | 0 | { |
1513 | 0 | if (op == PIXMAN_OP_SRC) |
1514 | 0 | memset (dst, 0, sizeof (*dst) * width); |
1515 | 0 | } |
1516 | 0 | else |
1517 | 0 | { |
1518 | 0 | int w = width; |
1519 | |
|
1520 | 0 | uint32_t *src = src_line + y * src_stride; |
1521 | |
|
1522 | 0 | while (w >= 2) |
1523 | 0 | { |
1524 | 0 | uint32_t s1, s2; |
1525 | 0 | int x1, x2; |
1526 | |
|
1527 | 0 | x1 = pixman_fixed_to_int (vx); |
1528 | 0 | vx += unit_x; |
1529 | |
|
1530 | 0 | x2 = pixman_fixed_to_int (vx); |
1531 | 0 | vx += unit_x; |
1532 | |
|
1533 | 0 | w -= 2; |
1534 | |
|
1535 | 0 | s1 = fetch_nearest (src_repeat, src_format, src, x1, src_width); |
1536 | 0 | s2 = fetch_nearest (src_repeat, src_format, src, x2, src_width); |
1537 | |
|
1538 | 0 | if (op == PIXMAN_OP_OVER) |
1539 | 0 | { |
1540 | 0 | combine_over (s1, dst++); |
1541 | 0 | combine_over (s2, dst++); |
1542 | 0 | } |
1543 | 0 | else |
1544 | 0 | { |
1545 | 0 | combine_src (s1, dst++); |
1546 | 0 | combine_src (s2, dst++); |
1547 | 0 | } |
1548 | 0 | } |
1549 | |
|
1550 | 0 | while (w--) |
1551 | 0 | { |
1552 | 0 | uint32_t s; |
1553 | 0 | int x; |
1554 | |
|
1555 | 0 | x = pixman_fixed_to_int (vx); |
1556 | 0 | vx += unit_x; |
1557 | |
|
1558 | 0 | s = fetch_nearest (src_repeat, src_format, src, x, src_width); |
1559 | |
|
1560 | 0 | if (op == PIXMAN_OP_OVER) |
1561 | 0 | combine_over (s, dst++); |
1562 | 0 | else |
1563 | 0 | combine_src (s, dst++); |
1564 | 0 | } |
1565 | 0 | } |
1566 | 0 | } |
1567 | 0 | } |
1568 | | |
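/*
 * Illustrative note (not part of the original file): the coordinate setup in
 * fast_composite_scaled_nearest samples at destination pixel centres:
 * (src_x + 0.5, src_y + 0.5) is transformed into source space as 16.16
 * fixed point, and pixman_fixed_e (the smallest representable increment,
 * 1/65536) is subtracted so that a transformed coordinate landing exactly
 * on the boundary between two source pixels truncates to the lower-numbered
 * one, matching the "0.5 rounds to 0, not 1" comment above.
 */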
1569 | 0 | #define CACHE_LINE_SIZE 64 |
1570 | | |
1571 | | #define FAST_SIMPLE_ROTATE(suffix, pix_type) \ |
1572 | | \ |
1573 | | static void \ |
1574 | | blt_rotated_90_trivial_##suffix (pix_type *dst, \ |
1575 | | int dst_stride, \ |
1576 | | const pix_type *src, \ |
1577 | | int src_stride, \ |
1578 | | int w, \ |
1579 | 0 | int h) \ |
1580 | 0 | { \ |
1581 | 0 | int x, y; \ |
1582 | 0 | for (y = 0; y < h; y++) \ |
1583 | 0 | { \ |
1584 | 0 | const pix_type *s = src + (h - y - 1); \ |
1585 | 0 | pix_type *d = dst + dst_stride * y; \ |
1586 | 0 | for (x = 0; x < w; x++) \ |
1587 | 0 | { \ |
1588 | 0 | *d++ = *s; \ |
1589 | 0 | s += src_stride; \ |
1590 | 0 | } \ |
1591 | 0 | } \ |
1592 | 0 | } \
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_90_trivial_8888
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_90_trivial_565
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_90_trivial_8
1593 | | \ |
1594 | | static void \ |
1595 | | blt_rotated_270_trivial_##suffix (pix_type *dst, \ |
1596 | | int dst_stride, \ |
1597 | | const pix_type *src, \ |
1598 | | int src_stride, \ |
1599 | | int w, \ |
1600 | 0 | int h) \ |
1601 | 0 | { \ |
1602 | 0 | int x, y; \ |
1603 | 0 | for (y = 0; y < h; y++) \ |
1604 | 0 | { \ |
1605 | 0 | const pix_type *s = src + src_stride * (w - 1) + y; \ |
1606 | 0 | pix_type *d = dst + dst_stride * y; \ |
1607 | 0 | for (x = 0; x < w; x++) \ |
1608 | 0 | { \ |
1609 | 0 | *d++ = *s; \ |
1610 | 0 | s -= src_stride; \ |
1611 | 0 | } \ |
1612 | 0 | } \ |
1613 | 0 | } \
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_270_trivial_8888
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_270_trivial_565
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_270_trivial_8
1614 | | \ |
1615 | | static void \ |
1616 | | blt_rotated_90_##suffix (pix_type *dst, \ |
1617 | | int dst_stride, \ |
1618 | | const pix_type *src, \ |
1619 | | int src_stride, \ |
1620 | | int W, \ |
1621 | 0 | int H) \ |
1622 | 0 | { \ |
1623 | 0 | int x; \ |
1624 | 0 | int leading_pixels = 0, trailing_pixels = 0; \ |
1625 | 0 | const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type); \ |
1626 | 0 | \ |
1627 | 0 | /* \ |
1628 | 0 | * split processing into handling destination as TILE_SIZExH cache line \ |
1629 | 0 | * aligned vertical stripes (optimistically assuming that destination \ |
1630 | 0 | * stride is a multiple of cache line, if not - it will be just a bit \ |
1631 | 0 | * slower) \ |
1632 | 0 | */ \ |
1633 | 0 | \ |
1634 | 0 | if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1)) \ |
1635 | 0 | { \ |
1636 | 0 | leading_pixels = TILE_SIZE - (((uintptr_t)dst & \ |
1637 | 0 | (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \ |
1638 | 0 | if (leading_pixels > W) \ |
1639 | 0 | leading_pixels = W; \ |
1640 | 0 | \ |
1641 | 0 | /* unaligned leading part NxH (where N < TILE_SIZE) */ \ |
1642 | 0 | blt_rotated_90_trivial_##suffix ( \ |
1643 | 0 | dst, \ |
1644 | 0 | dst_stride, \ |
1645 | 0 | src, \ |
1646 | 0 | src_stride, \ |
1647 | 0 | leading_pixels, \ |
1648 | 0 | H); \ |
1649 | 0 | \ |
1650 | 0 | dst += leading_pixels; \ |
1651 | 0 | src += leading_pixels * src_stride; \ |
1652 | 0 | W -= leading_pixels; \ |
1653 | 0 | } \ |
1654 | 0 | \ |
1655 | 0 | if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1)) \ |
1656 | 0 | { \ |
1657 | 0 | trailing_pixels = (((uintptr_t)(dst + W) & \ |
1658 | 0 | (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \ |
1659 | 0 | if (trailing_pixels > W) \ |
1660 | 0 | trailing_pixels = W; \ |
1661 | 0 | W -= trailing_pixels; \ |
1662 | 0 | } \ |
1663 | 0 | \ |
1664 | 0 | for (x = 0; x < W; x += TILE_SIZE) \ |
1665 | 0 | { \ |
1666 | 0 | /* aligned middle part TILE_SIZExH */ \ |
1667 | 0 | blt_rotated_90_trivial_##suffix ( \ |
1668 | 0 | dst + x, \ |
1669 | 0 | dst_stride, \ |
1670 | 0 | src + src_stride * x, \ |
1671 | 0 | src_stride, \ |
1672 | 0 | TILE_SIZE, \ |
1673 | 0 | H); \ |
1674 | 0 | } \ |
1675 | 0 | \ |
1676 | 0 | if (trailing_pixels) \ |
1677 | 0 | { \ |
1678 | 0 | /* unaligned trailing part NxH (where N < TILE_SIZE) */ \ |
1679 | 0 | blt_rotated_90_trivial_##suffix ( \ |
1680 | 0 | dst + W, \ |
1681 | 0 | dst_stride, \ |
1682 | 0 | src + W * src_stride, \ |
1683 | 0 | src_stride, \ |
1684 | 0 | trailing_pixels, \ |
1685 | 0 | H); \ |
1686 | 0 | } \ |
1687 | 0 | } \
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_90_8888
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_90_565
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_90_8
1688 | | \ |
1689 | | static void \ |
1690 | | blt_rotated_270_##suffix (pix_type *dst, \ |
1691 | | int dst_stride, \ |
1692 | | const pix_type *src, \ |
1693 | | int src_stride, \ |
1694 | | int W, \ |
1695 | 0 | int H) \ |
1696 | 0 | { \ |
1697 | 0 | int x; \ |
1698 | 0 | int leading_pixels = 0, trailing_pixels = 0; \ |
1699 | 0 | const int TILE_SIZE = CACHE_LINE_SIZE / sizeof(pix_type); \ |
1700 | 0 | \ |
1701 | 0 | /* \ |
1702 | 0 | * split processing into handling destination as TILE_SIZExH cache line \ |
1703 | 0 | * aligned vertical stripes (optimistically assuming that destination \ |
1704 | 0 | * stride is a multiple of cache line, if not - it will be just a bit \ |
1705 | 0 | * slower) \ |
1706 | 0 | */ \ |
1707 | 0 | \ |
1708 | 0 | if ((uintptr_t)dst & (CACHE_LINE_SIZE - 1)) \ |
1709 | 0 | { \ |
1710 | 0 | leading_pixels = TILE_SIZE - (((uintptr_t)dst & \ |
1711 | 0 | (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \ |
1712 | 0 | if (leading_pixels > W) \ |
1713 | 0 | leading_pixels = W; \ |
1714 | 0 | \ |
1715 | 0 | /* unaligned leading part NxH (where N < TILE_SIZE) */ \ |
1716 | 0 | blt_rotated_270_trivial_##suffix ( \ |
1717 | 0 | dst, \ |
1718 | 0 | dst_stride, \ |
1719 | 0 | src + src_stride * (W - leading_pixels), \ |
1720 | 0 | src_stride, \ |
1721 | 0 | leading_pixels, \ |
1722 | 0 | H); \ |
1723 | 0 | \ |
1724 | 0 | dst += leading_pixels; \ |
1725 | 0 | W -= leading_pixels; \ |
1726 | 0 | } \ |
1727 | 0 | \ |
1728 | 0 | if ((uintptr_t)(dst + W) & (CACHE_LINE_SIZE - 1)) \ |
1729 | 0 | { \ |
1730 | 0 | trailing_pixels = (((uintptr_t)(dst + W) & \ |
1731 | 0 | (CACHE_LINE_SIZE - 1)) / sizeof(pix_type)); \ |
1732 | 0 | if (trailing_pixels > W) \ |
1733 | 0 | trailing_pixels = W; \ |
1734 | 0 | W -= trailing_pixels; \ |
1735 | 0 | src += trailing_pixels * src_stride; \ |
1736 | 0 | } \ |
1737 | 0 | \ |
1738 | 0 | for (x = 0; x < W; x += TILE_SIZE) \ |
1739 | 0 | { \ |
1740 | 0 | /* aligned middle part TILE_SIZExH */ \ |
1741 | 0 | blt_rotated_270_trivial_##suffix ( \ |
1742 | 0 | dst + x, \ |
1743 | 0 | dst_stride, \ |
1744 | 0 | src + src_stride * (W - x - TILE_SIZE), \ |
1745 | 0 | src_stride, \ |
1746 | 0 | TILE_SIZE, \ |
1747 | 0 | H); \ |
1748 | 0 | } \ |
1749 | 0 | \ |
1750 | 0 | if (trailing_pixels) \ |
1751 | 0 | { \ |
1752 | 0 | /* unaligned trailing part NxH (where N < TILE_SIZE) */ \ |
1753 | 0 | blt_rotated_270_trivial_##suffix ( \ |
1754 | 0 | dst + W, \ |
1755 | 0 | dst_stride, \ |
1756 | 0 | src - trailing_pixels * src_stride, \ |
1757 | 0 | src_stride, \ |
1758 | 0 | trailing_pixels, \ |
1759 | 0 | H); \ |
1760 | 0 | } \ |
1761 | 0 | } \
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_270_8888
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_270_565
Unexecuted instantiation: pixman-fast-path.c:blt_rotated_270_8
1762 | | \ |
1763 | | static void \ |
1764 | | fast_composite_rotate_90_##suffix (pixman_implementation_t *imp, \ |
1765 | 0 | pixman_composite_info_t *info) \ |
1766 | 0 | { \ |
1767 | 0 | PIXMAN_COMPOSITE_ARGS (info); \ |
1768 | 0 | pix_type *dst_line; \ |
1769 | 0 | pix_type *src_line; \ |
1770 | 0 | int dst_stride, src_stride; \ |
1771 | 0 | int src_x_t, src_y_t; \ |
1772 | 0 | \ |
1773 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type, \ |
1774 | 0 | dst_stride, dst_line, 1); \ |
1775 | 0 | src_x_t = -src_y + pixman_fixed_to_int ( \ |
1776 | 0 | src_image->common.transform->matrix[0][2] + \ |
1777 | 0 | pixman_fixed_1 / 2 - pixman_fixed_e) - height;\ |
1778 | 0 | src_y_t = src_x + pixman_fixed_to_int ( \ |
1779 | 0 | src_image->common.transform->matrix[1][2] + \ |
1780 | 0 | pixman_fixed_1 / 2 - pixman_fixed_e); \ |
1781 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type, \ |
1782 | 0 | src_stride, src_line, 1); \ |
1783 | 0 | blt_rotated_90_##suffix (dst_line, dst_stride, src_line, src_stride, \ |
1784 | 0 | width, height); \ |
1785 | 0 | } \
Unexecuted instantiation: pixman-fast-path.c:fast_composite_rotate_90_8888
Unexecuted instantiation: pixman-fast-path.c:fast_composite_rotate_90_565
Unexecuted instantiation: pixman-fast-path.c:fast_composite_rotate_90_8
1786 | | \ |
1787 | | static void \ |
1788 | | fast_composite_rotate_270_##suffix (pixman_implementation_t *imp, \ |
1789 | 0 | pixman_composite_info_t *info) \ |
1790 | 0 | { \ |
1791 | 0 | PIXMAN_COMPOSITE_ARGS (info); \ |
1792 | 0 | pix_type *dst_line; \ |
1793 | 0 | pix_type *src_line; \ |
1794 | 0 | int dst_stride, src_stride; \ |
1795 | 0 | int src_x_t, src_y_t; \ |
1796 | 0 | \ |
1797 | 0 | PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, pix_type, \ |
1798 | 0 | dst_stride, dst_line, 1); \ |
1799 | 0 | src_x_t = src_y + pixman_fixed_to_int ( \ |
1800 | 0 | src_image->common.transform->matrix[0][2] + \ |
1801 | 0 | pixman_fixed_1 / 2 - pixman_fixed_e); \ |
1802 | 0 | src_y_t = -src_x + pixman_fixed_to_int ( \ |
1803 | 0 | src_image->common.transform->matrix[1][2] + \ |
1804 | 0 | pixman_fixed_1 / 2 - pixman_fixed_e) - width; \ |
1805 | 0 | PIXMAN_IMAGE_GET_LINE (src_image, src_x_t, src_y_t, pix_type, \ |
1806 | 0 | src_stride, src_line, 1); \ |
1807 | 0 | blt_rotated_270_##suffix (dst_line, dst_stride, src_line, src_stride, \ |
1808 | 0 | width, height); \ |
1809 | 0 | }
Unexecuted instantiation: pixman-fast-path.c:fast_composite_rotate_270_8888
Unexecuted instantiation: pixman-fast-path.c:fast_composite_rotate_270_565
Unexecuted instantiation: pixman-fast-path.c:fast_composite_rotate_270_8
1810 | | |
1811 | | FAST_SIMPLE_ROTATE (8, uint8_t) |
1812 | | FAST_SIMPLE_ROTATE (565, uint16_t) |
1813 | | FAST_SIMPLE_ROTATE (8888, uint32_t) |
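For reference, FAST_SIMPLE_ROTATE expands, for each of the three pixel types above, into the trivial and cache-line-tiled 90/270-degree blitters plus the two fast_composite_rotate_* wrappers used by the fast-path table below. The per-pixel work of a 90-degree rotation amounts to a transposed copy; the following is a minimal stand-alone sketch of that idea (not pixman's exact trivial blitter, and ignoring the cache tiling shown above), with strides counted in pixels:

    #include <stdint.h>

    /* Copy a w x h source block into an h x w destination, rotated
     * 90 degrees clockwise. */
    static void
    rotate_90_sketch (uint32_t *dst, int dst_stride,
                      const uint32_t *src, int src_stride,
                      int w, int h)
    {
        int x, y;

        for (y = 0; y < w; y++)          /* destination rows    */
        {
            for (x = 0; x < h; x++)      /* destination columns */
            {
                /* dst(y, x) <- src(h - 1 - x, y) */
                dst[y * dst_stride + x] = src[(h - 1 - x) * src_stride + y];
            }
        }
    }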
1814 | | |
1815 | | static const pixman_fast_path_t c_fast_paths[] = |
1816 | | { |
1817 | | PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, fast_composite_over_n_8_0565), |
1818 | | PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, fast_composite_over_n_8_0565), |
1819 | | PIXMAN_STD_FAST_PATH (OVER, solid, a8, r8g8b8, fast_composite_over_n_8_0888), |
1820 | | PIXMAN_STD_FAST_PATH (OVER, solid, a8, b8g8r8, fast_composite_over_n_8_0888), |
1821 | | PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, fast_composite_over_n_8_8888), |
1822 | | PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, fast_composite_over_n_8_8888), |
1823 | | PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, fast_composite_over_n_8_8888), |
1824 | | PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, fast_composite_over_n_8_8888), |
1825 | | PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8r8g8b8, fast_composite_over_n_1_8888), |
1826 | | PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8r8g8b8, fast_composite_over_n_1_8888), |
1827 | | PIXMAN_STD_FAST_PATH (OVER, solid, a1, a8b8g8r8, fast_composite_over_n_1_8888), |
1828 | | PIXMAN_STD_FAST_PATH (OVER, solid, a1, x8b8g8r8, fast_composite_over_n_1_8888), |
1829 | | PIXMAN_STD_FAST_PATH (OVER, solid, a1, r5g6b5, fast_composite_over_n_1_0565), |
1830 | | PIXMAN_STD_FAST_PATH (OVER, solid, a1, b5g6r5, fast_composite_over_n_1_0565), |
1831 | | PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, fast_composite_over_n_8888_8888_ca), |
1832 | | PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, fast_composite_over_n_8888_8888_ca), |
1833 | | PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, fast_composite_over_n_8888_0565_ca), |
1834 | | PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, fast_composite_over_n_8888_8888_ca), |
1835 | | PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, fast_composite_over_n_8888_8888_ca), |
1836 | | PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, fast_composite_over_n_8888_0565_ca), |
1837 | | PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, x8r8g8b8, fast_composite_over_x888_8_8888), |
1838 | | PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, a8, a8r8g8b8, fast_composite_over_x888_8_8888), |
1839 | | PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, x8b8g8r8, fast_composite_over_x888_8_8888), |
1840 | | PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, a8, a8b8g8r8, fast_composite_over_x888_8_8888), |
1841 | | PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, fast_composite_over_8888_8888), |
1842 | | PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, fast_composite_over_8888_8888), |
1843 | | PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, fast_composite_over_8888_0565), |
1844 | | PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888), |
1845 | | PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888), |
1846 | | PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565), |
1847 | | PIXMAN_STD_FAST_PATH (ADD, r5g6b5, null, r5g6b5, fast_composite_add_0565_0565), |
1848 | | PIXMAN_STD_FAST_PATH (ADD, b5g6r5, null, b5g6r5, fast_composite_add_0565_0565), |
1849 | | PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888), |
1850 | | PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888), |
1851 | | PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8), |
1852 | | PIXMAN_STD_FAST_PATH (ADD, a1, null, a1, fast_composite_add_1_1), |
1853 | | PIXMAN_STD_FAST_PATH_CA (ADD, solid, a8r8g8b8, a8r8g8b8, fast_composite_add_n_8888_8888_ca), |
1854 | | PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, fast_composite_add_n_8_8), |
1855 | | PIXMAN_STD_FAST_PATH (SRC, solid, null, a8r8g8b8, fast_composite_solid_fill), |
1856 | | PIXMAN_STD_FAST_PATH (SRC, solid, null, x8r8g8b8, fast_composite_solid_fill), |
1857 | | PIXMAN_STD_FAST_PATH (SRC, solid, null, a8b8g8r8, fast_composite_solid_fill), |
1858 | | PIXMAN_STD_FAST_PATH (SRC, solid, null, x8b8g8r8, fast_composite_solid_fill), |
1859 | | PIXMAN_STD_FAST_PATH (SRC, solid, null, a1, fast_composite_solid_fill), |
1860 | | PIXMAN_STD_FAST_PATH (SRC, solid, null, a8, fast_composite_solid_fill), |
1861 | | PIXMAN_STD_FAST_PATH (SRC, solid, null, r5g6b5, fast_composite_solid_fill), |
1862 | | PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, fast_composite_src_x888_8888), |
1863 | | PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, fast_composite_src_x888_8888), |
1864 | | PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy), |
1865 | | PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, fast_composite_src_memcpy), |
1866 | | PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, fast_composite_src_memcpy), |
1867 | | PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy), |
1868 | | PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, fast_composite_src_memcpy), |
1869 | | PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, fast_composite_src_memcpy), |
1870 | | PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8x8, fast_composite_src_memcpy), |
1871 | | PIXMAN_STD_FAST_PATH (SRC, b8g8r8a8, null, b8g8r8a8, fast_composite_src_memcpy), |
1872 | | PIXMAN_STD_FAST_PATH (SRC, b8g8r8x8, null, b8g8r8x8, fast_composite_src_memcpy), |
1873 | | PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, fast_composite_src_memcpy), |
1874 | | PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, fast_composite_src_memcpy), |
1875 | | PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, fast_composite_src_memcpy), |
1876 | | PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, b8g8r8, fast_composite_src_memcpy), |
1877 | | PIXMAN_STD_FAST_PATH (SRC, x1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy), |
1878 | | PIXMAN_STD_FAST_PATH (SRC, a1r5g5b5, null, x1r5g5b5, fast_composite_src_memcpy), |
1879 | | PIXMAN_STD_FAST_PATH (SRC, a8, null, a8, fast_composite_src_memcpy), |
1880 | | PIXMAN_STD_FAST_PATH (IN, a8, null, a8, fast_composite_in_8_8), |
1881 | | PIXMAN_STD_FAST_PATH (IN, solid, a8, a8, fast_composite_in_n_8_8), |
1882 | | |
1883 | | SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888_8888), |
1884 | | SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888_8888), |
1885 | | SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, 8888_8888), |
1886 | | SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, 8888_8888), |
1887 | | |
1888 | | SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888_8888), |
1889 | | SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, 8888_8888), |
1890 | | |
1891 | | SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, 8888_565), |
1892 | | SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, 8888_565), |
1893 | | |
1894 | | SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, r5g6b5, 565_565), |
1895 | | |
1896 | | SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8r8g8b8, a8r8g8b8, x888_8888), |
1897 | | SIMPLE_NEAREST_FAST_PATH_COVER (SRC, x8b8g8r8, a8b8g8r8, x888_8888), |
1898 | | SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8r8g8b8, a8r8g8b8, x888_8888), |
1899 | | SIMPLE_NEAREST_FAST_PATH_PAD (SRC, x8b8g8r8, a8b8g8r8, x888_8888), |
1900 | | SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8r8g8b8, a8r8g8b8, x888_8888), |
1901 | | SIMPLE_NEAREST_FAST_PATH_NORMAL (SRC, x8b8g8r8, a8b8g8r8, x888_8888), |
1902 | | |
1903 | | SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, 8888_8888), |
1904 | | SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, 8888_8888), |
1905 | | SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, 8888_8888), |
1906 | | SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, 8888_8888), |
1907 | | |
1908 | | SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, 8888_565), |
1909 | | |
1910 | | #define NEAREST_FAST_PATH(op,s,d) \ |
1911 | | { PIXMAN_OP_ ## op, \ |
1912 | | PIXMAN_ ## s, SCALED_NEAREST_FLAGS, \ |
1913 | | PIXMAN_null, 0, \ |
1914 | | PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ |
1915 | | fast_composite_scaled_nearest, \ |
1916 | | } |
1917 | | |
1918 | | NEAREST_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8), |
1919 | | NEAREST_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8), |
1920 | | NEAREST_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8), |
1921 | | NEAREST_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8), |
1922 | | |
1923 | | NEAREST_FAST_PATH (SRC, x8r8g8b8, a8r8g8b8), |
1924 | | NEAREST_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8), |
1925 | | NEAREST_FAST_PATH (SRC, x8b8g8r8, a8b8g8r8), |
1926 | | NEAREST_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8), |
1927 | | |
1928 | | NEAREST_FAST_PATH (OVER, x8r8g8b8, x8r8g8b8), |
1929 | | NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8), |
1930 | | NEAREST_FAST_PATH (OVER, x8b8g8r8, x8b8g8r8), |
1931 | | NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8), |
1932 | | |
1933 | | NEAREST_FAST_PATH (OVER, x8r8g8b8, a8r8g8b8), |
1934 | | NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8), |
1935 | | NEAREST_FAST_PATH (OVER, x8b8g8r8, a8b8g8r8), |
1936 | | NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8), |
1937 | | |
1938 | | #define SIMPLE_ROTATE_FLAGS(angle) \ |
1939 | | (FAST_PATH_ROTATE_ ## angle ## _TRANSFORM | \ |
1940 | | FAST_PATH_NEAREST_FILTER | \ |
1941 | | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST | \ |
1942 | | FAST_PATH_STANDARD_FLAGS) |
1943 | | |
1944 | | #define SIMPLE_ROTATE_FAST_PATH(op,s,d,suffix) \ |
1945 | | { PIXMAN_OP_ ## op, \ |
1946 | | PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (90), \ |
1947 | | PIXMAN_null, 0, \ |
1948 | | PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ |
1949 | | fast_composite_rotate_90_##suffix, \ |
1950 | | }, \ |
1951 | | { PIXMAN_OP_ ## op, \ |
1952 | | PIXMAN_ ## s, SIMPLE_ROTATE_FLAGS (270), \ |
1953 | | PIXMAN_null, 0, \ |
1954 | | PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \ |
1955 | | fast_composite_rotate_270_##suffix, \ |
1956 | | } |
1957 | | |
1958 | | SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, 8888), |
1959 | | SIMPLE_ROTATE_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, 8888), |
1960 | | SIMPLE_ROTATE_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, 8888), |
1961 | | SIMPLE_ROTATE_FAST_PATH (SRC, r5g6b5, r5g6b5, 565), |
1962 | | SIMPLE_ROTATE_FAST_PATH (SRC, a8, a8, 8), |
1963 | | |
1964 | | /* Simple repeat fast path entry. */ |
1965 | | { PIXMAN_OP_any, |
1966 | | PIXMAN_any, |
1967 | | (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | FAST_PATH_BITS_IMAGE | |
1968 | | FAST_PATH_NORMAL_REPEAT), |
1969 | | PIXMAN_any, 0, |
1970 | | PIXMAN_any, FAST_PATH_STD_DEST_FLAGS, |
1971 | | fast_composite_tiled_repeat |
1972 | | }, |
1973 | | |
1974 | | { PIXMAN_OP_NONE }, |
1975 | | }; |
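Each entry in c_fast_paths pairs an operator and a source/mask/destination format with the flags that must hold for the fast path to apply. A hedged sketch of the matching predicate follows; the field names are illustrative and do not claim to be pixman's internal struct layout, but the principle is that every required flag of the entry must be present on the corresponding image:

    #include <stdint.h>

    typedef struct
    {
        int      op;
        int      src_format;  uint32_t src_flags;
        int      mask_format; uint32_t mask_flags;
        int      dest_format; uint32_t dest_flags;
    } fast_path_entry_t;

    static int
    fast_path_matches (const fast_path_entry_t *e,
                       int op,
                       int src_format,  uint32_t src_flags,
                       int mask_format, uint32_t mask_flags,
                       int dest_format, uint32_t dest_flags)
    {
        return e->op == op                                        &&
               e->src_format  == src_format                       &&
               e->mask_format == mask_format                      &&
               e->dest_format == dest_format                      &&
               (e->src_flags  & src_flags)  == e->src_flags       &&
               (e->mask_flags & mask_flags) == e->mask_flags      &&
               (e->dest_flags & dest_flags) == e->dest_flags;
    }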
1976 | | |
1977 | | #ifdef WORDS_BIGENDIAN |
1978 | | #define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (32 - (offs) - (n))) |
1979 | | #else |
1980 | 0 | #define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (offs)) |
1981 | | #endif |
1982 | | |
1983 | | static force_inline void |
1984 | | pixman_fill1_line (uint32_t *dst, int offs, int width, int v) |
1985 | 0 | { |
1986 | 0 | if (offs) |
1987 | 0 | { |
1988 | 0 | int leading_pixels = 32 - offs; |
1989 | 0 | if (leading_pixels >= width) |
1990 | 0 | { |
1991 | 0 | if (v) |
1992 | 0 | *dst |= A1_FILL_MASK (width, offs); |
1993 | 0 | else |
1994 | 0 | *dst &= ~A1_FILL_MASK (width, offs); |
1995 | 0 | return; |
1996 | 0 | } |
1997 | 0 | else |
1998 | 0 | { |
1999 | 0 | if (v) |
2000 | 0 | *dst++ |= A1_FILL_MASK (leading_pixels, offs); |
2001 | 0 | else |
2002 | 0 | *dst++ &= ~A1_FILL_MASK (leading_pixels, offs); |
2003 | 0 | width -= leading_pixels; |
2004 | 0 | } |
2005 | 0 | } |
2006 | 0 | while (width >= 32) |
2007 | 0 | { |
2008 | 0 | if (v) |
2009 | 0 | *dst++ = 0xFFFFFFFF; |
2010 | 0 | else |
2011 | 0 | *dst++ = 0; |
2012 | 0 | width -= 32; |
2013 | 0 | } |
2014 | 0 | if (width > 0) |
2015 | 0 | { |
2016 | 0 | if (v) |
2017 | 0 | *dst |= A1_FILL_MASK (width, 0); |
2018 | 0 | else |
2019 | 0 | *dst &= ~A1_FILL_MASK (width, 0); |
2020 | 0 | } |
2021 | 0 | } |
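A1_FILL_MASK (n, offs) builds a run of n one-bits starting at bit offs (in the little-endian branch; the big-endian branch places the run at the opposite end of the word), and pixman_fill1_line uses it to set or clear the partial words at the edges of a span. A minimal sketch exercising the little-endian definition:

    #include <assert.h>
    #include <stdio.h>

    #define A1_FILL_MASK(n, offs) (((1U << (n)) - 1) << (offs))

    int
    main (void)
    {
        assert (A1_FILL_MASK (1, 0)  == 0x00000001u);  /* single bit         */
        assert (A1_FILL_MASK (4, 4)  == 0x000000F0u);  /* 4 bits from bit 4  */
        assert (A1_FILL_MASK (8, 24) == 0xFF000000u);  /* top byte of a word */
        printf ("A1_FILL_MASK examples hold\n");
        return 0;
    }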
2022 | | |
2023 | | static void |
2024 | | pixman_fill1 (uint32_t *bits, |
2025 | | int stride, |
2026 | | int x, |
2027 | | int y, |
2028 | | int width, |
2029 | | int height, |
2030 | | uint32_t filler) |
2031 | 0 | { |
2032 | 0 | uint32_t *dst = bits + y * stride + (x >> 5); |
2033 | 0 | int offs = x & 31; |
2034 | |
|
2035 | 0 | if (filler & 1) |
2036 | 0 | { |
2037 | 0 | while (height--) |
2038 | 0 | { |
2039 | 0 | pixman_fill1_line (dst, offs, width, 1); |
2040 | 0 | dst += stride; |
2041 | 0 | } |
2042 | 0 | } |
2043 | 0 | else |
2044 | 0 | { |
2045 | 0 | while (height--) |
2046 | 0 | { |
2047 | 0 | pixman_fill1_line (dst, offs, width, 0); |
2048 | 0 | dst += stride; |
2049 | 0 | } |
2050 | 0 | } |
2051 | 0 | } |
2052 | | |
2053 | | static void |
2054 | | pixman_fill8 (uint32_t *bits, |
2055 | | int stride, |
2056 | | int x, |
2057 | | int y, |
2058 | | int width, |
2059 | | int height, |
2060 | | uint32_t filler) |
2061 | 0 | { |
2062 | 0 | int byte_stride = stride * (int) sizeof (uint32_t); |
2063 | 0 | uint8_t *dst = (uint8_t *) bits; |
2064 | 0 | uint8_t v = filler & 0xff; |
2065 | 0 | int i; |
2066 | |
|
2067 | 0 | dst = dst + y * byte_stride + x; |
2068 | |
|
2069 | 0 | while (height--) |
2070 | 0 | { |
2071 | 0 | for (i = 0; i < width; ++i) |
2072 | 0 | dst[i] = v; |
2073 | |
|
2074 | 0 | dst += byte_stride; |
2075 | 0 | } |
2076 | 0 | } |
2077 | | |
2078 | | static void |
2079 | | pixman_fill16 (uint32_t *bits, |
2080 | | int stride, |
2081 | | int x, |
2082 | | int y, |
2083 | | int width, |
2084 | | int height, |
2085 | | uint32_t filler) |
2086 | 0 | { |
2087 | 0 | int short_stride = |
2088 | 0 | (stride * (int)sizeof (uint32_t)) / (int)sizeof (uint16_t); |
2089 | 0 | uint16_t *dst = (uint16_t *)bits; |
2090 | 0 | uint16_t v = filler & 0xffff; |
2091 | 0 | int i; |
2092 | |
|
2093 | 0 | dst = dst + y * short_stride + x; |
2094 | |
|
2095 | 0 | while (height--) |
2096 | 0 | { |
2097 | 0 | for (i = 0; i < width; ++i) |
2098 | 0 | dst[i] = v; |
2099 | |
|
2100 | 0 | dst += short_stride; |
2101 | 0 | } |
2102 | 0 | } |
2103 | | |
2104 | | static void |
2105 | | pixman_fill32 (uint32_t *bits, |
2106 | | int stride, |
2107 | | int x, |
2108 | | int y, |
2109 | | int width, |
2110 | | int height, |
2111 | | uint32_t filler) |
2112 | 0 | { |
2113 | 0 | int i; |
2114 | |
|
2115 | 0 | bits = bits + y * stride + x; |
2116 | |
|
2117 | 0 | while (height--) |
2118 | 0 | { |
2119 | 0 | for (i = 0; i < width; ++i) |
2120 | 0 | bits[i] = filler; |
2121 | |
|
2122 | 0 | bits += stride; |
2123 | 0 | } |
2124 | 0 | } |
2125 | | |
2126 | | static pixman_bool_t |
2127 | | fast_path_fill (pixman_implementation_t *imp, |
2128 | | uint32_t * bits, |
2129 | | int stride, |
2130 | | int bpp, |
2131 | | int x, |
2132 | | int y, |
2133 | | int width, |
2134 | | int height, |
2135 | | uint32_t filler) |
2136 | 0 | { |
2137 | 0 | switch (bpp) |
2138 | 0 | { |
2139 | 0 | case 1: |
2140 | 0 | pixman_fill1 (bits, stride, x, y, width, height, filler); |
2141 | 0 | break; |
2142 | | |
2143 | 0 | case 8: |
2144 | 0 | pixman_fill8 (bits, stride, x, y, width, height, filler); |
2145 | 0 | break; |
2146 | | |
2147 | 0 | case 16: |
2148 | 0 | pixman_fill16 (bits, stride, x, y, width, height, filler); |
2149 | 0 | break; |
2150 | | |
2151 | 0 | case 32: |
2152 | 0 | pixman_fill32 (bits, stride, x, y, width, height, filler); |
2153 | 0 | break; |
2154 | | |
2155 | 0 | default: |
2156 | 0 | return FALSE; |
2157 | 0 | } |
2158 | | |
2159 | 0 | return TRUE; |
2160 | 0 | } |
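Note that the stride passed to these fill helpers is counted in 32-bit words, so each helper rescales it to its own pixel size before indexing (x, y). A small sketch of that addressing for the 8/16/32 bpp cases (pixel_address is a hypothetical helper used for illustration only; the 1 bpp path instead indexes whole words with x >> 5 plus a bit offset of x & 31):

    #include <stddef.h>
    #include <stdint.h>

    static void *
    pixel_address (uint32_t *bits, int stride_in_words, int bpp, int x, int y)
    {
        uint8_t *base        = (uint8_t *) bits;
        size_t   byte_stride = (size_t) stride_in_words * sizeof (uint32_t);

        /* valid for bpp of 8, 16 or 32 */
        return base + (size_t) y * byte_stride + (size_t) x * (bpp / 8);
    }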
2161 | | |
2162 | | /*****************************************************************************/ |
2163 | | |
2164 | | static uint32_t * |
2165 | | fast_fetch_r5g6b5 (pixman_iter_t *iter, const uint32_t *mask) |
2166 | 0 | { |
2167 | 0 | int32_t w = iter->width; |
2168 | 0 | uint32_t *dst = iter->buffer; |
2169 | 0 | const uint16_t *src = (const uint16_t *)iter->bits; |
2170 | |
|
2171 | 0 | iter->bits += iter->stride; |
2172 | | |
2173 | | /* Align the source buffer on a 4-byte boundary */ |
2174 | 0 | if (w > 0 && ((uintptr_t)src & 3)) |
2175 | 0 | { |
2176 | 0 | *dst++ = convert_0565_to_8888 (*src++); |
2177 | 0 | w--; |
2178 | 0 | } |
2179 | | /* Process two pixels per iteration */ |
2180 | 0 | while ((w -= 2) >= 0) |
2181 | 0 | { |
2182 | 0 | uint32_t sr, sb, sg, t0, t1; |
2183 | 0 | uint32_t s = *(const uint32_t *)src; |
2184 | 0 | src += 2; |
2185 | 0 | sr = (s >> 8) & 0x00F800F8; |
2186 | 0 | sb = (s << 3) & 0x00F800F8; |
2187 | 0 | sg = (s >> 3) & 0x00FC00FC; |
2188 | 0 | sr |= sr >> 5; |
2189 | 0 | sb |= sb >> 5; |
2190 | 0 | sg |= sg >> 6; |
2191 | 0 | t0 = ((sr << 16) & 0x00FF0000) | ((sg << 8) & 0x0000FF00) | |
2192 | 0 | (sb & 0xFF) | 0xFF000000; |
2193 | 0 | t1 = (sr & 0x00FF0000) | ((sg >> 8) & 0x0000FF00) | |
2194 | 0 | (sb >> 16) | 0xFF000000; |
2195 | | #ifdef WORDS_BIGENDIAN |
2196 | | *dst++ = t1; |
2197 | | *dst++ = t0; |
2198 | | #else |
2199 | 0 | *dst++ = t0; |
2200 | 0 | *dst++ = t1; |
2201 | 0 | #endif |
2202 | 0 | } |
2203 | 0 | if (w & 1) |
2204 | 0 | { |
2205 | 0 | *dst = convert_0565_to_8888 (*src); |
2206 | 0 | } |
2207 | |
|
2208 | 0 | return iter->buffer; |
2209 | 0 | } |
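fast_fetch_r5g6b5 expands two r5g6b5 pixels per iteration using SWAR masks. For reference, the per-pixel equivalent, assuming the usual replicate-high-bits widening with alpha forced to 0xff (the same replication that the sr |= sr >> 5 style lines above perform), is sketched here:

    #include <stdint.h>

    static uint32_t
    convert_0565_to_8888_ref (uint16_t s)
    {
        uint32_t r = (s >> 11) & 0x1f;
        uint32_t g = (s >> 5)  & 0x3f;
        uint32_t b =  s        & 0x1f;

        r = (r << 3) | (r >> 2);   /* replicate high bits into the low bits */
        g = (g << 2) | (g >> 4);
        b = (b << 3) | (b >> 2);

        return 0xff000000u | (r << 16) | (g << 8) | b;
    }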
2210 | | |
2211 | | static uint32_t * |
2212 | | fast_dest_fetch_noop (pixman_iter_t *iter, const uint32_t *mask) |
2213 | 0 | { |
2214 | 0 | iter->bits += iter->stride; |
2215 | 0 | return iter->buffer; |
2216 | 0 | } |
2217 | | |
2218 | | /* Helper function for a workaround, which tries to ensure that the |
2219 | | * 0x1F001F constant is always allocated in a register on RISC architectures. |
2220 | | */ |
2221 | | static force_inline uint32_t |
2222 | | convert_8888_to_0565_workaround (uint32_t s, uint32_t x1F001F) |
2223 | 0 | { |
2224 | 0 | uint32_t a, b; |
2225 | 0 | a = (s >> 3) & x1F001F; |
2226 | 0 | b = s & 0xFC00; |
2227 | 0 | a |= a >> 5; |
2228 | 0 | a |= b >> 5; |
2229 | 0 | return a; |
2230 | 0 | } |
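For the opposite direction, the packing done by convert_8888_to_0565_workaround is equivalent, after truncation to 16 bits, to keeping the top 5/6/5 bits of each channel. A scalar reference sketch:

    #include <stdint.h>

    static uint16_t
    convert_8888_to_0565_ref (uint32_t s)
    {
        uint32_t r = (s >> 16) & 0xff;
        uint32_t g = (s >> 8)  & 0xff;
        uint32_t b =  s        & 0xff;

        /* keep the top 5/6/5 bits of each channel */
        return (uint16_t) (((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
    }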
2231 | | |
2232 | | static void |
2233 | | fast_write_back_r5g6b5 (pixman_iter_t *iter) |
2234 | 0 | { |
2235 | 0 | int32_t w = iter->width; |
2236 | 0 | uint16_t *dst = (uint16_t *)(iter->bits - iter->stride); |
2237 | 0 | const uint32_t *src = iter->buffer; |
2238 | | /* Workaround to ensure that the x1F001F variable is allocated in a register */ |
2239 | 0 | static volatile uint32_t volatile_x1F001F = 0x1F001F; |
2240 | 0 | uint32_t x1F001F = volatile_x1F001F; |
2241 | |
|
2242 | 0 | while ((w -= 4) >= 0) |
2243 | 0 | { |
2244 | 0 | uint32_t s1 = *src++; |
2245 | 0 | uint32_t s2 = *src++; |
2246 | 0 | uint32_t s3 = *src++; |
2247 | 0 | uint32_t s4 = *src++; |
2248 | 0 | *dst++ = convert_8888_to_0565_workaround (s1, x1F001F); |
2249 | 0 | *dst++ = convert_8888_to_0565_workaround (s2, x1F001F); |
2250 | 0 | *dst++ = convert_8888_to_0565_workaround (s3, x1F001F); |
2251 | 0 | *dst++ = convert_8888_to_0565_workaround (s4, x1F001F); |
2252 | 0 | } |
2253 | 0 | if (w & 2) |
2254 | 0 | { |
2255 | 0 | *dst++ = convert_8888_to_0565_workaround (*src++, x1F001F); |
2256 | 0 | *dst++ = convert_8888_to_0565_workaround (*src++, x1F001F); |
2257 | 0 | } |
2258 | 0 | if (w & 1) |
2259 | 0 | { |
2260 | 0 | *dst = convert_8888_to_0565_workaround (*src, x1F001F); |
2261 | 0 | } |
2262 | 0 | } |
2263 | | |
2264 | | typedef struct |
2265 | | { |
2266 | | int y; |
2267 | | uint64_t * buffer; |
2268 | | } line_t; |
2269 | | |
2270 | | typedef struct |
2271 | | { |
2272 | | line_t lines[2]; |
2273 | | pixman_fixed_t y; |
2274 | | pixman_fixed_t x; |
2275 | | uint64_t data[1]; |
2276 | | } bilinear_info_t; |
2277 | | |
2278 | | static void |
2279 | | fetch_horizontal (bits_image_t *image, line_t *line, |
2280 | | int y, pixman_fixed_t x, pixman_fixed_t ux, int n) |
2281 | 0 | { |
2282 | 0 | uint32_t *bits = image->bits + y * image->rowstride; |
2283 | 0 | int i; |
2284 | |
|
2285 | 0 | for (i = 0; i < n; ++i) |
2286 | 0 | { |
2287 | 0 | int x0 = pixman_fixed_to_int (x); |
2288 | 0 | int x1 = x0 + 1; |
2289 | 0 | int32_t dist_x; |
2290 | |
|
2291 | 0 | uint32_t left = *(bits + x0); |
2292 | 0 | uint32_t right = *(bits + x1); |
2293 | |
|
2294 | 0 | dist_x = pixman_fixed_to_bilinear_weight (x); |
2295 | 0 | dist_x <<= (8 - BILINEAR_INTERPOLATION_BITS); |
2296 | |
|
2297 | | #if SIZEOF_LONG <= 4 |
2298 | | { |
2299 | | uint32_t lag, rag, ag; |
2300 | | uint32_t lrb, rrb, rb; |
2301 | | |
2302 | | lag = (left & 0xff00ff00) >> 8; |
2303 | | rag = (right & 0xff00ff00) >> 8; |
2304 | | ag = (lag << 8) + dist_x * (rag - lag); |
2305 | | |
2306 | | lrb = (left & 0x00ff00ff); |
2307 | | rrb = (right & 0x00ff00ff); |
2308 | | rb = (lrb << 8) + dist_x * (rrb - lrb); |
2309 | | |
2310 | | *((uint32_t *)(line->buffer + i)) = ag; |
2311 | | *((uint32_t *)(line->buffer + i) + 1) = rb; |
2312 | | } |
2313 | | #else |
2314 | 0 | { |
2315 | 0 | uint64_t lagrb, ragrb; |
2316 | 0 | uint32_t lag, rag; |
2317 | 0 | uint32_t lrb, rrb; |
2318 | |
|
2319 | 0 | lag = (left & 0xff00ff00); |
2320 | 0 | lrb = (left & 0x00ff00ff); |
2321 | 0 | rag = (right & 0xff00ff00); |
2322 | 0 | rrb = (right & 0x00ff00ff); |
2323 | 0 | lagrb = (((uint64_t)lag) << 24) | lrb; |
2324 | 0 | ragrb = (((uint64_t)rag) << 24) | rrb; |
2325 | |
|
2326 | 0 | line->buffer[i] = (lagrb << 8) + dist_x * (ragrb - lagrb); |
2327 | 0 | } |
2328 | 0 | #endif |
2329 | |
|
2330 | 0 | x += ux; |
2331 | 0 | } |
2332 | |
|
2333 | 0 | line->y = y; |
2334 | 0 | } |
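The packed AG/RB arithmetic in fetch_horizontal performs, for all four channels at once, a linear interpolation with an 8-bit weight that keeps 8 fractional bits of precision. The single-channel equivalent looks like this (a sketch, with dist in [0, 255]; the result always fits in 16 bits):

    #include <stdint.h>

    static uint16_t
    lerp_channel (uint8_t left, uint8_t right, int dist)
    {
        int v = ((int) left << 8) + dist * ((int) right - (int) left);
        return (uint16_t) v;   /* in [0, 0xff00] */
    }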
2335 | | |
2336 | | static uint32_t * |
2337 | | fast_fetch_bilinear_cover (pixman_iter_t *iter, const uint32_t *mask) |
2338 | 0 | { |
2339 | 0 | pixman_fixed_t fx, ux; |
2340 | 0 | bilinear_info_t *info = iter->data; |
2341 | 0 | line_t *line0, *line1; |
2342 | 0 | int y0, y1; |
2343 | 0 | int32_t dist_y; |
2344 | 0 | int i; |
2345 | |
|
2346 | 0 | COMPILE_TIME_ASSERT (BILINEAR_INTERPOLATION_BITS < 8); |
2347 | |
|
2348 | 0 | fx = info->x; |
2349 | 0 | ux = iter->image->common.transform->matrix[0][0]; |
2350 | |
|
2351 | 0 | y0 = pixman_fixed_to_int (info->y); |
2352 | 0 | y1 = y0 + 1; |
2353 | 0 | dist_y = pixman_fixed_to_bilinear_weight (info->y); |
2354 | 0 | dist_y <<= (8 - BILINEAR_INTERPOLATION_BITS); |
2355 | |
|
2356 | 0 | line0 = &info->lines[y0 & 0x01]; |
2357 | 0 | line1 = &info->lines[y1 & 0x01]; |
2358 | |
|
2359 | 0 | if (line0->y != y0) |
2360 | 0 | { |
2361 | 0 | fetch_horizontal ( |
2362 | 0 | &iter->image->bits, line0, y0, fx, ux, iter->width); |
2363 | 0 | } |
2364 | |
|
2365 | 0 | if (line1->y != y1) |
2366 | 0 | { |
2367 | 0 | fetch_horizontal ( |
2368 | 0 | &iter->image->bits, line1, y1, fx, ux, iter->width); |
2369 | 0 | } |
2370 | |
|
2371 | 0 | for (i = 0; i < iter->width; ++i) |
2372 | 0 | { |
2373 | | #if SIZEOF_LONG <= 4 |
2374 | | uint32_t ta, tr, tg, tb; |
2375 | | uint32_t ba, br, bg, bb; |
2376 | | uint32_t tag, trb; |
2377 | | uint32_t bag, brb; |
2378 | | uint32_t a, r, g, b; |
2379 | | |
2380 | | tag = *((uint32_t *)(line0->buffer + i)); |
2381 | | trb = *((uint32_t *)(line0->buffer + i) + 1); |
2382 | | bag = *((uint32_t *)(line1->buffer + i)); |
2383 | | brb = *((uint32_t *)(line1->buffer + i) + 1); |
2384 | | |
2385 | | ta = tag >> 16; |
2386 | | ba = bag >> 16; |
2387 | | a = (ta << 8) + dist_y * (ba - ta); |
2388 | | |
2389 | | tr = trb >> 16; |
2390 | | br = brb >> 16; |
2391 | | r = (tr << 8) + dist_y * (br - tr); |
2392 | | |
2393 | | tg = tag & 0xffff; |
2394 | | bg = bag & 0xffff; |
2395 | | g = (tg << 8) + dist_y * (bg - tg); |
2396 | | |
2397 | | tb = trb & 0xffff; |
2398 | | bb = brb & 0xffff; |
2399 | | b = (tb << 8) + dist_y * (bb - tb); |
2400 | | |
2401 | | a = (a << 8) & 0xff000000; |
2402 | | r = (r << 0) & 0x00ff0000; |
2403 | | g = (g >> 8) & 0x0000ff00; |
2404 | | b = (b >> 16) & 0x000000ff; |
2405 | | #else |
2406 | 0 | uint64_t top = line0->buffer[i]; |
2407 | 0 | uint64_t bot = line1->buffer[i]; |
2408 | 0 | uint64_t tar = (top & 0xffff0000ffff0000ULL) >> 16; |
2409 | 0 | uint64_t bar = (bot & 0xffff0000ffff0000ULL) >> 16; |
2410 | 0 | uint64_t tgb = (top & 0x0000ffff0000ffffULL); |
2411 | 0 | uint64_t bgb = (bot & 0x0000ffff0000ffffULL); |
2412 | 0 | uint64_t ar, gb; |
2413 | 0 | uint32_t a, r, g, b; |
2414 | |
|
2415 | 0 | ar = (tar << 8) + dist_y * (bar - tar); |
2416 | 0 | gb = (tgb << 8) + dist_y * (bgb - tgb); |
2417 | |
|
2418 | 0 | a = ((ar >> 24) & 0xff000000); |
2419 | 0 | r = ((ar >> 0) & 0x00ff0000); |
2420 | 0 | g = ((gb >> 40) & 0x0000ff00); |
2421 | 0 | b = ((gb >> 16) & 0x000000ff); |
2422 | 0 | #endif |
2423 | |
|
2424 | 0 | iter->buffer[i] = a | r | g | b; |
2425 | 0 | } |
2426 | |
|
2427 | 0 | info->y += iter->image->common.transform->matrix[1][1]; |
2428 | |
|
2429 | 0 | return iter->buffer; |
2430 | 0 | } |
2431 | | |
2432 | | static void |
2433 | | bilinear_cover_iter_fini (pixman_iter_t *iter) |
2434 | 0 | { |
2435 | 0 | free (iter->data); |
2436 | 0 | } |
2437 | | |
2438 | | static void |
2439 | | fast_bilinear_cover_iter_init (pixman_iter_t *iter, const pixman_iter_info_t *iter_info) |
2440 | 0 | { |
2441 | 0 | int width = iter->width; |
2442 | 0 | bilinear_info_t *info; |
2443 | 0 | pixman_vector_t v; |
2444 | | |
2445 | | /* Reference point is the center of the pixel */ |
2446 | 0 | v.vector[0] = pixman_int_to_fixed (iter->x) + pixman_fixed_1 / 2; |
2447 | 0 | v.vector[1] = pixman_int_to_fixed (iter->y) + pixman_fixed_1 / 2; |
2448 | 0 | v.vector[2] = pixman_fixed_1; |
2449 | |
|
2450 | 0 | if (!pixman_transform_point_3d (iter->image->common.transform, &v)) |
2451 | 0 | goto fail; |
2452 | | |
2453 | 0 | info = malloc (sizeof (*info) + (2 * width - 1) * sizeof (uint64_t)); |
2454 | 0 | if (!info) |
2455 | 0 | goto fail; |
2456 | | |
2457 | 0 | info->x = v.vector[0] - pixman_fixed_1 / 2; |
2458 | 0 | info->y = v.vector[1] - pixman_fixed_1 / 2; |
2459 | | |
2460 | | /* It is safe to set the y coordinates to -1 initially |
2461 | | * because COVER_CLIP_BILINEAR ensures that we will only |
2462 | | * be asked to fetch lines in the [0, height) interval |
2463 | | */ |
2464 | 0 | info->lines[0].y = -1; |
2465 | 0 | info->lines[0].buffer = &(info->data[0]); |
2466 | 0 | info->lines[1].y = -1; |
2467 | 0 | info->lines[1].buffer = &(info->data[width]); |
2468 | |
|
2469 | 0 | iter->get_scanline = fast_fetch_bilinear_cover; |
2470 | 0 | iter->fini = bilinear_cover_iter_fini; |
2471 | |
|
2472 | 0 | iter->data = info; |
2473 | 0 | return; |
2474 | | |
2475 | 0 | fail: |
2476 | | /* Something went wrong, either a bad matrix or OOM; in such cases, |
2477 | | * we don't guarantee any particular rendering. |
2478 | | */ |
2479 | 0 | _pixman_log_error ( |
2480 | 0 | FUNC, "Allocation failure or bad matrix, skipping rendering\n"); |
2481 | | |
2482 | 0 | iter->get_scanline = _pixman_iter_get_scanline_noop; |
2483 | 0 | iter->fini = NULL; |
2484 | 0 | } |
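The allocation above reserves two scanline buffers of width uint64_t values directly behind the bilinear_info_t header; because the struct already declares data[1], only (2 * width - 1) extra elements are needed. A sketch of the same trailing-array idiom, using hypothetical names (a C99 flexible array member would be the modern equivalent):

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct
    {
        int      n;
        uint64_t data[1];   /* first element lives inside the struct */
    } trailing_array_t;

    static trailing_array_t *
    trailing_array_alloc (int count)
    {
        trailing_array_t *t =
            malloc (sizeof (*t) + (count - 1) * sizeof (uint64_t));

        if (t)
            t->n = count;

        return t;
    }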
2485 | | |
2486 | | static uint32_t * |
2487 | | bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter, |
2488 | | const uint32_t *mask) |
2489 | 0 | { |
2490 | |
|
2491 | 0 | pixman_image_t * ima = iter->image; |
2492 | 0 | int offset = iter->x; |
2493 | 0 | int line = iter->y++; |
2494 | 0 | int width = iter->width; |
2495 | 0 | uint32_t * buffer = iter->buffer; |
2496 | |
|
2497 | 0 | bits_image_t *bits = &ima->bits; |
2498 | 0 | pixman_fixed_t x_top, x_bottom, x; |
2499 | 0 | pixman_fixed_t ux_top, ux_bottom, ux; |
2500 | 0 | pixman_vector_t v; |
2501 | 0 | uint32_t top_mask, bottom_mask; |
2502 | 0 | uint32_t *top_row; |
2503 | 0 | uint32_t *bottom_row; |
2504 | 0 | uint32_t *end; |
2505 | 0 | uint32_t zero[2] = { 0, 0 }; |
2506 | 0 | uint32_t one = 1; |
2507 | 0 | int y, y1, y2; |
2508 | 0 | int disty; |
2509 | 0 | int mask_inc; |
2510 | 0 | int w; |
2511 | | |
2512 | | /* reference point is the center of the pixel */ |
2513 | 0 | v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; |
2514 | 0 | v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; |
2515 | 0 | v.vector[2] = pixman_fixed_1; |
2516 | |
|
2517 | 0 | if (!pixman_transform_point_3d (bits->common.transform, &v)) |
2518 | 0 | return iter->buffer; |
2519 | | |
2520 | 0 | ux = ux_top = ux_bottom = bits->common.transform->matrix[0][0]; |
2521 | 0 | x = x_top = x_bottom = v.vector[0] - pixman_fixed_1/2; |
2522 | |
|
2523 | 0 | y = v.vector[1] - pixman_fixed_1/2; |
2524 | 0 | disty = pixman_fixed_to_bilinear_weight (y); |
2525 | | |
2526 | | /* Load the pointers to the first and second lines from the source |
2527 | | * image that bilinear code must read. |
2528 | | * |
2529 | | * The main trick in this code is checking whether either line lies |
2530 | | * outside of the image; |
2531 | | * |
2532 | | * when a line (either one) is found to be outside, its pointer is |
2533 | | * redirected to a dummy area filled with zeros. Once redirected, that |
2534 | | * pointer must not advance, so the per-line increment variables used |
2535 | | * inside the loop are set to match (zero for the dummy line). |
2536 | | */ |
2537 | 0 | y1 = pixman_fixed_to_int (y); |
2538 | 0 | y2 = y1 + 1; |
2539 | |
|
2540 | 0 | if (y1 < 0 || y1 >= bits->height) |
2541 | 0 | { |
2542 | 0 | top_row = zero; |
2543 | 0 | x_top = 0; |
2544 | 0 | ux_top = 0; |
2545 | 0 | } |
2546 | 0 | else |
2547 | 0 | { |
2548 | 0 | top_row = bits->bits + y1 * bits->rowstride; |
2549 | 0 | x_top = x; |
2550 | 0 | ux_top = ux; |
2551 | 0 | } |
2552 | |
|
2553 | 0 | if (y2 < 0 || y2 >= bits->height) |
2554 | 0 | { |
2555 | 0 | bottom_row = zero; |
2556 | 0 | x_bottom = 0; |
2557 | 0 | ux_bottom = 0; |
2558 | 0 | } |
2559 | 0 | else |
2560 | 0 | { |
2561 | 0 | bottom_row = bits->bits + y2 * bits->rowstride; |
2562 | 0 | x_bottom = x; |
2563 | 0 | ux_bottom = ux; |
2564 | 0 | } |
2565 | | |
2566 | | /* Instead of checking whether the operation uses the mask in |
2567 | | * each loop iteration, verify this only once and prepare the |
2568 | | * variables to make the code smaller inside the loop. |
2569 | | */ |
2570 | 0 | if (!mask) |
2571 | 0 | { |
2572 | 0 | mask_inc = 0; |
2573 | 0 | mask = &one; |
2574 | 0 | } |
2575 | 0 | else |
2576 | 0 | { |
2577 | | /* If we have a mask, prepare the variables to check it */ |
2578 | 0 | mask_inc = 1; |
2579 | 0 | } |
2580 | | |
2581 | | /* If both are zero, then the whole thing is zero */ |
2582 | 0 | if (top_row == zero && bottom_row == zero) |
2583 | 0 | { |
2584 | 0 | memset (buffer, 0, width * sizeof (uint32_t)); |
2585 | 0 | return iter->buffer; |
2586 | 0 | } |
2587 | 0 | else if (bits->format == PIXMAN_x8r8g8b8) |
2588 | 0 | { |
2589 | 0 | if (top_row == zero) |
2590 | 0 | { |
2591 | 0 | top_mask = 0; |
2592 | 0 | bottom_mask = 0xff000000; |
2593 | 0 | } |
2594 | 0 | else if (bottom_row == zero) |
2595 | 0 | { |
2596 | 0 | top_mask = 0xff000000; |
2597 | 0 | bottom_mask = 0; |
2598 | 0 | } |
2599 | 0 | else |
2600 | 0 | { |
2601 | 0 | top_mask = 0xff000000; |
2602 | 0 | bottom_mask = 0xff000000; |
2603 | 0 | } |
2604 | 0 | } |
2605 | 0 | else |
2606 | 0 | { |
2607 | 0 | top_mask = 0; |
2608 | 0 | bottom_mask = 0; |
2609 | 0 | } |
2610 | | |
2611 | 0 | end = buffer + width; |
2612 | | |
2613 | | /* Zero fill to the left of the image */ |
2614 | 0 | while (buffer < end && x < pixman_fixed_minus_1) |
2615 | 0 | { |
2616 | 0 | *buffer++ = 0; |
2617 | 0 | x += ux; |
2618 | 0 | x_top += ux_top; |
2619 | 0 | x_bottom += ux_bottom; |
2620 | 0 | mask += mask_inc; |
2621 | 0 | } |
2622 | | |
2623 | | /* Left edge |
2624 | | */ |
2625 | 0 | while (buffer < end && x < 0) |
2626 | 0 | { |
2627 | 0 | uint32_t tr, br; |
2628 | 0 | int32_t distx; |
2629 | |
|
2630 | 0 | tr = top_row[pixman_fixed_to_int (x_top) + 1] | top_mask; |
2631 | 0 | br = bottom_row[pixman_fixed_to_int (x_bottom) + 1] | bottom_mask; |
2632 | |
|
2633 | 0 | distx = pixman_fixed_to_bilinear_weight (x); |
2634 | |
|
2635 | 0 | *buffer++ = bilinear_interpolation (0, tr, 0, br, distx, disty); |
2636 | |
|
2637 | 0 | x += ux; |
2638 | 0 | x_top += ux_top; |
2639 | 0 | x_bottom += ux_bottom; |
2640 | 0 | mask += mask_inc; |
2641 | 0 | } |
2642 | | |
2643 | | /* Main part */ |
2644 | 0 | w = pixman_int_to_fixed (bits->width - 1); |
2645 | |
|
2646 | 0 | while (buffer < end && x < w) |
2647 | 0 | { |
2648 | 0 | if (*mask) |
2649 | 0 | { |
2650 | 0 | uint32_t tl, tr, bl, br; |
2651 | 0 | int32_t distx; |
2652 | |
|
2653 | 0 | tl = top_row [pixman_fixed_to_int (x_top)] | top_mask; |
2654 | 0 | tr = top_row [pixman_fixed_to_int (x_top) + 1] | top_mask; |
2655 | 0 | bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask; |
2656 | 0 | br = bottom_row [pixman_fixed_to_int (x_bottom) + 1] | bottom_mask; |
2657 | |
|
2658 | 0 | distx = pixman_fixed_to_bilinear_weight (x); |
2659 | |
|
2660 | 0 | *buffer = bilinear_interpolation (tl, tr, bl, br, distx, disty); |
2661 | 0 | } |
2662 | |
|
2663 | 0 | buffer++; |
2664 | 0 | x += ux; |
2665 | 0 | x_top += ux_top; |
2666 | 0 | x_bottom += ux_bottom; |
2667 | 0 | mask += mask_inc; |
2668 | 0 | } |
2669 | | |
2670 | | /* Right Edge */ |
2671 | 0 | w = pixman_int_to_fixed (bits->width); |
2672 | 0 | while (buffer < end && x < w) |
2673 | 0 | { |
2674 | 0 | if (*mask) |
2675 | 0 | { |
2676 | 0 | uint32_t tl, bl; |
2677 | 0 | int32_t distx; |
2678 | |
|
2679 | 0 | tl = top_row [pixman_fixed_to_int (x_top)] | top_mask; |
2680 | 0 | bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask; |
2681 | |
|
2682 | 0 | distx = pixman_fixed_to_bilinear_weight (x); |
2683 | |
|
2684 | 0 | *buffer = bilinear_interpolation (tl, 0, bl, 0, distx, disty); |
2685 | 0 | } |
2686 | |
|
2687 | 0 | buffer++; |
2688 | 0 | x += ux; |
2689 | 0 | x_top += ux_top; |
2690 | 0 | x_bottom += ux_bottom; |
2691 | 0 | mask += mask_inc; |
2692 | 0 | } |
2693 | | |
2694 | | /* Zero fill to the right of the image */ |
2695 | 0 | while (buffer < end) |
2696 | 0 | *buffer++ = 0; |
2697 | |
|
2698 | 0 | return iter->buffer; |
2699 | 0 | } |
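A call such as bilinear_interpolation (tl, tr, bl, br, distx, disty) combines the four neighbouring pixels with two horizontal lerps followed by a vertical one; at the image edges the code above simply passes 0 for the missing neighbours. A hedged per-channel sketch, using floats in [0, 1] for readability (the real code works in fixed point):

    #include <stdint.h>

    static uint8_t
    bilerp_channel (uint8_t tl, uint8_t tr, uint8_t bl, uint8_t br,
                    float distx, float disty)
    {
        float top    = tl + distx * (tr - tl);
        float bottom = bl + distx * (br - bl);

        return (uint8_t) (top + disty * (bottom - top) + 0.5f);
    }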
2700 | | |
2701 | | typedef uint32_t (* convert_pixel_t) (const uint8_t *row, int x); |
2702 | | |
2703 | | static force_inline void |
2704 | | bits_image_fetch_separable_convolution_affine (pixman_image_t * image, |
2705 | | int offset, |
2706 | | int line, |
2707 | | int width, |
2708 | | uint32_t * buffer, |
2709 | | const uint32_t * mask, |
2710 | | |
2711 | | convert_pixel_t convert_pixel, |
2712 | | pixman_format_code_t format, |
2713 | | pixman_repeat_t repeat_mode) |
2714 | 0 | { |
2715 | 0 | bits_image_t *bits = &image->bits; |
2716 | 0 | pixman_fixed_t *params = image->common.filter_params; |
2717 | 0 | int cwidth = pixman_fixed_to_int (params[0]); |
2718 | 0 | int cheight = pixman_fixed_to_int (params[1]); |
2719 | 0 | int x_off = ((cwidth << 16) - pixman_fixed_1) >> 1; |
2720 | 0 | int y_off = ((cheight << 16) - pixman_fixed_1) >> 1; |
2721 | 0 | int x_phase_bits = pixman_fixed_to_int (params[2]); |
2722 | 0 | int y_phase_bits = pixman_fixed_to_int (params[3]); |
2723 | 0 | int x_phase_shift = 16 - x_phase_bits; |
2724 | 0 | int y_phase_shift = 16 - y_phase_bits; |
2725 | 0 | pixman_fixed_t vx, vy; |
2726 | 0 | pixman_fixed_t ux, uy; |
2727 | 0 | pixman_vector_t v; |
2728 | 0 | int k; |
2729 | | |
2730 | | /* reference point is the center of the pixel */ |
2731 | 0 | v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; |
2732 | 0 | v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; |
2733 | 0 | v.vector[2] = pixman_fixed_1; |
2734 | |
|
2735 | 0 | if (!pixman_transform_point_3d (image->common.transform, &v)) |
2736 | 0 | return; |
2737 | | |
2738 | 0 | ux = image->common.transform->matrix[0][0]; |
2739 | 0 | uy = image->common.transform->matrix[1][0]; |
2740 | |
|
2741 | 0 | vx = v.vector[0]; |
2742 | 0 | vy = v.vector[1]; |
2743 | |
|
2744 | 0 | for (k = 0; k < width; ++k) |
2745 | 0 | { |
2746 | 0 | pixman_fixed_t *y_params; |
2747 | 0 | int satot, srtot, sgtot, sbtot; |
2748 | 0 | pixman_fixed_t x, y; |
2749 | 0 | int32_t x1, x2, y1, y2; |
2750 | 0 | int32_t px, py; |
2751 | 0 | int i, j; |
2752 | |
|
2753 | 0 | if (mask && !mask[k]) |
2754 | 0 | goto next; |
2755 | | |
2756 | | /* Round x and y to the middle of the closest phase before continuing. This |
2757 | | * ensures that the convolution matrix is aligned correctly, since it was |
2758 | | * positioned relative to a particular phase (and not relative to whatever |
2759 | | * exact fraction we happen to get here). |
2760 | | */ |
2761 | 0 | x = ((vx >> x_phase_shift) << x_phase_shift) + ((1 << x_phase_shift) >> 1); |
2762 | 0 | y = ((vy >> y_phase_shift) << y_phase_shift) + ((1 << y_phase_shift) >> 1); |
2763 | |
|
2764 | 0 | px = (x & 0xffff) >> x_phase_shift; |
2765 | 0 | py = (y & 0xffff) >> y_phase_shift; |
2766 | |
|
2767 | 0 | x1 = pixman_fixed_to_int (x - pixman_fixed_e - x_off); |
2768 | 0 | y1 = pixman_fixed_to_int (y - pixman_fixed_e - y_off); |
2769 | 0 | x2 = x1 + cwidth; |
2770 | 0 | y2 = y1 + cheight; |
2771 | |
|
2772 | 0 | satot = srtot = sgtot = sbtot = 0; |
2773 | |
|
2774 | 0 | y_params = params + 4 + (1 << x_phase_bits) * cwidth + py * cheight; |
2775 | |
|
2776 | 0 | for (i = y1; i < y2; ++i) |
2777 | 0 | { |
2778 | 0 | pixman_fixed_t fy = *y_params++; |
2779 | |
|
2780 | 0 | if (fy) |
2781 | 0 | { |
2782 | 0 | pixman_fixed_t *x_params = params + 4 + px * cwidth; |
2783 | |
|
2784 | 0 | for (j = x1; j < x2; ++j) |
2785 | 0 | { |
2786 | 0 | pixman_fixed_t fx = *x_params++; |
2787 | 0 | int rx = j; |
2788 | 0 | int ry = i; |
2789 | | |
2790 | 0 | if (fx) |
2791 | 0 | { |
2792 | 0 | pixman_fixed_t f; |
2793 | 0 | uint32_t pixel, mask; |
2794 | 0 | uint8_t *row; |
2795 | |
|
2796 | 0 | mask = PIXMAN_FORMAT_A (format)? 0 : 0xff000000; |
2797 | |
|
2798 | 0 | if (repeat_mode != PIXMAN_REPEAT_NONE) |
2799 | 0 | { |
2800 | 0 | repeat (repeat_mode, &rx, bits->width); |
2801 | 0 | repeat (repeat_mode, &ry, bits->height); |
2802 | |
|
2803 | 0 | row = (uint8_t *)(bits->bits + bits->rowstride * ry); |
2804 | 0 | pixel = convert_pixel (row, rx) | mask; |
2805 | 0 | } |
2806 | 0 | else |
2807 | 0 | { |
2808 | 0 | if (rx < 0 || ry < 0 || rx >= bits->width || ry >= bits->height) |
2809 | 0 | { |
2810 | 0 | pixel = 0; |
2811 | 0 | } |
2812 | 0 | else |
2813 | 0 | { |
2814 | 0 | row = (uint8_t *)(bits->bits + bits->rowstride * ry); |
2815 | 0 | pixel = convert_pixel (row, rx) | mask; |
2816 | 0 | } |
2817 | 0 | } |
2818 | |
|
2819 | 0 | f = ((pixman_fixed_32_32_t)fx * fy + 0x8000) >> 16; |
2820 | 0 | srtot += (int)RED_8 (pixel) * f; |
2821 | 0 | sgtot += (int)GREEN_8 (pixel) * f; |
2822 | 0 | sbtot += (int)BLUE_8 (pixel) * f; |
2823 | 0 | satot += (int)ALPHA_8 (pixel) * f; |
2824 | 0 | } |
2825 | 0 | } |
2826 | 0 | } |
2827 | 0 | } |
2828 | |
|
2829 | 0 | satot = (satot + 0x8000) >> 16; |
2830 | 0 | srtot = (srtot + 0x8000) >> 16; |
2831 | 0 | sgtot = (sgtot + 0x8000) >> 16; |
2832 | 0 | sbtot = (sbtot + 0x8000) >> 16; |
2833 | |
|
2834 | 0 | satot = CLIP (satot, 0, 0xff); |
2835 | 0 | srtot = CLIP (srtot, 0, 0xff); |
2836 | 0 | sgtot = CLIP (sgtot, 0, 0xff); |
2837 | 0 | sbtot = CLIP (sbtot, 0, 0xff); |
2838 | |
|
2839 | 0 | buffer[k] = (satot << 24) | (srtot << 16) | (sgtot << 8) | (sbtot << 0); |
2840 | |
|
2841 | 0 | next: |
2842 | 0 | vx += ux; |
2843 | 0 | vy += uy; |
2844 | 0 | } |
2845 | 0 | } |
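The phase computation above quantizes the fractional part of the sample position to one of 2^x_phase_bits sub-pixel phases, each of which has its own row of filter taps. A worked example, assuming 16.16 fixed point and x_phase_bits == 4 (so x_phase_shift == 12):

    #include <stdint.h>
    #include <stdio.h>

    int
    main (void)
    {
        int32_t x             = (5 << 16) + 0x9000; /* 5.5625 in 16.16 */
        int     x_phase_bits  = 4;
        int     x_phase_shift = 16 - x_phase_bits;
        int32_t rx, px;

        /* round to the middle of the nearest phase, then extract it */
        rx = ((x >> x_phase_shift) << x_phase_shift) +
             ((1 << x_phase_shift) >> 1);
        px = (rx & 0xffff) >> x_phase_shift;

        printf ("phase %d of %d\n", (int) px, 1 << x_phase_bits); /* 9 of 16 */
        return 0;
    }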
2846 | | |
2847 | | static const uint32_t zero[2] = { 0, 0 }; |
2848 | | |
2849 | | static force_inline void |
2850 | | bits_image_fetch_bilinear_affine (pixman_image_t * image, |
2851 | | int offset, |
2852 | | int line, |
2853 | | int width, |
2854 | | uint32_t * buffer, |
2855 | | const uint32_t * mask, |
2856 | | |
2857 | | convert_pixel_t convert_pixel, |
2858 | | pixman_format_code_t format, |
2859 | | pixman_repeat_t repeat_mode) |
2860 | 0 | { |
2861 | 0 | pixman_fixed_t x, y; |
2862 | 0 | pixman_fixed_t ux, uy; |
2863 | 0 | pixman_vector_t v; |
2864 | 0 | bits_image_t *bits = &image->bits; |
2865 | 0 | int i; |
2866 | | |
2867 | | /* reference point is the center of the pixel */ |
2868 | 0 | v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; |
2869 | 0 | v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; |
2870 | 0 | v.vector[2] = pixman_fixed_1; |
2871 | |
|
2872 | 0 | if (!pixman_transform_point_3d (image->common.transform, &v)) |
2873 | 0 | return; |
2874 | | |
2875 | 0 | ux = image->common.transform->matrix[0][0]; |
2876 | 0 | uy = image->common.transform->matrix[1][0]; |
2877 | |
|
2878 | 0 | x = v.vector[0]; |
2879 | 0 | y = v.vector[1]; |
2880 | |
|
2881 | 0 | for (i = 0; i < width; ++i) |
2882 | 0 | { |
2883 | 0 | int x1, y1, x2, y2; |
2884 | 0 | uint32_t tl, tr, bl, br; |
2885 | 0 | int32_t distx, disty; |
2886 | 0 | int width = image->bits.width; |
2887 | 0 | int height = image->bits.height; |
2888 | 0 | const uint8_t *row1; |
2889 | 0 | const uint8_t *row2; |
2890 | |
|
2891 | 0 | if (mask && !mask[i]) |
2892 | 0 | goto next; |
2893 | | |
2894 | 0 | x1 = x - pixman_fixed_1 / 2; |
2895 | 0 | y1 = y - pixman_fixed_1 / 2; |
2896 | |
|
2897 | 0 | distx = pixman_fixed_to_bilinear_weight (x1); |
2898 | 0 | disty = pixman_fixed_to_bilinear_weight (y1); |
2899 | |
|
2900 | 0 | y1 = pixman_fixed_to_int (y1); |
2901 | 0 | y2 = y1 + 1; |
2902 | 0 | x1 = pixman_fixed_to_int (x1); |
2903 | 0 | x2 = x1 + 1; |
2904 | |
|
2905 | 0 | if (repeat_mode != PIXMAN_REPEAT_NONE) |
2906 | 0 | { |
2907 | 0 | uint32_t mask; |
2908 | |
|
2909 | 0 | mask = PIXMAN_FORMAT_A (format)? 0 : 0xff000000; |
2910 | |
|
2911 | 0 | repeat (repeat_mode, &x1, width); |
2912 | 0 | repeat (repeat_mode, &y1, height); |
2913 | 0 | repeat (repeat_mode, &x2, width); |
2914 | 0 | repeat (repeat_mode, &y2, height); |
2915 | |
|
2916 | 0 | row1 = (uint8_t *)(bits->bits + bits->rowstride * y1); |
2917 | 0 | row2 = (uint8_t *)(bits->bits + bits->rowstride * y2); |
2918 | |
|
2919 | 0 | tl = convert_pixel (row1, x1) | mask; |
2920 | 0 | tr = convert_pixel (row1, x2) | mask; |
2921 | 0 | bl = convert_pixel (row2, x1) | mask; |
2922 | 0 | br = convert_pixel (row2, x2) | mask; |
2923 | 0 | } |
2924 | 0 | else |
2925 | 0 | { |
2926 | 0 | uint32_t mask1, mask2; |
2927 | 0 | int bpp; |
2928 | | |
2929 | | /* Note: PIXMAN_FORMAT_BPP() returns an unsigned value, |
2930 | | * which means that any expression it appears in becomes |
2931 | | * unsigned as well. Since the variables below can be |
2932 | | * negative in some cases, that would lead to crashes on |
2933 | | * 64 bit architectures. |
2934 | | * |
2935 | | * So this assignment makes sure bpp is signed. |
2936 | | */ |
2937 | 0 | bpp = PIXMAN_FORMAT_BPP (format); |
2938 | |
|
2939 | 0 | if (x1 >= width || x2 < 0 || y1 >= height || y2 < 0) |
2940 | 0 | { |
2941 | 0 | buffer[i] = 0; |
2942 | 0 | goto next; |
2943 | 0 | } |
2944 | | |
2945 | 0 | if (y2 == 0) |
2946 | 0 | { |
2947 | 0 | row1 = (const uint8_t *)zero; |
2948 | 0 | mask1 = 0; |
2949 | 0 | } |
2950 | 0 | else |
2951 | 0 | { |
2952 | 0 | row1 = (uint8_t *)(bits->bits + bits->rowstride * y1); |
2953 | 0 | row1 += bpp / 8 * x1; |
2954 | |
|
2955 | 0 | mask1 = PIXMAN_FORMAT_A (format)? 0 : 0xff000000; |
2956 | 0 | } |
2957 | |
|
2958 | 0 | if (y1 == height - 1) |
2959 | 0 | { |
2960 | 0 | row2 = (const uint8_t *)zero; |
2961 | 0 | mask2 = 0; |
2962 | 0 | } |
2963 | 0 | else |
2964 | 0 | { |
2965 | 0 | row2 = (uint8_t *)(bits->bits + bits->rowstride * y2); |
2966 | 0 | row2 += bpp / 8 * x1; |
2967 | |
|
2968 | 0 | mask2 = PIXMAN_FORMAT_A (format)? 0 : 0xff000000; |
2969 | 0 | } |
2970 | |
|
2971 | 0 | if (x2 == 0) |
2972 | 0 | { |
2973 | 0 | tl = 0; |
2974 | 0 | bl = 0; |
2975 | 0 | } |
2976 | 0 | else |
2977 | 0 | { |
2978 | 0 | tl = convert_pixel (row1, 0) | mask1; |
2979 | 0 | bl = convert_pixel (row2, 0) | mask2; |
2980 | 0 | } |
2981 | |
|
2982 | 0 | if (x1 == width - 1) |
2983 | 0 | { |
2984 | 0 | tr = 0; |
2985 | 0 | br = 0; |
2986 | 0 | } |
2987 | 0 | else |
2988 | 0 | { |
2989 | 0 | tr = convert_pixel (row1, 1) | mask1; |
2990 | 0 | br = convert_pixel (row2, 1) | mask2; |
2991 | 0 | } |
2992 | 0 | } |
2993 | | |
2994 | 0 | buffer[i] = bilinear_interpolation ( |
2995 | 0 | tl, tr, bl, br, distx, disty); |
2996 | |
|
2997 | 0 | next: |
2998 | 0 | x += ux; |
2999 | 0 | y += uy; |
3000 | 0 | } |
3001 | 0 | } |
3002 | | |
3003 | | static force_inline void |
3004 | | bits_image_fetch_nearest_affine (pixman_image_t * image, |
3005 | | int offset, |
3006 | | int line, |
3007 | | int width, |
3008 | | uint32_t * buffer, |
3009 | | const uint32_t * mask, |
3010 | | |
3011 | | convert_pixel_t convert_pixel, |
3012 | | pixman_format_code_t format, |
3013 | | pixman_repeat_t repeat_mode) |
3014 | 0 | { |
3015 | 0 | pixman_fixed_t x, y; |
3016 | 0 | pixman_fixed_t ux, uy; |
3017 | 0 | pixman_vector_t v; |
3018 | 0 | bits_image_t *bits = &image->bits; |
3019 | 0 | int i; |
3020 | | |
3021 | | /* reference point is the center of the pixel */ |
3022 | 0 | v.vector[0] = pixman_int_to_fixed (offset) + pixman_fixed_1 / 2; |
3023 | 0 | v.vector[1] = pixman_int_to_fixed (line) + pixman_fixed_1 / 2; |
3024 | 0 | v.vector[2] = pixman_fixed_1; |
3025 | |
|
3026 | 0 | if (!pixman_transform_point_3d (image->common.transform, &v)) |
3027 | 0 | return; |
3028 | | |
3029 | 0 | ux = image->common.transform->matrix[0][0]; |
3030 | 0 | uy = image->common.transform->matrix[1][0]; |
3031 | |
|
3032 | 0 | x = v.vector[0]; |
3033 | 0 | y = v.vector[1]; |
3034 | |
|
3035 | 0 | for (i = 0; i < width; ++i) |
3036 | 0 | { |
3037 | 0 | int width, height, x0, y0; |
3038 | 0 | const uint8_t *row; |
3039 | |
|
3040 | 0 | if (mask && !mask[i]) |
3041 | 0 | goto next; |
3042 | | |
3043 | 0 | width = image->bits.width; |
3044 | 0 | height = image->bits.height; |
3045 | 0 | x0 = pixman_fixed_to_int (x - pixman_fixed_e); |
3046 | 0 | y0 = pixman_fixed_to_int (y - pixman_fixed_e); |
3047 | |
|
3048 | 0 | if (repeat_mode == PIXMAN_REPEAT_NONE && |
3049 | 0 | (y0 < 0 || y0 >= height || x0 < 0 || x0 >= width)) |
3050 | 0 | { |
3051 | 0 | buffer[i] = 0; |
3052 | 0 | } |
3053 | 0 | else |
3054 | 0 | { |
3055 | 0 | uint32_t mask = PIXMAN_FORMAT_A (format)? 0 : 0xff000000; |
3056 | |
|
3057 | 0 | if (repeat_mode != PIXMAN_REPEAT_NONE) |
3058 | 0 | { |
3059 | 0 | repeat (repeat_mode, &x0, width); |
3060 | 0 | repeat (repeat_mode, &y0, height); |
3061 | 0 | } |
3062 | |
|
3063 | 0 | row = (uint8_t *)(bits->bits + bits->rowstride * y0); |
3064 | |
|
3065 | 0 | buffer[i] = convert_pixel (row, x0) | mask; |
3066 | 0 | } |
3067 | |
|
3068 | 0 | next: |
3069 | 0 | x += ux; |
3070 | 0 | y += uy; |
3071 | 0 | } |
3072 | 0 | } |
3073 | | |
3074 | | static force_inline uint32_t |
3075 | | convert_a8r8g8b8 (const uint8_t *row, int x) |
3076 | 0 | { |
3077 | 0 | return *(((uint32_t *)row) + x); |
3078 | 0 | } |
3079 | | |
3080 | | static force_inline uint32_t |
3081 | | convert_x8r8g8b8 (const uint8_t *row, int x) |
3082 | 0 | { |
3083 | 0 | return *(((uint32_t *)row) + x); |
3084 | 0 | } |
3085 | | |
3086 | | static force_inline uint32_t |
3087 | | convert_a8 (const uint8_t *row, int x) |
3088 | 0 | { |
3089 | 0 | return (uint32_t) *(row + x) << 24; |
3090 | 0 | } |
3091 | | |
3092 | | static force_inline uint32_t |
3093 | | convert_r5g6b5 (const uint8_t *row, int x) |
3094 | 0 | { |
3095 | 0 | return convert_0565_to_0888 (*((uint16_t *)row + x)); |
3096 | 0 | } |
3097 | | |
3098 | | #define MAKE_SEPARABLE_CONVOLUTION_FETCHER(name, format, repeat_mode) \ |
3099 | | static uint32_t * \ |
3100 | | bits_image_fetch_separable_convolution_affine_ ## name (pixman_iter_t *iter, \ |
3101 | | const uint32_t * mask) \ |
3102 | 0 | { \ |
3103 | 0 | bits_image_fetch_separable_convolution_affine ( \ |
3104 | 0 | iter->image, \ |
3105 | 0 | iter->x, iter->y++, \ |
3106 | 0 | iter->width, \ |
3107 | 0 | iter->buffer, mask, \ |
3108 | 0 | convert_ ## format, \ |
3109 | 0 | PIXMAN_ ## format, \ |
3110 | 0 | repeat_mode); \ |
3111 | 0 | \ |
3112 | 0 | return iter->buffer; \ |
3113 | 0 | }
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_pad_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_none_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_reflect_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_normal_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_pad_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_none_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_reflect_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_normal_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_pad_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_none_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_reflect_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_normal_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_pad_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_none_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_reflect_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_separable_convolution_affine_normal_r5g6b5
3114 | | |
3115 | | #define MAKE_BILINEAR_FETCHER(name, format, repeat_mode) \ |
3116 | | static uint32_t * \ |
3117 | | bits_image_fetch_bilinear_affine_ ## name (pixman_iter_t *iter, \ |
3118 | | const uint32_t * mask) \ |
3119 | 0 | { \ |
3120 | 0 | bits_image_fetch_bilinear_affine (iter->image, \ |
3121 | 0 | iter->x, iter->y++, \ |
3122 | 0 | iter->width, \ |
3123 | 0 | iter->buffer, mask, \ |
3124 | 0 | convert_ ## format, \ |
3125 | 0 | PIXMAN_ ## format, \ |
3126 | 0 | repeat_mode); \ |
3127 | 0 | return iter->buffer; \ |
3128 | 0 | }
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_pad_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_none_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_reflect_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_normal_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_pad_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_none_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_reflect_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_normal_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_pad_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_none_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_reflect_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_normal_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_pad_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_none_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_reflect_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_bilinear_affine_normal_r5g6b5
3129 | | |
3130 | | #define MAKE_NEAREST_FETCHER(name, format, repeat_mode) \ |
3131 | | static uint32_t * \ |
3132 | | bits_image_fetch_nearest_affine_ ## name (pixman_iter_t *iter, \ |
3133 | | const uint32_t * mask) \ |
3134 | 0 | { \ |
3135 | 0 | bits_image_fetch_nearest_affine (iter->image, \ |
3136 | 0 | iter->x, iter->y++, \ |
3137 | 0 | iter->width, \ |
3138 | 0 | iter->buffer, mask, \ |
3139 | 0 | convert_ ## format, \ |
3140 | 0 | PIXMAN_ ## format, \ |
3141 | 0 | repeat_mode); \ |
3142 | 0 | return iter->buffer; \ |
3143 | 0 | }
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_pad_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_none_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_reflect_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_normal_a8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_pad_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_none_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_reflect_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_normal_x8r8g8b8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_pad_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_none_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_reflect_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_normal_a8
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_pad_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_none_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_reflect_r5g6b5
Unexecuted instantiation: pixman-fast-path.c:bits_image_fetch_nearest_affine_normal_r5g6b5
3144 | | |
3145 | | #define MAKE_FETCHERS(name, format, repeat_mode) \ |
3146 | | MAKE_NEAREST_FETCHER (name, format, repeat_mode) \ |
3147 | | MAKE_BILINEAR_FETCHER (name, format, repeat_mode) \ |
3148 | | MAKE_SEPARABLE_CONVOLUTION_FETCHER (name, format, repeat_mode) |
3149 | | |
3150 | | MAKE_FETCHERS (pad_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_PAD) |
3151 | | MAKE_FETCHERS (none_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_NONE) |
3152 | | MAKE_FETCHERS (reflect_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_REFLECT) |
3153 | | MAKE_FETCHERS (normal_a8r8g8b8, a8r8g8b8, PIXMAN_REPEAT_NORMAL) |
3154 | | MAKE_FETCHERS (pad_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_PAD) |
3155 | | MAKE_FETCHERS (none_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_NONE) |
3156 | | MAKE_FETCHERS (reflect_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_REFLECT) |
3157 | | MAKE_FETCHERS (normal_x8r8g8b8, x8r8g8b8, PIXMAN_REPEAT_NORMAL) |
3158 | | MAKE_FETCHERS (pad_a8, a8, PIXMAN_REPEAT_PAD) |
3159 | | MAKE_FETCHERS (none_a8, a8, PIXMAN_REPEAT_NONE) |
3160 | | MAKE_FETCHERS (reflect_a8, a8, PIXMAN_REPEAT_REFLECT) |
3161 | | MAKE_FETCHERS (normal_a8, a8, PIXMAN_REPEAT_NORMAL) |
3162 | | MAKE_FETCHERS (pad_r5g6b5, r5g6b5, PIXMAN_REPEAT_PAD) |
3163 | | MAKE_FETCHERS (none_r5g6b5, r5g6b5, PIXMAN_REPEAT_NONE) |
3164 | | MAKE_FETCHERS (reflect_r5g6b5, r5g6b5, PIXMAN_REPEAT_REFLECT) |
3165 | | MAKE_FETCHERS (normal_r5g6b5, r5g6b5, PIXMAN_REPEAT_NORMAL) |
3166 | | |
3167 | | #define IMAGE_FLAGS \ |
3168 | | (FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | \ |
3169 | | FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST) |
3170 | | |
3171 | | static const pixman_iter_info_t fast_iters[] = |
3172 | | { |
3173 | | { PIXMAN_r5g6b5, IMAGE_FLAGS, ITER_NARROW | ITER_SRC, |
3174 | | _pixman_iter_init_bits_stride, fast_fetch_r5g6b5, NULL }, |
3175 | | |
3176 | | { PIXMAN_r5g6b5, FAST_PATH_STD_DEST_FLAGS, |
3177 | | ITER_NARROW | ITER_DEST, |
3178 | | _pixman_iter_init_bits_stride, |
3179 | | fast_fetch_r5g6b5, fast_write_back_r5g6b5 }, |
3180 | | |
3181 | | { PIXMAN_r5g6b5, FAST_PATH_STD_DEST_FLAGS, |
3182 | | ITER_NARROW | ITER_DEST | ITER_IGNORE_RGB | ITER_IGNORE_ALPHA, |
3183 | | _pixman_iter_init_bits_stride, |
3184 | | fast_dest_fetch_noop, fast_write_back_r5g6b5 }, |
3185 | | |
3186 | | { PIXMAN_a8r8g8b8, |
3187 | | (FAST_PATH_STANDARD_FLAGS | |
3188 | | FAST_PATH_SCALE_TRANSFORM | |
3189 | | FAST_PATH_BILINEAR_FILTER | |
3190 | | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR), |
3191 | | ITER_NARROW | ITER_SRC, |
3192 | | fast_bilinear_cover_iter_init, |
3193 | | NULL, NULL |
3194 | | }, |
3195 | | |
3196 | | #define FAST_BILINEAR_FLAGS \ |
3197 | | (FAST_PATH_NO_ALPHA_MAP | \ |
3198 | | FAST_PATH_NO_ACCESSORS | \ |
3199 | | FAST_PATH_HAS_TRANSFORM | \ |
3200 | | FAST_PATH_AFFINE_TRANSFORM | \ |
3201 | | FAST_PATH_X_UNIT_POSITIVE | \ |
3202 | | FAST_PATH_Y_UNIT_ZERO | \ |
3203 | | FAST_PATH_NONE_REPEAT | \ |
3204 | | FAST_PATH_BILINEAR_FILTER) |
3205 | | |
3206 | | { PIXMAN_a8r8g8b8, |
3207 | | FAST_BILINEAR_FLAGS, |
3208 | | ITER_NARROW | ITER_SRC, |
3209 | | NULL, bits_image_fetch_bilinear_no_repeat_8888, NULL |
3210 | | }, |
3211 | | |
3212 | | { PIXMAN_x8r8g8b8, |
3213 | | FAST_BILINEAR_FLAGS, |
3214 | | ITER_NARROW | ITER_SRC, |
3215 | | NULL, bits_image_fetch_bilinear_no_repeat_8888, NULL |
3216 | | }, |
3217 | | |
3218 | | #define GENERAL_BILINEAR_FLAGS \ |
3219 | | (FAST_PATH_NO_ALPHA_MAP | \ |
3220 | | FAST_PATH_NO_ACCESSORS | \ |
3221 | | FAST_PATH_HAS_TRANSFORM | \ |
3222 | | FAST_PATH_AFFINE_TRANSFORM | \ |
3223 | | FAST_PATH_BILINEAR_FILTER) |
3224 | | |
3225 | | #define GENERAL_NEAREST_FLAGS \ |
3226 | | (FAST_PATH_NO_ALPHA_MAP | \ |
3227 | | FAST_PATH_NO_ACCESSORS | \ |
3228 | | FAST_PATH_HAS_TRANSFORM | \ |
3229 | | FAST_PATH_AFFINE_TRANSFORM | \ |
3230 | | FAST_PATH_NEAREST_FILTER) |
3231 | | |
3232 | | #define GENERAL_SEPARABLE_CONVOLUTION_FLAGS \ |
3233 | | (FAST_PATH_NO_ALPHA_MAP | \ |
3234 | | FAST_PATH_NO_ACCESSORS | \ |
3235 | | FAST_PATH_HAS_TRANSFORM | \ |
3236 | | FAST_PATH_AFFINE_TRANSFORM | \ |
3237 | | FAST_PATH_SEPARABLE_CONVOLUTION_FILTER) |
3238 | | |
3239 | | #define SEPARABLE_CONVOLUTION_AFFINE_FAST_PATH(name, format, repeat) \ |
3240 | | { PIXMAN_ ## format, \ |
3241 | | GENERAL_SEPARABLE_CONVOLUTION_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \ |
3242 | | ITER_NARROW | ITER_SRC, \ |
3243 | | NULL, bits_image_fetch_separable_convolution_affine_ ## name, NULL \ |
3244 | | }, |
3245 | | |
3246 | | #define BILINEAR_AFFINE_FAST_PATH(name, format, repeat) \ |
3247 | | { PIXMAN_ ## format, \ |
3248 | | GENERAL_BILINEAR_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \ |
3249 | | ITER_NARROW | ITER_SRC, \ |
3250 | | NULL, bits_image_fetch_bilinear_affine_ ## name, NULL, \ |
3251 | | }, |
3252 | | |
3253 | | #define NEAREST_AFFINE_FAST_PATH(name, format, repeat) \ |
3254 | | { PIXMAN_ ## format, \ |
3255 | | GENERAL_NEAREST_FLAGS | FAST_PATH_ ## repeat ## _REPEAT, \ |
3256 | | ITER_NARROW | ITER_SRC, \ |
3257 | | NULL, bits_image_fetch_nearest_affine_ ## name, NULL \ |
3258 | | }, |
3259 | | |
3260 | | #define AFFINE_FAST_PATHS(name, format, repeat) \ |
3261 | | NEAREST_AFFINE_FAST_PATH(name, format, repeat) \ |
3262 | | BILINEAR_AFFINE_FAST_PATH(name, format, repeat) \ |
3263 | | SEPARABLE_CONVOLUTION_AFFINE_FAST_PATH(name, format, repeat) |
3264 | | |
3265 | | AFFINE_FAST_PATHS (pad_a8r8g8b8, a8r8g8b8, PAD) |
3266 | | AFFINE_FAST_PATHS (none_a8r8g8b8, a8r8g8b8, NONE) |
3267 | | AFFINE_FAST_PATHS (reflect_a8r8g8b8, a8r8g8b8, REFLECT) |
3268 | | AFFINE_FAST_PATHS (normal_a8r8g8b8, a8r8g8b8, NORMAL) |
3269 | | AFFINE_FAST_PATHS (pad_x8r8g8b8, x8r8g8b8, PAD) |
3270 | | AFFINE_FAST_PATHS (none_x8r8g8b8, x8r8g8b8, NONE) |
3271 | | AFFINE_FAST_PATHS (reflect_x8r8g8b8, x8r8g8b8, REFLECT) |
3272 | | AFFINE_FAST_PATHS (normal_x8r8g8b8, x8r8g8b8, NORMAL) |
3273 | | AFFINE_FAST_PATHS (pad_a8, a8, PAD) |
3274 | | AFFINE_FAST_PATHS (none_a8, a8, NONE) |
3275 | | AFFINE_FAST_PATHS (reflect_a8, a8, REFLECT) |
3276 | | AFFINE_FAST_PATHS (normal_a8, a8, NORMAL) |
3277 | | AFFINE_FAST_PATHS (pad_r5g6b5, r5g6b5, PAD) |
3278 | | AFFINE_FAST_PATHS (none_r5g6b5, r5g6b5, NONE) |
3279 | | AFFINE_FAST_PATHS (reflect_r5g6b5, r5g6b5, REFLECT) |
3280 | | AFFINE_FAST_PATHS (normal_r5g6b5, r5g6b5, NORMAL) |
3281 | | |
3282 | | { PIXMAN_null }, |
3283 | | }; |
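For reference, each AFFINE_FAST_PATHS line inside the table expands, through the three *_FAST_PATH macros defined just above it, into three pixman_iter_info_t entries. A sketch of what the first invocation, AFFINE_FAST_PATHS (pad_a8r8g8b8, a8r8g8b8, PAD), contributes after preprocessing (whitespace rearranged for readability):

    /* Mechanical expansion of the NEAREST/BILINEAR/SEPARABLE_CONVOLUTION
     * fast-path macros for the pad_a8r8g8b8 case: one entry per filter,
     * each pairing the format and flag mask with its generated fetcher.
     */
    { PIXMAN_a8r8g8b8,
      GENERAL_NEAREST_FLAGS | FAST_PATH_PAD_REPEAT,
      ITER_NARROW | ITER_SRC,
      NULL, bits_image_fetch_nearest_affine_pad_a8r8g8b8, NULL },
    { PIXMAN_a8r8g8b8,
      GENERAL_BILINEAR_FLAGS | FAST_PATH_PAD_REPEAT,
      ITER_NARROW | ITER_SRC,
      NULL, bits_image_fetch_bilinear_affine_pad_a8r8g8b8, NULL },
    { PIXMAN_a8r8g8b8,
      GENERAL_SEPARABLE_CONVOLUTION_FLAGS | FAST_PATH_PAD_REPEAT,
      ITER_NARROW | ITER_SRC,
      NULL, bits_image_fetch_separable_convolution_affine_pad_a8r8g8b8, NULL },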
3284 | | |
3285 | | pixman_implementation_t * |
3286 | | _pixman_implementation_create_fast_path (pixman_implementation_t *fallback) |
3287 | 12 | { |
3288 | 12 | pixman_implementation_t *imp = _pixman_implementation_create (fallback, c_fast_paths); |
3289 | | |
3290 | 12 | imp->fill = fast_path_fill; |
3291 | 12 | imp->iter_info = fast_iters; |
3292 | | |
3293 | 12 | return imp; |
3294 | 12 | } |
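_pixman_implementation_create_fast_path wraps the generic C fast paths and the fast_iters table around whatever `fallback` implementation it is handed; any operation not matched by c_fast_paths or fast_iters is delegated down the chain. A hedged sketch of how the constructor is typically chained elsewhere in pixman (the actual call site lives in pixman.c, outside this file, and may differ between versions):

    /* Sketch, not verbatim pixman.c: the fast-path implementation sits on
     * top of the portable general implementation, so unhandled operations
     * fall through to the fallback.  CPU-specific implementations (SSE2,
     * NEON, ...) are normally layered on top of this one in turn.
     */
    pixman_implementation_t *imp;

    imp = _pixman_implementation_create_general ();
    imp = _pixman_implementation_create_fast_path (imp);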