/src/cairo/subprojects/pixman-0.44.2/pixman/pixman-combine32.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc. |
3 | | * 2005 Lars Knoll & Zack Rusin, Trolltech |
4 | | * |
5 | | * Permission to use, copy, modify, distribute, and sell this software and its |
6 | | * documentation for any purpose is hereby granted without fee, provided that |
7 | | * the above copyright notice appear in all copies and that both that |
8 | | * copyright notice and this permission notice appear in supporting |
9 | | * documentation, and that the name of Keith Packard not be used in |
10 | | * advertising or publicity pertaining to distribution of the software without |
11 | | * specific, written prior permission. Keith Packard makes no |
12 | | * representations about the suitability of this software for any purpose. It |
13 | | * is provided "as is" without express or implied warranty. |
14 | | * |
15 | | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS |
16 | | * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND |
17 | | * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY |
18 | | * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
19 | | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN |
20 | | * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING |
21 | | * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS |
22 | | * SOFTWARE. |
23 | | */ |
24 | | #ifdef HAVE_CONFIG_H |
25 | | #include <pixman-config.h> |
26 | | #endif |
27 | | |
28 | | #include <math.h> |
29 | | #include <string.h> |
30 | | |
31 | | #include "pixman-private.h" |
32 | | #include "pixman-combine32.h" |
33 | | |
34 | | /* component alpha helper functions */ |
35 | | |
36 | | static void |
37 | | combine_mask_ca (uint32_t *src, uint32_t *mask) |
38 | 0 | { |
39 | 0 | uint32_t a = *mask; |
40 | |
41 | 0 | uint32_t x; |
42 | 0 | uint16_t xa; |
43 | |
44 | 0 | if (!a) |
45 | 0 | { |
46 | 0 | *(src) = 0; |
47 | 0 | return; |
48 | 0 | } |
49 | | |
50 | 0 | x = *(src); |
51 | 0 | if (a == ~0) |
52 | 0 | { |
53 | 0 | x = x >> A_SHIFT; |
54 | 0 | x |= x << G_SHIFT; |
55 | 0 | x |= x << R_SHIFT; |
56 | 0 | *(mask) = x; |
57 | 0 | return; |
58 | 0 | } |
59 | | |
60 | 0 | xa = x >> A_SHIFT; |
61 | 0 | UN8x4_MUL_UN8x4 (x, a); |
62 | 0 | *(src) = x; |
63 | | |
64 | 0 | UN8x4_MUL_UN8 (a, xa); |
65 | 0 | *(mask) = a; |
66 | 0 | } |
67 | | |
68 | | static void |
69 | | combine_mask_value_ca (uint32_t *src, const uint32_t *mask) |
70 | 0 | { |
71 | 0 | uint32_t a = *mask; |
72 | 0 | uint32_t x; |
73 | |
74 | 0 | if (!a) |
75 | 0 | { |
76 | 0 | *(src) = 0; |
77 | 0 | return; |
78 | 0 | } |
79 | | |
80 | 0 | if (a == ~0) |
81 | 0 | return; |
82 | | |
83 | 0 | x = *(src); |
84 | 0 | UN8x4_MUL_UN8x4 (x, a); |
85 | 0 | *(src) = x; |
86 | 0 | } |
87 | | |
88 | | static void |
89 | | combine_mask_alpha_ca (const uint32_t *src, uint32_t *mask) |
90 | 0 | { |
91 | 0 | uint32_t a = *(mask); |
92 | 0 | uint32_t x; |
93 | |
94 | 0 | if (!a) |
95 | 0 | return; |
96 | | |
97 | 0 | x = *(src) >> A_SHIFT; |
98 | 0 | if (x == MASK) |
99 | 0 | return; |
100 | | |
101 | 0 | if (a == ~0) |
102 | 0 | { |
103 | 0 | x |= x << G_SHIFT; |
104 | 0 | x |= x << R_SHIFT; |
105 | 0 | *(mask) = x; |
106 | 0 | return; |
107 | 0 | } |
108 | | |
109 | 0 | UN8x4_MUL_UN8 (a, x); |
110 | 0 | *(mask) = a; |
111 | 0 | } |
112 | | |
113 | | /* |
114 | | * There are two ways of handling alpha -- either as a single unified value or |
115 | | * a separate value for each component, hence each macro must have two |
116 | | * versions. The unified alpha version has a 'u' at the end of the name, |
117 | | * the component version has a 'ca'. Similarly, functions which deal with |
118 | | * this difference will have two versions using the same convention. |
119 | | */ |
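To make the naming convention concrete, here is a minimal single-channel sketch (not part of pixman; the helper name mul_un8 is hypothetical and simply mirrors the rounding of the MUL_UN8 macro). In the unified case only the mask's alpha byte scales the source; in the component-alpha case each mask channel scales the matching source channel.

    #include <stdint.h>

    /* Hypothetical helper: a * b / 255 with round-to-nearest, as MUL_UN8 does. */
    static uint8_t mul_un8 (uint8_t a, uint8_t b)
    {
        uint32_t t = (uint32_t) a * b + 0x80;
        return (uint8_t) (((t >> 8) + t) >> 8);
    }

    /* Unified alpha ('u'): every source channel is scaled by the mask's alpha byte. */
    static uint8_t mask_unified (uint8_t src_chan, uint8_t mask_alpha)
    {
        return mul_un8 (src_chan, mask_alpha);
    }

    /* Component alpha ('ca'): each source channel is scaled by its own mask channel. */
    static uint8_t mask_component (uint8_t src_chan, uint8_t mask_chan)
    {
        return mul_un8 (src_chan, mask_chan);
    }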
120 | | |
121 | | static force_inline uint32_t |
122 | | combine_mask (const uint32_t *src, const uint32_t *mask, int i) |
123 | 5.53k | { |
124 | 5.53k | uint32_t s, m; |
125 | | |
126 | 5.53k | if (mask) |
127 | 5.53k | { |
128 | 5.53k | m = *(mask + i) >> A_SHIFT; |
129 | | |
130 | 5.53k | if (!m) |
131 | 548 | return 0; |
132 | 5.53k | } |
133 | | |
134 | 4.98k | s = *(src + i); |
135 | | |
136 | 4.98k | if (mask) |
137 | 4.98k | UN8x4_MUL_UN8 (s, m); |
138 | | |
139 | 4.98k | return s; |
140 | 5.53k | } |
141 | | |
142 | | static void |
143 | | combine_clear (pixman_implementation_t *imp, |
144 | | pixman_op_t op, |
145 | | uint32_t * dest, |
146 | | const uint32_t * src, |
147 | | const uint32_t * mask, |
148 | | int width) |
149 | 0 | { |
150 | 0 | memset (dest, 0, width * sizeof (uint32_t)); |
151 | 0 | } |
152 | | |
153 | | static void |
154 | | combine_dst (pixman_implementation_t *imp, |
155 | | pixman_op_t op, |
156 | | uint32_t * dest, |
157 | | const uint32_t * src, |
158 | | const uint32_t * mask, |
159 | | int width) |
160 | 0 | { |
161 | 0 | return; |
162 | 0 | } |
163 | | |
164 | | static void |
165 | | combine_src_u (pixman_implementation_t *imp, |
166 | | pixman_op_t op, |
167 | | uint32_t * dest, |
168 | | const uint32_t * src, |
169 | | const uint32_t * mask, |
170 | | int width) |
171 | 207 | { |
172 | 207 | int i; |
173 | | |
174 | 207 | if (!mask) |
175 | 0 | { |
176 | 0 | memcpy (dest, src, width * sizeof (uint32_t)); |
177 | 0 | } |
178 | 207 | else |
179 | 207 | { |
180 | 5.73k | for (i = 0; i < width; ++i) |
181 | 5.53k | { |
182 | 5.53k | uint32_t s = combine_mask (src, mask, i); |
183 | | |
184 | 5.53k | *(dest + i) = s; |
185 | 5.53k | } |
186 | 207 | } |
187 | 207 | } |
188 | | |
189 | | static void |
190 | | combine_over_u (pixman_implementation_t *imp, |
191 | | pixman_op_t op, |
192 | | uint32_t * dest, |
193 | | const uint32_t * src, |
194 | | const uint32_t * mask, |
195 | | int width) |
196 | 0 | { |
197 | 0 | int i; |
198 | |
199 | 0 | if (!mask) |
200 | 0 | { |
201 | 0 | for (i = 0; i < width; ++i) |
202 | 0 | { |
203 | 0 | uint32_t s = *(src + i); |
204 | 0 | uint32_t a = ALPHA_8 (s); |
205 | 0 | if (a == 0xFF) |
206 | 0 | { |
207 | 0 | *(dest + i) = s; |
208 | 0 | } |
209 | 0 | else if (s) |
210 | 0 | { |
211 | 0 | uint32_t d = *(dest + i); |
212 | 0 | uint32_t ia = a ^ 0xFF; |
213 | 0 | UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s); |
214 | 0 | *(dest + i) = d; |
215 | 0 | } |
216 | 0 | } |
217 | 0 | } |
218 | 0 | else |
219 | 0 | { |
220 | 0 | for (i = 0; i < width; ++i) |
221 | 0 | { |
222 | 0 | uint32_t m = ALPHA_8 (*(mask + i)); |
223 | 0 | if (m == 0xFF) |
224 | 0 | { |
225 | 0 | uint32_t s = *(src + i); |
226 | 0 | uint32_t a = ALPHA_8 (s); |
227 | 0 | if (a == 0xFF) |
228 | 0 | { |
229 | 0 | *(dest + i) = s; |
230 | 0 | } |
231 | 0 | else if (s) |
232 | 0 | { |
233 | 0 | uint32_t d = *(dest + i); |
234 | 0 | uint32_t ia = a ^ 0xFF; |
235 | 0 | UN8x4_MUL_UN8_ADD_UN8x4 (d, ia, s); |
236 | 0 | *(dest + i) = d; |
237 | 0 | } |
238 | 0 | } |
239 | 0 | else if (m) |
240 | 0 | { |
241 | 0 | uint32_t s = *(src + i); |
242 | 0 | if (s) |
243 | 0 | { |
244 | 0 | uint32_t d = *(dest + i); |
245 | 0 | UN8x4_MUL_UN8 (s, m); |
246 | 0 | UN8x4_MUL_UN8_ADD_UN8x4 (d, ALPHA_8 (~s), s); |
247 | 0 | *(dest + i) = d; |
248 | 0 | } |
249 | 0 | } |
250 | 0 | } |
251 | 0 | } |
252 | 0 | } |
253 | | |
254 | | static void |
255 | | combine_over_reverse_u (pixman_implementation_t *imp, |
256 | | pixman_op_t op, |
257 | | uint32_t * dest, |
258 | | const uint32_t * src, |
259 | | const uint32_t * mask, |
260 | | int width) |
261 | 0 | { |
262 | 0 | int i; |
263 | |
264 | 0 | for (i = 0; i < width; ++i) |
265 | 0 | { |
266 | 0 | uint32_t s = combine_mask (src, mask, i); |
267 | 0 | uint32_t d = *(dest + i); |
268 | 0 | uint32_t ia = ALPHA_8 (~*(dest + i)); |
269 | 0 | UN8x4_MUL_UN8_ADD_UN8x4 (s, ia, d); |
270 | 0 | *(dest + i) = s; |
271 | 0 | } |
272 | 0 | } |
273 | | |
274 | | static void |
275 | | combine_in_u (pixman_implementation_t *imp, |
276 | | pixman_op_t op, |
277 | | uint32_t * dest, |
278 | | const uint32_t * src, |
279 | | const uint32_t * mask, |
280 | | int width) |
281 | 0 | { |
282 | 0 | int i; |
283 | |
284 | 0 | for (i = 0; i < width; ++i) |
285 | 0 | { |
286 | 0 | uint32_t s = combine_mask (src, mask, i); |
287 | 0 | uint32_t a = ALPHA_8 (*(dest + i)); |
288 | 0 | UN8x4_MUL_UN8 (s, a); |
289 | 0 | *(dest + i) = s; |
290 | 0 | } |
291 | 0 | } |
292 | | |
293 | | static void |
294 | | combine_in_reverse_u (pixman_implementation_t *imp, |
295 | | pixman_op_t op, |
296 | | uint32_t * dest, |
297 | | const uint32_t * src, |
298 | | const uint32_t * mask, |
299 | | int width) |
300 | 0 | { |
301 | 0 | int i; |
302 | |
303 | 0 | for (i = 0; i < width; ++i) |
304 | 0 | { |
305 | 0 | uint32_t s = combine_mask (src, mask, i); |
306 | 0 | uint32_t d = *(dest + i); |
307 | 0 | uint32_t a = ALPHA_8 (s); |
308 | 0 | UN8x4_MUL_UN8 (d, a); |
309 | 0 | *(dest + i) = d; |
310 | 0 | } |
311 | 0 | } |
312 | | |
313 | | static void |
314 | | combine_out_u (pixman_implementation_t *imp, |
315 | | pixman_op_t op, |
316 | | uint32_t * dest, |
317 | | const uint32_t * src, |
318 | | const uint32_t * mask, |
319 | | int width) |
320 | 0 | { |
321 | 0 | int i; |
322 | |
323 | 0 | for (i = 0; i < width; ++i) |
324 | 0 | { |
325 | 0 | uint32_t s = combine_mask (src, mask, i); |
326 | 0 | uint32_t a = ALPHA_8 (~*(dest + i)); |
327 | 0 | UN8x4_MUL_UN8 (s, a); |
328 | 0 | *(dest + i) = s; |
329 | 0 | } |
330 | 0 | } |
331 | | |
332 | | static void |
333 | | combine_out_reverse_u (pixman_implementation_t *imp, |
334 | | pixman_op_t op, |
335 | | uint32_t * dest, |
336 | | const uint32_t * src, |
337 | | const uint32_t * mask, |
338 | | int width) |
339 | 0 | { |
340 | 0 | int i; |
341 | |
342 | 0 | for (i = 0; i < width; ++i) |
343 | 0 | { |
344 | 0 | uint32_t s = combine_mask (src, mask, i); |
345 | 0 | uint32_t d = *(dest + i); |
346 | 0 | uint32_t a = ALPHA_8 (~s); |
347 | 0 | UN8x4_MUL_UN8 (d, a); |
348 | 0 | *(dest + i) = d; |
349 | 0 | } |
350 | 0 | } |
351 | | |
352 | | static void |
353 | | combine_atop_u (pixman_implementation_t *imp, |
354 | | pixman_op_t op, |
355 | | uint32_t * dest, |
356 | | const uint32_t * src, |
357 | | const uint32_t * mask, |
358 | | int width) |
359 | 0 | { |
360 | 0 | int i; |
361 | |
362 | 0 | for (i = 0; i < width; ++i) |
363 | 0 | { |
364 | 0 | uint32_t s = combine_mask (src, mask, i); |
365 | 0 | uint32_t d = *(dest + i); |
366 | 0 | uint32_t dest_a = ALPHA_8 (d); |
367 | 0 | uint32_t src_ia = ALPHA_8 (~s); |
368 | |
369 | 0 | UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_a, d, src_ia); |
370 | 0 | *(dest + i) = s; |
371 | 0 | } |
372 | 0 | } |
373 | | |
374 | | static void |
375 | | combine_atop_reverse_u (pixman_implementation_t *imp, |
376 | | pixman_op_t op, |
377 | | uint32_t * dest, |
378 | | const uint32_t * src, |
379 | | const uint32_t * mask, |
380 | | int width) |
381 | 0 | { |
382 | 0 | int i; |
383 | |
384 | 0 | for (i = 0; i < width; ++i) |
385 | 0 | { |
386 | 0 | uint32_t s = combine_mask (src, mask, i); |
387 | 0 | uint32_t d = *(dest + i); |
388 | 0 | uint32_t src_a = ALPHA_8 (s); |
389 | 0 | uint32_t dest_ia = ALPHA_8 (~d); |
390 | |
391 | 0 | UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_a); |
392 | 0 | *(dest + i) = s; |
393 | 0 | } |
394 | 0 | } |
395 | | |
396 | | static void |
397 | | combine_xor_u (pixman_implementation_t *imp, |
398 | | pixman_op_t op, |
399 | | uint32_t * dest, |
400 | | const uint32_t * src, |
401 | | const uint32_t * mask, |
402 | | int width) |
403 | 0 | { |
404 | 0 | int i; |
405 | |
406 | 0 | for (i = 0; i < width; ++i) |
407 | 0 | { |
408 | 0 | uint32_t s = combine_mask (src, mask, i); |
409 | 0 | uint32_t d = *(dest + i); |
410 | 0 | uint32_t src_ia = ALPHA_8 (~s); |
411 | 0 | uint32_t dest_ia = ALPHA_8 (~d); |
412 | |
413 | 0 | UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (s, dest_ia, d, src_ia); |
414 | 0 | *(dest + i) = s; |
415 | 0 | } |
416 | 0 | } |
417 | | |
418 | | static void |
419 | | combine_add_u (pixman_implementation_t *imp, |
420 | | pixman_op_t op, |
421 | | uint32_t * dest, |
422 | | const uint32_t * src, |
423 | | const uint32_t * mask, |
424 | | int width) |
425 | 0 | { |
426 | 0 | int i; |
427 | |
428 | 0 | for (i = 0; i < width; ++i) |
429 | 0 | { |
430 | 0 | uint32_t s = combine_mask (src, mask, i); |
431 | 0 | uint32_t d = *(dest + i); |
432 | 0 | UN8x4_ADD_UN8x4 (d, s); |
433 | 0 | *(dest + i) = d; |
434 | 0 | } |
435 | 0 | } |
436 | | |
437 | | /* |
438 | | * PDF blend modes: |
439 | | * |
440 | | * The following blend modes have been taken from the PDF ISO 32000 |
441 | | * specification, which at this point in time is available from |
442 | | * |
443 | | * http://www.adobe.com/devnet/pdf/pdf_reference.html |
444 | | * |
445 | | * The specific documents of interest are the PDF spec itself: |
446 | | * |
447 | | * http://wwwimages.adobe.com/www.adobe.com/content/dam/Adobe/en/devnet/pdf/pdfs/PDF32000_2008.pdf |
448 | | * |
449 | | * chapters 11.3.5 and 11.3.6 and a later supplement for Adobe Acrobat |
450 | | * 9.1 and Reader 9.1: |
451 | | * |
452 | | * http://wwwimages.adobe.com/www.adobe.com/content/dam/Adobe/en/devnet/pdf/pdfs/adobe_supplement_iso32000_1.pdf |
453 | | * |
454 | | * that clarifies the specifications for blend modes ColorDodge and |
455 | | * ColorBurn. |
456 | | * |
457 | | * The formula for computing the final pixel color given in 11.3.6 is: |
458 | | * |
459 | | * αr × Cr = (1 – αs) × αb × Cb + (1 – αb) × αs × Cs + αb × αs × B(Cb, Cs) |
460 | | * |
461 | | * where B() is the blend function. When B(Cb, Cs) = Cs, this formula |
462 | | * reduces to the regular OVER operator. |
463 | | * |
464 | | * Cs and Cb are not premultiplied, so in our implementation we instead |
465 | | * use: |
466 | | * |
467 | | * cr = (1 – αs) × cb + (1 – αb) × cs + αb × αs × B (cb/αb, cs/αs) |
468 | | * |
469 | | * where cr, cs, and cb are premultiplied colors, and where the |
470 | | * |
471 | | * αb × αs × B(cb/αb, cs/αs) |
472 | | * |
473 | | * part is first arithmetically simplified under the assumption that αb |
474 | | * and αs are not 0, and then updated to produce a meaningful result when |
475 | | * they are. |
476 | | * |
477 | | * For all the blend mode operators, the alpha channel is given by |
478 | | * |
479 | | * αr = αs + αb – αb × αs |
480 | | */ |
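As a reference for the fixed-point combiners that follow, the premultiplied formula above can be written directly in floating point. This is only an illustrative sketch under the stated assumptions (the names and the zero-alpha fallback are the editor's, not pixman's); the real combiners perform the same arithmetic on 8-bit premultiplied channels.

    /* cb, cs are premultiplied channel values and ab, as the alphas, all in [0, 1];
     * B is an unpremultiplied blend function such as multiply or screen. */
    static double
    blend_channel_float (double cb, double ab, double cs, double as,
                         double (*B) (double, double))
    {
        /* Guard the divisions: with a zero alpha the corresponding
         * premultiplied channel is zero as well. */
        double blend = (ab > 0.0 && as > 0.0) ? B (cb / ab, cs / as) : 0.0;

        return (1.0 - as) * cb + (1.0 - ab) * cs + ab * as * blend;
    }

The result alpha is then the usual union of coverages, αr = αs + αb – αs × αb, which is what the ra computation in the macros below evaluates in 8-bit arithmetic.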
481 | | |
482 | | /* |
483 | | * Multiply |
484 | | * |
485 | | * ad * as * B(d / ad, s / as) |
486 | | * = ad * as * d/ad * s/as |
487 | | * = d * s |
488 | | * |
489 | | */ |
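A one-channel sketch of what combine_multiply_u below computes per pixel may help; it is illustrative only (a truncating division by 255 is used here, whereas the UN8x4_* macros round to nearest and saturate):

    #include <stdint.h>

    static uint8_t
    multiply_channel (uint8_t d, uint8_t da, uint8_t s, uint8_t sa)
    {
        /* (1 - as) * cb + (1 - ab) * cs -- the OVER-style terms */
        uint32_t r = (uint32_t) d * (255 - sa) / 255
                   + (uint32_t) s * (255 - da) / 255;

        /* + cb * cs -- premultiplied channels multiply directly */
        r += (uint32_t) d * s / 255;

        /* saturating add, as UN8x4_ADD_UN8x4 does */
        return r > 255 ? 255 : (uint8_t) r;
    }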
490 | | static void |
491 | | combine_multiply_u (pixman_implementation_t *imp, |
492 | | pixman_op_t op, |
493 | | uint32_t * dest, |
494 | | const uint32_t * src, |
495 | | const uint32_t * mask, |
496 | | int width) |
497 | 0 | { |
498 | 0 | int i; |
499 | |
500 | 0 | for (i = 0; i < width; ++i) |
501 | 0 | { |
502 | 0 | uint32_t s = combine_mask (src, mask, i); |
503 | 0 | uint32_t d = *(dest + i); |
504 | 0 | uint32_t ss = s; |
505 | 0 | uint32_t src_ia = ALPHA_8 (~s); |
506 | 0 | uint32_t dest_ia = ALPHA_8 (~d); |
507 | |
508 | 0 | UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (ss, dest_ia, d, src_ia); |
509 | 0 | UN8x4_MUL_UN8x4 (d, s); |
510 | 0 | UN8x4_ADD_UN8x4 (d, ss); |
511 | |
512 | 0 | *(dest + i) = d; |
513 | 0 | } |
514 | 0 | } |
515 | | |
516 | | static void |
517 | | combine_multiply_ca (pixman_implementation_t *imp, |
518 | | pixman_op_t op, |
519 | | uint32_t * dest, |
520 | | const uint32_t * src, |
521 | | const uint32_t * mask, |
522 | | int width) |
523 | 0 | { |
524 | 0 | int i; |
525 | |
526 | 0 | for (i = 0; i < width; ++i) |
527 | 0 | { |
528 | 0 | uint32_t m = *(mask + i); |
529 | 0 | uint32_t s = *(src + i); |
530 | 0 | uint32_t d = *(dest + i); |
531 | 0 | uint32_t r = d; |
532 | 0 | uint32_t dest_ia = ALPHA_8 (~d); |
533 | |
534 | 0 | combine_mask_ca (&s, &m); |
535 | |
536 | 0 | UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (r, ~m, s, dest_ia); |
537 | 0 | UN8x4_MUL_UN8x4 (d, s); |
538 | 0 | UN8x4_ADD_UN8x4 (r, d); |
539 | |
540 | 0 | *(dest + i) = r; |
541 | 0 | } |
542 | 0 | } |
543 | | |
544 | | #define CLAMP(v, low, high) \ |
545 | 0 | do \ |
546 | 0 | { \ |
547 | 0 | if (v < (low)) \ |
548 | 0 | v = (low); \ |
549 | 0 | if (v > (high)) \ |
550 | 0 | v = (high); \ |
551 | 0 | } while (0) |
552 | | |
553 | | #define PDF_SEPARABLE_BLEND_MODE(name) \ |
554 | | static void \ |
555 | | combine_ ## name ## _u (pixman_implementation_t *imp, \ |
556 | | pixman_op_t op, \ |
557 | | uint32_t * dest, \ |
558 | | const uint32_t * src, \ |
559 | | const uint32_t * mask, \ |
560 | | int width) \ |
561 | 0 | { \ |
562 | 0 | int i; \ |
563 | 0 | for (i = 0; i < width; ++i) \ |
564 | 0 | { \ |
565 | 0 | uint32_t s = combine_mask (src, mask, i); \ |
566 | 0 | uint32_t d = *(dest + i); \ |
567 | 0 | uint8_t sa = ALPHA_8 (s); \ |
568 | 0 | uint8_t isa = ~sa; \ |
569 | 0 | uint8_t da = ALPHA_8 (d); \ |
570 | 0 | uint8_t ida = ~da; \ |
571 | 0 | uint32_t ra, rr, rg, rb; \ |
572 | 0 | \ |
573 | 0 | ra = da * 0xff + sa * 0xff - sa * da; \ |
574 | 0 | rr = isa * RED_8 (d) + ida * RED_8 (s); \ |
575 | 0 | rg = isa * GREEN_8 (d) + ida * GREEN_8 (s); \ |
576 | 0 | rb = isa * BLUE_8 (d) + ida * BLUE_8 (s); \ |
577 | 0 | \ |
578 | 0 | rr += blend_ ## name (RED_8 (d), da, RED_8 (s), sa); \ |
579 | 0 | rg += blend_ ## name (GREEN_8 (d), da, GREEN_8 (s), sa); \ |
580 | 0 | rb += blend_ ## name (BLUE_8 (d), da, BLUE_8 (s), sa); \ |
581 | 0 | \ |
582 | 0 | CLAMP (ra, 0, 255 * 255); \ |
583 | 0 | CLAMP (rr, 0, 255 * 255); \ |
584 | 0 | CLAMP (rg, 0, 255 * 255); \ |
585 | 0 | CLAMP (rb, 0, 255 * 255); \ |
586 | 0 | \ |
587 | 0 | ra = DIV_ONE_UN8 (ra); \ |
588 | 0 | rr = DIV_ONE_UN8 (rr); \ |
589 | 0 | rg = DIV_ONE_UN8 (rg); \ |
590 | 0 | rb = DIV_ONE_UN8 (rb); \ |
591 | 0 | \ |
592 | 0 | *(dest + i) = ra << 24 | rr << 16 | rg << 8 | rb; \ |
593 | 0 | } \ |
594 | 0 | } \ |
Unexecuted instantiations: pixman-combine32.c:combine_screen_u, combine_overlay_u, combine_darken_u, combine_lighten_u, combine_hard_light_u, combine_difference_u, combine_exclusion_u
595 | | \ |
596 | | static void \ |
597 | | combine_ ## name ## _ca (pixman_implementation_t *imp, \ |
598 | | pixman_op_t op, \ |
599 | | uint32_t * dest, \ |
600 | | const uint32_t * src, \ |
601 | | const uint32_t * mask, \ |
602 | | int width) \ |
603 | 0 | { \ |
604 | 0 | int i; \ |
605 | 0 | for (i = 0; i < width; ++i) \ |
606 | 0 | { \ |
607 | 0 | uint32_t m = *(mask + i); \ |
608 | 0 | uint32_t s = *(src + i); \ |
609 | 0 | uint32_t d = *(dest + i); \ |
610 | 0 | uint8_t da = ALPHA_8 (d); \ |
611 | 0 | uint8_t ida = ~da; \ |
612 | 0 | uint32_t ra, rr, rg, rb; \ |
613 | 0 | uint8_t ira, iga, iba; \ |
614 | 0 | \ |
615 | 0 | combine_mask_ca (&s, &m); \ |
616 | 0 | \ |
617 | 0 | ira = ~RED_8 (m); \ |
618 | 0 | iga = ~GREEN_8 (m); \ |
619 | 0 | iba = ~BLUE_8 (m); \ |
620 | 0 | \ |
621 | 0 | ra = da * 0xff + ALPHA_8 (s) * 0xff - ALPHA_8 (s) * da; \ |
622 | 0 | rr = ira * RED_8 (d) + ida * RED_8 (s); \ |
623 | 0 | rg = iga * GREEN_8 (d) + ida * GREEN_8 (s); \ |
624 | 0 | rb = iba * BLUE_8 (d) + ida * BLUE_8 (s); \ |
625 | 0 | \ |
626 | 0 | rr += blend_ ## name (RED_8 (d), da, RED_8 (s), RED_8 (m)); \ |
627 | 0 | rg += blend_ ## name (GREEN_8 (d), da, GREEN_8 (s), GREEN_8 (m)); \ |
628 | 0 | rb += blend_ ## name (BLUE_8 (d), da, BLUE_8 (s), BLUE_8 (m)); \ |
629 | 0 | \ |
630 | 0 | CLAMP (ra, 0, 255 * 255); \ |
631 | 0 | CLAMP (rr, 0, 255 * 255); \ |
632 | 0 | CLAMP (rg, 0, 255 * 255); \ |
633 | 0 | CLAMP (rb, 0, 255 * 255); \ |
634 | 0 | \ |
635 | 0 | ra = DIV_ONE_UN8 (ra); \ |
636 | 0 | rr = DIV_ONE_UN8 (rr); \ |
637 | 0 | rg = DIV_ONE_UN8 (rg); \ |
638 | 0 | rb = DIV_ONE_UN8 (rb); \ |
639 | 0 | \ |
640 | 0 | *(dest + i) = ra << 24 | rr << 16 | rg << 8 | rb; \ |
641 | 0 | } \ |
642 | 0 | } |
Unexecuted instantiations: pixman-combine32.c:combine_screen_ca, combine_overlay_ca, combine_darken_ca, combine_lighten_ca, combine_hard_light_ca, combine_difference_ca, combine_exclusion_ca
643 | | |
644 | | /* |
645 | | * Screen |
646 | | * |
647 | | * ad * as * B(d/ad, s/as) |
648 | | * = ad * as * (d/ad + s/as - s/as * d/ad) |
649 | | * = ad * s + as * d - s * d |
650 | | */ |
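A quick numeric check of the simplification (editor's example, values in [0, 1]): with as = ad = 0.5 and premultiplied s = d = 0.25 (0.5 unpremultiplied), the definition gives ad * as * B(d/ad, s/as) = 0.25 * (0.5 + 0.5 - 0.25) = 0.1875, and the simplified form gives ad * s + as * d - s * d = 0.125 + 0.125 - 0.0625 = 0.1875, as expected.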
651 | | static inline int32_t |
652 | | blend_screen (int32_t d, int32_t ad, int32_t s, int32_t as) |
653 | 0 | { |
654 | 0 | return s * ad + d * as - s * d; |
655 | 0 | } |
656 | | |
657 | | PDF_SEPARABLE_BLEND_MODE (screen) |
658 | | |
659 | | /* |
660 | | * Overlay |
661 | | * |
662 | | * ad * as * B(d/ad, s/as) |
663 | | * = ad * as * Hardlight (s, d) |
664 | | * = if (d / ad < 0.5) |
665 | | * as * ad * Multiply (s/as, 2 * d/ad) |
666 | | * else |
667 | | * as * ad * Screen (s/as, 2 * d / ad - 1) |
668 | | * = if (d < 0.5 * ad) |
669 | | * as * ad * s/as * 2 * d /ad |
670 | | * else |
671 | | * as * ad * (s/as + 2 * d / ad - 1 - s / as * (2 * d / ad - 1)) |
672 | | * = if (2 * d < ad) |
673 | | * 2 * s * d |
674 | | * else |
675 | | * ad * s + 2 * as * d - as * ad - ad * s * (2 * d / ad - 1) |
676 | | * = if (2 * d < ad) |
677 | | * 2 * s * d |
678 | | * else |
679 | | * as * ad - 2 * (ad - d) * (as - s) |
680 | | */ |
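The case split above can be written as a small floating-point sketch (illustrative names, values in [0, 1]); note that at the boundary 2 * d == ad both branches reduce to s * ad, so the piecewise blend term is continuous:

    static double
    overlay_blend_term (double d, double ad, double s, double as)
    {
        if (2.0 * d < ad)
            return 2.0 * s * d;                          /* multiply branch */
        else
            return as * ad - 2.0 * (ad - d) * (as - s);  /* screen branch  */
    }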
681 | | static inline int32_t |
682 | | blend_overlay (int32_t d, int32_t ad, int32_t s, int32_t as) |
683 | 0 | { |
684 | 0 | uint32_t r; |
685 | |
686 | 0 | if (2 * d < ad) |
687 | 0 | r = 2 * s * d; |
688 | 0 | else |
689 | 0 | r = as * ad - 2 * (ad - d) * (as - s); |
690 | |
691 | 0 | return r; |
692 | 0 | } |
693 | | |
694 | | PDF_SEPARABLE_BLEND_MODE (overlay) |
695 | | |
696 | | /* |
697 | | * Darken |
698 | | * |
699 | | * ad * as * B(d/ad, s/as) |
700 | | * = ad * as * MIN(d/ad, s/as) |
701 | | * = MIN (as * d, ad * s) |
702 | | */ |
703 | | static inline int32_t |
704 | | blend_darken (int32_t d, int32_t ad, int32_t s, int32_t as) |
705 | 0 | { |
706 | 0 | s = ad * s; |
707 | 0 | d = as * d; |
708 | |
709 | 0 | return s > d ? d : s; |
710 | 0 | } |
711 | | |
712 | | PDF_SEPARABLE_BLEND_MODE (darken) |
713 | | |
714 | | /* |
715 | | * Lighten |
716 | | * |
717 | | * ad * as * B(d/ad, s/as) |
718 | | * = ad * as * MAX(d/ad, s/as) |
719 | | * = MAX (as * d, ad * s) |
720 | | */ |
721 | | static inline int32_t |
722 | | blend_lighten (int32_t d, int32_t ad, int32_t s, int32_t as) |
723 | 0 | { |
724 | 0 | s = ad * s; |
725 | 0 | d = as * d; |
726 | | |
727 | 0 | return s > d ? s : d; |
728 | 0 | } |
729 | | |
730 | | PDF_SEPARABLE_BLEND_MODE (lighten) |
731 | | |
732 | | /* |
733 | | * Hard light |
734 | | * |
735 | | * ad * as * B(d/ad, s/as) |
736 | | * = if (s/as <= 0.5) |
737 | | * ad * as * Multiply (d/ad, 2 * s/as) |
738 | | * else |
739 | | * ad * as * Screen (d/ad, 2 * s/as - 1) |
740 | | * = if 2 * s <= as |
741 | | * ad * as * d/ad * 2 * s / as |
742 | | * else |
743 | | * ad * as * (d/ad + (2 * s/as - 1) + d/ad * (2 * s/as - 1)) |
744 | | * = if 2 * s <= as |
745 | | * 2 * s * d |
746 | | * else |
747 | | * as * ad - 2 * (ad - d) * (as - s) |
748 | | */ |
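Note that hard light is overlay with the roles of source and destination swapped: the case split tests 2 * s against as instead of 2 * d against ad, while the two resulting expressions are the same as in blend_overlay above.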
749 | | static inline int32_t |
750 | | blend_hard_light (int32_t d, int32_t ad, int32_t s, int32_t as) |
751 | 0 | { |
752 | 0 | if (2 * s < as) |
753 | 0 | return 2 * s * d; |
754 | 0 | else |
755 | 0 | return as * ad - 2 * (ad - d) * (as - s); |
756 | 0 | } |
757 | | |
758 | | PDF_SEPARABLE_BLEND_MODE (hard_light) |
759 | | |
760 | | /* |
761 | | * Difference |
762 | | * |
763 | | * ad * as * B(s/as, d/ad) |
764 | | * = ad * as * abs (s/as - d/ad) |
765 | | * = if (s/as <= d/ad) |
766 | | * ad * as * (d/ad - s/as) |
767 | | * else |
768 | | * ad * as * (s/as - d/ad) |
769 | | * = if (ad * s <= as * d) |
770 | | * as * d - ad * s |
771 | | * else |
772 | | * ad * s - as * d |
773 | | */ |
774 | | static inline int32_t |
775 | | blend_difference (int32_t d, int32_t ad, int32_t s, int32_t as) |
776 | 0 | { |
777 | 0 | int32_t das = d * as; |
778 | 0 | int32_t sad = s * ad; |
779 | |
780 | 0 | if (sad < das) |
781 | 0 | return das - sad; |
782 | 0 | else |
783 | 0 | return sad - das; |
784 | 0 | } |
785 | | |
786 | | PDF_SEPARABLE_BLEND_MODE (difference) |
787 | | |
788 | | /* |
789 | | * Exclusion |
790 | | * |
791 | | * ad * as * B(s/as, d/ad) |
792 | | * = ad * as * (d/ad + s/as - 2 * d/ad * s/as) |
793 | | * = as * d + ad * s - 2 * s * d |
794 | | */ |
795 | | |
796 | | /* This can be made faster by writing it directly instead of going through |
797 | | * PDF_SEPARABLE_BLEND_MODE, but that is left as a performance optimization. */ |
798 | | |
799 | | static inline int32_t |
800 | | blend_exclusion (int32_t d, int32_t ad, int32_t s, int32_t as) |
801 | 0 | { |
802 | 0 | return s * ad + d * as - 2 * d * s; |
803 | 0 | } |
804 | | |
805 | | PDF_SEPARABLE_BLEND_MODE (exclusion) |
806 | | |
807 | | #undef PDF_SEPARABLE_BLEND_MODE |
808 | | |
809 | | /* Component alpha combiners */ |
810 | | |
811 | | static void |
812 | | combine_clear_ca (pixman_implementation_t *imp, |
813 | | pixman_op_t op, |
814 | | uint32_t * dest, |
815 | | const uint32_t * src, |
816 | | const uint32_t * mask, |
817 | | int width) |
818 | 0 | { |
819 | 0 | memset (dest, 0, width * sizeof(uint32_t)); |
820 | 0 | } |
821 | | |
822 | | static void |
823 | | combine_src_ca (pixman_implementation_t *imp, |
824 | | pixman_op_t op, |
825 | | uint32_t * dest, |
826 | | const uint32_t * src, |
827 | | const uint32_t * mask, |
828 | | int width) |
829 | 0 | { |
830 | 0 | int i; |
831 | |
832 | 0 | for (i = 0; i < width; ++i) |
833 | 0 | { |
834 | 0 | uint32_t s = *(src + i); |
835 | 0 | uint32_t m = *(mask + i); |
836 | |
837 | 0 | combine_mask_value_ca (&s, &m); |
838 | |
839 | 0 | *(dest + i) = s; |
840 | 0 | } |
841 | 0 | } |
842 | | |
843 | | static void |
844 | | combine_over_ca (pixman_implementation_t *imp, |
845 | | pixman_op_t op, |
846 | | uint32_t * dest, |
847 | | const uint32_t * src, |
848 | | const uint32_t * mask, |
849 | | int width) |
850 | 0 | { |
851 | 0 | int i; |
852 | |
853 | 0 | for (i = 0; i < width; ++i) |
854 | 0 | { |
855 | 0 | uint32_t s = *(src + i); |
856 | 0 | uint32_t m = *(mask + i); |
857 | 0 | uint32_t a; |
858 | |
859 | 0 | combine_mask_ca (&s, &m); |
860 | |
861 | 0 | a = ~m; |
862 | 0 | if (a) |
863 | 0 | { |
864 | 0 | uint32_t d = *(dest + i); |
865 | 0 | UN8x4_MUL_UN8x4_ADD_UN8x4 (d, a, s); |
866 | 0 | s = d; |
867 | 0 | } |
868 | |
869 | 0 | *(dest + i) = s; |
870 | 0 | } |
871 | 0 | } |
872 | | |
873 | | static void |
874 | | combine_over_reverse_ca (pixman_implementation_t *imp, |
875 | | pixman_op_t op, |
876 | | uint32_t * dest, |
877 | | const uint32_t * src, |
878 | | const uint32_t * mask, |
879 | | int width) |
880 | 0 | { |
881 | 0 | int i; |
882 | |
883 | 0 | for (i = 0; i < width; ++i) |
884 | 0 | { |
885 | 0 | uint32_t d = *(dest + i); |
886 | 0 | uint32_t a = ~d >> A_SHIFT; |
887 | |
888 | 0 | if (a) |
889 | 0 | { |
890 | 0 | uint32_t s = *(src + i); |
891 | 0 | uint32_t m = *(mask + i); |
892 | |
893 | 0 | UN8x4_MUL_UN8x4 (s, m); |
894 | 0 | UN8x4_MUL_UN8_ADD_UN8x4 (s, a, d); |
895 | |
896 | 0 | *(dest + i) = s; |
897 | 0 | } |
898 | 0 | } |
899 | 0 | } |
900 | | |
901 | | static void |
902 | | combine_in_ca (pixman_implementation_t *imp, |
903 | | pixman_op_t op, |
904 | | uint32_t * dest, |
905 | | const uint32_t * src, |
906 | | const uint32_t * mask, |
907 | | int width) |
908 | 0 | { |
909 | 0 | int i; |
910 | |
911 | 0 | for (i = 0; i < width; ++i) |
912 | 0 | { |
913 | 0 | uint32_t d = *(dest + i); |
914 | 0 | uint16_t a = d >> A_SHIFT; |
915 | 0 | uint32_t s = 0; |
916 | |
917 | 0 | if (a) |
918 | 0 | { |
919 | 0 | uint32_t m = *(mask + i); |
920 | |
921 | 0 | s = *(src + i); |
922 | 0 | combine_mask_value_ca (&s, &m); |
923 | |
924 | 0 | if (a != MASK) |
925 | 0 | UN8x4_MUL_UN8 (s, a); |
926 | 0 | } |
927 | |
928 | 0 | *(dest + i) = s; |
929 | 0 | } |
930 | 0 | } |
931 | | |
932 | | static void |
933 | | combine_in_reverse_ca (pixman_implementation_t *imp, |
934 | | pixman_op_t op, |
935 | | uint32_t * dest, |
936 | | const uint32_t * src, |
937 | | const uint32_t * mask, |
938 | | int width) |
939 | 0 | { |
940 | 0 | int i; |
941 | |
942 | 0 | for (i = 0; i < width; ++i) |
943 | 0 | { |
944 | 0 | uint32_t s = *(src + i); |
945 | 0 | uint32_t m = *(mask + i); |
946 | 0 | uint32_t a; |
947 | |
948 | 0 | combine_mask_alpha_ca (&s, &m); |
949 | |
950 | 0 | a = m; |
951 | 0 | if (a != ~0) |
952 | 0 | { |
953 | 0 | uint32_t d = 0; |
954 | |
955 | 0 | if (a) |
956 | 0 | { |
957 | 0 | d = *(dest + i); |
958 | 0 | UN8x4_MUL_UN8x4 (d, a); |
959 | 0 | } |
960 | |
961 | 0 | *(dest + i) = d; |
962 | 0 | } |
963 | 0 | } |
964 | 0 | } |
965 | | |
966 | | static void |
967 | | combine_out_ca (pixman_implementation_t *imp, |
968 | | pixman_op_t op, |
969 | | uint32_t * dest, |
970 | | const uint32_t * src, |
971 | | const uint32_t * mask, |
972 | | int width) |
973 | 0 | { |
974 | 0 | int i; |
975 | |
976 | 0 | for (i = 0; i < width; ++i) |
977 | 0 | { |
978 | 0 | uint32_t d = *(dest + i); |
979 | 0 | uint16_t a = ~d >> A_SHIFT; |
980 | 0 | uint32_t s = 0; |
981 | |
982 | 0 | if (a) |
983 | 0 | { |
984 | 0 | uint32_t m = *(mask + i); |
985 | |
986 | 0 | s = *(src + i); |
987 | 0 | combine_mask_value_ca (&s, &m); |
988 | |
989 | 0 | if (a != MASK) |
990 | 0 | UN8x4_MUL_UN8 (s, a); |
991 | 0 | } |
992 | |
993 | 0 | *(dest + i) = s; |
994 | 0 | } |
995 | 0 | } |
996 | | |
997 | | static void |
998 | | combine_out_reverse_ca (pixman_implementation_t *imp, |
999 | | pixman_op_t op, |
1000 | | uint32_t * dest, |
1001 | | const uint32_t * src, |
1002 | | const uint32_t * mask, |
1003 | | int width) |
1004 | 0 | { |
1005 | 0 | int i; |
1006 | |
1007 | 0 | for (i = 0; i < width; ++i) |
1008 | 0 | { |
1009 | 0 | uint32_t s = *(src + i); |
1010 | 0 | uint32_t m = *(mask + i); |
1011 | 0 | uint32_t a; |
1012 | |
1013 | 0 | combine_mask_alpha_ca (&s, &m); |
1014 | |
1015 | 0 | a = ~m; |
1016 | 0 | if (a != ~0) |
1017 | 0 | { |
1018 | 0 | uint32_t d = 0; |
1019 | |
1020 | 0 | if (a) |
1021 | 0 | { |
1022 | 0 | d = *(dest + i); |
1023 | 0 | UN8x4_MUL_UN8x4 (d, a); |
1024 | 0 | } |
1025 | |
1026 | 0 | *(dest + i) = d; |
1027 | 0 | } |
1028 | 0 | } |
1029 | 0 | } |
1030 | | |
1031 | | static void |
1032 | | combine_atop_ca (pixman_implementation_t *imp, |
1033 | | pixman_op_t op, |
1034 | | uint32_t * dest, |
1035 | | const uint32_t * src, |
1036 | | const uint32_t * mask, |
1037 | | int width) |
1038 | 0 | { |
1039 | 0 | int i; |
1040 | |
1041 | 0 | for (i = 0; i < width; ++i) |
1042 | 0 | { |
1043 | 0 | uint32_t d = *(dest + i); |
1044 | 0 | uint32_t s = *(src + i); |
1045 | 0 | uint32_t m = *(mask + i); |
1046 | 0 | uint32_t ad; |
1047 | 0 | uint16_t as = d >> A_SHIFT; |
1048 | |
1049 | 0 | combine_mask_ca (&s, &m); |
1050 | |
1051 | 0 | ad = ~m; |
1052 | |
1053 | 0 | UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ad, s, as); |
1054 | |
1055 | 0 | *(dest + i) = d; |
1056 | 0 | } |
1057 | 0 | } |
1058 | | |
1059 | | static void |
1060 | | combine_atop_reverse_ca (pixman_implementation_t *imp, |
1061 | | pixman_op_t op, |
1062 | | uint32_t * dest, |
1063 | | const uint32_t * src, |
1064 | | const uint32_t * mask, |
1065 | | int width) |
1066 | 0 | { |
1067 | 0 | int i; |
1068 | |
1069 | 0 | for (i = 0; i < width; ++i) |
1070 | 0 | { |
1071 | 0 | uint32_t d = *(dest + i); |
1072 | 0 | uint32_t s = *(src + i); |
1073 | 0 | uint32_t m = *(mask + i); |
1074 | 0 | uint32_t ad; |
1075 | 0 | uint16_t as = ~d >> A_SHIFT; |
1076 | |
1077 | 0 | combine_mask_ca (&s, &m); |
1078 | |
1079 | 0 | ad = m; |
1080 | |
1081 | 0 | UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ad, s, as); |
1082 | |
1083 | 0 | *(dest + i) = d; |
1084 | 0 | } |
1085 | 0 | } |
1086 | | |
1087 | | static void |
1088 | | combine_xor_ca (pixman_implementation_t *imp, |
1089 | | pixman_op_t op, |
1090 | | uint32_t * dest, |
1091 | | const uint32_t * src, |
1092 | | const uint32_t * mask, |
1093 | | int width) |
1094 | 0 | { |
1095 | 0 | int i; |
1096 | |
1097 | 0 | for (i = 0; i < width; ++i) |
1098 | 0 | { |
1099 | 0 | uint32_t d = *(dest + i); |
1100 | 0 | uint32_t s = *(src + i); |
1101 | 0 | uint32_t m = *(mask + i); |
1102 | 0 | uint32_t ad; |
1103 | 0 | uint16_t as = ~d >> A_SHIFT; |
1104 | |
1105 | 0 | combine_mask_ca (&s, &m); |
1106 | |
1107 | 0 | ad = ~m; |
1108 | |
1109 | 0 | UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (d, ad, s, as); |
1110 | |
1111 | 0 | *(dest + i) = d; |
1112 | 0 | } |
1113 | 0 | } |
1114 | | |
1115 | | static void |
1116 | | combine_add_ca (pixman_implementation_t *imp, |
1117 | | pixman_op_t op, |
1118 | | uint32_t * dest, |
1119 | | const uint32_t * src, |
1120 | | const uint32_t * mask, |
1121 | | int width) |
1122 | 0 | { |
1123 | 0 | int i; |
1124 | |
1125 | 0 | for (i = 0; i < width; ++i) |
1126 | 0 | { |
1127 | 0 | uint32_t s = *(src + i); |
1128 | 0 | uint32_t m = *(mask + i); |
1129 | 0 | uint32_t d = *(dest + i); |
1130 | |
1131 | 0 | combine_mask_value_ca (&s, &m); |
1132 | |
1133 | 0 | UN8x4_ADD_UN8x4 (d, s); |
1134 | |
1135 | 0 | *(dest + i) = d; |
1136 | 0 | } |
1137 | 0 | } |
1138 | | |
1139 | | void |
1140 | | _pixman_setup_combiner_functions_32 (pixman_implementation_t *imp) |
1141 | 12 | { |
1142 | | /* Unified alpha */ |
1143 | 12 | imp->combine_32[PIXMAN_OP_CLEAR] = combine_clear; |
1144 | 12 | imp->combine_32[PIXMAN_OP_SRC] = combine_src_u; |
1145 | 12 | imp->combine_32[PIXMAN_OP_DST] = combine_dst; |
1146 | 12 | imp->combine_32[PIXMAN_OP_OVER] = combine_over_u; |
1147 | 12 | imp->combine_32[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_u; |
1148 | 12 | imp->combine_32[PIXMAN_OP_IN] = combine_in_u; |
1149 | 12 | imp->combine_32[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_u; |
1150 | 12 | imp->combine_32[PIXMAN_OP_OUT] = combine_out_u; |
1151 | 12 | imp->combine_32[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_u; |
1152 | 12 | imp->combine_32[PIXMAN_OP_ATOP] = combine_atop_u; |
1153 | 12 | imp->combine_32[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_u; |
1154 | 12 | imp->combine_32[PIXMAN_OP_XOR] = combine_xor_u; |
1155 | 12 | imp->combine_32[PIXMAN_OP_ADD] = combine_add_u; |
1156 | | |
1157 | 12 | imp->combine_32[PIXMAN_OP_MULTIPLY] = combine_multiply_u; |
1158 | 12 | imp->combine_32[PIXMAN_OP_SCREEN] = combine_screen_u; |
1159 | 12 | imp->combine_32[PIXMAN_OP_OVERLAY] = combine_overlay_u; |
1160 | 12 | imp->combine_32[PIXMAN_OP_DARKEN] = combine_darken_u; |
1161 | 12 | imp->combine_32[PIXMAN_OP_LIGHTEN] = combine_lighten_u; |
1162 | 12 | imp->combine_32[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_u; |
1163 | 12 | imp->combine_32[PIXMAN_OP_DIFFERENCE] = combine_difference_u; |
1164 | 12 | imp->combine_32[PIXMAN_OP_EXCLUSION] = combine_exclusion_u; |
1165 | | |
1166 | | /* Component alpha combiners */ |
1167 | 12 | imp->combine_32_ca[PIXMAN_OP_CLEAR] = combine_clear_ca; |
1168 | 12 | imp->combine_32_ca[PIXMAN_OP_SRC] = combine_src_ca; |
1169 | | /* dest */ |
1170 | 12 | imp->combine_32_ca[PIXMAN_OP_OVER] = combine_over_ca; |
1171 | 12 | imp->combine_32_ca[PIXMAN_OP_OVER_REVERSE] = combine_over_reverse_ca; |
1172 | 12 | imp->combine_32_ca[PIXMAN_OP_IN] = combine_in_ca; |
1173 | 12 | imp->combine_32_ca[PIXMAN_OP_IN_REVERSE] = combine_in_reverse_ca; |
1174 | 12 | imp->combine_32_ca[PIXMAN_OP_OUT] = combine_out_ca; |
1175 | 12 | imp->combine_32_ca[PIXMAN_OP_OUT_REVERSE] = combine_out_reverse_ca; |
1176 | 12 | imp->combine_32_ca[PIXMAN_OP_ATOP] = combine_atop_ca; |
1177 | 12 | imp->combine_32_ca[PIXMAN_OP_ATOP_REVERSE] = combine_atop_reverse_ca; |
1178 | 12 | imp->combine_32_ca[PIXMAN_OP_XOR] = combine_xor_ca; |
1179 | 12 | imp->combine_32_ca[PIXMAN_OP_ADD] = combine_add_ca; |
1180 | | |
1181 | 12 | imp->combine_32_ca[PIXMAN_OP_MULTIPLY] = combine_multiply_ca; |
1182 | 12 | imp->combine_32_ca[PIXMAN_OP_SCREEN] = combine_screen_ca; |
1183 | 12 | imp->combine_32_ca[PIXMAN_OP_OVERLAY] = combine_overlay_ca; |
1184 | 12 | imp->combine_32_ca[PIXMAN_OP_DARKEN] = combine_darken_ca; |
1185 | 12 | imp->combine_32_ca[PIXMAN_OP_LIGHTEN] = combine_lighten_ca; |
1186 | 12 | imp->combine_32_ca[PIXMAN_OP_HARD_LIGHT] = combine_hard_light_ca; |
1187 | 12 | imp->combine_32_ca[PIXMAN_OP_DIFFERENCE] = combine_difference_ca; |
1188 | 12 | imp->combine_32_ca[PIXMAN_OP_EXCLUSION] = combine_exclusion_ca; |
1189 | 12 | } |