/src/ghostpdl/base/gdevm1.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2023 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | /* Monobit "memory" (stored bitmap) device */ |
17 | | #include "memory_.h" |
18 | | #include "gserrors.h" |
19 | | #include "gx.h" |
20 | | #include "gxdevice.h" |
21 | | #include "gxdevmem.h" /* semi-public definitions */ |
22 | | #include "gdevmem.h" /* private definitions */ |
23 | | #include "gsrop.h" |
24 | | |
25 | | /* Either we can implement copy_mono directly, or we can call copy_rop to do |
26 | | * its work. We still do it directly for 'thin' regions by default. */ |
27 | | #define DO_COPY_MONO_BY_COPY_ROP |
28 | | |
29 | | /* Either we can implement tile_rect directly, or we can call copy_rop to do |
30 | | * its work. It used to be faster to do it directly, but no more. */ |
31 | | #define DO_TILE_RECT_BY_COPY_ROP |
32 | | |
33 | | /* Either we can implement fill_rect directly, or we can call copy_rop to do |
34 | | * its work. For now we still implement it directly, as for small tile widths |
35 | | * it wins over using run_rop. */ |
36 | | #undef DO_FILL_RECT_BY_COPY_ROP |
37 | | |
38 | | /* Calculate the X offset for a given Y value, */ |
39 | | /* taking shift into account if necessary. */ |
40 | | #define x_offset(px, ty, textures)\ |
41 | 0 | ((textures)->shift == 0 ? (px) :\ |
42 | 0 | (px) + (ty) / (textures)->rep_height * (textures)->rep_shift) |
43 | | |
44 | | /* ---------------- Monobit RasterOp ---------------- */ |
45 | | |
46 | | /* The guts of this function originally came from mem_mono_strip_copy_rop, |
47 | | * but have been split out here to allow other callers, such as the |
48 | | * functions below. In this function, rop works in terms of device pixel |
49 | | * values, not RGB-space values. */ |
/*
 * Parameters:
 *   dev           - the 1-bpp memory device written to.
 *   sdata/sourcex/sraster/id - source bitmap (substituted internally when
 *                   the rop turns out not to use S).
 *   scolors/tcolors - optional 2-entry palettes for source/texture; when
 *                   present they are folded into the rop below.
 *   textures      - optional strip texture (set to NULL below when the rop
 *                   does not use T).
 *   x,y,width,height - destination rectangle.
 *   phase_x/phase_y  - texture phase.
 *   lop           - logical operation; only its rop3 component is used here.
 *   planar_height - must be 0 for this (chunky) device.
 * Returns 0, or a negative error code.
 */
int
mem_mono_strip_copy_rop2_dev(gx_device * dev, const byte * sdata,
                             int sourcex,uint sraster, gx_bitmap_id id,
                             const gx_color_index * scolors,
                             const gx_strip_bitmap * textures,
                             const gx_color_index * tcolors,
                             int x, int y, int width, int height,
                             int phase_x, int phase_y,
                             gs_logical_operation_t lop,
                             uint planar_height)
{
    gx_device_memory *mdev = (gx_device_memory *) dev;
    gs_rop3_t rop = (gs_rop3_t)lop;   /* reduce the logical op to its rop3 part */
    uint draster = mdev->raster;
    uint traster;
    int line_count;
    byte *drow;
    const byte *srow;
    int ty;
    rop_run_op ropper;

    /* This device is chunky; a non-zero planar_height is a caller bug. */
    if (planar_height != 0) {
        dmlprintf(dev->memory, "mem_default_strip_copy_rop2 should never be called!\n");
        return_error(gs_error_Fatal);
    }

    /* Modify the raster operation according to the source palette. */
    /* (scolors[1] << 1) + scolors[0] packs the two 1-bit palette entries:
     * 0 -> S is constant 0, 1 -> S inverted, 2 -> identity, 3 -> S constant 1. */
    if (scolors != 0) {         /* Source with palette. */
        switch ((int)((scolors[1] << 1) + scolors[0])) {
            case 0:
                rop = rop3_know_S_0(rop);
                break;
            case 1:
                rop = rop3_invert_S(rop);
                break;
            case 2:
                break;
            case 3:
                rop = rop3_know_S_1(rop);
                break;
        }
    }
    /* Modify the raster operation according to the texture palette. */
    /* Same 2-bit encoding as for scolors above, applied to T. */
    if (tcolors != 0) {         /* Texture with palette. */
        switch ((int)((tcolors[1] << 1) + tcolors[0])) {
            case 0:
                rop = rop3_know_T_0(rop);
                break;
            case 1:
                rop = rop3_invert_T(rop);
                break;
            case 2:
                break;
            case 3:
                rop = rop3_know_T_1(rop);
                break;
        }
    }
    /* Handle constant source and/or texture, and other special cases. */
    {
#if !defined(DO_COPY_MONO_BY_COPY_ROP) || !defined(DO_TILE_RECT_BY_COPY_ROP)
        gx_color_index color0, color1;
#endif

        switch (rop_usage_table[rop]) {
            case rop_usage_none:
#ifndef DO_FILL_RECT_BY_COPY_ROP /* Fill rect calls us - don't call it */
                /* We're just filling with a constant. */
                return (*dev_proc(dev, fill_rectangle))
                    (dev, x, y, width, height, (gx_color_index) (rop & 1));
#else
                break;
#endif
            case rop_usage_D:
                /* This is either D (no-op) or ~D. */
                if (rop == rop3_D)
                    return 0;
                /* Code no_S inline, then finish with no_T. */
                /* Point the "source" at valid device memory so the generic
                 * loops below never read outside the allocation. */
                fit_fill(dev, x, y, width, height);
                sdata = scan_line_base(mdev, 0);
                sourcex = x;
                sraster = 0;
                goto no_T;
            case rop_usage_S:
#ifndef DO_COPY_MONO_BY_COPY_ROP /* Copy mono is calling us, don't call it! */
                /* This is either S or ~S, which copy_mono can handle. */
                if (rop == rop3_S)
                    color0 = 0, color1 = 1;
                else
                    color0 = 1, color1 = 0;
              do_copy:return (*dev_proc(dev, copy_mono))
                    (dev, sdata, sourcex, sraster, id, x, y, width, height,
                     color0, color1);
#else
                fit_copy(dev, sdata, sourcex, sraster, id, x, y, width, height);
                goto no_T;
                break;          /* unreachable; keeps the case well-formed */
#endif
            case rop_usage_DS:
#ifndef DO_COPY_MONO_BY_COPY_ROP /* Copy mono is calling us, don't call it! */
                /* This might be a case that copy_mono can handle. */
#define copy_case(c0, c1) color0 = c0, color1 = c1; goto do_copy;
                switch ((uint) rop) {   /* cast shuts up picky compilers */
                    case rop3_D & rop3_not(rop3_S):
                        copy_case(gx_no_color_index, 0);
                    case rop3_D | rop3_S:
                        copy_case(gx_no_color_index, 1);
                    case rop3_D & rop3_S:
                        copy_case(0, gx_no_color_index);
                    case rop3_D | rop3_not(rop3_S):
                        copy_case(1, gx_no_color_index);
                    default:;
                }
#undef copy_case
#endif
                fit_copy(dev, sdata, sourcex, sraster, id, x, y, width, height);
              no_T:             /* Texture is not used; textures may be garbage. */
                textures = NULL;
                break;
            case rop_usage_T:
#ifndef DO_TILE_RECT_BY_COPY_ROP /* Tile rect calls us - don't call it! */
                /* This is either T or ~T, which tile_rectangle can handle. */
                if (rop == rop3_T)
                    color0 = 0, color1 = 1;
                else
                    color0 = 1, color1 = 0;
              do_tile:return (*dev_proc(dev, strip_tile_rectangle))
                    (dev, textures, x, y, width, height, color0, color1,
                     phase_x, phase_y);
#else
                fit_fill(dev, x, y, width, height);
                break;
#endif
            case rop_usage_DT:
#ifndef DO_TILE_RECT_BY_COPY_ROP /* Tile rect calls us - don't call it! */
                /* This might be a case that tile_rectangle can handle. */
#define tile_case(c0, c1) color0 = c0, color1 = c1; goto do_tile;
                switch ((uint) rop) {   /* cast shuts up picky compilers */
                    case rop3_D & rop3_not(rop3_T):
                        tile_case(gx_no_color_index, 0);
                    case rop3_D | rop3_T:
                        tile_case(gx_no_color_index, 1);
                    case rop3_D & rop3_T:
                        tile_case(0, gx_no_color_index);
                    case rop3_D | rop3_not(rop3_T):
                        tile_case(1, gx_no_color_index);
                    default:;
                }
#undef tile_case
#endif
                fit_fill(dev, x, y, width, height);
                /* Source is not used; sdata et al may be garbage. */
                sdata = mdev->base;     /* arbitrary, as long as all */
                /* accesses are valid */
                sourcex = x;    /* guarantee no source skew */
                sraster = 0;
                break;
            default:            /* rop_usage_[D]ST */
                fit_copy(dev, sdata, sourcex, sraster, id, x, y, width, height);
        }
    }

#ifdef DEBUG
    if_debug1m('b', dev->memory, "final rop=0x%x\n", rop);
#endif

    /* Set up transfer parameters. */
    line_count = height;
    srow = sdata;
    drow = scan_line_base(mdev, y);
    traster = (textures ? textures->raster : 0);
    ty = y + phase_y;           /* texture row phase */

    if (textures == NULL) {
        /* No texture involved: pure D/S operation. */
        int dbit = x & 7;
        int sbit = sourcex & 7;
        drow += (x>>3);
        srow += (sourcex>>3);
        if (width < 32) {
            /* Do it the old, 'slow' way. rop runs of less than 1 word are
             * not likely to be a win with rop_run. */
            /* Loop over scan lines. */
            int sskew = sbit - dbit;
            const rop_proc proc = rop_proc_table[rop];
            byte lmask, rmask;

            lmask = 0xff >> dbit;
            width += dbit;      /* width now measured from the byte boundary */
            rmask = 0xff << (~(width - 1) & 7);
            if (sskew < 0)
                --srow, sskew += 8;
            if (width < 8)
                lmask &= rmask; /* whole run fits in one destination byte */
            for (; line_count-- > 0; drow += draster, srow += sraster) {
                byte *dptr = drow;
                const byte *sptr = srow;
                int left = width-8;
                /* Fetch a source byte, shifted to align with the destination. */
#define fetch1(ptr, skew)\
    (skew ? ((ptr[0] << skew) | (ptr[1] >> (8 - skew))) : *ptr)
                {
                    /* Left hand byte */
                    byte dbyte = *dptr;
                    byte sbyte = fetch1(sptr, sskew);
                    byte result = (*proc)(dbyte,sbyte,0);
                    sptr++;
                    *dptr++ = (result & lmask) | (dbyte & ~lmask);
                }
                if (left <= 0) /* if (width <= 8) we're done */
                    continue;
                left -= 8;      /* left = bits to go - 8 */
                while (left > 0)
                {
                    /* Full middle bytes: no masking required. */
                    byte dbyte = *dptr;
                    byte sbyte = fetch1(sptr, sskew);
                    sptr++;
                    *dptr++ = (*proc)(dbyte,sbyte,0);
                    left -= 8;
                }
                left += 8;      /* left = bits to go < 8 */
                {
                    /* Right hand (final partial) byte. */
                    byte dbyte = *dptr;
                    byte sbyte = fetch1(sptr, sskew);
                    byte result = (*proc)(dbyte,sbyte,0);
                    *dptr = (result & rmask) | (dbyte & ~rmask);
                }
#undef fetch1
            }
        } else {
            /* Use Rop run */
            if (rop_get_run_op(&ropper, rop, 1, 0)) {
                /* Loop over scan lines. */
                for (; line_count-- > 0; drow += draster, srow += sraster) {
                    rop_set_s_bitmap_subbyte(&ropper, srow, sbit);
                    rop_run_subbyte(&ropper, drow, dbit, width);
                }
                rop_release_run_op(&ropper);
            }
        }
    } else if (textures->rep_width > 32) {
        /* Wide texture: worth using the rop_run machinery. */
        /* Use Rop run */
        if (rop_get_run_op(&ropper, rop, 1, 0)) {
            /* Loop over scan lines. */
            for (; line_count-- > 0; drow += draster, srow += sraster, ++ty) {
                int sx = sourcex;
                int dx = x;
                int w = width;
                const byte *trow = textures->data + imod(ty, textures->rep_height) * traster;
                int xoff = x_offset(phase_x, ty, textures);
                int nw;
                int tx = imod(dx + xoff, textures->rep_width);

                /* Loop over (horizontal) copies of the tile. */
                for (; w > 0; sx += nw, dx += nw, w -= nw, tx = 0) {
                    int dbit = dx & 7;
                    int sbit = sx & 7;
                    int tbit = tx & 7;
                    byte *dptr = drow + (dx >> 3);
                    const byte *sptr = srow + (sx >> 3);
                    const byte *tptr = trow + (tx >> 3);
                    nw = min(w, textures->size.x - tx);
                    rop_set_s_bitmap_subbyte(&ropper, sptr, sbit);
                    rop_set_t_bitmap_subbyte(&ropper, tptr, tbit);
                    rop_run_subbyte(&ropper, dptr, dbit, nw);
                }
            }
            rop_release_run_op(&ropper);
        }
    } else if (srow == NULL) {
        /* Narrow texture, no source (D/T only). */
        /* Do it the old, 'slow' way. rop runs of less than 1 word are
         * not likely to be a win with rop_run. */
        /* Loop over scan lines. */
        const rop_proc proc = rop_proc_table[rop];
        for (; line_count-- > 0; drow += draster, ++ty) {
            int dx = x;
            int w = width;
            const byte *trow = textures->data + imod(ty, textures->rep_height) * traster;
            int xoff = x_offset(phase_x, ty, textures);
            int nw;
            int tx = imod(dx + xoff, textures->rep_width);

            /* Loop over (horizontal) copies of the tile. */
            for (; w > 0; dx += nw, w -= nw, tx = 0) {
                int dbit = dx & 7;
                int tbit = tx & 7;
                int tskew = tbit - dbit;
                int left = nw = min(w, textures->size.x - tx);
                byte lmask = 0xff >> dbit;
                byte rmask = 0xff << (~(dbit + nw - 1) & 7);
                byte mask = lmask;
                int nx = 8 - dbit;
                byte *dptr = drow + (dx >> 3);
                const byte *tptr = trow + (tx >> 3);

                if (tskew < 0)
                    --tptr, tskew += 8;
                for (; left > 0;
                     left -= nx, mask = 0xff, nx = 8,
                     ++dptr, ++tptr
                    ) {
                    byte dbyte = *dptr;

#define fetch1(ptr, skew)\
    (skew ? ((ptr[0] << skew) | (ptr[1] >> (8 - skew))) : *ptr)
                    byte tbyte = fetch1(tptr, tskew);

#undef fetch1
                    byte result = (*proc)(dbyte,0,tbyte);

                    /* mask only partial at the left and right edges */
                    if (left <= nx)
                        mask &= rmask;
                    *dptr = (mask == 0xff ? result :
                             (result & mask) | (dbyte & ~mask));
                }
            }
        }
    } else {
        /* Narrow texture with both source and texture (general case). */
        /* Do it the old, 'slow' way. rop runs of less than 1 word are
         * not likely to be a win with rop_run. */
        /* Loop over scan lines. */
        const rop_proc proc = rop_proc_table[rop];
        for (; line_count-- > 0; drow += draster, srow += sraster, ++ty) {
            int sx = sourcex;
            int dx = x;
            int w = width;
            const byte *trow = textures->data + imod(ty, textures->rep_height) * traster;
            int xoff = x_offset(phase_x, ty, textures);
            int nw;
            int tx = imod(dx + xoff, textures->rep_width);

            /* Loop over (horizontal) copies of the tile. */
            for (; w > 0; sx += nw, dx += nw, w -= nw, tx = 0) {
                int dbit = dx & 7;
                int sbit = sx & 7;
                int sskew = sbit - dbit;
                int tbit = tx & 7;
                int tskew = tbit - dbit;
                int left = nw = min(w, textures->size.x - tx);
                byte lmask = 0xff >> dbit;
                byte rmask = 0xff << (~(dbit + nw - 1) & 7);
                byte mask = lmask;
                int nx = 8 - dbit;
                byte *dptr = drow + (dx >> 3);
                const byte *sptr = srow + (sx >> 3);
                const byte *tptr = trow + (tx >> 3);

                if (sskew < 0)
                    --sptr, sskew += 8;
                if (tskew < 0)
                    --tptr, tskew += 8;
                for (; left > 0;
                     left -= nx, mask = 0xff, nx = 8,
                     ++dptr, ++sptr, ++tptr
                    ) {
                    byte dbyte = *dptr;

#define fetch1(ptr, skew)\
    (skew ? ((ptr[0] << skew) | (ptr[1] >> (8 - skew))) : *ptr)
                    byte sbyte = fetch1(sptr, sskew);
                    byte tbyte = fetch1(tptr, tskew);

#undef fetch1
                    byte result = (*proc)(dbyte,sbyte,tbyte);

                    /* mask only partial at the left and right edges */
                    if (left <= nx)
                        mask &= rmask;
                    *dptr = (mask == 0xff ? result :
                             (result & mask) | (dbyte & ~mask));
                }
            }
        }
    }

#ifdef DEBUG
    if (gs_debug_c('B'))
        debug_dump_bitmap(mdev->memory, scan_line_base(mdev, y), mdev->raster,
                          height, "final dest bits");
#endif
    return 0;
}
429 | | |
430 | | /* ================ Standard (byte-oriented) device ================ */ |
431 | | |
432 | | /* Procedures */ |
433 | | static dev_proc_map_rgb_color(mem_mono_map_rgb_color); |
434 | | static dev_proc_map_color_rgb(mem_mono_map_color_rgb); |
435 | | static dev_proc_strip_tile_rectangle(mem_mono_strip_tile_rectangle); |
436 | | |
/* The device descriptor. */
/* The instance is public. */
const gx_device_memory mem_mono_device =
    mem_device("image1", 0, 1, mem_dev_initialize_device_procs);

/* Function table for the 1-bit-deep memory device. */
const gdev_mem_functions gdev_mem_fns_1 =
{
    mem_mono_map_rgb_color,
    mem_mono_map_color_rgb,
    mem_mono_fill_rectangle,
    mem_mono_copy_mono,
    gx_default_copy_color,      /* color ops fall back to the defaults */
    gx_default_copy_alpha,
    mem_mono_strip_tile_rectangle,
    mem_mono_strip_copy_rop2,
    mem_get_bits_rectangle
};
454 | | |
455 | | /* Map color to/from RGB. This may be inverted. */ |
456 | | static gx_color_index |
457 | | mem_mono_map_rgb_color(gx_device * dev, const gx_color_value cv[]) |
458 | 0 | { |
459 | 0 | gx_device_memory * const mdev = (gx_device_memory *)dev; |
460 | 0 | return (gx_default_w_b_map_rgb_color(dev, cv) ^ mdev->palette.data[0]) & 1; |
461 | 0 | } |
462 | | |
/*
 * Map a device color back to RGB via the default white/black mapping.
 * NOTE(review): the XOR below mixes the *return code* of
 * gx_default_w_b_map_color_rgb with the palette byte, not the color value;
 * the original author already flagged this as suspect (see comment), and
 * the behavior is preserved here deliberately.
 */
static int
mem_mono_map_color_rgb(gx_device * dev, gx_color_index color,
                       gx_color_value prgb[3])
{
    gx_device_memory * const mdev = (gx_device_memory *)dev;
    /* NB code doesn't make sense... map_color_rgb procedures return an error code */
    return (gx_default_w_b_map_color_rgb(dev, color, prgb) ^ mdev->palette.data[0]) & 1;
}
471 | | |
/* Fill a rectangle with a color. */
/*
 * 'color' is a 1-bit device color (0 or 1).  The live (#else) path clips
 * the rectangle with fit_fill (which may return early for degenerate
 * rectangles) and fills directly.
 * NOTE(review): the DO_FILL_RECT_BY_COPY_ROP branch is currently disabled
 * (the symbol is #undef'd above) and calls mem_mono_strip_copy_rop, a name
 * not otherwise used in this file — verify it still links before enabling.
 */
int
mem_mono_fill_rectangle(gx_device * dev, int x, int y, int w, int h,
                        gx_color_index color)
{
    gx_device_memory * const mdev = (gx_device_memory *)dev;

#ifdef DO_FILL_RECT_BY_COPY_ROP
    return mem_mono_strip_copy_rop(dev, NULL, 0, 0, gx_no_bitmap_id, NULL,
                                   NULL, NULL,
                                   x, y, w, h, 0, 0,
                                   (color ? rop3_1 : rop3_0));
#else
    fit_fill(dev, x, y, w, h);
    /* Negating the 0/1 color yields an all-0s or all-1s fill pattern. */
    bits_fill_rectangle(scan_line_base(mdev, y), x, mdev->raster,
                        -(int)(mono_fill_chunk) color, w, h);
    return 0;
#endif
}
491 | | |
/* Convert x coordinate to byte offset in scan line. */
#define x_to_byte(x) ((x) >> 3)

/* Copy a monochrome bitmap. */
#undef mono_masks
#define mono_masks mono_copy_masks

/*
 * Fetch a chunk from the source.
 *
 * Since source and destination are both always big-endian,
 * fetching an aligned chunk never requires byte swapping.
 */
#define CFETCH_ALIGNED(cptr)\
  (*(const chunk *)(cptr))

/*
 * Note that the macros always cast cptr,
 * so it doesn't matter what the type of cptr is.
 */
/* cshift = chunk_bits - shift. */
#undef chunk
#if ARCH_IS_BIG_ENDIAN
#  define chunk uint
#  define CFETCH_RIGHT(cptr, shift, cshift)\
        (CFETCH_ALIGNED(cptr) >> shift)
#  define CFETCH_LEFT(cptr, shift, cshift)\
        (CFETCH_ALIGNED(cptr) << shift)
#  define CFETCH_USES_CSKEW 0
/* Fetch a chunk that straddles a chunk boundary. */
#  define CFETCH2(cptr, cskew, skew)\
    (CFETCH_LEFT(cptr, cskew, skew) |\
     CFETCH_RIGHT((const chunk *)(cptr) + 1, skew, cskew))
#else /* little-endian */
#  define chunk bits16
/* Per-byte shift masks indexed by shift amount (0..8); they zero the bits
 * that would otherwise cross a byte boundary within the 16-bit chunk. */
static const bits16 right_masks2[9] =
{
    0xffff, 0x7f7f, 0x3f3f, 0x1f1f, 0x0f0f, 0x0707, 0x0303, 0x0101, 0x0000
};
static const bits16 left_masks2[9] =
{
    0xffff, 0xfefe, 0xfcfc, 0xf8f8, 0xf0f0, 0xe0e0, 0xc0c0, 0x8080, 0x0000
};

#  define CCONT(cptr, off) (((const chunk *)(cptr))[off])
#  define CFETCH_RIGHT(cptr, shift, cshift)\
        ((shift) < 8 ?\
         ((CCONT(cptr, 0) >> (shift)) & right_masks2[shift]) |\
         (CCONT(cptr, 0) << (cshift)) :\
         ((chunk)*(const byte *)(cptr) << (cshift)) & 0xff00)
#  define CFETCH_LEFT(cptr, shift, cshift)\
        ((shift) < 8 ?\
         ((CCONT(cptr, 0) << (shift)) & left_masks2[shift]) |\
         (CCONT(cptr, 0) >> (cshift)) :\
         ((CCONT(cptr, 0) & 0xff00) >> (cshift)) & 0xff)
#  define CFETCH_USES_CSKEW 1
/* Fetch a chunk that straddles a chunk boundary. */
/* We can avoid testing the shift amount twice */
/* by expanding the CFETCH_LEFT/right macros in-line. */
#  define CFETCH2(cptr, cskew, skew)\
        ((cskew) < 8 ?\
         ((CCONT(cptr, 0) << (cskew)) & left_masks2[cskew]) |\
         (CCONT(cptr, 0) >> (skew)) |\
         (((chunk)(((const byte *)(cptr))[2]) << (cskew)) & 0xff00) :\
         (((CCONT(cptr, 0) & 0xff00) >> (skew)) & 0xff) |\
         ((CCONT(cptr, 1) >> (skew)) & right_masks2[skew]) |\
         (CCONT(cptr, 1) << (cskew)))
#endif

/* How a copy is carried out: OR/STORE/AND write ops, or FUNNY for the
 * combinations that should not occur (handled by FUNNY_CASE below). */
typedef enum {
    COPY_OR = 0, COPY_STORE, COPY_AND, COPY_FUNNY
} copy_function;
typedef struct {
    int invert;                 /* 0 or ~0: XORed with source bits */
    copy_function op;           /* how to combine with the destination */
} copy_mode;

/*
 * Map from <color0,color1> to copy_mode.
 * Logically, this is a 2-D array.
 * The indexing is (transparent, 0, 1, unused). */
/* Looked up as copy_modes[4*color0 + color1 + 5] with each color mapped
 * to -1 (transparent), 0, or 1 — see mem_mono_copy_mono. */
static const copy_mode copy_modes[16] = {
    {~0, COPY_FUNNY},           /* NN */
    {~0, COPY_AND},             /* N0 */
    {0, COPY_OR},               /* N1 */
    {0, 0},                     /* unused */
    {0, COPY_AND},              /* 0N */
    {0, COPY_FUNNY},            /* 00 */
    {0, COPY_STORE},            /* 01 */
    {0, 0},                     /* unused */
    {~0, COPY_OR},              /* 1N */
    {~0, COPY_STORE},           /* 10 */
    {0, COPY_FUNNY},            /* 11 */
    {0, 0},                     /* unused */
    {0, 0},                     /* unused */
    {0, 0},                     /* unused */
    {0, 0},                     /* unused */
    {0, 0},                     /* unused */
};
591 | | |
/* Handle the funny cases that aren't supposed to happen. */
#define FUNNY_CASE()\
  (invert ? gs_note_error(-1) :\
   mem_mono_fill_rectangle(dev, x, y, w, h, color0))

/*
 * Copy a monochrome bitmap onto the device.
 *   source_data/source_x/source_raster/id - source bits.
 *   x,y,w,h         - destination rectangle.
 *   color0/color1   - device colors for source bits 0/1; gx_no_color_index
 *                     means "leave the destination alone" for that value.
 * Wide copies (w >= 32) are delegated to mem_mono_strip_copy_rop2_dev with
 * an equivalent rop; narrow copies use the chunk-at-a-time code below.
 * Returns 0, or a negative error code for impossible color combinations.
 */
int
mem_mono_copy_mono(gx_device * dev,
 const byte * source_data, int source_x, int source_raster, gx_bitmap_id id,
   int x, int y, int w, int h, gx_color_index color0, gx_color_index color1)
{
    /* Macros for writing partial chunks. */
    /* The destination pointer is always named optr, */
    /* and must be declared as chunk *. */
    /* CINVERT may be temporarily redefined. */
#define CINVERT(bits) ((bits) ^ invert)
#define WRITE_OR_MASKED(bits, mask, off)\
  optr[off] |= (CINVERT(bits) & mask)
#define WRITE_STORE_MASKED(bits, mask, off)\
  optr[off] = ((optr[off] & ~mask) | (CINVERT(bits) & mask))
#define WRITE_AND_MASKED(bits, mask, off)\
  optr[off] &= (CINVERT(bits) | ~mask)
    /* Macros for writing full chunks. */
#define WRITE_OR(bits)  *optr |= CINVERT(bits)
#define WRITE_STORE(bits) *optr = CINVERT(bits)
#define WRITE_AND(bits) *optr &= CINVERT(bits)

    gx_device_memory * const mdev = (gx_device_memory *)dev;
    register const byte *bptr;  /* actually chunk * */
    int dbit, wleft;
    uint mask;
    copy_mode mode;

    DECLARE_SCAN_PTR_VARS(dbptr, byte *, dest_raster);
#define optr ((chunk *)dbptr)
    register int skew;
    register uint invert;

    fit_copy(dev, source_data, source_x, source_raster, id, x, y, w, h);
#ifdef DO_COPY_MONO_BY_COPY_ROP
    /* Build a rop whose S-part encodes color1 and whose ~S-part encodes
     * color0, with gx_no_color_index mapping to D (leave unchanged). */
    if (w >= 32) {
        return mem_mono_strip_copy_rop2_dev(dev, source_data, source_x,
                                            source_raster,
                                            id, NULL, NULL, NULL,
                                            x, y, w, h, 0, 0,
                                            ((color0 == gx_no_color_index ? rop3_D :
                                              color0 == 0 ? rop3_0 : rop3_1) & ~rop3_S) |
                                            ((color1 == gx_no_color_index ? rop3_D :
                                              color1 == 0 ? rop3_0 : rop3_1) & rop3_S),
                                            0);
    }
#endif /* !DO_COPY_MONO_BY_COPY_ROP */
#if gx_no_color_index_value != -1       /* hokey! */
    if (color0 == gx_no_color_index)
        color0 = -1;
    if (color1 == gx_no_color_index)
        color1 = -1;
#endif
    /* Index the copy_modes table: colors are now -1, 0, or 1. */
    mode = copy_modes[((int)color0 << 2) + (int)color1 + 5];
    invert = (uint)mode.invert; /* load register */
    SETUP_RECT_VARS(dbptr, byte *, dest_raster);
    bptr = source_data + ((source_x & ~chunk_align_bit_mask) >> 3);
    dbit = x & chunk_align_bit_mask;
    skew = dbit - (source_x & chunk_align_bit_mask);

/* Macro for incrementing to next chunk. */
#define NEXT_X_CHUNK()\
  bptr += chunk_bytes; dbptr += chunk_bytes
/* Common macro for the end of each scan line. */
#define END_Y_LOOP(sdelta, ddelta)\
  bptr += sdelta; dbptr += ddelta

    if ((wleft = w + dbit - chunk_bits) <= 0) { /* The entire operation fits in one (destination) chunk. */
        set_mono_thin_mask(mask, w, dbit);

#define WRITE_SINGLE(wr_op, src)\
  for ( ; ; )\
   { wr_op(src, mask, 0);\
     if ( --h == 0 ) break;\
     END_Y_LOOP(source_raster, dest_raster);\
   }

#define WRITE1_LOOP(src)\
  switch ( mode.op ) {\
    case COPY_OR: WRITE_SINGLE(WRITE_OR_MASKED, src); break;\
    case COPY_STORE: WRITE_SINGLE(WRITE_STORE_MASKED, src); break;\
    case COPY_AND: WRITE_SINGLE(WRITE_AND_MASKED, src); break;\
    default: return FUNNY_CASE();\
  }

        if (skew >= 0) {        /* single -> single, right/no shift */
            if (skew == 0) {    /* no shift */
                WRITE1_LOOP(CFETCH_ALIGNED(bptr));
            } else {            /* right shift */
#if CFETCH_USES_CSKEW
                int cskew = chunk_bits - skew;
#endif

                WRITE1_LOOP(CFETCH_RIGHT(bptr, skew, cskew));
            }
        } else if (wleft <= skew) {     /* single -> single, left shift */
#if CFETCH_USES_CSKEW
            int cskew = chunk_bits + skew;
#endif

            skew = -skew;
            WRITE1_LOOP(CFETCH_LEFT(bptr, skew, cskew));
        } else {                /* double -> single */
            int cskew = -skew;

            skew += chunk_bits;
            WRITE1_LOOP(CFETCH2(bptr, cskew, skew));
        }
#undef WRITE1_LOOP
#undef WRITE_SINGLE
    } else if (wleft <= skew) { /* 1 source chunk -> 2 destination chunks. */
        /* This is an important special case for */
        /* both characters and halftone tiles. */
        uint rmask;
        int cskew = chunk_bits - skew;

        set_mono_left_mask(mask, dbit);
        set_mono_right_mask(rmask, wleft);
        /* The source is inverted once per fetch below, so the write macros
         * must not invert again: redefine CINVERT as the identity. */
#undef CINVERT
#define CINVERT(bits) (bits)    /* pre-inverted here */

#if ARCH_IS_BIG_ENDIAN          /* no byte swapping */
# define WRITE_1TO2(wr_op)\
  for ( ; ; )\
   { register uint bits = CFETCH_ALIGNED(bptr) ^ invert;\
     wr_op(bits >> skew, mask, 0);\
     wr_op(bits << cskew, rmask, 1);\
     if ( --h == 0 ) break;\
     END_Y_LOOP(source_raster, dest_raster);\
   }
#else /* byte swapping */
# define WRITE_1TO2(wr_op)\
  for ( ; ; )\
   { wr_op(CFETCH_RIGHT(bptr, skew, cskew) ^ invert, mask, 0);\
     wr_op(CFETCH_LEFT(bptr, cskew, skew) ^ invert, rmask, 1);\
     if ( --h == 0 ) break;\
     END_Y_LOOP(source_raster, dest_raster);\
   }
#endif

        switch (mode.op) {
            case COPY_OR:
                WRITE_1TO2(WRITE_OR_MASKED);
                break;
            case COPY_STORE:
                WRITE_1TO2(WRITE_STORE_MASKED);
                break;
            case COPY_AND:
                WRITE_1TO2(WRITE_AND_MASKED);
                break;
            default:
                return FUNNY_CASE();
        }
        /* Restore the normal (inverting) definition of CINVERT. */
#undef CINVERT
#define CINVERT(bits) ((bits) ^ invert)
#undef WRITE_1TO2
    } else {                    /* More than one source chunk and more than one */
        /* destination chunk are involved. */
        uint rmask;
        int words = (wleft & ~chunk_bit_mask) >> 3;
        uint sskip = source_raster - words;
        uint dskip = dest_raster - words;
        register uint bits;

        set_mono_left_mask(mask, dbit);
        set_mono_right_mask(rmask, wleft & chunk_bit_mask);
        if (skew == 0) {        /* optimize the aligned case */

#define WRITE_ALIGNED(wr_op, wr_op_masked)\
  for ( ; ; )\
   { int count = wleft;\
     /* Do first partial chunk. */\
     wr_op_masked(CFETCH_ALIGNED(bptr), mask, 0);\
     /* Do full chunks. */\
     while ( (count -= chunk_bits) >= 0 )\
      { NEXT_X_CHUNK(); wr_op(CFETCH_ALIGNED(bptr)); }\
     /* Do last chunk */\
     if ( count > -chunk_bits )\
      { wr_op_masked(CFETCH_ALIGNED(bptr + chunk_bytes), rmask, 1); }\
     if ( --h == 0 ) break;\
     END_Y_LOOP(sskip, dskip);\
   }

            switch (mode.op) {
                case COPY_OR:
                    WRITE_ALIGNED(WRITE_OR, WRITE_OR_MASKED);
                    break;
                case COPY_STORE:
                    WRITE_ALIGNED(WRITE_STORE, WRITE_STORE_MASKED);
                    break;
                case COPY_AND:
                    WRITE_ALIGNED(WRITE_AND, WRITE_AND_MASKED);
                    break;
                default:
                    return FUNNY_CASE();
            }
#undef WRITE_ALIGNED
        } else {                /* not aligned */
            int cskew = -skew & chunk_bit_mask;
            /* For a negative skew, pre-advance the source pointer; the
             * prefetch in WRITE_UNALIGNED then reads the straddling pair
             * starting one chunk back. */
            bool case_right =
                (skew >= 0 ? true :
                 ((bptr += chunk_bytes), false));

            skew &= chunk_bit_mask;

#define WRITE_UNALIGNED(wr_op, wr_op_masked)\
  /* Prefetch partial word. */\
  bits =\
    (case_right ? CFETCH_RIGHT(bptr, skew, cskew) :\
     CFETCH2(bptr - chunk_bytes, cskew, skew));\
  wr_op_masked(bits, mask, 0);\
  /* Do full chunks. */\
  while ( count >= chunk_bits )\
    { bits = CFETCH2(bptr, cskew, skew);\
      NEXT_X_CHUNK(); wr_op(bits); count -= chunk_bits;\
    }\
  /* Do last chunk */\
  if ( count > 0 )\
    { bits = CFETCH_LEFT(bptr, cskew, skew);\
      if ( count > skew ) bits |= CFETCH_RIGHT(bptr + chunk_bytes, skew, cskew);\
      wr_op_masked(bits, rmask, 1);\
    }

            switch (mode.op) {
                case COPY_OR:
                    for (;;) {
                        int count = wleft;

                        WRITE_UNALIGNED(WRITE_OR, WRITE_OR_MASKED);
                        if (--h == 0)
                            break;
                        END_Y_LOOP(sskip, dskip);
                    }
                    break;
                case COPY_STORE:
                    for (;;) {
                        int count = wleft;

                        WRITE_UNALIGNED(WRITE_STORE, WRITE_STORE_MASKED);
                        if (--h == 0)
                            break;
                        END_Y_LOOP(sskip, dskip);
                    }
                    break;
                case COPY_AND:
                    for (;;) {
                        int count = wleft;

                        WRITE_UNALIGNED(WRITE_AND, WRITE_AND_MASKED);
                        if (--h == 0)
                            break;
                        END_Y_LOOP(sskip, dskip);
                    }
                    break;
                default /*case COPY_FUNNY */ :
                    return FUNNY_CASE();
            }
#undef WRITE_UNALIGNED
        }
    }
#undef END_Y_LOOP
#undef NEXT_X_CHUNK
    return 0;
#undef optr
}
861 | | |
862 | | /* Strip-tile with a monochrome halftone. */ |
863 | | /* This is a performance bottleneck for monochrome devices, */ |
864 | | /* so we re-implement it, even though it takes a lot of code. */ |
865 | | static int |
866 | | mem_mono_strip_tile_rectangle(gx_device * dev, |
867 | | register const gx_strip_bitmap * tiles, |
868 | | int tx, int y, int tw, int th, gx_color_index color0, gx_color_index color1, |
869 | | int px, int py) |
870 | 0 | { |
871 | 0 | #ifdef DO_TILE_RECT_BY_COPY_ROP |
872 | 0 | gs_logical_operation_t rop = ((color0 == gx_no_color_index ? rop3_D : |
873 | 0 | color0 == 0 ? rop3_0 : rop3_1) & ~rop3_T) | |
874 | 0 | ((color1 == gx_no_color_index ? rop3_D : |
875 | 0 | color1 == 0 ? rop3_0 : rop3_1) & rop3_T); |
876 | | |
877 | | /* If color0 == gx_no_color_index && color1 == gx_no_color_index then |
878 | | * we have a color pixmap, not a bitmap, so we want to use copy_color, |
879 | | * rather than copy_mono. This case gives us rop == 0xAA (no change). */ |
880 | 0 | if (rop == 0xAA) |
881 | 0 | return gx_default_strip_tile_rectangle(dev, tiles, tx, y, tw, th, |
882 | 0 | color0, color1, px, py); |
883 | 0 | return mem_mono_strip_copy_rop2_dev(dev, NULL, 0, 0, tiles->id, NULL, |
884 | 0 | tiles, NULL, |
885 | 0 | tx, y, tw, th, px, py, rop, 0); |
886 | | #else /* !USE_COPY_ROP */ |
887 | | gx_device_memory * const mdev = (gx_device_memory *)dev; |
888 | | register uint invert; |
889 | | int source_raster; |
890 | | uint tile_bits_size; |
891 | | const byte *source_data; |
892 | | const byte *end; |
893 | | int x, rw, w, h; |
894 | | register const byte *bptr; /* actually chunk * */ |
895 | | int dbit, wleft; |
896 | | uint mask; |
897 | | byte *dbase; |
898 | | |
899 | | DECLARE_SCAN_PTR_VARS(dbptr, byte *, dest_raster); |
900 | | #define optr ((chunk *)dbptr) |
901 | | register int skew; |
902 | | |
903 | | /* This implementation doesn't handle strips yet. */ |
904 | | if (color0 != (color1 ^ 1) || tiles->shift != 0) |
905 | | return gx_default_strip_tile_rectangle(dev, tiles, tx, y, tw, th, |
906 | | color0, color1, px, py); |
907 | | fit_fill(dev, tx, y, tw, th); |
908 | | invert = (uint)(-(int) color0); |
909 | | source_raster = tiles->raster; |
910 | | source_data = tiles->data + (imod(y + py, tiles->rep_height) * source_raster; |
911 | | tile_bits_size = tiles->size.y * source_raster; |
912 | | end = tiles->data + tile_bits_size; |
913 | | #undef END_Y_LOOP |
914 | | #define END_Y_LOOP(sdelta, ddelta)\ |
915 | | if ( end - bptr <= sdelta ) /* wrap around */\ |
916 | | bptr -= tile_bits_size;\ |
917 | | bptr += sdelta; dbptr += ddelta |
918 | | dest_raster = mdev->raster; |
919 | | dbase = scan_line_base(mdev, y); |
920 | | x = tx; |
921 | | rw = tw; |
922 | | /* |
923 | | * The outermost loop here works horizontally, one iteration per |
924 | | * copy of the tile. Note that all iterations except the first |
925 | | * have source_x = 0. |
926 | | */ |
927 | | { |
928 | | int source_x = imod(x + px, tiles->rep_width; |
929 | | |
930 | | w = tiles->size.x - source_x; |
931 | | bptr = source_data + ((source_x & ~chunk_align_bit_mask) >> 3); |
932 | | dbit = x & chunk_align_bit_mask; |
933 | | skew = dbit - (source_x & chunk_align_bit_mask); |
934 | | } |
935 | | outer:if (w > rw) |
936 | | w = rw; |
937 | | h = th; |
938 | | dbptr = dbase + ((x >> 3) & -chunk_align_bytes); |
939 | | if ((wleft = w + dbit - chunk_bits) <= 0) { /* The entire operation fits in one (destination) chunk. */ |
940 | | set_mono_thin_mask(mask, w, dbit); |
941 | | #define WRITE1_LOOP(src)\ |
942 | | for ( ; ; )\ |
943 | | { WRITE_STORE_MASKED(src, mask, 0);\ |
944 | | if ( --h == 0 ) break;\ |
945 | | END_Y_LOOP(source_raster, dest_raster);\ |
946 | | } |
947 | | if (skew >= 0) { /* single -> single, right/no shift */ |
948 | | if (skew == 0) { /* no shift */ |
949 | | WRITE1_LOOP(CFETCH_ALIGNED(bptr)); |
950 | | } else { /* right shift */ |
951 | | #if CFETCH_USES_CSKEW |
952 | | int cskew = chunk_bits - skew; |
953 | | #endif |
954 | | |
955 | | WRITE1_LOOP(CFETCH_RIGHT(bptr, skew, cskew)); |
956 | | } |
957 | | } else if (wleft <= skew) { /* single -> single, left shift */ |
958 | | #if CFETCH_USES_CSKEW |
959 | | int cskew = chunk_bits + skew; |
960 | | #endif |
961 | | |
962 | | skew = -skew; |
963 | | WRITE1_LOOP(CFETCH_LEFT(bptr, skew, cskew)); |
964 | | } else { /* double -> single */ |
965 | | int cskew = -skew; |
966 | | |
967 | | skew += chunk_bits; |
968 | | WRITE1_LOOP(CFETCH2(bptr, cskew, skew)); |
969 | | } |
970 | | #undef WRITE1_LOOP |
971 | | } else if (wleft <= skew) { /* 1 source chunk -> 2 destination chunks. */ |
972 | | /* This is an important special case for */ |
973 | | /* both characters and halftone tiles. */ |
974 | | uint rmask; |
975 | | int cskew = chunk_bits - skew; |
976 | | |
977 | | set_mono_left_mask(mask, dbit); |
978 | | set_mono_right_mask(rmask, wleft); |
979 | | #if ARCH_IS_BIG_ENDIAN /* no byte swapping */ |
980 | | #undef CINVERT |
981 | | #define CINVERT(bits) (bits) /* pre-inverted here */ |
982 | | for (;;) { |
983 | | register uint bits = CFETCH_ALIGNED(bptr) ^ invert; |
984 | | |
985 | | WRITE_STORE_MASKED(bits >> skew, mask, 0); |
986 | | WRITE_STORE_MASKED(bits << cskew, rmask, 1); |
987 | | if (--h == 0) |
988 | | break; |
989 | | END_Y_LOOP(source_raster, dest_raster); |
990 | | } |
991 | | #undef CINVERT |
992 | | #define CINVERT(bits) ((bits) ^ invert) |
993 | | #else /* byte swapping */ |
994 | | for (;;) { |
995 | | WRITE_STORE_MASKED(CFETCH_RIGHT(bptr, skew, cskew), mask, 0); |
996 | | WRITE_STORE_MASKED(CFETCH_LEFT(bptr, cskew, skew), rmask, 1); |
997 | | if (--h == 0) |
998 | | break; |
999 | | END_Y_LOOP(source_raster, dest_raster); |
1000 | | } |
1001 | | #endif |
1002 | | } else { /* More than one source chunk and more than one */ |
1003 | | /* destination chunk are involved. */ |
1004 | | uint rmask; |
1005 | | int words = (wleft & ~chunk_bit_mask) >> 3; |
1006 | | uint sskip = source_raster - words; |
1007 | | uint dskip = dest_raster - words; |
1008 | | register uint bits; |
1009 | | |
1010 | | #define NEXT_X_CHUNK()\ |
1011 | | bptr += chunk_bytes; dbptr += chunk_bytes |
1012 | | |
1013 | | set_mono_right_mask(rmask, wleft & chunk_bit_mask); |
1014 | | if (skew == 0) { /* optimize the aligned case */ |
1015 | | if (dbit == 0) |
1016 | | mask = 0; |
1017 | | else |
1018 | | set_mono_left_mask(mask, dbit); |
1019 | | for (;;) { |
1020 | | int count = wleft; |
1021 | | |
1022 | | /* Do first partial chunk. */ |
1023 | | if (mask) |
1024 | | WRITE_STORE_MASKED(CFETCH_ALIGNED(bptr), mask, 0); |
1025 | | else |
1026 | | WRITE_STORE(CFETCH_ALIGNED(bptr)); |
1027 | | /* Do full chunks. */ |
1028 | | while ((count -= chunk_bits) >= 0) { |
1029 | | NEXT_X_CHUNK(); |
1030 | | WRITE_STORE(CFETCH_ALIGNED(bptr)); |
1031 | | } |
1032 | | /* Do last chunk */ |
1033 | | if (count > -chunk_bits) { |
1034 | | WRITE_STORE_MASKED(CFETCH_ALIGNED(bptr + chunk_bytes), rmask, 1); |
1035 | | } |
1036 | | if (--h == 0) |
1037 | | break; |
1038 | | END_Y_LOOP(sskip, dskip); |
1039 | | } |
1040 | | } else { /* not aligned */ |
1041 | | bool case_right = |
1042 | | (skew >= 0 ? true : |
1043 | | ((bptr += chunk_bytes), false)); |
1044 | | int cskew = -skew & chunk_bit_mask; |
1045 | | |
1046 | | skew &= chunk_bit_mask; |
1047 | | set_mono_left_mask(mask, dbit); |
1048 | | for (;;) { |
1049 | | int count = wleft; |
1050 | | |
1051 | | if (case_right) |
1052 | | bits = CFETCH_RIGHT(bptr, skew, cskew); |
1053 | | else |
1054 | | bits = CFETCH2(bptr - chunk_bytes, cskew, skew); |
1055 | | WRITE_STORE_MASKED(bits, mask, 0); |
1056 | | /* Do full chunks. */ |
1057 | | while (count >= chunk_bits) { |
1058 | | bits = CFETCH2(bptr, cskew, skew); |
1059 | | NEXT_X_CHUNK(); |
1060 | | WRITE_STORE(bits); |
1061 | | count -= chunk_bits; |
1062 | | } |
1063 | | /* Do last chunk */ |
1064 | | if (count > 0) { |
1065 | | bits = CFETCH_LEFT(bptr, cskew, skew); |
1066 | | if (count > skew) |
1067 | | bits |= CFETCH_RIGHT(bptr + chunk_bytes, skew, cskew); |
1068 | | WRITE_STORE_MASKED(bits, rmask, 1); |
1069 | | } |
1070 | | if (--h == 0) |
1071 | | break; |
1072 | | END_Y_LOOP(sskip, dskip); |
1073 | | } |
1074 | | } |
1075 | | } |
1076 | | #undef END_Y_LOOP |
1077 | | #undef NEXT_X_CHUNK |
1078 | | #undef optr |
1079 | | if ((rw -= w) > 0) { |
1080 | | x += w; |
1081 | | w = tiles->size.x; |
1082 | | bptr = source_data; |
1083 | | skew = dbit = x & chunk_align_bit_mask; |
1084 | | goto outer; |
1085 | | } |
1086 | | return 0; |
1087 | | #endif /* !USE_COPY_ROP */ |
1088 | 0 | } |
1089 | | |
1090 | | |
1091 | | |
1092 | | /* ================ "Word"-oriented device ================ */ |
1093 | | |
1094 | | /* Note that on a big-endian machine, this is the same as the */ |
1095 | | /* standard byte-oriented-device. */ |
1096 | | |
1097 | | #if !ARCH_IS_BIG_ENDIAN |
1098 | | |
1099 | | /* Procedures */ |
1100 | | static dev_proc_copy_mono(mem1_word_copy_mono); |
1101 | | static dev_proc_fill_rectangle(mem1_word_fill_rectangle); |
1102 | | |
1103 | | #define mem1_word_strip_tile_rectangle gx_default_strip_tile_rectangle |
1104 | | |
1105 | | /* Here is the device descriptor. */ |
/* Device descriptor for the 1-bit-deep "word"-oriented memory device.
   NOTE(review): presumably differs from the byte-oriented mono device only
   in scan-line word order on little-endian machines — confirm against
   mem_word_dev_initialize_device_procs. */
const gx_device_memory mem_mono_word_device =
mem_device("image1w", 0, 1, mem_word_dev_initialize_device_procs);
1108 | | |
/* Function table for the 1-bit word-oriented device.  Color mapping is
   shared with the byte-oriented mono device; fill/copy entries are local
   wrappers that byte-swap around the byte-oriented implementations. */
const gdev_mem_functions gdev_mem_fns_1w =
{
    mem_mono_map_rgb_color,
    mem_mono_map_color_rgb,
    mem1_word_fill_rectangle,
    mem1_word_copy_mono,
    gx_default_copy_color,
    gx_default_copy_alpha,
    mem1_word_strip_tile_rectangle,
    gx_no_strip_copy_rop2,
    mem_word_get_bits_rectangle
};
1121 | | |
1122 | | /* Fill a rectangle with a color. */ |
1123 | | static int |
1124 | | mem1_word_fill_rectangle(gx_device * dev, int x, int y, int w, int h, |
1125 | | gx_color_index color) |
1126 | 0 | { |
1127 | 0 | gx_device_memory * const mdev = (gx_device_memory *)dev; |
1128 | 0 | byte *base; |
1129 | 0 | size_t raster; |
1130 | |
|
1131 | 0 | fit_fill(dev, x, y, w, h); |
1132 | 0 | base = scan_line_base(mdev, y); |
1133 | 0 | raster = mdev->raster; |
1134 | 0 | mem_swap_byte_rect(base, raster, x, w, h, true); |
1135 | 0 | bits_fill_rectangle(base, x, raster, -(int)(mono_fill_chunk) color, w, h); |
1136 | 0 | mem_swap_byte_rect(base, raster, x, w, h, true); |
1137 | 0 | return 0; |
1138 | 0 | } |
1139 | | |
1140 | | /* Copy a bitmap. */ |
1141 | | static int |
1142 | | mem1_word_copy_mono(gx_device * dev, |
1143 | | const byte * source_data, int source_x, int source_raster, gx_bitmap_id id, |
1144 | | int x, int y, int w, int h, gx_color_index color0, gx_color_index color1) |
1145 | 0 | { |
1146 | 0 | gx_device_memory * const mdev = (gx_device_memory *)dev; |
1147 | 0 | byte *row; |
1148 | 0 | size_t raster; |
1149 | 0 | bool store; |
1150 | |
|
1151 | 0 | fit_copy(dev, source_data, source_x, source_raster, id, x, y, w, h); |
1152 | 0 | row = scan_line_base(mdev, y); |
1153 | 0 | raster = mdev->raster; |
1154 | 0 | store = (color0 != gx_no_color_index && color1 != gx_no_color_index); |
1155 | 0 | mem_swap_byte_rect(row, raster, x, w, h, store); |
1156 | 0 | mem_mono_copy_mono(dev, source_data, source_x, source_raster, id, |
1157 | 0 | x, y, w, h, color0, color1); |
1158 | 0 | mem_swap_byte_rect(row, raster, x, w, h, false); |
1159 | 0 | return 0; |
1160 | 0 | } |
1161 | | |
1162 | | #endif /* !ARCH_IS_BIG_ENDIAN */ |