/src/ghostpdl/base/gxpcmap.c
Line | Count | Source |
1 | | /* Copyright (C) 2001-2025 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Pattern color mapping for Ghostscript library */ |
18 | | #include "math_.h" |
19 | | #include "memory_.h" |
20 | | #include "gx.h" |
21 | | #include "gp.h" |
22 | | #include "gserrors.h" |
23 | | #include "gsstruct.h" |
24 | | #include "gsutil.h" /* for gs_next_ids */ |
25 | | #include "gxfixed.h" |
26 | | #include "gxmatrix.h" |
27 | | #include "gspath2.h" |
28 | | #include "gxcspace.h" /* for gscolor2.h */ |
29 | | #include "gxcolor2.h" |
30 | | #include "gxdcolor.h" |
31 | | #include "gxdevice.h" |
32 | | #include "gxdevmem.h" |
33 | | #include "gxpcolor.h" |
34 | | #include "gxp1impl.h" |
35 | | #include "gxclist.h" |
36 | | #include "gxcldev.h" |
37 | | #include "gzstate.h" |
38 | | #include "gxdevsop.h" |
39 | | #include "gdevmpla.h" |
40 | | #include "gdevp14.h" |
41 | | #include "gxgetbit.h" |
42 | | #include "gscoord.h" |
43 | | #include "gsicc_blacktext.h" |
44 | | #include "gscspace.h" |
45 | | |
46 | | #if RAW_PATTERN_DUMP |
47 | | unsigned int global_pat_index = 0; |
48 | | #endif |
49 | | |
50 | | /* Define the default size of the Pattern cache. */ |
51 | 14.1k | #define max_cached_patterns_LARGE 50 |
52 | 14.1k | #define max_pattern_bits_LARGE 100000 |
53 | | #define max_cached_patterns_SMALL 5 |
54 | | #define max_pattern_bits_SMALL 1000 |
55 | | uint |
56 | | gx_pat_cache_default_tiles(void) |
57 | 14.1k | { |
58 | | #if ARCH_SMALL_MEMORY |
59 | | return max_cached_patterns_SMALL; |
60 | | #else |
61 | | #ifdef DEBUG |
62 | | return (gs_debug_c('.') ? max_cached_patterns_SMALL : |
63 | | max_cached_patterns_LARGE); |
64 | | #else |
65 | 14.1k | return max_cached_patterns_LARGE; |
66 | 14.1k | #endif |
67 | 14.1k | #endif |
68 | 14.1k | } |
69 | | ulong |
70 | | gx_pat_cache_default_bits(void) |
71 | 14.1k | { |
72 | | #if ARCH_SMALL_MEMORY |
73 | | return max_pattern_bits_SMALL; |
74 | | #else |
75 | | #ifdef DEBUG |
76 | | return (gs_debug_c('.') ? max_pattern_bits_SMALL : |
77 | | max_pattern_bits_LARGE); |
78 | | #else |
79 | 14.1k | return max_pattern_bits_LARGE; |
80 | 14.1k | #endif |
81 | 14.1k | #endif |
82 | 14.1k | } |
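/* Summary of the defaults above: ordinary builds get room for 50 cached
 * tiles (max_cached_patterns_LARGE) and a max_bits budget of 100000
 * (max_pattern_bits_LARGE); the SMALL limits (5 tiles, 1000) apply only
 * to ARCH_SMALL_MEMORY builds, or to DEBUG builds with the '.' debug
 * switch set. */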
83 | | |
84 | | /* Define the structures for Pattern rendering and caching. */ |
85 | | private_st_color_tile(); |
86 | | private_st_color_tile_element(); |
87 | | private_st_pattern_cache(); |
88 | | private_st_device_pattern_accum(); |
89 | | private_st_pattern_trans(); |
90 | | |
91 | | /* ------ Pattern rendering ------ */ |
92 | | |
93 | | /* Device procedures */ |
94 | | static dev_proc_open_device(pattern_accum_open); |
95 | | static dev_proc_close_device(pattern_accum_close); |
96 | | static dev_proc_fill_rectangle(pattern_accum_fill_rectangle); |
97 | | static dev_proc_copy_mono(pattern_accum_copy_mono); |
98 | | static dev_proc_copy_color(pattern_accum_copy_color); |
99 | | static dev_proc_copy_planes(pattern_accum_copy_planes); |
100 | | static dev_proc_get_bits_rectangle(pattern_accum_get_bits_rectangle); |
101 | | static dev_proc_fill_rectangle_hl_color(pattern_accum_fill_rectangle_hl_color); |
102 | | /* not static for use by clist_dev_spec_op with pattern-clist */ |
103 | | dev_proc_dev_spec_op(pattern_accum_dev_spec_op); |
104 | | |
105 | | /* The device descriptor */ |
106 | | static void |
107 | | pattern_accum_initialize_device_procs(gx_device *dev) |
108 | 1.28k | { |
109 | 1.28k | set_dev_proc(dev, open_device, pattern_accum_open); |
110 | 1.28k | set_dev_proc(dev, close_device, pattern_accum_close); |
111 | 1.28k | set_dev_proc(dev, fill_rectangle, pattern_accum_fill_rectangle); |
112 | 1.28k | set_dev_proc(dev, copy_mono, pattern_accum_copy_mono); |
113 | 1.28k | set_dev_proc(dev, copy_color, pattern_accum_copy_color); |
114 | 1.28k | set_dev_proc(dev, get_clipping_box, gx_get_largest_clipping_box); |
115 | 1.28k | set_dev_proc(dev, get_bits_rectangle, pattern_accum_get_bits_rectangle); |
116 | 1.28k | set_dev_proc(dev, fill_rectangle_hl_color, pattern_accum_fill_rectangle_hl_color); |
117 | 1.28k | set_dev_proc(dev, dev_spec_op, pattern_accum_dev_spec_op); |
118 | 1.28k | set_dev_proc(dev, copy_planes, pattern_accum_copy_planes); |
119 | | |
120 | | /* It would be much nicer if gx_device_init set the following |
121 | | * defaults for us, but that doesn't work for some reason. */ |
122 | 1.28k | set_dev_proc(dev, copy_alpha, gx_default_copy_alpha); |
123 | 1.28k | set_dev_proc(dev, fill_path, gx_default_fill_path); |
124 | 1.28k | set_dev_proc(dev, stroke_path, gx_default_stroke_path); |
125 | 1.28k | set_dev_proc(dev, fill_mask, gx_default_fill_mask); |
126 | 1.28k | set_dev_proc(dev, fill_trapezoid, gx_default_fill_trapezoid); |
127 | 1.28k | set_dev_proc(dev, fill_parallelogram, gx_default_fill_parallelogram); |
128 | 1.28k | set_dev_proc(dev, fill_triangle, gx_default_fill_triangle); |
129 | 1.28k | set_dev_proc(dev, draw_thin_line, gx_default_draw_thin_line); |
130 | 1.28k | set_dev_proc(dev, strip_tile_rectangle, gx_default_strip_tile_rectangle); |
131 | 1.28k | set_dev_proc(dev, begin_typed_image, gx_default_begin_typed_image); |
132 | 1.28k | set_dev_proc(dev, composite, gx_default_composite); |
133 | 1.28k | set_dev_proc(dev, text_begin, gx_default_text_begin); |
134 | 1.28k | set_dev_proc(dev, strip_copy_rop2, gx_default_strip_copy_rop2); |
135 | 1.28k | set_dev_proc(dev, strip_tile_rect_devn, gx_default_strip_tile_rect_devn); |
136 | 1.28k | set_dev_proc(dev, transform_pixel_region, gx_default_transform_pixel_region); |
137 | 1.28k | set_dev_proc(dev, fill_stroke_path, gx_default_fill_stroke_path); |
138 | 1.28k | set_dev_proc(dev, lock_pattern, gx_default_lock_pattern); |
139 | 1.28k | set_dev_proc(dev, copy_alpha_hl_color, gx_default_copy_alpha_hl_color); |
140 | 1.28k | } |
141 | | |
142 | | static const gx_device_pattern_accum gs_pattern_accum_device = |
143 | | {std_device_std_body_type_open(gx_device_pattern_accum, |
144 | | pattern_accum_initialize_device_procs, |
145 | | "pattern accumulator", |
146 | | &st_device_pattern_accum, |
147 | | 0, 0, 72, 72) |
148 | | }; |
149 | | |
150 | | extern dev_proc_open_device(clist_open); |
151 | | |
152 | | int |
153 | | pattern_clist_open_device(gx_device *dev) |
154 | 4.11k | { |
155 | | /* This function is defined only for clist_init_bands. */ |
156 | 4.11k | return clist_open(dev); |
157 | 4.11k | } |
158 | | |
159 | | static dev_proc_create_buf_device(dummy_create_buf_device) |
160 | 8.23k | { |
161 | 8.23k | gx_device_memory *mdev = (gx_device_memory *)*pbdev; |
162 | | |
163 | 8.23k | gs_make_mem_device(mdev, gdev_mem_device_for_bits(target->color_info.depth), |
164 | 8.23k | mem, 0, target); |
165 | 8.23k | return 0; |
166 | 8.23k | } |
167 | | static dev_proc_size_buf_device(dummy_size_buf_device) |
168 | 0 | { |
169 | 0 | return 0; |
170 | 0 | } |
171 | | static dev_proc_setup_buf_device(dummy_setup_buf_device) |
172 | 0 | { |
173 | 0 | return 0; |
174 | 0 | } |
175 | | static dev_proc_destroy_buf_device(dummy_destroy_buf_device) |
176 | 0 | { |
177 | 0 | } |
178 | | /* Attempt to determine the size of a pattern (the approximate amount that will */ |
179 | | /* be needed in the pattern cache). If we end up using the clist, this is only */ |
180 | | /* a guess -- we use the tile size which will _probably_ be too large. */ |
181 | | static size_t |
182 | | gx_pattern_size_estimate(gs_pattern1_instance_t *pinst, bool has_tags) |
183 | 6.70k | { |
184 | 6.70k | gx_device *tdev = pinst->saved->device; |
185 | 6.70k | int depth = (pinst->templat.PaintType == 2 ? 1 : tdev->color_info.depth); |
186 | 6.70k | size_t raster; |
187 | 6.70k | size_t size; |
188 | | |
189 | 6.70k | if (pinst->size.x == 0 || pinst->size.y == 0) |
190 | 0 | return 0; |
191 | | |
192 | 6.70k | if (pinst->templat.uses_transparency) { |
193 | | /* if the device has tags, add in an extra tag byte for the pdf14 compositor */ |
194 | 1.77k | raster = ((size_t)pinst->size.x * ((depth/8) + 1 + (has_tags ? 1 : 0))); |
195 | 4.93k | } else { |
196 | 4.93k | raster = ((size_t)pinst->size.x * depth + 7) / 8; |
197 | 4.93k | } |
198 | 6.70k | size = raster > max_size_t / pinst->size.y ? (max_size_t - 0xFFFF) : raster * pinst->size.y; |
199 | 6.70k | return size; |
200 | 6.70k | } |
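/* Worked illustration of the estimate above (hypothetical numbers, not
 * taken from the source): for a 100x50 colored pattern (PaintType 1) on
 * a 24-bit device with no tags and no transparency,
 *     raster = (100 * 24 + 7) / 8 = 300 bytes
 *     size   = 300 * 50          = 15000 bytes
 * With uses_transparency set, the per-pixel cost becomes depth/8 + 1
 * bytes (plus one more on a tagged device), so the same cell would be
 * estimated at 100 * (3 + 1) * 50 = 20000 bytes. */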
201 | | |
202 | | static void gx_pattern_accum_finalize_cw(gx_device * dev) |
203 | 4.11k | { |
204 | 4.11k | gx_device_clist_writer *cwdev = (gx_device_clist_writer *)dev; |
205 | 4.11k | rc_decrement_only(cwdev->target, "gx_pattern_accum_finalize_cw"); |
206 | 4.11k | } |
207 | | |
208 | | bool gx_device_is_pattern_accum(gx_device *dev) |
209 | 334k | { |
210 | 334k | return dev_proc(dev, open_device) == pattern_accum_open; |
211 | 334k | } |
212 | | |
213 | | bool gx_device_is_pattern_clist(gx_device *dev) |
214 | 382k | { |
215 | 382k | return dev_proc(dev, open_device) == pattern_clist_open_device; |
216 | 382k | } |
217 | | |
218 | | /* Allocate a pattern accumulator, with an initial refct of 0. */ |
219 | | gx_device_forward * |
220 | | gx_pattern_accum_alloc(gs_memory_t * mem, gs_memory_t * storage_memory, |
221 | | gs_pattern1_instance_t *pinst, client_name_t cname) |
222 | 5.40k | { |
223 | 5.40k | gx_device *tdev = pinst->saved->device; |
224 | 5.40k | bool has_tags = device_encodes_tags(tdev); |
225 | 5.40k | size_t size = gx_pattern_size_estimate(pinst, has_tags); |
226 | 5.40k | gx_device_forward *fdev; |
227 | 5.40k | int force_no_clist = 0; |
228 | 5.40k | size_t max_pattern_bitmap = tdev->MaxPatternBitmap == 0 ? MaxPatternBitmap_DEFAULT : |
229 | 5.40k | tdev->MaxPatternBitmap; |
230 | | |
231 | 5.40k | pinst->num_planar_planes = tdev->num_planar_planes; |
232 | | /* |
233 | | * If the target device can accumulate a pattern stream and the language |
234 | | * client supports high level patterns (ps and pdf only) we don't need a |
235 | | * raster or clist representation for the pattern, but the code goes |
236 | | * through the motions of creating the device anyway. Later when the |
237 | | * pattern paint procedure is called an error is returned and whatever |
238 | | * has been set up here is destroyed. We try to make sure the same path |
239 | | * is taken in the code even though the device is never used because |
240 | | * there are pathological problems (see Bug689851.pdf) where the pattern |
241 | | * is so large we can't even allocate the memory for the device and the |
242 | | * dummy clist path must be used. None of this discussion is relevant if |
243 | | * the client language does not support high level patterns or the device |
244 | | * cannot accumulate the pattern stream. |
245 | | */ |
246 | 5.40k | if (pinst->saved->have_pattern_streams == 0 && (*dev_proc(pinst->saved->device, |
247 | 4.09k | dev_spec_op))((gx_device *)pinst->saved->device, |
248 | 4.09k | gxdso_pattern_can_accum, pinst, 0) == 1) |
249 | 0 | force_no_clist = 1; /* Set only for first time through */ |
250 | | /* If the blend mode in use is not Normal, then we CANNOT use a tile. What |
251 | | * if the blend mode changes half way through the tile? We simply must use |
252 | | * a clist. */ |
253 | 5.40k | if (force_no_clist || |
254 | 5.40k | (((size < max_pattern_bitmap && !pinst->is_clist) |
255 | 5.40k | || pinst->templat.PaintType != 1) && !pinst->templat.BM_Not_Normal)) { |
256 | 1.28k | gx_device_pattern_accum *adev = gs_alloc_struct_immovable(mem, gx_device_pattern_accum, |
257 | 1.28k | &st_device_pattern_accum, cname); |
258 | 1.28k | if (adev == 0) |
259 | 0 | return 0; |
260 | | #ifdef DEBUG |
261 | | if (pinst->is_clist) |
262 | | emprintf(mem, "not using clist even though clist is requested\n"); |
263 | | #endif |
264 | 1.28k | pinst->is_clist = false; |
265 | 1.28k | (void)gx_device_init((gx_device *)adev, |
266 | 1.28k | (const gx_device *)&gs_pattern_accum_device, |
267 | 1.28k | mem, true); |
268 | 1.28k | adev->instance = pinst; |
269 | 1.28k | adev->bitmap_memory = storage_memory; |
270 | 1.28k | fdev = (gx_device_forward *)adev; |
271 | 4.11k | } else { |
272 | 4.11k | gx_device_buf_procs_t buf_procs = {dummy_create_buf_device, |
273 | 4.11k | dummy_size_buf_device, dummy_setup_buf_device, dummy_destroy_buf_device}; |
274 | 4.11k | gx_device_clist *cdev; |
275 | 4.11k | gx_device_clist_writer *cwdev; |
276 | 4.11k | const int data_size = 1024*128; |
277 | 4.11k | gx_band_params_t band_params = { 0 }; |
278 | 4.11k | byte *data = gs_alloc_bytes(mem->non_gc_memory, data_size, cname); |
279 | | |
280 | 4.11k | if (data == NULL) |
281 | 0 | return 0; |
282 | 4.11k | pinst->is_clist = true; |
283 | | /* NB: band_params.page_uses_transparency is set in clist_make_accum_device */ |
284 | 4.11k | band_params.BandWidth = pinst->size.x; |
285 | 4.11k | band_params.BandHeight = pinst->size.y; |
286 | 4.11k | band_params.BandBufferSpace = 0; |
287 | | |
288 | 4.11k | cdev = clist_make_accum_device(mem, tdev, "pattern-clist", data, data_size, |
289 | 4.11k | &buf_procs, &band_params, true, /* use_memory_clist */ |
290 | 4.11k | pinst->templat.uses_transparency, pinst); |
291 | 4.11k | if (cdev == 0) { |
292 | 0 | gs_free_object(tdev->memory->non_gc_memory, data, cname); |
293 | 0 | return 0; |
294 | 0 | } |
295 | 4.11k | cwdev = (gx_device_clist_writer *)cdev; |
296 | 4.11k | cwdev->finalize = gx_pattern_accum_finalize_cw; |
297 | 4.11k | set_dev_proc(cwdev, open_device, pattern_clist_open_device); |
298 | 4.11k | fdev = (gx_device_forward *)cdev; |
299 | 4.11k | } |
300 | 5.40k | fdev->log2_align_mod = tdev->log2_align_mod; |
301 | 5.40k | fdev->pad = tdev->pad; |
302 | 5.40k | fdev->num_planar_planes = tdev->num_planar_planes; |
303 | 5.40k | fdev->graphics_type_tag = tdev->graphics_type_tag; |
304 | 5.40k | fdev->interpolate_control = tdev->interpolate_control; |
305 | 5.40k | fdev->non_strict_bounds = tdev->non_strict_bounds; |
306 | 5.40k | gx_device_forward_fill_in_procs(fdev); |
307 | 5.40k | return fdev; |
308 | 5.40k | } |
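/* A condensed reading of the branch above: the raster accumulator path
 * is taken when force_no_clist is set, or when the blend mode is Normal
 * and either the size estimate fits under MaxPatternBitmap (and the
 * instance is not already a clist) or PaintType != 1; otherwise a
 * pattern clist writer is built around a 128K (1024*128 byte) in-memory
 * band buffer whose band dimensions match the pattern cell size. */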
309 | | |
310 | | gx_pattern_trans_t* |
311 | | new_pattern_trans_buff(gs_memory_t *mem) |
312 | 8.08k | { |
313 | 8.08k | gx_pattern_trans_t *result; |
314 | | |
315 | | /* Allocate structure that we will use for the trans pattern */ |
316 | 8.08k | result = gs_alloc_struct(mem, gx_pattern_trans_t, &st_pattern_trans, "new_pattern_trans_buff"); |
317 | | |
318 | 8.08k | if (result != NULL) { |
319 | 8.08k | result->transbytes = NULL; |
320 | 8.08k | result->pdev14 = NULL; |
321 | 8.08k | result->mem = NULL; |
322 | 8.08k | result->fill_trans_buffer = NULL; |
323 | 8.08k | result->buf = NULL; |
324 | 8.08k | result->n_chan = 0; |
325 | 8.08k | } |
326 | | |
327 | 8.08k | return(result); |
328 | 8.08k | } |
329 | | |
330 | | /* |
331 | | * Initialize a pattern accumulator. |
332 | | * Client must already have set instance and bitmap_memory. |
333 | | * |
334 | | * Note that mask and bits accumulators are only created if necessary. |
335 | | */ |
336 | | static int |
337 | | pattern_accum_open(gx_device * dev) |
338 | 1.28k | { |
339 | 1.28k | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *) dev; |
340 | 1.28k | const gs_pattern1_instance_t *pinst = padev->instance; |
341 | 1.28k | gs_memory_t *mem = padev->bitmap_memory; |
342 | 1.28k | gx_device_memory *mask = 0; |
343 | 1.28k | gx_device_memory *bits = 0; |
344 | | /* |
345 | | * The client should preset the target, because the device for which the |
346 | | * pattern is being rendered may not (in general, will not) be the same |
347 | | * as the one that was current when the pattern was instantiated. |
348 | | */ |
349 | 1.28k | gx_device *target = |
350 | 1.28k | (padev->target == 0 ? gs_currentdevice(pinst->saved) : |
351 | 1.28k | padev->target); |
352 | 1.28k | int width = pinst->size.x; |
353 | 1.28k | int height = pinst->size.y; |
354 | 1.28k | int code = 0; |
355 | 1.28k | bool mask_open = false; |
356 | | |
357 | | /* |
358 | | * C's bizarre coercion rules force us to copy HWResolution in pieces |
359 | | * rather than using a single assignment. |
360 | | */ |
361 | 1.28k | #define PDSET(dev)\ |
362 | 2.97k | ((dev)->width = width, (dev)->height = height,\ |
363 | | /*(dev)->HWResolution = target->HWResolution*/\ |
364 | 2.97k | (dev)->HWResolution[0] = target->HWResolution[0],\ |
365 | 2.97k | (dev)->HWResolution[1] = target->HWResolution[1]) |
366 | | |
367 | 1.28k | PDSET(padev); |
368 | 1.28k | padev->color_info = target->color_info; |
369 | | /* Bug 689737: If PaintType == 2 (Uncolored tiling pattern), pattern is |
370 | | * 1bpp bitmap. No antialiasing in this case! */ |
371 | 1.28k | if (pinst->templat.PaintType == 2) { |
372 | 15 | padev->color_info.anti_alias.text_bits = 1; |
373 | 15 | padev->color_info.anti_alias.graphics_bits = 1; |
374 | 15 | } |
375 | | /* If we have transparency, then fix the color info |
376 | | now so that the mem device allocates the proper |
377 | | buffer space for the pattern template. We can |
378 | | do this since the transparency code all */ |
379 | 1.28k | if (pinst->templat.uses_transparency) { |
380 | | /* Allocate structure that we will use for the trans pattern */ |
381 | 865 | padev->transbuff = new_pattern_trans_buff(mem); |
382 | 865 | if (padev->transbuff == NULL) |
383 | 0 | return_error(gs_error_VMerror); |
384 | 865 | } else { |
385 | 421 | padev->transbuff = NULL; |
386 | 421 | } |
387 | 1.28k | if (pinst->uses_mask) { |
388 | 1.28k | mask = gs_alloc_struct( mem, |
389 | 1.28k | gx_device_memory, |
390 | 1.28k | &st_device_memory, |
391 | 1.28k | "pattern_accum_open(mask)" |
392 | 1.28k | ); |
393 | 1.28k | if (mask == 0) |
394 | 0 | return_error(gs_error_VMerror); |
395 | 1.28k | gs_make_mem_mono_device(mask, mem, 0); |
396 | 1.28k | PDSET(mask); |
397 | 1.28k | mask->bitmap_memory = mem; |
398 | 1.28k | mask->base = 0; |
399 | 1.28k | code = (*dev_proc(mask, open_device)) ((gx_device *) mask); |
400 | 1.28k | if (code >= 0) { |
401 | 1.28k | mask_open = true; |
402 | 1.28k | memset(mask->base, 0, (size_t)mask->raster * mask->height); |
403 | 1.28k | } |
404 | 1.28k | } |
405 | | |
406 | 1.28k | if (code >= 0) { |
407 | 1.28k | if (pinst->templat.uses_transparency) { |
408 | | /* In this case, we will grab the buffer created |
409 | | by the graphic state's device (which is pdf14) and |
410 | | we will be tiling that into a transparency group buffer |
411 | | to blend with the pattern accumulator's target. Since |
412 | | all the transparency stuff is planar format, it is |
413 | | best just to keep the data in that form */ |
414 | 865 | gx_device_set_target((gx_device_forward *)padev, target); |
415 | 865 | } else { |
416 | 421 | switch (pinst->templat.PaintType) { |
417 | 15 | case 2: /* uncolored */ |
418 | 15 | gx_device_set_target((gx_device_forward *)padev, target); |
419 | 15 | break; |
420 | 406 | case 1: /* colored */ |
421 | 406 | bits = gs_alloc_struct(mem, gx_device_memory, |
422 | 406 | &st_device_memory, |
423 | 406 | "pattern_accum_open(bits)"); |
424 | 406 | if (bits == 0) |
425 | 0 | code = gs_note_error(gs_error_VMerror); |
426 | 406 | else { |
427 | 406 | gs_make_mem_device(bits, |
428 | 406 | gdev_mem_device_for_bits(padev->color_info.depth), |
429 | 406 | mem, -1, target); |
430 | 406 | PDSET(bits); |
431 | 406 | #undef PDSET |
432 | 406 | bits->color_info = padev->color_info; |
433 | 406 | bits->bitmap_memory = mem; |
434 | | |
435 | 406 | if (target->num_planar_planes > 0) |
436 | 0 | { |
437 | 0 | gx_render_plane_t planes[GX_DEVICE_COLOR_MAX_COMPONENTS]; |
438 | 0 | uchar num_comp = padev->num_planar_planes; |
439 | 0 | uchar i; |
440 | 0 | int depth = target->color_info.depth / num_comp; |
441 | 0 | for (i = 0; i < num_comp; i++) |
442 | 0 | { |
443 | 0 | planes[i].shift = depth * (num_comp - 1 - i); |
444 | 0 | planes[i].depth = depth; |
445 | 0 | planes[i].index = i; |
446 | 0 | } |
447 | 0 | code = gdev_mem_set_planar(bits, num_comp, planes); |
448 | 0 | } |
449 | 406 | if (code >= 0) { |
450 | 406 | code = (*dev_proc(bits, open_device)) ((gx_device *) bits); |
451 | 406 | gx_device_set_target((gx_device_forward *)padev, |
452 | 406 | (gx_device *)bits); |
453 | | /* The update_spot_equivalent_color proc for the bits device |
454 | | should forward to the real target device. This will ensure |
455 | | that the target device can get equivalent CMYK values for |
456 | | spot colors if we are using a separation device and the spot |
457 | | color occurs only in patterns on the page. */ |
458 | 406 | bits->procs.update_spot_equivalent_colors = gx_forward_update_spot_equivalent_colors; |
459 | 406 | } |
460 | 406 | } |
461 | 421 | } |
462 | 421 | } |
463 | 1.28k | } |
464 | 1.28k | if (code < 0) { |
465 | 0 | if (bits != 0) |
466 | 0 | gs_free_object(mem, bits, "pattern_accum_open(bits)"); |
467 | 0 | if (mask != 0) { |
468 | 0 | if (mask_open) |
469 | 0 | (*dev_proc(mask, close_device)) ((gx_device *) mask); |
470 | 0 | gs_free_object(mem, mask, "pattern_accum_open(mask)"); |
471 | 0 | } |
472 | 0 | return code; |
473 | 0 | } |
474 | 1.28k | padev->mask = mask; |
475 | 1.28k | padev->bits = bits; |
476 | | /* Retain the device, so it will survive anomalous grestores. */ |
477 | 1.28k | gx_device_retain(dev, true); |
478 | 1.28k | return code; |
479 | 1.28k | } |
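/* Summary of the setup above: pattern_accum_open creates up to three
 * helper objects -- a transparency buffer (transbuff) when the template
 * uses transparency, a monochrome mask memory device when
 * pinst->uses_mask is set, and a "bits" memory device only for colored
 * (PaintType 1) patterns without transparency; transparent and
 * uncolored patterns forward drawing straight to the target instead. */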
480 | | |
481 | | /* Close an accumulator and free the bits. */ |
482 | | static int |
483 | | pattern_accum_close(gx_device * dev) |
484 | 2.57k | { |
485 | 2.57k | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *) dev; |
486 | 2.57k | gs_memory_t *mem = padev->bitmap_memory; |
487 | | |
488 | | /* |
489 | | * If bits != 0, it is the target of the device; reference counting |
490 | | * will close and free it. |
491 | | */ |
492 | 2.57k | gx_device_set_target((gx_device_forward *)padev, NULL); |
493 | 2.57k | padev->bits = 0; |
494 | 2.57k | if (padev->mask != 0) { |
495 | 1.28k | (*dev_proc(padev->mask, close_device)) ((gx_device *) padev->mask); |
496 | 1.28k | gs_free_object(mem, padev->mask, "pattern_accum_close(mask)"); |
497 | 1.28k | padev->mask = 0; |
498 | 1.28k | } |
499 | | |
500 | 2.57k | if (padev->transbuff != 0) { |
501 | 865 | gs_free_object(mem,padev->target,"pattern_accum_close(transbuff)"); |
502 | 865 | padev->transbuff = NULL; |
503 | 865 | } |
504 | | |
505 | | /* Un-retain the device now, so reference counting will free it. */ |
506 | 2.57k | gx_device_retain(dev, false); |
507 | 2.57k | return 0; |
508 | 2.57k | } |
509 | | |
510 | | /* _hl_color */ |
511 | | static int |
512 | | pattern_accum_fill_rectangle_hl_color(gx_device *dev, const gs_fixed_rect *rect, |
513 | | const gs_gstate *pgs, |
514 | | const gx_drawing_color *pdcolor, |
515 | | const gx_clip_path *pcpath) |
516 | 0 | { |
517 | 0 | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *) dev; |
518 | 0 | int code; |
519 | | |
520 | 0 | if (padev->bits) { |
521 | 0 | code = (*dev_proc(padev->target, fill_rectangle_hl_color)) |
522 | 0 | (padev->target, rect, pgs, pdcolor, pcpath); |
523 | 0 | if (code < 0) |
524 | 0 | return code; |
525 | 0 | } |
526 | 0 | if (padev->mask) { |
527 | 0 | int x, y, w, h; |
528 | | |
529 | 0 | x = fixed2int(rect->p.x); |
530 | 0 | y = fixed2int(rect->p.y); |
531 | 0 | w = fixed2int(rect->q.x) - x; |
532 | 0 | h = fixed2int(rect->q.y) - y; |
533 | | |
534 | 0 | return (*dev_proc(padev->mask, fill_rectangle)) |
535 | 0 | ((gx_device *) padev->mask, x, y, w, h, (gx_color_index) 1); |
536 | 0 | } |
537 | 0 | return 0; |
538 | 0 | } |
539 | | |
540 | | /* Fill a rectangle */ |
541 | | static int |
542 | | pattern_accum_fill_rectangle(gx_device * dev, int x, int y, int w, int h, |
543 | | gx_color_index color) |
544 | 4.46k | { |
545 | 4.46k | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *) dev; |
546 | | |
547 | 4.46k | if (padev->bits) |
548 | 4.30k | (*dev_proc(padev->target, fill_rectangle)) |
549 | 4.30k | (padev->target, x, y, w, h, color); |
550 | 4.46k | if (padev->mask) |
551 | 2.10k | return (*dev_proc(padev->mask, fill_rectangle)) |
552 | 2.10k | ((gx_device *) padev->mask, x, y, w, h, (gx_color_index) 1); |
553 | 2.36k | else |
554 | 2.36k | return 0; |
555 | 4.46k | } |
556 | | |
557 | | /* Copy a monochrome bitmap. */ |
558 | | static int |
559 | | pattern_accum_copy_mono(gx_device * dev, const byte * data, int data_x, |
560 | | int raster, gx_bitmap_id id, int x, int y, int w, int h, |
561 | | gx_color_index color0, gx_color_index color1) |
562 | 21.9k | { |
563 | 21.9k | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *) dev; |
564 | | |
565 | | /* opt out early if nothing to render (some may think this a bug) */ |
566 | 21.9k | if (color0 == gx_no_color_index && color1 == gx_no_color_index) |
567 | 0 | return 0; |
568 | 21.9k | if (padev->bits) |
569 | 21.9k | (*dev_proc(padev->target, copy_mono)) |
570 | 21.9k | (padev->target, data, data_x, raster, id, x, y, w, h, |
571 | 21.9k | color0, color1); |
572 | 21.9k | if (padev->mask) { |
573 | 21.9k | if (color0 != gx_no_color_index) |
574 | 21.9k | color0 = 1; |
575 | 21.9k | if (color1 != gx_no_color_index) |
576 | 21.9k | color1 = 1; |
577 | 21.9k | if (color0 == 1 && color1 == 1) |
578 | 21.9k | return (*dev_proc(padev->mask, fill_rectangle)) |
579 | 21.9k | ((gx_device *) padev->mask, x, y, w, h, (gx_color_index) 1); |
580 | 0 | else |
581 | 0 | return (*dev_proc(padev->mask, copy_mono)) |
582 | 0 | ((gx_device *) padev->mask, data, data_x, raster, id, x, y, w, h, |
583 | 0 | color0, color1); |
584 | 21.9k | } else |
585 | 0 | return 0; |
586 | 21.9k | } |
587 | | |
588 | | /* Copy a color bitmap. */ |
589 | | static int |
590 | | pattern_accum_copy_color(gx_device * dev, const byte * data, int data_x, |
591 | | int raster, gx_bitmap_id id, int x, int y, int w, int h) |
592 | 0 | { |
593 | 0 | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *) dev; |
594 | | |
595 | 0 | if (padev->bits) |
596 | 0 | (*dev_proc(padev->target, copy_color)) |
597 | 0 | (padev->target, data, data_x, raster, id, x, y, w, h); |
598 | 0 | if (padev->mask) |
599 | 0 | return (*dev_proc(padev->mask, fill_rectangle)) |
600 | 0 | ((gx_device *) padev->mask, x, y, w, h, (gx_color_index) 1); |
601 | 0 | else |
602 | 0 | return 0; |
603 | 0 | } |
604 | | |
605 | | /* Copy a color plane. */ |
606 | | static int |
607 | | pattern_accum_copy_planes(gx_device * dev, const byte * data, int data_x, |
608 | | int raster, gx_bitmap_id id, |
609 | | int x, int y, int w, int h, int plane_height) |
610 | 0 | { |
611 | 0 | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *) dev; |
612 | | |
613 | 0 | if (padev->bits) |
614 | 0 | (*dev_proc(padev->target, copy_planes)) |
615 | 0 | (padev->target, data, data_x, raster, id, x, y, w, h, plane_height); |
616 | 0 | if (padev->mask) |
617 | 0 | return (*dev_proc(padev->mask, fill_rectangle)) |
618 | 0 | ((gx_device *) padev->mask, x, y, w, h, (gx_color_index) 1); |
619 | 0 | else |
620 | 0 | return 0; |
621 | 0 | } |
622 | | |
623 | | static int |
624 | | blank_unmasked_bits(gx_device * mask, |
625 | | int polarity, |
626 | | int num_comps, |
627 | | int depth, |
628 | | const gs_int_rect *prect, |
629 | | gs_get_bits_params_t *p) |
630 | 0 | { |
631 | 0 | static const int required_options = GB_COLORS_NATIVE |
632 | 0 | | GB_ALPHA_NONE |
633 | 0 | | GB_RETURN_COPY |
634 | 0 | | GB_ALIGN_STANDARD |
635 | 0 | | GB_OFFSET_0 |
636 | 0 | | GB_RASTER_STANDARD; |
637 | 0 | int raster = p->raster; |
638 | 0 | byte *min; |
639 | 0 | int x0 = prect->p.x; |
640 | 0 | int y0 = prect->p.y; |
641 | 0 | int x, y; |
642 | 0 | int w = prect->q.x - x0; |
643 | 0 | int h = prect->q.y - y0; |
644 | 0 | int code = 0; |
645 | 0 | byte *ptr; |
646 | 0 | int blank = (polarity == GX_CINFO_POLARITY_ADDITIVE ? 255 : 0); |
647 | 0 | gs_int_rect rect; |
648 | 0 | gs_get_bits_params_t params; |
649 | | |
650 | 0 | if ((p->options & required_options) != required_options) |
651 | 0 | return_error(gs_error_rangecheck); |
652 | | |
653 | 0 | min = gs_alloc_bytes(mask->memory, (w+7)>>3, "blank_unmasked_bits"); |
654 | 0 | if (min == NULL) |
655 | 0 | return_error(gs_error_VMerror); |
656 | | |
657 | 0 | rect.p.x = 0; |
658 | 0 | rect.q.x = mask->width; |
659 | 0 | params.x_offset = 0; |
660 | 0 | params.raster = bitmap_raster(mask->width * mask->color_info.depth); |
661 | | |
662 | 0 | if (p->options & GB_PACKING_CHUNKY) |
663 | 0 | { |
664 | 0 | if ((depth & 7) != 0 || depth > 64) { |
665 | 0 | code = gs_note_error(gs_error_rangecheck); |
666 | 0 | goto fail; |
667 | 0 | } |
668 | 0 | ptr = p->data[0]; |
669 | 0 | depth >>= 3; |
670 | 0 | raster -= w*depth; |
671 | 0 | for (y = 0; y < h; y++) |
672 | 0 | { |
673 | 0 | byte *mine; |
674 | | |
675 | 0 | rect.p.y = y+y0; |
676 | 0 | rect.q.y = y+y0+1; |
677 | 0 | params.options = (GB_ALIGN_ANY | |
678 | 0 | (GB_RETURN_COPY | GB_RETURN_POINTER) | |
679 | 0 | GB_OFFSET_0 | |
680 | 0 | GB_RASTER_STANDARD | GB_PACKING_CHUNKY | |
681 | 0 | GB_COLORS_NATIVE | GB_ALPHA_NONE); |
682 | 0 | params.data[0] = min; |
683 | 0 | code = (*dev_proc(mask, get_bits_rectangle))(mask, &rect, |
684 | 0 | ¶ms); |
685 | 0 | if (code < 0) |
686 | 0 | goto fail; |
687 | 0 | mine = params.data[0]; |
688 | 0 | for (x = 0; x < w; x++) |
689 | 0 | { |
690 | 0 | int xx = x+x0; |
691 | 0 | if (((mine[xx>>3]<<(x&7)) & 128) == 0) { |
692 | 0 | switch (depth) |
693 | 0 | { |
694 | 0 | case 8: |
695 | 0 | *ptr++ = blank; |
696 | 0 | case 7: |
697 | 0 | *ptr++ = blank; |
698 | 0 | case 6: |
699 | 0 | *ptr++ = blank; |
700 | 0 | case 5: |
701 | 0 | *ptr++ = blank; |
702 | 0 | case 4: |
703 | 0 | *ptr++ = blank; |
704 | 0 | case 3: |
705 | 0 | *ptr++ = blank; |
706 | 0 | case 2: |
707 | 0 | *ptr++ = blank; |
708 | 0 | case 1: |
709 | 0 | *ptr++ = blank; |
710 | 0 | break; |
711 | 0 | } |
712 | 0 | } else { |
713 | 0 | ptr += depth; |
714 | 0 | } |
715 | 0 | } |
716 | 0 | ptr += raster; |
717 | 0 | } |
718 | 0 | } else { |
719 | | /* Planar, only handle 8 or 16 bits */ |
720 | 0 | int bytes_per_component = (depth/num_comps) >> 3; |
721 | | |
722 | 0 | if (depth/num_comps != 8 && depth/num_comps != 16) { |
723 | 0 | code = gs_note_error(gs_error_rangecheck); |
724 | 0 | goto fail; |
725 | 0 | } |
726 | 0 | for (y = 0; y < h; y++) |
727 | 0 | { |
728 | 0 | int c; |
729 | 0 | byte *mine; |
730 | | |
731 | 0 | rect.p.y = y+y0; |
732 | 0 | rect.q.y = y+y0+1; |
733 | 0 | params.options = (GB_ALIGN_ANY | |
734 | 0 | (GB_RETURN_COPY | GB_RETURN_POINTER) | |
735 | 0 | GB_OFFSET_0 | |
736 | 0 | GB_RASTER_STANDARD | GB_PACKING_CHUNKY | |
737 | 0 | GB_COLORS_NATIVE | GB_ALPHA_NONE); |
738 | 0 | params.data[0] = min; |
739 | 0 | code = (*dev_proc(mask, get_bits_rectangle))(mask, &rect, |
740 | 0 | ¶ms); |
741 | 0 | if (code < 0) |
742 | 0 | goto fail; |
743 | 0 | mine = params.data[0]; |
744 | | |
745 | 0 | for (c = 0; c < num_comps; c++) |
746 | 0 | { |
747 | 0 | if (p->data[c] == NULL) |
748 | 0 | continue; |
749 | 0 | ptr = p->data[c] + raster * y; |
750 | 0 | for (x = 0; x < w; x++) |
751 | 0 | { |
752 | 0 | int xx = x+x0; |
753 | 0 | if (((mine[xx>>3]>>(x&7)) & 1) == 0) { |
754 | 0 | *ptr++ = blank; |
755 | 0 | if (bytes_per_component > 1) |
756 | 0 | *ptr++ = blank; |
757 | 0 | } else { |
758 | 0 | ptr += bytes_per_component; |
759 | 0 | } |
760 | 0 | } |
761 | 0 | } |
762 | 0 | } |
763 | 0 | } |
764 | | |
765 | 0 | fail: |
766 | 0 | gs_free_object(mask->memory, min, "blank_unmasked_bits"); |
767 | | |
768 | 0 | return code; |
769 | 0 | } |
770 | | |
771 | | /* Read back a rectangle of bits. */ |
772 | | /****** SHOULD USE MASK TO DEFINE UNREAD AREA *****/ |
773 | | static int |
774 | | pattern_accum_get_bits_rectangle(gx_device * dev, const gs_int_rect * prect, |
775 | | gs_get_bits_params_t * params) |
776 | 0 | { |
777 | 0 | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *) dev; |
778 | 0 | int code; |
779 | 0 | gs_get_bits_params_t params2 = *params; |
780 | | |
781 | 0 | if (padev->bits) { |
782 | 0 | if (padev->mask) |
783 | 0 | params2.options &= ~GB_RETURN_POINTER; |
784 | 0 | code = (*dev_proc(padev->target, get_bits_rectangle)) |
785 | 0 | (padev->target, prect, ¶ms2); |
786 | | /* If we have a mask, then unmarked pixels of the bits |
787 | | * will be undefined. Strictly speaking it makes no |
788 | | * sense for us to return any value here, but the only |
789 | | * caller of this currently is the overprint code, which |
790 | | * uses the values to parrot back to us. Let's |
791 | | * make sure they are set to the default 'empty' values. |
792 | | */ |
793 | 0 | if (code >= 0 && padev->mask) |
794 | 0 | code = blank_unmasked_bits((gx_device *)padev->mask, |
795 | 0 | padev->target->color_info.polarity, |
796 | 0 | padev->target->color_info.num_components, |
797 | 0 | padev->target->color_info.depth, |
798 | 0 | prect, ¶ms2); |
799 | 0 | return code; |
800 | 0 | } |
801 | | |
802 | 0 | return_error(gs_error_Fatal); /* shouldn't happen */ |
803 | 0 | } |
804 | | |
805 | | /* ------ Color space implementation ------ */ |
806 | | |
807 | | /* Free all entries in a pattern cache. */ |
808 | | static bool |
809 | | pattern_cache_choose_all(gx_color_tile * ctile, void *proc_data) |
810 | 8.08k | { |
811 | 8.08k | return true; |
812 | 8.08k | } |
813 | | static void |
814 | | pattern_cache_free_all(gx_pattern_cache * pcache) |
815 | 39.3k | { |
816 | 39.3k | gx_pattern_cache_winnow(pcache, pattern_cache_choose_all, NULL); |
817 | 39.3k | } |
818 | | |
819 | | /* Allocate a Pattern cache. */ |
820 | | gx_pattern_cache * |
821 | | gx_pattern_alloc_cache(gs_memory_t * mem, uint num_tiles, ulong max_bits) |
822 | 14.1k | { |
823 | 14.1k | gx_pattern_cache *pcache = |
824 | 14.1k | gs_alloc_struct(mem, gx_pattern_cache, &st_pattern_cache, |
825 | 14.1k | "gx_pattern_alloc_cache(struct)"); |
826 | 14.1k | gx_color_tile *tiles = |
827 | 14.1k | gs_alloc_struct_array(mem, num_tiles, gx_color_tile, |
828 | 14.1k | &st_color_tile_element, |
829 | 14.1k | "gx_pattern_alloc_cache(tiles)"); |
830 | 14.1k | uint i; |
831 | | |
832 | 14.1k | if (pcache == 0 || tiles == 0) { |
833 | 0 | gs_free_object(mem, tiles, "gx_pattern_alloc_cache(tiles)"); |
834 | 0 | gs_free_object(mem, pcache, "gx_pattern_alloc_cache(struct)"); |
835 | 0 | return 0; |
836 | 0 | } |
837 | 14.1k | pcache->memory = mem; |
838 | 14.1k | pcache->tiles = tiles; |
839 | 14.1k | pcache->num_tiles = num_tiles; |
840 | 14.1k | pcache->tiles_used = 0; |
841 | 14.1k | pcache->next = 0; |
842 | 14.1k | pcache->bits_used = 0; |
843 | 14.1k | pcache->max_bits = max_bits; |
844 | 14.1k | pcache->free_all = pattern_cache_free_all; |
845 | 720k | for (i = 0; i < num_tiles; tiles++, i++) { |
846 | 706k | tiles->id = gx_no_bitmap_id; |
847 | | /* Clear the pointers to pacify the GC. */ |
848 | 706k | uid_set_invalid(&tiles->uid); |
849 | 706k | tiles->bits_used = 0; |
850 | 706k | #ifdef PACIFY_VALGRIND |
851 | | /* The following memsets are required to avoid a valgrind warning |
852 | | * in: |
853 | | * gs -I./gs/lib -sOutputFile=out.pgm -dMaxBitmap=10000 |
854 | | * -sDEVICE=pgmraw -r300 -Z: -sDEFAULTPAPERSIZE=letter |
855 | | * -dNOPAUSE -dBATCH -K2000000 -dClusterJob -dJOBSERVER |
856 | | * tests_private/ps/ps3cet/11-14.PS |
857 | | * Setting the individual elements of the structures directly is |
858 | | * not enough, which leads me to believe that we are writing the |
859 | | * entire structs out, padding and all. |
860 | | */ |
861 | 706k | memset(&tiles->tbits, 0, sizeof(tiles->tbits)); |
862 | 706k | memset(&tiles->tmask, 0, sizeof(tiles->tmask)); |
863 | | #else |
864 | | tiles->tbits.data = 0; |
865 | | tiles->tmask.data = 0; |
866 | | #endif |
867 | 706k | tiles->index = i; |
868 | 706k | tiles->cdev = NULL; |
869 | 706k | tiles->ttrans = NULL; |
870 | 706k | tiles->num_planar_planes = 0; |
871 | 706k | } |
872 | 14.1k | return pcache; |
873 | 14.1k | } |
874 | | /* Ensure that an imager has a Pattern cache. */ |
875 | | static int |
876 | | ensure_pattern_cache(gs_gstate * pgs) |
877 | 23.3k | { |
878 | 23.3k | if (pgs->pattern_cache == 0) { |
879 | 4.46k | gx_pattern_cache *pcache = |
880 | 4.46k | gx_pattern_alloc_cache(pgs->memory, |
881 | 4.46k | gx_pat_cache_default_tiles(), |
882 | 4.46k | gx_pat_cache_default_bits()); |
883 | | |
884 | 4.46k | if (pcache == 0) |
885 | 0 | return_error(gs_error_VMerror); |
886 | 4.46k | pgs->pattern_cache = pcache; |
887 | 4.46k | } |
888 | 23.3k | return 0; |
889 | 23.3k | } |
890 | | |
891 | | /* Free pattern cache and its components. */ |
892 | | void |
893 | | gx_pattern_cache_free(gx_pattern_cache *pcache) |
894 | 9.84k | { |
895 | 9.84k | if (pcache == NULL) |
896 | 5.37k | return; |
897 | 4.46k | pattern_cache_free_all(pcache); |
898 | 4.46k | gs_free_object(pcache->memory, pcache->tiles, "gx_pattern_cache_free"); |
899 | 4.46k | pcache->tiles = NULL; |
900 | 4.46k | gs_free_object(pcache->memory, pcache, "gx_pattern_cache_free"); |
901 | 4.46k | } |
902 | | |
903 | | /* Get and set the Pattern cache in a gstate. */ |
904 | | gx_pattern_cache * |
905 | | gstate_pattern_cache(gs_gstate * pgs) |
906 | 6.08k | { |
907 | 6.08k | return pgs->pattern_cache; |
908 | 6.08k | } |
909 | | void |
910 | | gstate_set_pattern_cache(gs_gstate * pgs, gx_pattern_cache * pcache) |
911 | 9.66k | { |
912 | 9.66k | pgs->pattern_cache = pcache; |
913 | 9.66k | } |
914 | | |
915 | | /* Free a Pattern cache entry. */ |
916 | | /* This will not free a pattern if it is 'locked' which should only be for */ |
917 | | /* a stroke pattern during fill_stroke_path. */ |
918 | | static void |
919 | | gx_pattern_cache_free_entry(gx_pattern_cache * pcache, gx_color_tile * ctile, bool free_dummy) |
920 | 77.0k | { |
921 | 77.0k | gx_device *temp_device; |
922 | | |
923 | 77.0k | if ((ctile->id != gx_no_bitmap_id) && (!ctile->is_dummy || free_dummy) && !ctile->is_locked) { |
924 | 11.6k | gs_memory_t *mem = pcache->memory; |
925 | | |
926 | | /* |
927 | | * We must initialize the memory device properly, even though |
928 | | * we aren't using it for drawing. |
929 | | */ |
930 | 11.6k | if (ctile->tmask.data != 0) { |
931 | 2.67k | gs_free_object(mem, ctile->tmask.data, |
932 | 2.67k | "free_pattern_cache_entry(mask data)"); |
933 | 2.67k | ctile->tmask.data = 0; /* for GC */ |
934 | 2.67k | } |
935 | 11.6k | if (ctile->tbits.data != 0) { |
936 | 1.82k | gs_free_object(mem, ctile->tbits.data, |
937 | 1.82k | "free_pattern_cache_entry(bits data)"); |
938 | 1.82k | ctile->tbits.data = 0; /* for GC */ |
939 | 1.82k | } |
940 | 11.6k | if (ctile->cdev != NULL) { |
941 | 4.11k | ctile->cdev->common.do_not_open_or_close_bandfiles = false; /* make sure memfile gets freed/closed */ |
942 | 4.11k | dev_proc(&ctile->cdev->common, close_device)((gx_device *)&ctile->cdev->common); |
943 | | /* Free up the icc based stuff in the clist device. I am puzzled |
944 | | why the other objects are not released */ |
945 | 4.11k | clist_free_icc_table(ctile->cdev->common.icc_table, |
946 | 4.11k | ctile->cdev->common.memory); |
947 | 4.11k | ctile->cdev->common.icc_table = NULL; |
948 | 4.11k | rc_decrement(ctile->cdev->common.icc_cache_cl, |
949 | 4.11k | "gx_pattern_cache_free_entry"); |
950 | 4.11k | ctile->cdev->common.icc_cache_cl = NULL; |
951 | 4.11k | ctile->cdev->writer.pinst = NULL; |
952 | 4.11k | gs_free_object(ctile->cdev->common.memory->non_gc_memory, ctile->cdev->common.cache_chunk, "free tile cache for clist"); |
953 | 4.11k | ctile->cdev->common.cache_chunk = 0; |
954 | 4.11k | temp_device = (gx_device *)ctile->cdev; |
955 | 4.11k | gx_device_retain(temp_device, false); |
956 | 4.11k | ctile->cdev = NULL; |
957 | 4.11k | } |
958 | | |
959 | 11.6k | if (ctile->ttrans != NULL) { |
960 | 5.67k | if_debug2m('v', mem, |
961 | 5.67k | "[v*] Freeing trans pattern from cache, uid = %ld id = %ld\n", |
962 | 5.67k | ctile->uid.id, ctile->id); |
963 | 5.67k | if ( ctile->ttrans->pdev14 == NULL) { |
964 | | /* This can happen if we came from the clist */ |
965 | 5.67k | if (ctile->ttrans->mem != NULL) |
966 | 5.67k | gs_free_object(ctile->ttrans->mem ,ctile->ttrans->transbytes, |
967 | 5.67k | "free_pattern_cache_entry(transbytes)"); |
968 | 5.67k | gs_free_object(mem,ctile->ttrans->fill_trans_buffer, |
969 | 5.67k | "free_pattern_cache_entry(fill_trans_buffer)"); |
970 | 5.67k | ctile->ttrans->transbytes = NULL; |
971 | 5.67k | ctile->ttrans->fill_trans_buffer = NULL; |
972 | 5.67k | } else { |
973 | 0 | dev_proc(ctile->ttrans->pdev14, close_device)((gx_device *)ctile->ttrans->pdev14); |
974 | 0 | temp_device = (gx_device *)(ctile->ttrans->pdev14); |
975 | 0 | gx_device_retain(temp_device, false); |
976 | 0 | rc_decrement(temp_device,"gx_pattern_cache_free_entry"); |
977 | 0 | ctile->ttrans->pdev14 = NULL; |
978 | 0 | ctile->ttrans->transbytes = NULL; /* should be ok due to pdf14_close */ |
979 | 0 | ctile->ttrans->fill_trans_buffer = NULL; /* This is always freed */ |
980 | 0 | } |
981 | | |
982 | 5.67k | gs_free_object(mem, ctile->ttrans, |
983 | 5.67k | "free_pattern_cache_entry(ttrans)"); |
984 | 5.67k | ctile->ttrans = NULL; |
985 | | |
986 | 5.67k | } |
987 | | |
988 | 11.6k | pcache->tiles_used--; |
989 | 11.6k | pcache->bits_used -= ctile->bits_used; |
990 | 11.6k | ctile->id = gx_no_bitmap_id; |
991 | 11.6k | } |
992 | 77.0k | } |
993 | | |
994 | | /* |
995 | | Historically, the pattern cache has used a very simple hashing |
996 | | scheme whereby pattern A goes into slot idx = (A.id % num_tiles). |
997 | | Unfortunately, now we allow tiles to be 'locked' into the |
998 | | pattern cache, we might run into the case where we want both |
999 | | tiles A and B to be in the cache at once where: |
1000 | | (A.id % num_tiles) == (B.id % num_tiles). |
1001 | | |
1002 | | We have a maximum of 2 locked tiles, and one of those can be |
1003 | | placed while the other one is locked. So we only need to cope |
1004 | | with a single 'collision'. |
1005 | | |
1006 | | We therefore allow tiles to either go in at idx or at |
1007 | | (idx + 1) % num_tiles. This means we need to be prepared to |
1008 | | search a bit further for them, hence we now have 2 helper |
1009 | | functions to do this. |
1010 | | */ |
1011 | | |
1012 | | /* We can have at most 1 locked tile while looking for a place to |
1013 | | * put another tile. */ |
1014 | | gx_color_tile * |
1015 | | gx_pattern_cache_find_tile_for_id(gx_pattern_cache *pcache, gs_id id) |
1016 | 75.8k | { |
1017 | 75.8k | gx_color_tile *ctile = &pcache->tiles[id % pcache->num_tiles]; |
1018 | 75.8k | gx_color_tile *ctile2 = &pcache->tiles[(id+1) % pcache->num_tiles]; |
1019 | 75.8k | if (ctile->id == id || ctile->id == gs_no_id) |
1020 | 75.8k | return ctile; |
1021 | 0 | if (ctile2->id == id || ctile2->id == gs_no_id) |
1022 | 0 | return ctile2; |
1023 | 0 | if (!ctile->is_locked) |
1024 | 0 | return ctile; |
1025 | 0 | return ctile2; |
1026 | 0 | } |
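/* Hypothetical collision example (values invented for illustration):
 * with num_tiles == 50, tiles with id 110 and id 160 both hash to slot
 * 110 % 50 == 10. If slot 10 holds a locked tile with a different id,
 * the lookup above falls through to slot (110 + 1) % 50 == 11, which is
 * why both insertion and lookup must accept either of the two probe
 * positions. */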
1027 | | |
1028 | | |
1029 | | /* Given the size of a new pattern tile, free entries from the cache until */ |
1030 | | /* enough space is available (or nothing left to free). */ |
1031 | | /* This will allow 1 oversized entry */ |
1032 | | void |
1033 | | gx_pattern_cache_ensure_space(gs_gstate * pgs, size_t needed) |
1034 | 11.6k | { |
1035 | 11.6k | int code = ensure_pattern_cache(pgs); |
1036 | 11.6k | gx_pattern_cache *pcache; |
1037 | 11.6k | int start_free_id; |
1038 | | |
1039 | 11.6k | if (code < 0) |
1040 | 0 | return; /* no cache -- just exit */ |
1041 | | |
1042 | 11.6k | pcache = pgs->pattern_cache; |
1043 | 11.6k | start_free_id = pcache->next; /* for scan wrap check */ |
1044 | | /* If too large then start freeing entries */ |
1045 | | /* By starting just after 'next', we attempt to first free the oldest entries */ |
1046 | 67.9k | while (pcache->bits_used + needed > pcache->max_bits && |
1047 | 67.9k | pcache->bits_used != 0) { |
1048 | 56.3k | pcache->next = (pcache->next + 1) % pcache->num_tiles; |
1049 | 56.3k | gx_pattern_cache_free_entry(pcache, &pcache->tiles[pcache->next], false); |
1050 | | /* since a pattern may be temporarily locked (stroke pattern for fill_stroke_path) */ |
1051 | | /* we may not have freed all entries even though we've scanned the entire cache. */ |
1052 | | /* The following check for wrapping prevents an infinite loop if the stroke pattern was */ |
1053 | | /* larger than pcache->max_bits. */ |
1054 | 56.3k | if (pcache->next == start_free_id) |
1055 | 0 | break; /* we wrapped -- cache may not be empty */ |
1056 | 56.3k | } |
1057 | 11.6k | } |
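/* Sketch of the eviction walk above (hypothetical numbers): with
 * num_tiles == 50 and pcache->next == 47, the loop frees slots
 * 48, 49, 0, 1, ... oldest-first, stopping either when bits_used plus
 * needed fits under max_bits or when the scan wraps back to slot 47
 * (start_free_id), as can happen when a locked stroke pattern keeps the
 * cache from shrinking far enough. */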
1058 | | |
1059 | | /* Export updating the pattern_cache bits_used and tiles_used for clist reading */ |
1060 | | void |
1061 | | gx_pattern_cache_update_used(gs_gstate *pgs, size_t used) |
1062 | 11.6k | { |
1063 | 11.6k | gx_pattern_cache *pcache = pgs->pattern_cache; |
1064 | | |
1065 | 11.6k | pcache->bits_used += used; |
1066 | 11.6k | pcache->tiles_used++; |
1067 | 11.6k | } |
1068 | | |
1069 | | /* |
1070 | | * Add a Pattern cache entry. This is exported for the interpreter. |
1071 | | * Note that this does not free any of the data in the accumulator |
1072 | | * device, but it may zero out the bitmap_memory pointers to prevent |
1073 | | * the accumulated bitmaps from being freed when the device is closed. |
1074 | | */ |
1075 | | static void make_bitmap(gx_strip_bitmap *, const gx_device_memory *, gx_bitmap_id, const gs_memory_t *); |
1076 | | int |
1077 | | gx_pattern_cache_add_entry(gs_gstate * pgs, |
1078 | | gx_device_forward * fdev, gx_color_tile ** pctile) |
1079 | 1.30k | { |
1080 | 1.30k | gx_pattern_cache *pcache; |
1081 | 1.30k | const gs_pattern1_instance_t *pinst; |
1082 | 1.30k | size_t used = 0, mask_used = 0, trans_used = 0; |
1083 | 1.30k | gx_bitmap_id id; |
1084 | 1.30k | gx_color_tile *ctile; |
1085 | 1.30k | int code = ensure_pattern_cache(pgs); |
1086 | 1.30k | gx_device_memory *mmask = NULL; |
1087 | 1.30k | gx_device_memory *mbits = NULL; |
1088 | 1.30k | gx_pattern_trans_t *trans = NULL; |
1089 | 1.30k | int size_b, size_c; |
1090 | | |
1091 | 1.30k | if (code < 0) |
1092 | 0 | return code; |
1093 | 1.30k | pcache = pgs->pattern_cache; |
1094 | | |
1095 | 1.30k | if (dev_proc(fdev, open_device) != pattern_clist_open_device) { |
1096 | 1.28k | gx_device_pattern_accum *padev = (gx_device_pattern_accum *)fdev; |
1097 | | |
1098 | 1.28k | mbits = padev->bits; |
1099 | 1.28k | mmask = padev->mask; |
1100 | 1.28k | pinst = padev->instance; |
1101 | 1.28k | trans = padev->transbuff; |
1102 | | |
1103 | | /* |
1104 | | * Check whether the pattern completely fills its box. |
1105 | | * If so, we can avoid the expensive masking operations |
1106 | | * when using the pattern. |
1107 | | */ |
1108 | | /* Bug 700624: In cases where the mask is completely full, |
1109 | | * but the pattern cells are separated from one another, |
1110 | | * we need to leave gaps between the cells when rendering |
1111 | | * them. Sadly, the graphics library can't cope with this |
1112 | | * in the no-mask case. Therefore, only do the optimisation |
1113 | | * of not sending the mask if the step matrix is suitable. |
1114 | | * |
1115 | | * To do this, we compare the step matrix to the size. My |
1116 | | * belief is that the mask will only ever be full if it's |
1117 | | * orthogonal, since otherwise the edges will be clipped, |
1118 | | * hence we lose no generality by checking for .xy and .yx |
1119 | | * being 0. |
1120 | | */ |
1121 | 1.28k | if (mmask != 0 && |
1122 | 1.28k | fabsf(pinst->step_matrix.xx) <= pinst->size.x && |
1123 | 1.28k | fabsf(pinst->step_matrix.yy) <= pinst->size.y && |
1124 | 1.28k | pinst->step_matrix.xy == 0 && |
1125 | 1.28k | pinst->step_matrix.yx == 0) { |
1126 | 913 | int y; |
1127 | 913 | int w_less_8 = mmask->width-8; |
1128 | | |
1129 | 1.92k | for (y = 0; y < mmask->height; y++) { |
1130 | 1.91k | const byte *row = scan_line_base(mmask, y); |
1131 | 1.91k | int w; |
1132 | | |
1133 | 50.2k | for (w = w_less_8; w > 0; w -= 8) |
1134 | 49.2k | if (*row++ != 0xff) |
1135 | 893 | goto keep; |
1136 | 1.02k | w += 8; |
1137 | 1.02k | if ((*row | (0xff >> w)) != 0xff) |
1138 | 5 | goto keep; |
1139 | 1.02k | } |
1140 | | /* We don't need a mask. */ |
1141 | 15 | mmask = 0; |
1142 | 913 | keep:; |
1143 | 913 | } |
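/* Note on the full-mask test above: for each mask row the inner loop
 * checks every byte except the last one the width reaches into, and the
 * final (*row | (0xff >> w)) != 0xff test checks just the remaining w
 * high bits of that byte. Only when every row is entirely 1s, and the
 * step matrix is orthogonal with steps no larger than the cell, is the
 * mask dropped (mmask = 0) so tiling can skip the masking work. */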
1144 | | /* Need to get size of buffers that are being added to the cache */ |
1145 | 1.28k | if (mbits != 0) |
1146 | 406 | gdev_mem_bitmap_size(mbits, &used); |
1147 | 1.28k | if (mmask != 0) { |
1148 | 1.27k | gdev_mem_bitmap_size(mmask, &mask_used); |
1149 | 1.27k | used += mask_used; |
1150 | 1.27k | } |
1151 | 1.28k | if (trans != 0) { |
1152 | 865 | trans_used = (size_t)trans->planestride*trans->n_chan; |
1153 | 865 | used += trans_used; |
1154 | 865 | } |
1155 | 1.28k | } else { |
1156 | 21 | gx_device_clist *cdev = (gx_device_clist *)fdev; |
1157 | 21 | gx_device_clist_writer * cldev = (gx_device_clist_writer *)cdev; |
1158 | | |
1159 | 21 | code = clist_end_page(cldev); |
1160 | 21 | if (code < 0) |
1161 | 0 | return code; |
1162 | 21 | pinst = cdev->writer.pinst; |
1163 | 21 | size_b = clist_data_size(cdev, 0); |
1164 | 21 | if (size_b < 0) |
1165 | 0 | return_error(gs_error_unregistered); |
1166 | 21 | size_c = clist_data_size(cdev, 1); |
1167 | 21 | if (size_c < 0) |
1168 | 0 | return_error(gs_error_unregistered); |
1169 | | /* The memfile size is the actual stored size, not the size implied by depth*width*height */ |
1170 | 21 | used = size_b + size_c; |
1171 | 21 | } |
1172 | 1.30k | id = pinst->id; |
1173 | 1.30k | ctile = gx_pattern_cache_find_tile_for_id(pcache, id); |
1174 | 1.30k | gx_pattern_cache_free_entry(pcache, ctile, false); /* ensure that this cache slot is empty */ |
1175 | 1.30k | ctile->id = id; |
1176 | 1.30k | ctile->num_planar_planes = pinst->num_planar_planes; |
1177 | 1.30k | ctile->depth = fdev->color_info.depth; |
1178 | 1.30k | ctile->uid = pinst->templat.uid; |
1179 | 1.30k | ctile->tiling_type = pinst->templat.TilingType; |
1180 | 1.30k | ctile->step_matrix = pinst->step_matrix; |
1181 | 1.30k | ctile->bbox = pinst->bbox; |
1182 | 1.30k | ctile->is_simple = pinst->is_simple; |
1183 | 1.30k | ctile->has_overlap = pinst->has_overlap; |
1184 | 1.30k | ctile->is_dummy = false; |
1185 | 1.30k | ctile->is_locked = false; |
1186 | 1.30k | ctile->blending_mode = 0; |
1187 | 1.30k | ctile->trans_group_popped = false; |
1188 | 1.30k | if (dev_proc(fdev, open_device) != pattern_clist_open_device) { |
1189 | 1.28k | if (mbits != 0) { |
1190 | 406 | make_bitmap(&ctile->tbits, mbits, gs_next_ids(pgs->memory, 1), pgs->memory); |
1191 | 406 | mbits->bitmap_memory = 0; /* don't free the bits */ |
1192 | 406 | } else |
1193 | 880 | ctile->tbits.data = 0; |
1194 | 1.28k | if (mmask != 0) { |
1195 | 1.27k | make_bitmap(&ctile->tmask, mmask, id, pgs->memory); |
1196 | 1.27k | mmask->bitmap_memory = 0; /* don't free the bits */ |
1197 | 1.27k | } else |
1198 | 15 | ctile->tmask.data = 0; |
1199 | 1.28k | if (trans != 0) { |
1200 | 865 | if_debug2m('v', pgs->memory, |
1201 | 865 | "[v*] Adding trans pattern to cache, uid = %ld id = %ld\n", |
1202 | 865 | ctile->uid.id, ctile->id); |
1203 | 865 | ctile->ttrans = trans; |
1204 | 865 | } |
1205 | | |
1206 | 1.28k | ctile->cdev = NULL; |
1207 | 1.28k | } else { |
1208 | 21 | gx_device_clist *cdev = (gx_device_clist *)fdev; |
1209 | 21 | gx_device_clist_writer *cwdev = (gx_device_clist_writer *)fdev; |
1210 | | |
1211 | 21 | ctile->tbits.data = 0; |
1212 | 21 | ctile->tbits.size.x = 0; |
1213 | 21 | ctile->tbits.size.y = 0; |
1214 | 21 | ctile->tmask.data = 0; |
1215 | 21 | ctile->tmask.size.x = 0; |
1216 | 21 | ctile->tmask.size.y = 0; |
1217 | 21 | ctile->cdev = cdev; |
1218 | | /* Prevent freeing files on pattern_paint_cleanup : */ |
1219 | 21 | cwdev->do_not_open_or_close_bandfiles = true; |
1220 | 21 | } |
1221 | | /* In the clist case, used is accurate. In the non-clist case, it may |
1222 | | * not be. The important thing is that we account the same for tiles |
1223 | | * going in and coming out of the cache. Therefore we store the used |
1224 | | * figure in the tile so we always remove the same amount. */ |
1225 | 1.30k | ctile->bits_used = used; |
1226 | 1.30k | gx_pattern_cache_update_used(pgs, used); |
1227 | | |
1228 | 1.30k | *pctile = ctile; |
1229 | 1.30k | return 0; |
1230 | 1.30k | } |
1231 | | |
1232 | | /* set or clear the 'is_locked' flag for a tile in the cache. Used by */ |
1233 | | /* fill_stroke_path to make sure a large stroke pattern stays in the */ |
1234 | | /* cache even if the fill is also a pattern. */ |
1235 | | int |
1236 | | gx_pattern_cache_entry_set_lock(gs_gstate *pgs, gs_id id, bool new_lock_value) |
1237 | 125 | { |
1238 | 125 | gx_color_tile *ctile; |
1239 | 125 | int code = ensure_pattern_cache(pgs); |
1240 | | |
1241 | 125 | if (code < 0) |
1242 | 0 | return code; |
1243 | 125 | ctile = gx_pattern_cache_find_tile_for_id(pgs->pattern_cache, id); |
1244 | 125 | if (ctile == NULL) |
1245 | 0 | return_error(gs_error_undefined); |
1246 | 125 | ctile->is_locked = new_lock_value; |
1247 | 125 | return 0; |
1248 | 125 | } |
1249 | | |
1250 | | /* Get entry for reading a pattern from clist. */ |
1251 | | int |
1252 | | gx_pattern_cache_get_entry(gs_gstate * pgs, gs_id id, gx_color_tile ** pctile) |
1253 | 10.3k | { |
1254 | 10.3k | gx_pattern_cache *pcache; |
1255 | 10.3k | gx_color_tile *ctile; |
1256 | 10.3k | int code = ensure_pattern_cache(pgs); |
1257 | | |
1258 | 10.3k | if (code < 0) |
1259 | 0 | return code; |
1260 | 10.3k | pcache = pgs->pattern_cache; |
1261 | 10.3k | ctile = gx_pattern_cache_find_tile_for_id(pcache, id); |
1262 | 10.3k | gx_pattern_cache_free_entry(pgs->pattern_cache, ctile, false); |
1263 | 10.3k | ctile->id = id; |
1264 | 10.3k | *pctile = ctile; |
1265 | 10.3k | return 0; |
1266 | 10.3k | } |
1267 | | |
1268 | | bool |
1269 | | gx_pattern_tile_is_clist(gx_color_tile *ptile) |
1270 | 102k | { |
1271 | 102k | return ptile != NULL && ptile->cdev != NULL; |
1272 | 102k | } |
1273 | | |
1274 | | /* Add a dummy Pattern cache entry. Stubs a pattern tile for interpreter when |
1275 | | device handles high level patterns. */ |
1276 | | int |
1277 | | gx_pattern_cache_add_dummy_entry(gs_gstate *pgs, |
1278 | | gs_pattern1_instance_t *pinst, int depth) |
1279 | 0 | { |
1280 | 0 | gx_color_tile *ctile; |
1281 | 0 | gx_pattern_cache *pcache; |
1282 | 0 | gx_bitmap_id id = pinst->id; |
1283 | 0 | int code = ensure_pattern_cache(pgs); |
1284 | | |
1285 | 0 | if (code < 0) |
1286 | 0 | return code; |
1287 | 0 | pcache = pgs->pattern_cache; |
1288 | 0 | ctile = gx_pattern_cache_find_tile_for_id(pcache, id); |
1289 | 0 | gx_pattern_cache_free_entry(pcache, ctile, false); |
1290 | 0 | ctile->id = id; |
1291 | 0 | ctile->depth = depth; |
1292 | 0 | ctile->uid = pinst->templat.uid; |
1293 | 0 | ctile->tiling_type = pinst->templat.TilingType; |
1294 | 0 | ctile->step_matrix = pinst->step_matrix; |
1295 | 0 | ctile->bbox = pinst->bbox; |
1296 | 0 | ctile->is_simple = pinst->is_simple; |
1297 | 0 | ctile->has_overlap = pinst->has_overlap; |
1298 | 0 | ctile->is_dummy = true; |
1299 | 0 | ctile->is_locked = false; |
1300 | 0 | memset(&ctile->tbits, 0 , sizeof(ctile->tbits)); |
1301 | 0 | ctile->tbits.size = pinst->size; |
1302 | 0 | ctile->tbits.id = gs_no_bitmap_id; |
1303 | 0 | memset(&ctile->tmask, 0 , sizeof(ctile->tmask)); |
1304 | 0 | ctile->cdev = NULL; |
1305 | 0 | ctile->ttrans = NULL; |
1306 | 0 | ctile->bits_used = 0; |
1307 | 0 | pcache->tiles_used++; |
1308 | 0 | return 0; |
1309 | 0 | } |
1310 | | |
1311 | | #if RAW_PATTERN_DUMP |
1312 | | /* Debug dump of pattern image data. Saved in |
1313 | | interleaved form with global indexing in |
1314 | | file name */ |
1315 | | static void |
1316 | | dump_raw_pattern(int height, int width, int n_chan, int depth, |
1317 | | byte *Buffer, int raster, const gx_device_memory * mdev, |
1318 | | const gs_memory_t *memory) |
1319 | | { |
1320 | | char full_file_name[50]; |
1321 | | gp_file *fid; |
1322 | | int max_bands; |
1323 | | int j, k, m; |
1324 | | int byte_number, bit_position; |
1325 | | unsigned char current_byte; |
1326 | | unsigned char output_val; |
1327 | | bool is_planar; |
1328 | | byte *curr_ptr = Buffer; |
1329 | | int plane_offset; |
1330 | | |
1331 | | is_planar = mdev->num_planar_planes > 0; |
1332 | | max_bands = ( n_chan < 57 ? n_chan : 56); /* Photoshop handles at most 56 bands */ |
1333 | | if (is_planar) { |
1334 | | gs_snprintf(full_file_name, sizeof(full_file_name), "%d)PATTERN_PLANE_%dx%dx%d.raw", global_pat_index, |
1335 | | mdev->raster, height, max_bands); |
1336 | | } else { |
1337 | | gs_snprintf(full_file_name, sizeof(full_file_name), "%d)PATTERN_CHUNK_%dx%dx%d.raw", global_pat_index, |
1338 | | width, height, max_bands); |
1339 | | } |
1340 | | fid = gp_fopen(memory,full_file_name,"wb"); |
1341 | | if (depth >= 8) { |
1342 | | /* Contone data. */ |
1343 | | if (is_planar) { |
1344 | | for (m = 0; m < max_bands; m++) { |
1345 | | curr_ptr = mdev->line_ptrs[m*mdev->height]; |
1346 | | gp_fwrite(curr_ptr, 1, mdev->height * mdev->raster, fid); |
1347 | | } |
1348 | | } else { |
1349 | | /* Just dump it as it is */
1350 | | gp_fwrite(Buffer, 1, max_bands * height * width, fid); |
1351 | | } |
1352 | | } else { |
1353 | | /* Binary data. Let's expand it to 8 bits per sample for debugging.
1354 | | We have to handle planar vs. chunky layouts. Note this assumes
1355 | | 1-bit data only. */
1356 | | if (is_planar) { |
1357 | | plane_offset = mdev->raster * mdev->height; |
1358 | | for (m = 0; m < max_bands; m++) { |
1359 | | curr_ptr = mdev->line_ptrs[m*mdev->height]; |
1360 | | for (j = 0; j < height; j++) { |
1361 | | for (k = 0; k < width; k++) { |
1362 | | byte_number = (int) ceil((( (float) k + 1.0) / 8.0)) - 1; |
1363 | | current_byte = curr_ptr[j*(mdev->raster) + byte_number]; |
1364 | | bit_position = 7 - (k - byte_number*8); |
1365 | | output_val = ((current_byte >> bit_position) & 0x1) * 255; |
1366 | | gp_fwrite(&output_val,1,1,fid); |
1367 | | } |
1368 | | } |
1369 | | } |
1370 | | } else { |
1371 | | for (j = 0; j < height; j++) { |
1372 | | for (k = 0; k < width; k++) { |
1373 | | for (m = 0; m < max_bands; m++) { |
1374 | | /* index current byte */ |
1375 | | byte_number = |
1376 | | (int) ceil((( (float) k * (float) max_bands + |
1377 | | (float) m + 1.0) / 8.0)) - 1; |
1378 | | /* get byte of interest */ |
1379 | | current_byte = |
1380 | | curr_ptr[j*(mdev->raster) + byte_number]; |
1381 | | /* get bit position */ |
1382 | | bit_position = |
1383 | | 7 - (k * max_bands + m - byte_number * 8); |
1384 | | /* extract and create byte */ |
1385 | | output_val = |
1386 | | ((current_byte >> bit_position) & 0x1) * 255; |
1387 | | gp_fwrite(&output_val,1,1,fid); |
1388 | | } |
1389 | | } |
1390 | | } |
1391 | | } |
1392 | | } |
1393 | | gp_fclose(fid); |
1394 | | } |
1395 | | #endif |
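
/* Editor's sketch (standalone helper, not used by this file): the byte/bit
 * indexing that dump_raw_pattern() computes with ceil() on floats reduces to
 * plain integer arithmetic for 1-bit chunky data. Sample s = k * n_chan + m
 * lives in byte s / 8 at bit 7 - (s % 8), MSB first. */
static unsigned char
get_1bit_sample(const byte *row, int k, int m, int n_chan)
{
    int s = k * n_chan + m;          /* sample index within the row */
    int byte_number = s >> 3;        /* same as ceil((s + 1) / 8.0) - 1 */
    int bit_position = 7 - (s & 7);  /* MSB-first within the byte */

    return (unsigned char)(((row[byte_number] >> bit_position) & 1) * 255);
}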
1396 | | |
1397 | | static void |
1398 | | make_bitmap(register gx_strip_bitmap * pbm, const gx_device_memory * mdev, |
1399 | | gx_bitmap_id id, const gs_memory_t *memory) |
1400 | 1.67k | { |
1401 | 1.67k | pbm->data = mdev->base; |
1402 | 1.67k | pbm->raster = mdev->raster; |
1403 | 1.67k | pbm->rep_width = pbm->size.x = mdev->width; |
1404 | 1.67k | pbm->rep_height = pbm->size.y = mdev->height; |
1405 | 1.67k | pbm->id = id; |
1406 | 1.67k | pbm->rep_shift = pbm->shift = 0; |
1407 | 1.67k | pbm->num_planes = mdev->num_planar_planes ? mdev->num_planar_planes : 1; |
1408 | | |
1409 | | /* Let's dump this for debug purposes */
1410 | | |
1411 | | #if RAW_PATTERN_DUMP |
1412 | | dump_raw_pattern(pbm->rep_height, pbm->rep_width, |
1413 | | mdev->color_info.num_components, |
1414 | | mdev->color_info.depth, |
1415 | | (unsigned char*) mdev->base, |
1416 | | pbm->raster, mdev, memory); |
1417 | | |
1418 | | global_pat_index++; |
1419 | | |
1420 | | #endif |
1421 | | |
1422 | 1.67k | } |
1423 | | |
1424 | | /* Purge selected entries from the pattern cache. */ |
1425 | | void |
1426 | | gx_pattern_cache_winnow(gx_pattern_cache * pcache, |
1427 | | bool(*proc) (gx_color_tile * ctile, void *proc_data), void *proc_data) |
1428 | 39.3k | { |
1429 | 39.3k | uint i; |
1430 | | |
1431 | 39.3k | if (pcache == 0) /* no cache created yet */ |
1432 | 0 | return; |
1433 | 2.00M | for (i = 0; i < pcache->num_tiles; ++i) { |
1434 | 1.96M | gx_color_tile *ctile = &pcache->tiles[i]; |
1435 | | |
1436 | 1.96M | ctile->is_locked = false; /* force freeing */ |
1437 | 1.96M | if (ctile->id != gx_no_bitmap_id && (*proc) (ctile, proc_data)) |
1438 | 8.08k | gx_pattern_cache_free_entry(pcache, ctile, false); |
1439 | 1.96M | } |
1440 | 39.3k | } |
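
/* Editor's sketch (hypothetical callback, not part of gxpcmap.c): purge every
 * cached tile rendered at a given depth, e.g. after a device reconfiguration
 * invalidates tiles of that depth. */
static bool
tile_has_depth(gx_color_tile *ctile, void *proc_data)
{
    return ctile->depth == *(const int *)proc_data;
}
/* Usage:
 *     int depth = 1;
 *     gx_pattern_cache_winnow(pgs->pattern_cache, tile_has_depth, &depth);
 */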
1441 | | |
1442 | | void |
1443 | | gx_pattern_cache_flush(gx_pattern_cache * pcache) |
1444 | 6.08k | { |
1445 | 6.08k | uint i; |
1446 | | |
1447 | 6.08k | if (pcache == 0) /* no cache created yet */ |
1448 | 0 | return; |
1449 | 310k | for (i = 0; i < pcache->num_tiles; ++i) { |
1450 | 304k | gx_color_tile *ctile = &pcache->tiles[i]; |
1451 | | |
1452 | 304k | ctile->is_locked = false; /* force freeing */ |
1453 | 304k | if (ctile->id != gx_no_bitmap_id) |
1454 | 991 | gx_pattern_cache_free_entry(pcache, ctile, true); |
1455 | 304k | } |
1456 | 6.08k | } |
1457 | | |
1458 | | /* Blank the pattern accumulator device, which is assumed to be the current
1459 | | device in the graphics state. */
1460 | | int |
1461 | | gx_erase_colored_pattern(gs_gstate *pgs) |
1462 | 406 | { |
1463 | 406 | int code; |
1464 | 406 | gx_device_pattern_accum *pdev = (gx_device_pattern_accum *)gs_currentdevice(pgs); |
1465 | | |
1466 | 406 | if ((code = gs_gsave(pgs)) < 0) |
1467 | 0 | return code; |
1468 | 406 | if ((code = gs_setgray(pgs, 1.0)) >= 0) { |
1469 | 406 | gs_rect rect; |
1470 | 406 | gx_device_memory *mask; |
1471 | 406 | static const gs_matrix identity = { 1, 0, 0, 1, 0, 0 }; |
1472 | | |
1473 | 406 | pgs->log_op = lop_default; |
1474 | 406 | rect.p.x = 0.0; |
1475 | 406 | rect.p.y = 0.0; |
1476 | 406 | rect.q.x = (double)pdev->width; |
1477 | 406 | rect.q.y = (double)pdev->height; |
1478 | | |
1479 | 406 | code = gs_setmatrix(pgs, &identity); |
1480 | 406 | if (code < 0) { |
1481 | 0 | gs_grestore_only(pgs); |
1482 | 0 | return code; |
1483 | 0 | } |
1484 | | /* we don't want the fill rectangle device call to use the |
1485 | | mask */ |
1486 | 406 | mask = pdev->mask; |
1487 | 406 | pdev->mask = NULL; |
1488 | 406 | code = gs_rectfill(pgs, &rect, 1); |
1489 | | /* restore the mask */ |
1490 | 406 | pdev->mask = mask; |
1491 | 406 | if (code < 0) { |
1492 | 0 | gs_grestore_only(pgs); |
1493 | 0 | return code; |
1494 | 0 | } |
1495 | 406 | } |
1496 | | /* we don't need wraparound here */ |
1497 | 406 | gs_grestore_only(pgs); |
1498 | 406 | return code; |
1499 | 406 | } |
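
/* Editor's sketch (hypothetical helper, not part of gxpcmap.c): the same
 * gsave / setgray(1) / identity-CTM / rectfill / grestore sequence used by
 * gx_erase_colored_pattern(), without the accumulator's mask handling, for
 * erasing an arbitrary width x height device to white. */
static int
erase_current_device_to_white(gs_gstate *pgs, int width, int height)
{
    static const gs_matrix identity = { 1, 0, 0, 1, 0, 0 };
    gs_rect rect;
    int code;

    if ((code = gs_gsave(pgs)) < 0)
        return code;
    rect.p.x = rect.p.y = 0.0;
    rect.q.x = (double)width;
    rect.q.y = (double)height;
    if ((code = gs_setgray(pgs, 1.0)) >= 0 &&
        (code = gs_setmatrix(pgs, &identity)) >= 0)
        code = gs_rectfill(pgs, &rect, 1);
    gs_grestore_only(pgs);
    return code;
}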
1500 | | |
1501 | | /* Reload a (non-null) Pattern color into the cache. */ |
1502 | | /* *pdc is already set, except for colors.pattern.p_tile and mask.m_tile. */ |
1503 | | int |
1504 | | gx_pattern_load(gx_device_color * pdc, const gs_gstate * pgs, |
1505 | | gx_device * dev, gs_color_select_t select) |
1506 | 1.34k | { |
1507 | 1.34k | gx_device_forward *adev = NULL; |
1508 | 1.34k | gs_pattern1_instance_t *pinst = |
1509 | 1.34k | (gs_pattern1_instance_t *)pdc->ccolor.pattern; |
1510 | 1.34k | gs_gstate *saved; |
1511 | 1.34k | gx_color_tile *ctile; |
1512 | 1.34k | gs_memory_t *mem = pgs->memory; |
1513 | 1.34k | bool has_tags = device_encodes_tags(dev); |
1514 | 1.34k | int code; |
1515 | | |
1516 | 1.34k | if (pgs->pattern_cache == NULL) |
1517 | 0 | if ((code = ensure_pattern_cache((gs_gstate *) pgs))< 0) /* break const for call */ |
1518 | 0 | return code; |
1519 | | |
1520 | 1.34k | if (gx_pattern_cache_lookup(pdc, pgs, dev, select)) |
1521 | 37 | return 0; |
1522 | | |
1523 | | /* Get enough space in the cache for this pattern (estimated if it is a clist) */ |
1524 | 1.30k | gx_pattern_cache_ensure_space((gs_gstate *)pgs, gx_pattern_size_estimate(pinst, has_tags)); |
1525 | | /* |
1526 | | * Note that adev is an internal device, so it will be freed when the |
1527 | | * last reference to it from a graphics state is deleted. |
1528 | | */ |
1529 | 1.30k | adev = gx_pattern_accum_alloc(mem, pgs->pattern_cache->memory, pinst, "gx_pattern_load"); |
1530 | 1.30k | if (adev == 0) |
1531 | 0 | return_error(gs_error_VMerror); |
1532 | 1.30k | gx_device_set_target((gx_device_forward *)adev, dev); |
1533 | 1.30k | code = dev_proc(adev, open_device)((gx_device *)adev); |
1534 | 1.30k | if (code < 0) { |
1535 | 0 | gs_free_object(mem, adev, "gx_pattern_load"); |
1536 | 0 | return code; |
1537 | 0 | } |
1538 | 1.30k | saved = gs_gstate_copy(pinst->saved, pinst->saved->memory); |
1539 | 1.30k | if (saved == 0) { |
1540 | 0 | code = gs_note_error(gs_error_VMerror); |
1541 | 0 | goto fail; |
1542 | 0 | } |
1543 | 1.30k | if (saved->pattern_cache == 0) |
1544 | 0 | saved->pattern_cache = pgs->pattern_cache; |
1545 | 1.30k | code = gs_setdevice_no_init(saved, (gx_device *)adev); |
1546 | 1.30k | if (code < 0) |
1547 | 0 | goto fail; |
1548 | 1.30k | if (pinst->templat.uses_transparency) { |
1549 | 886 | if_debug1m('v', mem, "gx_pattern_load: pushing the pdf14 compositor device into this graphics state pat_id = %ld\n", pinst->id); |
1550 | 886 | if ((code = gs_push_pdf14trans_device(saved, true, false, 0, 0)) < 0) /* spot_color_count taken from pdf14 target values */ |
1551 | 0 | goto fail; |
1552 | 886 | saved->device->is_open = true; |
1553 | 886 | } else { |
1554 | | /* For colored patterns we clear the pattern device's
1555 | | background. This is necessary for the anti-aliasing code
1556 | | and (unfortunately) it masks a difficult-to-fix UMR
1557 | | affecting PCL patterns; see bug #690487. Note that we have to
1558 | | make a similar change in zpcolor.c, where much of this
1559 | | pattern code is duplicated to support high-level stream
1560 | | patterns. */
1561 | 421 | if (pinst->templat.PaintType == 1 && !(pinst->is_clist) |
1562 | 421 | && dev_proc(pinst->saved->device, dev_spec_op)(pinst->saved->device, gxdso_pattern_can_accum, NULL, 0) == 0) |
1563 | 406 | if ((code = gx_erase_colored_pattern(saved)) < 0) |
1564 | 0 | goto fail; |
1565 | 421 | } |
1566 | | |
1567 | 1.30k | code = (*pinst->templat.PaintProc)(&pdc->ccolor, saved); |
1568 | 1.30k | if (code < 0) { |
1569 | 0 | if (dev_proc(adev, open_device) == pattern_accum_open) { |
1570 | | /* free pattern cache data that never got added to the dictionary */ |
1571 | 0 | gx_device_pattern_accum *padev = (gx_device_pattern_accum *) adev; |
1572 | 0 | if ((padev->bits != NULL) && (padev->bits->base != NULL)) { |
1573 | 0 | gs_free_object(padev->bits->memory, padev->bits->base, "mem_open"); |
1574 | 0 | } |
1575 | 0 | } |
1576 | | /* RJW: At this point, in the non transparency case, |
1577 | | * saved->device == adev. So unretain it, close it, and the |
1578 | | * gs_gstate_free(saved) will remove it. In the transparency case, |
1579 | | * saved->device = the pdf14 device. So we need to unretain it, |
1580 | | * close adev, and finally close saved->device. |
1581 | | */ |
1582 | 0 | gx_device_retain(saved->device, false); /* device no longer retained */ |
1583 | 0 | if (pinst->templat.uses_transparency) { |
1584 | 0 | if (pinst->is_clist == 0) { |
1585 | 0 | gs_free_object(((gx_device_pattern_accum *)adev)->bitmap_memory, |
1586 | 0 | ((gx_device_pattern_accum *)adev)->transbuff, |
1587 | 0 | "gx_pattern_load"); |
1588 | 0 | ((gx_device_pattern_accum *)adev)->transbuff = NULL; |
1589 | 0 | } |
1590 | 0 | dev_proc(adev, close_device)((gx_device *)adev); |
1591 | | /* adev was the target of the pdf14 device, so it is also no longer retained */
1592 | 0 | gx_device_retain((gx_device *)adev, false); /* device no longer retained */ |
1593 | 0 | } |
1594 | 0 | dev_proc(saved->device, close_device)((gx_device *)saved->device); |
1595 | | /* Freeing the state should now free the device which may be the pdf14 compositor. */ |
1596 | 0 | gs_gstate_free_chain(saved); |
1597 | 0 | if (code == gs_error_handled) |
1598 | 0 | code = 0; |
1599 | 0 | return code; |
1600 | 0 | } |
1601 | 1.30k | if (pinst->templat.uses_transparency) { |
1602 | | /* if_debug0m('v', saved->memory, "gx_pattern_load: popping the pdf14 compositor device from this graphics state\n"); |
1603 | | if ((code = gs_pop_pdf14trans_device(saved, true)) < 0) |
1604 | | return code; */ |
1605 | 886 | if (pinst->is_clist) { |
1606 | | /* Send the compositor command to close the PDF14 device */ |
1607 | 21 | code = gs_pop_pdf14trans_device(saved, true); |
1608 | 21 | if (code < 0) |
1609 | 0 | goto fail; |
1610 | 865 | } else { |
1611 | | /* Not a clist, get PDF14 buffer information */ |
1612 | 865 | code = |
1613 | 865 | pdf14_get_buffer_information(saved->device, |
1614 | 865 | ((gx_device_pattern_accum*)adev)->transbuff, |
1615 | 865 | saved->memory, |
1616 | 865 | true); |
1617 | | /* PDF14 device (and buffer) is destroyed when pattern cache |
1618 | | entry is removed */ |
1619 | 865 | if (code < 0) |
1620 | 0 | goto fail; |
1621 | 865 | } |
1622 | 886 | } |
1623 | | /* We REALLY don't like the following cast.... */ |
1624 | 1.30k | code = gx_pattern_cache_add_entry((gs_gstate *)pgs, |
1625 | 1.30k | adev, &ctile); |
1626 | 1.30k | if (code >= 0) { |
1627 | 1.30k | if (!gx_pattern_cache_lookup(pdc, pgs, dev, select)) { |
1628 | 0 | mlprintf(mem, "Pattern cache lookup failed after insertion!\n"); |
1629 | 0 | code = gs_note_error(gs_error_Fatal); |
1630 | 0 | } |
1631 | 1.30k | } |
1632 | | #ifdef DEBUG |
1633 | | if (gs_debug_c('B') && dev_proc(adev, open_device) == pattern_accum_open) { |
1634 | | gx_device_pattern_accum *pdev = (gx_device_pattern_accum *)adev; |
1635 | | |
1636 | | if (pdev->mask) |
1637 | | debug_dump_bitmap(pdev->memory, |
1638 | | pdev->mask->base, pdev->mask->raster, |
1639 | | pdev->mask->height, "[B]Pattern mask"); |
1640 | | if (pdev->bits) |
1641 | | debug_dump_bitmap(pdev->memory, |
1642 | | ((gx_device_memory *) pdev->target)->base, |
1643 | | ((gx_device_memory *) pdev->target)->raster, |
1644 | | pdev->target->height, "[B]Pattern bits"); |
1645 | | } |
1646 | | #endif |
1647 | | /* Free the bookkeeping structures, but keep the bits and mask
1648 | | data if they are still needed. */
1649 | 1.30k | dev_proc(adev, close_device)((gx_device *)adev); |
1650 | | /* Free the chain of gstates. Freeing the state will free the device. */ |
1651 | 1.30k | gs_gstate_free_chain(saved); |
1652 | 1.30k | return code; |
1653 | | |
1654 | 0 | fail: |
1655 | 0 | if (dev_proc(adev, open_device) == pattern_accum_open) { |
1656 | | /* free pattern cache data that never got added to the dictionary */ |
1657 | 0 | gx_device_pattern_accum *padev = (gx_device_pattern_accum *) adev; |
1658 | 0 | if ((padev->bits != NULL) && (padev->bits->base != NULL)) { |
1659 | 0 | gs_free_object(padev->bits->memory, padev->bits->base, "mem_open"); |
1660 | 0 | } |
1661 | 0 | } |
1662 | 0 | if (dev_proc(adev, open_device) == pattern_clist_open_device) { |
1663 | 0 | gx_device_clist *cdev = (gx_device_clist *)adev; |
1664 | |
1665 | 0 | gs_free_object(cdev->writer.bandlist_memory, cdev->common.data, "gx_pattern_load"); |
1666 | 0 | cdev->common.data = 0; |
1667 | 0 | } |
1668 | 0 | dev_proc(adev, close_device)((gx_device *)adev); |
1669 | 0 | gx_device_set_target(adev, NULL); |
1670 | 0 | gx_device_retain((gx_device *)adev, false); |
1671 | 0 | gs_gstate_free_chain(saved); |
1672 | 0 | return code; |
1673 | 1.30k | } |
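
/* Editor's sketch (hypothetical wrapper, not part of gxpcmap.c): a trivial
 * delegate whose comments summarize the cache lifecycle that gx_pattern_load()
 * implements above, with the transparency and error-cleanup detail omitted. */
static int
pattern_load_outline(gx_device_color *pdc, const gs_gstate *pgs,
                     gx_device *dev, gs_color_select_t select)
{
    /* gx_pattern_load():
       1. returns at once if a cached tile already matches pdc,
       2. makes room in the cache for the estimated tile size,
       3. allocates an accumulator (bitmap or clist) targeting dev,
       4. copies the instance's saved gstate and runs PaintProc into it,
       5. moves the accumulated bits or clist into a cache entry, and
       6. looks the entry up again so pdc picks up p_tile / m_tile. */
    return gx_pattern_load(pdc, pgs, dev, select);
}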
1674 | | |
1675 | | /* Remap a PatternType 1 color. */ |
1676 | | cs_proc_remap_color(gx_remap_Pattern); /* check the prototype */ |
1677 | | int |
1678 | | gs_pattern1_remap_color(const gs_client_color * pc, const gs_color_space * pcs, |
1679 | | gx_device_color * pdc, const gs_gstate * pgs, |
1680 | | gx_device * dev, gs_color_select_t select) |
1681 | 1.35k | { |
1682 | 1.35k | gs_pattern1_instance_t *pinst = (gs_pattern1_instance_t *)pc->pattern; |
1683 | 1.35k | int code; |
1684 | | |
1685 | | /* Save original color space and color info into dev color */ |
1686 | 1.35k | pdc->ccolor = *pc; |
1687 | 1.35k | pdc->ccolor_valid = true; |
1688 | 1.35k | if (pinst == 0) { |
1689 | | /* Null pattern */ |
1690 | 0 | color_set_null_pattern(pdc); |
1691 | 0 | return 0; |
1692 | 0 | } |
1693 | 1.35k | if (pinst->templat.PaintType == 2) { /* uncolored */ |
1694 | 27 | if (pcs->base_space) { |
1695 | 17 | if (dev->icc_struct != NULL && dev->icc_struct->blackvector) { |
1696 | 0 | gs_client_color temppc; |
1697 | 0 | gs_color_space *graycs = gs_cspace_new_DeviceGray(pgs->memory); |
1698 | |
1699 | 0 | if (graycs == NULL) { |
1700 | 0 | code = (pcs->base_space->type->remap_color) |
1701 | 0 | (pc, pcs->base_space, pdc, pgs, dev, select); |
1702 | 0 | } else { |
1703 | 0 | if (gsicc_is_white_blacktextvec((gs_gstate*) pgs, |
1704 | 0 | dev, (gs_color_space*) pcs, (gs_client_color*) pc)) |
1705 | 0 | temppc.paint.values[0] = 1.0; |
1706 | 0 | else |
1707 | 0 | temppc.paint.values[0] = 0.0; |
1708 | 0 | code = (graycs->type->remap_color) |
1709 | 0 | (&temppc, graycs, pdc, pgs, dev, select); |
1710 | 0 | rc_decrement_cs(graycs, "gs_pattern1_remap_color"); |
1711 | 0 | } |
1712 | 17 | } else { |
1713 | 17 | code = (pcs->base_space->type->remap_color) |
1714 | 17 | (pc, pcs->base_space, pdc, pgs, dev, select); |
1715 | 17 | } |
1716 | 17 | } else |
1717 | 10 | code = gs_note_error(gs_error_unregistered); |
1718 | 27 | if (code < 0) |
1719 | 10 | return code; |
1720 | 17 | if (pdc->type == gx_dc_type_pure) |
1721 | 17 | pdc->type = &gx_dc_pure_masked; |
1722 | 0 | else if (pdc->type == gx_dc_type_ht_binary) |
1723 | 0 | pdc->type = &gx_dc_binary_masked; |
1724 | 0 | else if (pdc->type == gx_dc_type_ht_colored) |
1725 | 0 | pdc->type = &gx_dc_colored_masked; |
1726 | 0 | else if (pdc->type == gx_dc_type_devn) |
1727 | 0 | pdc->type = &gx_dc_devn_masked; |
1728 | 0 | else |
1729 | 0 | return_error(gs_error_unregistered); |
1730 | 17 | } else |
1731 | 1.32k | color_set_null_pattern(pdc); |
1732 | 1.34k | pdc->mask.id = pinst->id; |
1733 | 1.34k | pdc->mask.m_tile = 0; |
1734 | 1.34k | return gx_pattern_load(pdc, pgs, dev, select); |
1735 | 1.35k | } |
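
/* Editor's sketch (hypothetical helper, not part of gxpcmap.c): the type
 * substitution performed above for uncolored (PaintType 2) patterns. Once the
 * base color has been remapped, the device color type is swapped for its
 * masked counterpart so that fills are clipped by the pattern's mask tile. */
static int
mask_device_color_type(gx_device_color *pdc)
{
    if (pdc->type == gx_dc_type_pure)
        pdc->type = &gx_dc_pure_masked;
    else if (pdc->type == gx_dc_type_ht_binary)
        pdc->type = &gx_dc_binary_masked;
    else if (pdc->type == gx_dc_type_ht_colored)
        pdc->type = &gx_dc_colored_masked;
    else if (pdc->type == gx_dc_type_devn)
        pdc->type = &gx_dc_devn_masked;
    else
        return_error(gs_error_unregistered);
    return 0;
}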
1736 | | |
1737 | | int |
1738 | | pattern_accum_dev_spec_op(gx_device *dev, int dso, void *data, int size) |
1739 | 16.3k | { |
1740 | 16.3k | gx_device_pattern_accum *const padev = (gx_device_pattern_accum *)dev; |
1741 | 16.3k | const gs_pattern1_instance_t *pinst = padev->instance; |
1742 | 16.3k | gx_device *target = |
1743 | 16.3k | (padev->target == 0 ? gs_currentdevice(pinst->saved) : |
1744 | 16.3k | padev->target); |
1745 | | |
1746 | 16.3k | if (dso == gxdso_in_pattern_accumulator) |
1747 | 51 | return (pinst->templat.PaintType == 2 ? 2 : 1); |
1748 | 16.2k | if (dso == gxdso_get_dev_param) { |
1749 | 0 | dev_param_req_t *request = (dev_param_req_t *)data; |
1750 | 0 | gs_param_list * plist = (gs_param_list *)request->list; |
1751 | 0 | bool bool_true = 1; |
1752 | |
1753 | 0 | if (strcmp(request->Param, "NoInterpolateImagemasks") == 0) { |
1754 | 0 | return param_write_bool(plist, "NoInterpolateImagemasks", &bool_true); |
1755 | 0 | } |
1756 | 0 | } |
1757 | | /* Bug 704670. The pattern accumulator should not allow whatever targets
1758 | | lie beneath it to make any bbox adjustments. If we are here, the
1759 | | pattern accumulator is actually drawing into a buffer and is not
1760 | | accumulating into a clist device. If it were a pattern clist, the
1761 | | special op would go to the clist device of the pattern, which has
1762 | | the proper extent and adjusts the bbox. Here we just need to clip
1763 | | to the buffer into which we are drawing. */
1764 | 16.2k | if (dso == gxdso_restrict_bbox) { |
1765 | 0 | gs_int_rect* ibox = (gs_int_rect*)data; |
1766 | |
1767 | 0 | if (ibox->p.y < 0) |
1768 | 0 | ibox->p.y = 0; |
1769 | 0 | if (ibox->q.y > padev->height) |
1770 | 0 | ibox->q.y = padev->height; |
1771 | 0 | if (ibox->p.x < 0) |
1772 | 0 | ibox->p.x = 0; |
1773 | 0 | if (ibox->q.x > padev->width) |
1774 | 0 | ibox->q.x = padev->width; |
1775 | 0 | return 0; |
1776 | 0 | } |
1777 | | |
1778 | 16.2k | return dev_proc(target, dev_spec_op)(target, dso, data, size); |
1779 | 16.2k | } |
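
/* Editor's sketch (hypothetical caller, not part of gxpcmap.c): drawing code
 * can ask the current device whether it is a pattern accumulator. Per the
 * handler above, an accumulator answers 2 for an uncolored (PaintType 2)
 * pattern and 1 for a colored one; devices that do not recognize the query
 * typically answer 0. */
static int
in_pattern_accumulator(gx_device *dev)
{
    return dev_proc(dev, dev_spec_op)(dev, gxdso_in_pattern_accumulator, NULL, 0);
}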