/src/ghostpdl/base/gxclist.c
Line | Count | Source |
1 | | /* Copyright (C) 2001-2024 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
13 | | CA 94129, USA, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* Command list document- and page-level code. */ |
18 | | #include "memory_.h" |
19 | | #include "string_.h" |
20 | | #include "gx.h" |
21 | | #include "gp.h" |
22 | | #include "gpcheck.h" |
23 | | #include "gserrors.h" |
24 | | #include "gxdevice.h" |
25 | | #include "gxdevmem.h" /* must precede gxcldev.h */ |
26 | | #include "gxcldev.h" |
27 | | #include "gxclpath.h" |
28 | | #include "gsparams.h" |
29 | | #include "gxdcolor.h" |
30 | | #include "gscms.h" |
31 | | #include "gsicc_manage.h" |
32 | | #include "gsicc_cache.h" |
33 | | #include "gxdevsop.h" |
34 | | #include "gxobj.h" |
35 | | |
36 | | #include "valgrind.h" |
37 | | |
38 | | extern dev_proc_open_device(pattern_clist_open_device); |
39 | | |
40 | | /* GC information */ |
41 | | /* Where is the GC information for the common objects that are |
42 | | shared between the reader and writer? There are pointers in |
43 | | there, but they do not seem to be garbage collected. This is why the |
44 | | icc_table and the link cache are kept in the reader and the |
45 | | writer rather than in the common part. fixme: Also, if icc_cache_cl is not |
46 | | included in the writer, 64-bit builds will seg fault. */ |
47 | | |
48 | | extern_st(st_gs_gstate); |
49 | | static |
50 | 8.89M | ENUM_PTRS_WITH(device_clist_enum_ptrs, gx_device_clist *cdev) |
51 | 8.89M | if (index < st_device_forward_max_ptrs) { |
52 | 544k | gs_ptr_type_t ret = ENUM_USING_PREFIX(st_device_forward, st_device_max_ptrs); |
53 | | |
54 | 544k | return (ret ? ret : ENUM_OBJ(0)); |
55 | 544k | } |
56 | 8.34M | index -= st_device_forward_max_ptrs; |
57 | | /* RJW: We do not enumerate icc_cache_cl or icc_cache_list as they |
58 | | * are allocated in non gc space */ |
59 | 8.34M | if (CLIST_IS_WRITER(cdev)) { |
60 | 8.34M | switch (index) { |
61 | 181k | case 0: return ENUM_OBJ((cdev->writer.image_enum_id != gs_no_id ? |
62 | 0 | cdev->writer.clip_path : 0)); |
63 | 181k | case 1: return ENUM_OBJ((cdev->writer.image_enum_id != gs_no_id ? |
64 | 0 | cdev->writer.color_space.space : 0)); |
65 | 181k | case 2: return ENUM_OBJ(cdev->writer.pinst); |
66 | 181k | case 3: return ENUM_OBJ(cdev->writer.cropping_stack); |
67 | 181k | case 4: return ENUM_OBJ(cdev->writer.icc_table); |
68 | 7.44M | default: |
69 | 7.44M | return ENUM_USING(st_gs_gstate, &cdev->writer.gs_gstate, |
70 | 8.34M | sizeof(gs_gstate), index - 5); |
71 | 8.34M | } |
72 | 8.34M | } |
73 | 0 | else { |
74 | | /* 041207 |
75 | | * clist is reader. |
76 | | * We don't expect this code to be exercised at this time as the reader |
77 | | * runs under gdev_prn_output_page which is an atomic function of the |
78 | | * interpreter. We keep this code because that situation may change in the future. |
79 | | */ |
80 | |
81 | 0 | if (index == 0) |
82 | 0 | return ENUM_OBJ(cdev->reader.offset_map); |
83 | 0 | else if (index == 1) |
84 | 0 | return ENUM_OBJ(cdev->reader.icc_table); |
85 | 0 | else if (index == 2) |
86 | 0 | return ENUM_OBJ(cdev->reader.color_usage_array); |
87 | 0 | else |
88 | 0 | return 0; |
89 | 0 | } |
90 | 8.89M | ENUM_PTRS_END |
91 | | static |
92 | 181k | RELOC_PTRS_WITH(device_clist_reloc_ptrs, gx_device_clist *cdev) |
93 | 181k | { |
94 | 181k | RELOC_PREFIX(st_device_forward); |
95 | 181k | if (CLIST_IS_WRITER(cdev)) { |
96 | 181k | if (cdev->writer.image_enum_id != gs_no_id) { |
97 | 15 | RELOC_VAR(cdev->writer.clip_path); |
98 | 15 | RELOC_VAR(cdev->writer.color_space.space); |
99 | 15 | } |
100 | 181k | RELOC_VAR(cdev->writer.pinst); |
101 | 181k | RELOC_VAR(cdev->writer.cropping_stack); |
102 | 181k | RELOC_VAR(cdev->writer.icc_table); |
103 | 181k | RELOC_USING(st_gs_gstate, &cdev->writer.gs_gstate, |
104 | 181k | sizeof(gs_gstate)); |
105 | 181k | } else { |
106 | | /* 041207 |
107 | | * clist is reader. |
108 | | * See note above in ENUM_PTRS_WITH section. |
109 | | */ |
110 | 0 | RELOC_VAR(cdev->reader.offset_map); |
111 | 0 | RELOC_VAR(cdev->reader.icc_table); |
112 | 0 | RELOC_VAR(cdev->reader.color_usage_array); |
113 | 0 | } |
114 | 181k | } RELOC_PTRS_END |
115 | | public_st_device_clist(); |
116 | | private_st_clist_writer_cropping_buffer(); |
117 | | private_st_clist_icctable_entry(); |
118 | | private_st_clist_icctable(); |
119 | | |
120 | | /* Forward declarations of driver procedures */ |
121 | | dev_proc_open_device(clist_open); |
122 | | dev_proc_output_page(clist_output_page); |
123 | | dev_proc_close_device(clist_close); |
124 | | /* Driver procedures defined in other files are declared in gxcldev.h. */ |
125 | | |
126 | | /* Other forward declarations */ |
127 | | static int clist_put_current_params(gx_device_clist_writer *cldev); |
128 | | |
129 | | void |
130 | | clist_initialize_device_procs(gx_device *dev) |
131 | 349k | { |
132 | 349k | set_dev_proc(dev, open_device, clist_open); |
133 | 349k | set_dev_proc(dev, get_initial_matrix, gx_forward_get_initial_matrix); |
134 | 349k | set_dev_proc(dev, sync_output, gx_default_sync_output); |
135 | 349k | set_dev_proc(dev, output_page, clist_output_page); |
136 | 349k | set_dev_proc(dev, close_device, clist_close); |
137 | 349k | set_dev_proc(dev, map_rgb_color, gx_forward_map_rgb_color); |
138 | 349k | set_dev_proc(dev, map_color_rgb, gx_forward_map_color_rgb); |
139 | 349k | set_dev_proc(dev, fill_rectangle, clist_fill_rectangle); |
140 | 349k | set_dev_proc(dev, copy_mono, clist_copy_mono); |
141 | 349k | set_dev_proc(dev, copy_color, clist_copy_color); |
142 | 349k | set_dev_proc(dev, get_params, gx_forward_get_params); |
143 | 349k | set_dev_proc(dev, put_params, gx_forward_put_params); |
144 | 349k | set_dev_proc(dev, map_cmyk_color, gx_forward_map_cmyk_color); |
145 | 349k | set_dev_proc(dev, get_page_device, gx_forward_get_page_device); |
146 | 349k | set_dev_proc(dev, get_alpha_bits, gx_forward_get_alpha_bits); |
147 | 349k | set_dev_proc(dev, copy_alpha, clist_copy_alpha); |
148 | 349k | set_dev_proc(dev, fill_path, clist_fill_path); |
149 | 349k | set_dev_proc(dev, stroke_path, clist_stroke_path); |
150 | 349k | set_dev_proc(dev, fill_mask, clist_fill_mask); |
151 | 349k | set_dev_proc(dev, fill_trapezoid, clist_fill_trapezoid); |
152 | 349k | set_dev_proc(dev, fill_parallelogram, clist_fill_parallelogram); |
153 | 349k | set_dev_proc(dev, fill_triangle, clist_fill_triangle); |
154 | 349k | set_dev_proc(dev, strip_tile_rectangle, clist_strip_tile_rectangle); |
155 | 349k | set_dev_proc(dev, get_clipping_box, gx_forward_get_clipping_box); |
156 | 349k | set_dev_proc(dev, begin_typed_image, clist_begin_typed_image); |
157 | 349k | set_dev_proc(dev, get_bits_rectangle, clist_get_bits_rectangle); |
158 | 349k | set_dev_proc(dev, composite, clist_composite); |
159 | 349k | set_dev_proc(dev, get_hardware_params, gx_forward_get_hardware_params); |
160 | 349k | set_dev_proc(dev, get_color_mapping_procs, gx_forward_get_color_mapping_procs); |
161 | 349k | set_dev_proc(dev, get_color_comp_index, gx_forward_get_color_comp_index); |
162 | 349k | set_dev_proc(dev, encode_color, gx_forward_encode_color); |
163 | 349k | set_dev_proc(dev, decode_color, gx_forward_decode_color); |
164 | 349k | set_dev_proc(dev, fill_rectangle_hl_color, clist_fill_rectangle_hl_color); |
165 | 349k | set_dev_proc(dev, fill_linear_color_trapezoid, clist_fill_linear_color_trapezoid); |
166 | 349k | set_dev_proc(dev, fill_linear_color_triangle, clist_fill_linear_color_triangle); |
167 | 349k | set_dev_proc(dev, update_spot_equivalent_colors, gx_forward_update_spot_equivalent_colors); |
168 | 349k | set_dev_proc(dev, ret_devn_params, gx_forward_ret_devn_params); |
169 | 349k | set_dev_proc(dev, fillpage, clist_fillpage); |
170 | 349k | set_dev_proc(dev, dev_spec_op, clist_dev_spec_op); |
171 | 349k | set_dev_proc(dev, copy_planes, clist_copy_planes); |
172 | 349k | set_dev_proc(dev, strip_copy_rop2, clist_strip_copy_rop2); |
173 | 349k | set_dev_proc(dev, strip_tile_rect_devn, clist_strip_tile_rect_devn); |
174 | 349k | set_dev_proc(dev, copy_alpha_hl_color, clist_copy_alpha_hl_color); |
175 | 349k | set_dev_proc(dev, process_page, clist_process_page); |
176 | 349k | set_dev_proc(dev, fill_stroke_path, clist_fill_stroke_path); |
177 | 349k | set_dev_proc(dev, lock_pattern, clist_lock_pattern); |
178 | 349k | } |
179 | | |
180 | | /*------------------- Choose the implementation ----------------------- |
181 | | |
182 | | For choosing the clist i/o implementation by makefile options we |
183 | | define global variables (in gs_lib_ctx_core_t), which are |
184 | | initialized with file/memory io procs when they are included into |
185 | | the build. |
186 | | */ |
187 | | void |
188 | | clist_init_io_procs(gx_device_clist *pclist_dev, bool in_memory) |
189 | 399k | { |
190 | 399k | gs_lib_ctx_core_t *core = pclist_dev->common.memory->gs_lib_ctx->core; |
191 | 399k | #ifdef PACIFY_VALGRIND |
192 | 399k | VALGRIND_HG_DISABLE_CHECKING(&core->clist_io_procs_file, sizeof(core->clist_io_procs_file)); |
193 | 399k | VALGRIND_HG_DISABLE_CHECKING(&core->clist_io_procs_memory, sizeof(core->clist_io_procs_memory)); |
194 | 399k | #endif |
195 | | /* if core->clist_io_procs_file is NULL, then BAND_LIST_STORAGE=memory */ |
196 | | /* was specified in the build, and "file" is not available */ |
197 | 399k | if (in_memory || core->clist_io_procs_file == NULL) |
198 | 387k | pclist_dev->common.page_info.io_procs = core->clist_io_procs_memory; |
199 | 11.9k | else |
200 | 11.9k | pclist_dev->common.page_info.io_procs = core->clist_io_procs_file; |
201 | 399k | } |
202 | | |
203 | | /* ------ Define the command set and syntax ------ */ |
204 | | |
205 | | /* |
206 | | * The buffer area (data, data_size) holds a bitmap cache when both writing |
207 | | * and reading. The rest of the space is used for the command buffer and |
208 | | * band state bookkeeping when writing, and for the rendering buffer (image |
209 | | * device) when reading. For the moment, we divide the space up |
210 | | * arbitrarily, except that we allocate less space for the bitmap cache if |
211 | | * the device doesn't need halftoning. |
212 | | * |
213 | | * All the routines for allocating tables in the buffer are idempotent, so |
214 | | * they can be used to check whether a given-size buffer is large enough. |
215 | | */ |
216 | | |
217 | | /* |
218 | | * Calculate the desired size for the tile cache. |
219 | | */ |
220 | | static size_t |
221 | | clist_tile_cache_size(const gx_device * target, size_t data_size) |
222 | 1.17M | { |
223 | 1.17M | size_t bits_size = |
224 | 1.17M | (data_size / 5) & ~(align_cached_bits_mod-1); /* arbitrary */ |
225 | | |
226 | 1.17M | if (!gx_device_must_halftone(target)) { /* No halftones -- cache holds only Patterns & characters. */ |
227 | 444k | bits_size -= bits_size >> 2; |
228 | 444k | } |
229 | 1.17M | #define min_bits_size 1024 |
230 | 1.17M | if (bits_size < min_bits_size) |
231 | 0 | bits_size = min_bits_size; |
232 | 1.17M | #undef min_bits_size |
233 | 1.17M | return bits_size; |
234 | 1.17M | } |
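/* Worked example (illustrative only, assuming a 4 MB band buffer): the
 * starting allocation is data_size / 5, about 820 KB, rounded down to the
 * cached-bits alignment; a device that never halftones keeps only three
 * quarters of that, about 615 KB; and the result is never allowed to fall
 * below the 1 KB floor. */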
235 | | |
236 | | /* |
237 | | * Initialize the allocation for the tile cache. Sets: tile_hash_mask, |
238 | | * tile_max_count, tile_table, chunk (structure), bits (structure). |
239 | | */ |
240 | | static int |
241 | | clist_init_tile_cache(gx_device * dev, byte * init_data, size_t data_size) |
242 | 1.43M | { |
243 | 1.43M | gx_device_clist_writer * const cdev = |
244 | 1.43M | &((gx_device_clist *)dev)->writer; |
245 | 1.43M | byte *data = init_data; |
246 | 1.43M | size_t bits_size = data_size; |
247 | | /* |
248 | | * Partition the bits area between the hash table and the actual |
249 | | * bitmaps. The per-bitmap overhead is about 24 bytes; if the |
250 | | * average character size is 10 points, its bitmap takes about 24 + |
251 | | * 0.5 * 10/72 * xdpi * 10/72 * ydpi / 8 bytes (the 0.5 being a |
252 | | * fudge factor to account for characters being narrower than they |
253 | | * are tall), which gives us a guideline for the size of the hash |
254 | | * table. |
255 | | */ |
256 | 1.43M | size_t avg_char_size = |
257 | 1.43M | (size_t)(dev->HWResolution[0] * dev->HWResolution[1] * |
258 | 1.43M | (0.5 * 10 / 72 * 10 / 72 / 8)) + 24; |
259 | 1.43M | size_t hc = bits_size / avg_char_size; |
260 | 1.43M | size_t hsize; |
261 | | |
262 | 10.5M | while ((hc + 1) & hc) |
263 | 9.07M | hc |= hc >> 1; /* make mask (power of 2 - 1) */ |
264 | 1.43M | if (hc < 0xff) |
265 | 0 | hc = 0xff; /* make allowance for halftone tiles */ |
266 | 1.43M | else if (hc > 0xfff) |
267 | 102 | hc = 0xfff; /* cmd_op_set_tile_index has 12-bit operand */ |
268 | | /* Make sure the tables will fit. */ |
269 | 1.43M | while (hc >= 3 && (hsize = (hc + 1) * sizeof(tile_hash)) >= bits_size) |
270 | 0 | hc >>= 1; |
271 | 1.43M | if (hc < 3) |
272 | 0 | return_error(gs_error_rangecheck); |
273 | 1.43M | cdev->tile_hash_mask = hc; |
274 | 1.43M | cdev->tile_max_count = hc - (hc >> 2); |
275 | 1.43M | cdev->tile_table = (tile_hash *) data; |
276 | 1.43M | data += hsize; |
277 | 1.43M | bits_size -= hsize; |
278 | 1.43M | gx_bits_cache_chunk_init(cdev->cache_chunk, data, bits_size); |
279 | 1.43M | gx_bits_cache_init(&cdev->bits, cdev->cache_chunk); |
280 | 1.43M | return 0; |
281 | 1.43M | } |
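/* Illustrative sketch (not part of the driver): the loop above grows the
 * bucket estimate hc into a (power of two - 1) hash mask. A minimal
 * standalone equivalent, with a worked example in the comment: */
#if 0   /* example only */
static size_t
make_hash_mask(size_t hc)
{
    while ((hc + 1) & hc)   /* not yet of the form 2^n - 1 */
        hc |= hc >> 1;      /* propagate the top set bit downwards */
    return hc;              /* make_hash_mask(1000) == 1023 (0x3ff) */
}
#endif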
282 | | |
283 | | /* |
284 | | * Initialize the allocation for the bands. Requires: target. Sets: |
285 | | * page_info.band_params.BandHeight, nbands. |
286 | | */ |
287 | | static int |
288 | | clist_init_bands(gx_device * dev, gx_device_memory *bdev, size_t data_size, |
289 | | int band_width, int band_height) |
290 | 1.43M | { |
291 | 1.43M | gx_device_clist_writer * const cdev = |
292 | 1.43M | &((gx_device_clist *)dev)->writer; |
293 | 1.43M | int nbands; |
294 | 1.43M | size_t space; |
295 | | |
296 | 1.43M | if (dev_proc(dev, open_device) == pattern_clist_open_device) { |
297 | | /* We don't need bands really. */ |
298 | 257k | cdev->page_info.band_params.BandHeight = dev->height; |
299 | 257k | cdev->nbands = 1; |
300 | 257k | return 0; |
301 | 257k | } |
302 | 1.17M | if (gdev_mem_data_size(bdev, band_width, band_height, &space) < 0 || |
303 | 1.17M | space > data_size) |
304 | 0 | return_error(gs_error_rangecheck); |
305 | 1.17M | cdev->page_info.band_params.BandHeight = band_height; |
306 | 1.17M | nbands = (cdev->target->height + band_height - 1) / band_height; |
307 | 1.17M | cdev->nbands = nbands; |
308 | | #ifdef DEBUG |
309 | | if (gs_debug_c('l') | gs_debug_c(':')) |
310 | | dmlprintf4(dev->memory, "[:]width=%d, band_width=%d, band_height=%d, nbands=%d\n", |
311 | | bdev->width, band_width, band_height, nbands); |
312 | | #endif |
313 | 1.17M | return 0; |
314 | 1.17M | } |
315 | | |
316 | | /* Minimum BufferSpace needed when writing the clist */ |
317 | | /* This is an exported function because it is used to set up render threads */ |
318 | | size_t |
319 | 1.43M | clist_minimum_buffer(int nbands) { |
320 | | |
321 | | /* Leave enough room after states for commands that write a reasonable |
322 | | * amount of data. The cmd_largest_size and the data_bits_size should be |
323 | | * enough to buffer command operands. The data_bits_size is the level |
324 | | * at which commands should expect to split data across buffers. If this |
325 | | * extra space is a little large, it doesn't really hurt. |
326 | | */ |
327 | 1.43M | return (nbands * (ulong) sizeof(gx_clist_state) + |
328 | 1.43M | sizeof(cmd_prefix) + |
329 | 1.43M | cmd_largest_size + |
330 | 1.43M | data_bits_size); |
331 | 1.43M | } |
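/* Hedged usage sketch (hypothetical caller, not code from this file): a
 * render-thread setup would reject a buffer smaller than this minimum,
 * mirroring the check clist_init_states() performs below:
 *
 *     size_t min_buf = clist_minimum_buffer(cdev->nbands);
 *     if (buffer_space < min_buf)
 *         return_error(gs_error_rangecheck);
 */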
332 | | |
333 | | /* |
334 | | * Initialize the allocation for the band states, which are used only |
335 | | * when writing. Requires: nbands. Sets: states, cbuf, cend, band_range_list. |
336 | | */ |
337 | | static int |
338 | | clist_init_states(gx_device * dev, byte * init_data, size_t data_size) |
339 | 1.43M | { |
340 | 1.43M | gx_device_clist_writer * const cdev = |
341 | 1.43M | &((gx_device_clist *)dev)->writer; |
342 | 1.43M | size_t state_size = cdev->nbands * sizeof(gx_clist_state); |
343 | | /* Align to the natural boundary for ARM processors, bug 689600 */ |
344 | 1.43M | intptr_t alignment = (-(intptr_t)init_data) & (sizeof(init_data) - 1); |
345 | | |
346 | 1.43M | if (clist_minimum_buffer(cdev->nbands) > data_size) |
347 | 49.9k | return_error(gs_error_rangecheck); |
348 | | /* The end buffer position is not affected by alignment */ |
349 | 1.38M | cdev->cend = init_data + data_size; |
350 | 1.38M | init_data += alignment; |
351 | 1.38M | cdev->states = (gx_clist_state *) init_data; |
352 | 1.38M | cdev->band_range_list = (cmd_list *)(init_data + state_size); |
353 | 1.38M | cdev->cbuf = init_data + state_size + sizeof(cmd_list); |
354 | 1.38M | return 0; |
355 | 1.43M | } |
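/* Worked example (illustrative only): the alignment above rounds init_data
 * up to the next multiple of sizeof(init_data), i.e. the pointer size.
 * With 8-byte pointers and init_data ending in ...0x1003:
 *
 *     (-(intptr_t)0x1003) & (8 - 1) == 5
 *
 * so 5 bytes of padding move the states array to ...0x1008, a naturally
 * aligned address (see bug 689600 above). */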
356 | | |
357 | | /* |
358 | | * Initialize all the data allocations. Requires: target. Sets: |
359 | | * page_info.tile_cache_size, page_info.band_params.BandWidth, |
360 | | * page_info.band_params.BandBufferSpace, + see above. |
361 | | */ |
362 | | static int |
363 | | clist_init_data(gx_device * dev, byte * init_data, size_t data_size) |
364 | 1.43M | { |
365 | 1.43M | gx_device_clist_writer * const cdev = |
366 | 1.43M | &((gx_device_clist *)dev)->writer; |
367 | 1.43M | gx_device *target = cdev->target; |
368 | | /* BandWidth can't be smaller than target device width */ |
369 | 1.43M | const int band_width = |
370 | 1.43M | cdev->page_info.band_params.BandWidth = max(target->width, cdev->band_params.BandWidth); |
371 | 1.43M | int band_height = cdev->band_params.BandHeight; |
372 | 1.43M | bool page_uses_transparency = cdev->page_uses_transparency; |
373 | 1.43M | const size_t band_space = |
374 | 1.43M | cdev->page_info.band_params.BandBufferSpace = |
375 | 1.43M | (cdev->band_params.BandBufferSpace ? |
376 | 1.43M | cdev->band_params.BandBufferSpace : data_size); |
377 | 1.43M | byte *data = init_data; |
378 | 1.43M | size_t size = band_space; |
379 | 1.43M | size_t bits_size; |
380 | 1.43M | gx_device_memory bdev; |
381 | 1.43M | gx_device *pbdev = (gx_device *)&bdev; |
382 | 1.43M | int code; |
383 | 1.43M | int align = 1 << (target->log2_align_mod > log2_align_bitmap_mod ? target->log2_align_mod : log2_align_bitmap_mod); |
384 | | |
385 | 1.43M | align = align < obj_align_mod ? obj_align_mod : align; |
386 | | |
387 | | /* the clist writer has its own color info that depends upon the |
388 | | transparency group color space (if transparency exists). This is the data |
389 | | used during clist writing. Here it is initialized with |
390 | | the target device color info. The values will be pushed and popped |
391 | | in a stack if we have changing color spaces in the transparency groups. */ |
392 | | |
393 | 1.43M | cdev->clist_color_info.depth = dev->color_info.depth; |
394 | 1.43M | cdev->clist_color_info.polarity = dev->color_info.polarity; |
395 | 1.43M | cdev->clist_color_info.num_components = dev->color_info.num_components; |
396 | 1.43M | cdev->graphics_type_tag = target->graphics_type_tag; /* initialize to same as target */ |
397 | | |
398 | | /* Call create_buf_device to get the memory planarity set up. */ |
399 | 1.43M | code = cdev->buf_procs.create_buf_device(&pbdev, target, 0, NULL, NULL, NULL); |
400 | 1.43M | if (code < 0) |
401 | 0 | return code; |
402 | | /* HACK - if the buffer device can't do copy_alpha, disallow */ |
403 | | /* copy_alpha in the command list device as well. */ |
404 | 1.43M | if (dev_proc(pbdev, copy_alpha) == gx_no_copy_alpha) |
405 | 0 | cdev->disable_mask |= clist_disable_copy_alpha; |
406 | 1.43M | if (dev_proc(cdev, open_device) == pattern_clist_open_device) { |
407 | 257k | bits_size = data_size / 2; |
408 | 257k | cdev->page_info.line_ptrs_offset = 0; |
409 | 1.17M | } else { |
410 | 1.17M | if (band_height) { |
411 | | /* |
412 | | * The band height is fixed, so the band buffer requirement |
413 | | * is completely determined. |
414 | | */ |
415 | 0 | size_t band_data_size; |
416 | 0 | int adjusted; |
417 | |
418 | 0 | adjusted = (dev_proc(dev, dev_spec_op)(dev, gxdso_adjust_bandheight, NULL, band_height)); |
419 | 0 | if (adjusted > 0) |
420 | 0 | band_height = adjusted; |
421 | |
422 | 0 | if (gdev_mem_data_size(&bdev, band_width, band_height, &band_data_size) < 0 || |
423 | 0 | band_data_size >= band_space) { |
424 | 0 | if (pbdev->finalize) |
425 | 0 | pbdev->finalize(pbdev); |
426 | 0 | return_error(gs_error_rangecheck); |
427 | 0 | } |
428 | | /* If the tile_cache_size is specified, use it */ |
429 | 0 | if (cdev->space_params.band.tile_cache_size == 0) { |
430 | 0 | bits_size = min(band_space - band_data_size, data_size >> 1); |
431 | 0 | } else { |
432 | 0 | bits_size = cdev->space_params.band.tile_cache_size; |
433 | 0 | } |
434 | | /* The top of the tile_cache is the bottom of the imageable band buffer, |
435 | | * which needs to be appropriately aligned. Because the band height is |
436 | | * fixed, we must round *down* the size of the cache to an appropriate |
437 | | * value. See clist_render_thread() and clist_rasterize_lines() |
438 | | * for where the value is used. |
439 | | */ |
440 | 0 | bits_size = ROUND_DOWN(bits_size, align); |
441 | 1.17M | } else { |
442 | 1.17M | int adjusted; |
443 | | /* |
444 | | * Choose the largest band height that will fit in the |
445 | | * rendering-time buffer. |
446 | | */ |
447 | 1.17M | bits_size = clist_tile_cache_size(target, band_space); |
448 | 1.17M | bits_size = min(bits_size, data_size >> 1); |
449 | | /* The top of the tile_cache is the bottom of the imageable band buffer, |
450 | | * which needs to be appropriately aligned. Because the band height is |
451 | | * variable here, we round *up* the size of the cache; the adjustment |
452 | | * should only be a few bytes. See clist_render_thread() |
453 | | * and clist_rasterize_lines() for where the value is used. |
454 | | */ |
455 | 1.17M | bits_size = ROUND_UP(bits_size, align); |
456 | 1.17M | band_height = gdev_mem_max_height(&bdev, band_width, |
457 | 1.17M | band_space - bits_size, page_uses_transparency); |
458 | 1.17M | if (band_height == 0) { |
459 | 91 | if (pbdev->finalize) |
460 | 91 | pbdev->finalize(pbdev); |
461 | 91 | return_error(gs_error_rangecheck); |
462 | 91 | } |
463 | 1.17M | adjusted = (dev_proc(dev, dev_spec_op)(dev, gxdso_adjust_bandheight, NULL, band_height)); |
464 | 1.17M | if (adjusted > 0) |
465 | 0 | band_height = adjusted; |
466 | 1.17M | } |
467 | | /* The bits_size values calculated above include space for line ptrs. What is |
468 | | * the offset for the line_ptrs within the buffer? */ |
469 | 1.17M | if (gdev_mem_bits_size(&bdev, band_width, band_height, &cdev->page_info.line_ptrs_offset) < 0) |
470 | 0 | return_error(gs_error_VMerror); |
471 | 1.17M | } |
472 | 1.43M | cdev->pdf14_trans_group_level = -1; /* to prevent any initial op except PUSH_DEVICE */ |
473 | 1.43M | cdev->ins_count = 0; |
474 | 1.43M | code = clist_init_tile_cache(dev, data, bits_size); |
475 | 1.43M | if (code < 0) { |
476 | 0 | if (pbdev->finalize) |
477 | 0 | pbdev->finalize(pbdev); |
478 | 0 | return code; |
479 | 0 | } |
480 | 1.43M | cdev->page_info.tile_cache_size = bits_size; |
481 | 1.43M | data += bits_size; |
482 | 1.43M | size -= bits_size; |
483 | 1.43M | code = clist_init_bands(dev, &bdev, size, band_width, band_height); |
484 | 1.43M | if (code < 0) { |
485 | 0 | if (pbdev->finalize) |
486 | 0 | pbdev->finalize(pbdev); |
487 | 0 | return code; |
488 | 0 | } |
489 | | |
490 | 1.43M | if (pbdev->finalize) |
491 | 1.43M | pbdev->finalize(pbdev); |
492 | | |
493 | 1.43M | return clist_init_states(dev, data, data_size - bits_size); |
494 | 1.43M | } |
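/* Hedged summary of the writer-side layout this routine leaves behind
 * (offsets are relative to the start of the buffer passed in as init_data):
 *
 *     [0, tile_cache_size)            bitmap/tile cache (clist_init_tile_cache)
 *     [tile_cache_size, ...)          per-band gx_clist_state array, then the
 *                                     band_range_list header (clist_init_states)
 *     [..., data_size) = cbuf..cend   command buffer proper
 */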
495 | | /* |
496 | | * Reset the device state (for writing). This routine requires only |
497 | | * data, data_size, and target to be set, and is idempotent. |
498 | | */ |
499 | | static int |
500 | | clist_reset(gx_device * dev) |
501 | 1.43M | { |
502 | 1.43M | gx_device_clist_writer * const cdev = |
503 | 1.43M | &((gx_device_clist *)dev)->writer; |
504 | 1.43M | int code = clist_init_data(dev, cdev->data, cdev->data_size); |
505 | 1.43M | int nbands; |
506 | | |
507 | 1.43M | if (code < 0) |
508 | 50.0k | return (cdev->permanent_error = code); |
509 | | /* Now initialize the rest of the state. */ |
510 | 1.38M | cdev->permanent_error = 0; |
511 | 1.38M | nbands = cdev->nbands; |
512 | 1.38M | cdev->ymin = cdev->ymax = -1; /* render_init not done yet */ |
513 | 1.38M | memset(cdev->tile_table, 0, (cdev->tile_hash_mask + 1) * |
514 | 1.38M | sizeof(*cdev->tile_table)); |
515 | 1.38M | cdev->cnext = cdev->cbuf; |
516 | 1.38M | cdev->ccl = 0; |
517 | 1.38M | cdev->band_range_list->head = cdev->band_range_list->tail = 0; |
518 | 1.38M | cdev->band_range_min = 0; |
519 | 1.38M | cdev->band_range_max = nbands - 1; |
520 | 1.38M | if_debug2m('L', cdev->memory, "[L]Resetting: Band range(%d,%d)\n", |
521 | 1.38M | cdev->band_range_min, cdev->band_range_max); |
522 | 1.38M | { |
523 | 1.38M | int band; |
524 | 1.38M | gx_clist_state *states = cdev->states; |
525 | | |
526 | 42.6M | for (band = 0; band < nbands; band++, states++) { |
527 | 41.2M | static const gx_clist_state cls_initial = { cls_initial_values }; |
528 | | |
529 | 41.2M | *states = cls_initial; |
530 | 41.2M | } |
531 | 1.38M | } |
532 | | /* |
533 | | * Round up the size of the per-tile band mask so that the bits, |
534 | | * which follow it, stay aligned. |
535 | | */ |
536 | 1.38M | cdev->tile_band_mask_size = |
537 | 1.38M | ((nbands + (align_bitmap_mod * 8 - 1)) >> 3) & |
538 | 1.38M | ~(align_bitmap_mod - 1); |
539 | | /* |
540 | | * Initialize the all-band parameters to impossible values, |
541 | | * to force them to be written the first time they are used. |
542 | | */ |
543 | 1.38M | memset(&cdev->tile_params, 0, sizeof(cdev->tile_params)); |
544 | 1.38M | cdev->tile_depth = 0; |
545 | 1.38M | cdev->tile_known_min = nbands; |
546 | 1.38M | cdev->tile_known_max = -1; |
547 | 1.38M | GS_STATE_INIT_VALUES_CLIST((&cdev->gs_gstate)); |
548 | 1.38M | cdev->clip_path = NULL; |
549 | 1.38M | cdev->clip_path_id = gs_no_id; |
550 | 1.38M | cdev->color_space.byte1 = 0; |
551 | 1.38M | cdev->color_space.id = gs_no_id; |
552 | 1.38M | cdev->color_space.space = 0; |
553 | 1.38M | { |
554 | 1.38M | int i; |
555 | | |
556 | 6.92M | for (i = 0; i < countof(cdev->transfer_ids); ++i) |
557 | 5.53M | cdev->transfer_ids[i] = gs_no_id; |
558 | 1.38M | } |
559 | 1.38M | cdev->black_generation_id = gs_no_id; |
560 | 1.38M | cdev->undercolor_removal_id = gs_no_id; |
561 | 1.38M | cdev->device_halftone_id = gs_no_id; |
562 | 1.38M | cdev->image_enum_id = gs_no_id; |
563 | 1.38M | cdev->cropping_min = cdev->save_cropping_min = 0; |
564 | 1.38M | cdev->cropping_max = cdev->save_cropping_max = cdev->height; |
565 | 1.38M | cdev->cropping_saved = false; |
566 | 1.38M | cdev->cropping_stack = NULL; |
567 | 1.38M | cdev->cropping_level = 0; |
568 | 1.38M | cdev->mask_id_count = cdev->mask_id = cdev->temp_mask_id = 0; |
569 | 1.38M | cdev->icc_table = NULL; |
570 | 1.38M | cdev->op_fill_active = false; |
571 | 1.38M | cdev->op_stroke_active = false; |
572 | 1.38M | return 0; |
573 | 1.43M | } |
574 | | /* |
575 | | * Initialize the device state (for writing). This routine requires only |
576 | | * data, data_size, and target to be set, and is idempotent. |
577 | | */ |
578 | | static int |
579 | | clist_init(gx_device * dev) |
580 | 1.43M | { |
581 | 1.43M | gx_device_clist_writer * const cdev = |
582 | 1.43M | &((gx_device_clist *)dev)->writer; |
583 | 1.43M | int code = clist_reset(dev); |
584 | | |
585 | 1.43M | if (code >= 0) { |
586 | 1.38M | cdev->image_enum_id = gs_no_id; |
587 | 1.38M | cdev->ignore_lo_mem_warnings = 0; |
588 | 1.38M | } |
589 | 1.43M | return code; |
590 | 1.43M | } |
591 | | |
592 | | /* Write out the current parameters that must be at the head of each page */ |
593 | | /* if async rendering is in effect */ |
594 | | static int |
595 | | clist_emit_page_header(gx_device *dev) |
596 | 1.03M | { |
597 | 1.03M | gx_device_clist_writer * const cdev = |
598 | 1.03M | &((gx_device_clist *)dev)->writer; |
599 | 1.03M | int code = 0; |
600 | | |
601 | 1.03M | if ((cdev->disable_mask & clist_disable_pass_thru_params)) { |
602 | 0 | code = clist_put_current_params(cdev); |
603 | 0 | cdev->permanent_error = (code < 0 ? code : 0); |
604 | 0 | } |
605 | 1.03M | return code; |
606 | 1.03M | } |
607 | | |
608 | | /* Reset parameters for the beginning of a page. */ |
609 | | static void |
610 | | clist_reset_page(gx_device_clist_writer *cwdev) |
611 | 1.03M | { |
612 | 1.03M | cwdev->page_info.bfile_end_pos = 0; |
613 | 1.03M | } |
614 | | |
615 | | /* Open the device's bandfiles */ |
616 | | static int |
617 | | clist_open_output_file(gx_device *dev) |
618 | 349k | { |
619 | 349k | gx_device_clist_writer * const cdev = |
620 | 349k | &((gx_device_clist *)dev)->writer; |
621 | 349k | char fmode[4]; |
622 | 349k | int code; |
623 | | |
624 | 349k | if (cdev->do_not_open_or_close_bandfiles) |
625 | 0 | return 0; /* external bandfile open/close managed externally */ |
626 | 349k | cdev->page_info.cfile = 0; /* in case of failure */ |
627 | 349k | cdev->page_info.bfile = 0; /* ditto */ |
628 | 349k | code = clist_init(dev); |
629 | 349k | if (code < 0) |
630 | 0 | return code; |
631 | 349k | snprintf(fmode, sizeof(fmode), "w+%s", gp_fmode_binary_suffix); |
632 | 349k | cdev->page_info.cfname[0] = 0; /* create a new file */ |
633 | 349k | cdev->page_info.bfname[0] = 0; /* ditto */ |
634 | 349k | clist_reset_page(cdev); |
635 | 349k | if ((code = cdev->page_info.io_procs->fopen(cdev->page_info.cfname, fmode, &cdev->page_info.cfile, |
636 | 349k | cdev->bandlist_memory, cdev->bandlist_memory, |
637 | 349k | true)) < 0 || |
638 | 349k | (code = cdev->page_info.io_procs->fopen(cdev->page_info.bfname, fmode, &cdev->page_info.bfile, |
639 | 349k | cdev->bandlist_memory, cdev->bandlist_memory, |
640 | 349k | false)) < 0 |
641 | 349k | ) { |
642 | 0 | clist_close_output_file(dev); |
643 | 0 | cdev->permanent_error = code; |
644 | 0 | } |
645 | 349k | return code; |
646 | 349k | } |
647 | | |
648 | | /* Close, and free the contents of, the temporary files of a page. */ |
649 | | /* Note that this does not deallocate the buffer. */ |
650 | | int |
651 | | clist_close_page_info(gx_band_page_info_t *ppi) |
652 | 350k | { |
653 | 350k | if (ppi->cfile != NULL) { |
654 | 349k | ppi->io_procs->fclose(ppi->cfile, ppi->cfname, true); |
655 | 349k | ppi->cfile = NULL; |
656 | 349k | ppi->cfname[0] = 0; /* prevent re-use in case this is a fake path */ |
657 | 349k | } |
658 | 350k | if (ppi->bfile != NULL) { |
659 | 349k | ppi->io_procs->fclose(ppi->bfile, ppi->bfname, true); |
660 | 349k | ppi->bfile = NULL; |
661 | 349k | ppi->bfname[0] = 0; /* prevent re-use in case this is a fake path */ |
662 | 349k | } |
663 | 350k | return 0; |
664 | 350k | } |
665 | | |
666 | | /* Close the device by freeing the temporary files. */ |
667 | | /* Note that this does not deallocate the buffer. */ |
668 | | int |
669 | | clist_close_output_file(gx_device *dev) |
670 | 350k | { |
671 | 350k | gx_device_clist_writer * const cdev = |
672 | 350k | &((gx_device_clist *)dev)->writer; |
673 | | |
674 | 350k | return clist_close_page_info(&cdev->page_info); |
675 | 350k | } |
676 | | |
677 | | /* Open the device by initializing the device state and opening the */ |
678 | | /* scratch files. */ |
679 | | int |
680 | | clist_open(gx_device *dev) |
681 | 399k | { |
682 | 399k | gx_device_clist_writer * const cdev = |
683 | 399k | &((gx_device_clist *)dev)->writer; |
684 | 399k | bool save_is_open = dev->is_open; |
685 | 399k | int code; |
686 | | |
687 | 399k | cdev->permanent_error = 0; |
688 | 399k | cdev->is_open = false; |
689 | | |
690 | 399k | cdev->cache_chunk = (gx_bits_cache_chunk *)gs_alloc_bytes(cdev->memory->non_gc_memory, sizeof(gx_bits_cache_chunk), "alloc tile cache for clist"); |
691 | 399k | if (!cdev->cache_chunk) |
692 | 0 | return_error(gs_error_VMerror); |
693 | 399k | memset(cdev->cache_chunk, 0x00, sizeof(gx_bits_cache_chunk)); |
694 | | |
695 | 399k | code = clist_init(dev); |
696 | 399k | if (code < 0) |
697 | 50.0k | goto errxit; |
698 | | |
699 | 349k | cdev->icc_cache_list_len = 0; |
700 | 349k | cdev->icc_cache_list = NULL; |
701 | 349k | code = clist_open_output_file(dev); |
702 | 349k | if ( code >= 0) |
703 | 349k | code = clist_emit_page_header(dev); |
704 | 349k | if (code >= 0) { |
705 | 349k | dev->is_open = save_is_open; |
706 | 349k | return code; /* success */ |
707 | 349k | } |
708 | | /* fall through to clean up and return error code */ |
709 | 50.0k | errxit: |
710 | | /* prevent leak */ |
711 | 50.0k | gs_free_object(cdev->memory->non_gc_memory, cdev->cache_chunk, "free tile cache for clist"); |
712 | 50.0k | dev->is_open = save_is_open; |
713 | 50.0k | cdev->cache_chunk = NULL; |
714 | 50.0k | return code; |
715 | 349k | } |
716 | | |
717 | | int |
718 | | clist_close(gx_device *dev) |
719 | 353k | { |
720 | 353k | int i; |
721 | 353k | gx_device_clist_writer * const cdev = |
722 | 353k | &((gx_device_clist *)dev)->writer; |
723 | | |
724 | | /* I'd like to free the cache chunk in here, but we can't, because the pattern clist |
725 | | * device gets closed, but not discarded, and is run again later. So we have to free the memory |
726 | | * in *2* places, once in gdev_prn_tear_down() for regular clists, and once in |
727 | | * gx_pattern_cache_free_entry() for pattern clists.... |
728 | | */ |
729 | 353k | for(i = 0; i < cdev->icc_cache_list_len; i++) { |
730 | 0 | rc_decrement(cdev->icc_cache_list[i], "clist_close"); |
731 | 0 | } |
732 | 353k | cdev->icc_cache_list_len = 0; |
733 | 353k | gs_free_object(cdev->memory->thread_safe_memory, cdev->icc_cache_list, "clist_close"); |
734 | 353k | cdev->icc_cache_list = NULL; |
735 | | |
736 | | /* So despite the comment above, it seems necessary to free the cache_chunk here, |
737 | | * if the device is not being retained. The code in gx_pattern_cache_free_entry() doesn't |
738 | | * actually free it, in at least some cases. |
739 | | * TODO: Is it sufficient to only free it here, and not in the places mentioned above? |
740 | | */ |
741 | 353k | if (!cdev->retained) { |
742 | 2.91k | gs_free_object(cdev->memory->non_gc_memory, cdev->cache_chunk, |
743 | 2.91k | "clist_close(cache_chunk)"); |
744 | 2.91k | cdev->cache_chunk = NULL; |
745 | 2.91k | } |
746 | | |
747 | 353k | if (cdev->do_not_open_or_close_bandfiles) |
748 | 2.78k | return 0; |
749 | 350k | if (dev_proc(cdev, open_device) == pattern_clist_open_device) { |
750 | 129k | gs_free_object(cdev->bandlist_memory, cdev->data, "clist_close"); |
751 | 129k | cdev->data = NULL; |
752 | 129k | } |
753 | 350k | return clist_close_output_file(dev); |
754 | 353k | } |
755 | | |
756 | | /* The output_page procedure should never be called! */ |
757 | | int |
758 | | clist_output_page(gx_device * dev, int num_copies, int flush) |
759 | 0 | { |
760 | 0 | return_error(gs_error_Fatal); |
761 | 0 | } |
762 | | |
763 | | /* Reset (or prepare to append to) the command list after printing a page. */ |
764 | | int |
765 | | clist_finish_page(gx_device *dev, bool flush) |
766 | 684k | { |
767 | 684k | gx_device_clist_writer *const cdev = &((gx_device_clist *)dev)->writer; |
768 | 684k | int code; |
769 | | |
770 | | /* If this is a reader clist, which is about to be reset to a writer, |
771 | | * free any color_usage array used by same. |
772 | | * since we have been rendering, shut down threads |
773 | | * Also free the icc_table at this time and the icc_cache |
774 | | */ |
775 | 684k | if (!CLIST_IS_WRITER((gx_device_clist *)dev)) { |
776 | 682k | gx_device_clist_reader * const crdev = &((gx_device_clist *)dev)->reader; |
777 | | |
778 | 682k | clist_teardown_render_threads(dev); |
779 | 682k | gs_free_object(cdev->memory, crdev->color_usage_array, "clist_color_usage_array"); |
780 | 682k | crdev->color_usage_array = NULL; |
781 | | |
782 | | /* Free the icc table associated with this device. |
783 | | The threads that may have pointed to this were destroyed in |
784 | | the above call to clist_teardown_render_threads. Since they |
785 | | all maintained a copy of the cache and the table there should not |
786 | | be any issues. */ |
787 | 682k | clist_free_icc_table(crdev->icc_table, crdev->memory); |
788 | 682k | crdev->icc_table = NULL; |
789 | 682k | } |
790 | 684k | if (flush) { |
791 | 684k | if (cdev->page_info.cfile != 0) { |
792 | 684k | code = cdev->page_info.io_procs->rewind(cdev->page_info.cfile, true, cdev->page_info.cfname); |
793 | 684k | if (code < 0) return code; |
794 | 684k | } |
795 | 684k | if (cdev->page_info.bfile != 0) { |
796 | 684k | code = cdev->page_info.io_procs->rewind(cdev->page_info.bfile, true, cdev->page_info.bfname); |
797 | 684k | if (code < 0) return code; |
798 | 684k | } |
799 | 684k | cdev->page_info.bfile_end_pos = 0; |
800 | 684k | clist_reset_page(cdev); |
801 | 684k | } else { |
802 | 0 | if (cdev->page_info.cfile != 0) |
803 | 0 | cdev->page_info.io_procs->fseek(cdev->page_info.cfile, 0L, SEEK_END, cdev->page_info.cfname); |
804 | 0 | if (cdev->page_info.bfile != 0) |
805 | 0 | cdev->page_info.io_procs->fseek(cdev->page_info.bfile, 0L, SEEK_END, cdev->page_info.bfname); |
806 | 0 | } |
807 | 684k | code = clist_init(dev); /* reinitialize */ |
808 | 684k | if (code >= 0) |
809 | 684k | code = clist_emit_page_header(dev); |
810 | | |
811 | 684k | return code; |
812 | 684k | } |
813 | | |
814 | | /* ------ Writing ------ */ |
815 | | |
816 | | /* End a page by flushing the buffer and terminating the command list. */ |
817 | | int /* ret 0 all-ok, -ve error code, or +1 ok w/low-mem warning */ |
818 | | clist_end_page(gx_device_clist_writer * cldev) |
819 | 690k | { |
820 | 690k | int code; |
821 | 690k | cmd_block cb; |
822 | 690k | int ecode = 0; |
823 | | |
824 | 690k | code = cmd_write_buffer(cldev, cmd_opv_end_page); |
825 | 690k | if (code >= 0) |
826 | 690k | ecode |= code; |
827 | 0 | else |
828 | 0 | ecode = code; |
829 | | |
830 | | /* If we have ICC profiles present in the cfile save the table now, |
831 | | along with the ICC profiles. Table is stored in band maxband + 1. */ |
832 | 690k | if ( cldev->icc_table != NULL ) { |
833 | | /* Save the table */ |
834 | 14.5k | code = clist_icc_writetable(cldev); |
835 | | /* Free the table */ |
836 | 14.5k | clist_free_icc_table(cldev->icc_table, cldev->memory); |
837 | 14.5k | cldev->icc_table = NULL; |
838 | 14.5k | } |
839 | | |
840 | 690k | if (code >= 0) { |
841 | 690k | code = clist_write_color_usage_array(cldev); |
842 | 690k | if (code >= 0) { |
843 | 690k | ecode |= code; |
844 | | /* |
845 | | * Write the terminating entry in the block file. |
846 | | * Note that because of copypage, there may be many such entries. |
847 | | */ |
848 | 690k | memset(&cb, 0, sizeof(cb)); /* Zero the block, including any padding */ |
849 | 690k | cb.band_min = cb.band_max = cmd_band_end; |
850 | 690k | cb.pos = (cldev->page_info.cfile == 0 ? 0 : cldev->page_info.io_procs->ftell(cldev->page_info.cfile)); |
851 | 690k | if_debug3m('l', cldev->memory, "[l]writing end for bands (%d,%d) at %"PRId64"\n", |
852 | 690k | cb.band_min, cb.band_max, cb.pos); |
853 | 690k | code = cldev->page_info.io_procs->fwrite_chars(&cb, sizeof(cb), cldev->page_info.bfile); |
854 | 690k | if (code > 0) |
855 | 690k | code = 0; |
856 | 690k | } |
857 | 690k | } |
858 | 690k | if (code >= 0) { |
859 | 690k | ecode |= code; |
860 | 690k | cldev->page_info.bfile_end_pos = cldev->page_info.io_procs->ftell(cldev->page_info.bfile); |
861 | 690k | } else |
862 | 0 | ecode = code; |
863 | | |
864 | | /* Reset warning margin to 0 to release reserve memory if mem files */ |
865 | 690k | if (cldev->page_info.bfile != 0) |
866 | 690k | cldev->page_info.io_procs->set_memory_warning(cldev->page_info.bfile, 0); |
867 | 690k | if (cldev->page_info.cfile != 0) |
868 | 690k | cldev->page_info.io_procs->set_memory_warning(cldev->page_info.cfile, 0); |
869 | | |
870 | | #ifdef DEBUG |
871 | | if (gs_debug_c('l') | gs_debug_c(':')) { |
872 | | if (cb.pos <= 0xFFFFFFFF) |
873 | | dmlprintf2(cldev->memory, "[:]clist_end_page at cfile=%lu, bfile=%lu\n", |
874 | | (unsigned long)cb.pos, (unsigned long)cldev->page_info.bfile_end_pos); |
875 | | else |
876 | | dmlprintf3(cldev->memory, "[:]clist_end_page at cfile=%lu%0lu, bfile=%lu\n", |
877 | | (unsigned long) (cb.pos >> 32), (unsigned long) (cb.pos & 0xFFFFFFFF), |
878 | | (unsigned long)cldev->page_info.bfile_end_pos); |
879 | | } |
880 | | #endif |
881 | 690k | if (cldev->page_uses_transparency && gs_debug[':']) { |
882 | | /* count how many bands were skipped */ |
883 | 0 | int skip_count = 0; |
884 | 0 | int band; |
885 | |
886 | 0 | for (band=0; band < cldev->nbands - 1; band++) { |
887 | 0 | if (cldev->states[band].color_usage.trans_bbox.p.y > |
888 | 0 | cldev->states[band].color_usage.trans_bbox.q.y) |
889 | 0 | skip_count++; |
890 | 0 | } |
891 | 0 | dprintf2("%d bands skipped out of %d\n", skip_count, cldev->nbands); |
892 | 0 | } |
893 | | |
894 | 690k | return ecode; |
895 | 690k | } |
896 | | |
897 | | gx_color_usage_bits |
898 | | gx_color_index2usage(gx_device *dev, gx_color_index color) |
899 | 160M | { |
900 | 160M | gx_color_usage_bits bits = 0; |
901 | 160M | uchar i; |
902 | | |
903 | 160M | if (dev->color_info.polarity == GX_CINFO_POLARITY_ADDITIVE) |
904 | 156M | color = color ^ ~0; /* white is 0 */ |
905 | | |
906 | 531M | for (i = 0; i < dev->color_info.num_components; i++) { |
907 | 370M | if (color & dev->color_info.comp_mask[i]) |
908 | 14.6M | bits |= (((gx_color_usage_bits)1) << i); |
909 | 370M | } |
910 | 160M | return bits; |
911 | 160M | } |
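/* Worked example (illustrative only, for a hypothetical 24-bit additive RGB
 * device with comp_mask[] = { 0xff0000, 0x00ff00, 0x0000ff }): the color is
 * first inverted so that white means "nothing used", then tested per
 * component:
 *
 *     gx_color_index2usage(dev, 0xffffff) == 0      white: no components used
 *     gx_color_index2usage(dev, 0xff0000) == 0x6    pure red: bits for G and B,
 *                                                   the components pulled away
 *                                                   from white
 */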
912 | | |
913 | | /* Write the target device's current parameter list */ |
914 | | static int /* ret 0 all ok, -ve error */ |
915 | | clist_put_current_params(gx_device_clist_writer *cldev) |
916 | 0 | { |
917 | 0 | gx_device *target = cldev->target; |
918 | 0 | gs_c_param_list param_list; |
919 | 0 | int code; |
920 | | |
921 | | /* |
922 | | * If a put_params call fails, the device will be left in a closed |
923 | | * state, but higher-level code won't notice this fact. We flag this by |
924 | | * setting permanent_error, which prevents writing to the command list. |
925 | | */ |
926 | |
927 | 0 | if (cldev->permanent_error) |
928 | 0 | return cldev->permanent_error; |
929 | 0 | gs_c_param_list_write(¶m_list, cldev->memory); |
930 | 0 | code = (*dev_proc(target, get_params)) |
931 | 0 | (target, (gs_param_list *)¶m_list); |
932 | 0 | if (code >= 0) { |
933 | 0 | gs_c_param_list_read(¶m_list); |
934 | 0 | code = cmd_put_params( cldev, (gs_param_list *)¶m_list ); |
935 | 0 | } |
936 | 0 | gs_c_param_list_release(¶m_list); |
937 | |
938 | 0 | return code; |
939 | 0 | } |
940 | | |
941 | | /* ---------------- Driver interface ---------------- */ |
942 | | |
943 | | /* ICC table operations. See gxclist.h for details */ |
944 | | /* This checks the table for a hash code entry */ |
945 | | bool |
946 | | clist_icc_searchtable(gx_device_clist_writer *cdev, int64_t hashcode) |
947 | 474k | { |
948 | 474k | clist_icctable_t *icc_table = cdev->icc_table; |
949 | 474k | clist_icctable_entry_t *curr_entry; |
950 | | |
951 | 474k | if (icc_table == NULL) |
952 | 6.75k | return(false); /* No entry */ |
953 | 468k | curr_entry = icc_table->head; |
954 | 847k | while(curr_entry != NULL) { |
955 | 845k | if (curr_entry->serial_data.hashcode == hashcode){ |
956 | 465k | return(true); |
957 | 465k | } |
958 | 379k | curr_entry = curr_entry->next; |
959 | 379k | } |
960 | 2.37k | return(false); /* No entry */ |
961 | 468k | } |
962 | | |
963 | | static void |
964 | | clist_free_icc_table_contents(clist_icctable_t *icc_table) |
965 | 29.6k | { |
966 | 29.6k | int number_entries; |
967 | 29.6k | clist_icctable_entry_t *curr_entry, *next_entry; |
968 | 29.6k | int k; |
969 | | |
970 | 29.6k | number_entries = icc_table->tablesize; |
971 | 29.6k | curr_entry = icc_table->head; |
972 | 67.6k | for (k = 0; k < number_entries; k++) { |
973 | 37.9k | next_entry = curr_entry->next; |
974 | 37.9k | gsicc_adjust_profile_rc(curr_entry->icc_profile, -1, "clist_free_icc_table"); |
975 | 37.9k | gs_free_object(icc_table->memory, curr_entry, "clist_free_icc_table"); |
976 | 37.9k | curr_entry = next_entry; |
977 | 37.9k | } |
978 | 29.6k | } |
979 | | |
980 | | void |
981 | | clist_icc_table_finalize(const gs_memory_t *memory, void * vptr) |
982 | 29.6k | { |
983 | 29.6k | clist_icctable_t *icc_table = (clist_icctable_t *)vptr; |
984 | | |
985 | 29.6k | clist_free_icc_table_contents(icc_table); |
986 | 29.6k | } |
987 | | |
988 | | /* Free the table */ |
989 | | int |
990 | | clist_free_icc_table(clist_icctable_t *icc_table, gs_memory_t *memory) |
991 | 1.04M | { |
992 | 1.04M | if (icc_table == NULL) |
993 | 1.01M | return(0); |
994 | | |
995 | 29.2k | gs_free_object(icc_table->memory, icc_table, "clist_free_icc_table"); |
996 | 29.2k | return(0); |
997 | 1.04M | } |
998 | | |
999 | | /* This serializes the ICC table and writes it out for maxband+1 */ |
1000 | | int |
1001 | | clist_icc_writetable(gx_device_clist_writer *cldev) |
1002 | 14.5k | { |
1003 | 14.5k | unsigned char *pbuf, *buf; |
1004 | 14.5k | clist_icctable_t *icc_table = cldev->icc_table; |
1005 | 14.5k | int number_entries = icc_table->tablesize; |
1006 | 14.5k | clist_icctable_entry_t *curr_entry; |
1007 | 14.5k | int size_data; |
1008 | 14.5k | int k; |
1009 | 14.5k | bool rend_is_valid; |
1010 | | |
1011 | | /* First we need to write out the ICC profiles themselves and record |
1012 | | in the table where they are stored and how large they are. Set the |
1013 | | rend cond valid flag prior to writing */ |
1014 | 14.5k | curr_entry = icc_table->head; |
1015 | 33.2k | for ( k = 0; k < number_entries; k++ ){ |
1016 | 18.7k | rend_is_valid = curr_entry->icc_profile->rend_is_valid; |
1017 | 18.7k | curr_entry->icc_profile->rend_is_valid = curr_entry->render_is_valid; |
1018 | 18.7k | curr_entry->serial_data.file_position = clist_icc_addprofile(cldev, curr_entry->icc_profile, &size_data); |
1019 | 18.7k | curr_entry->icc_profile->rend_is_valid = rend_is_valid; |
1020 | 18.7k | curr_entry->serial_data.size = size_data; |
1021 | 18.7k | gsicc_adjust_profile_rc(curr_entry->icc_profile, -1, "clist_icc_writetable"); |
1022 | 18.7k | curr_entry->icc_profile = NULL; |
1023 | 18.7k | curr_entry = curr_entry->next; |
1024 | 18.7k | } |
1025 | | |
1026 | | /* Now serialize the table data */ |
1027 | 14.5k | size_data = number_entries*sizeof(clist_icc_serial_entry_t) + sizeof(number_entries); |
1028 | 14.5k | buf = gs_alloc_bytes(cldev->memory, size_data, "clist_icc_writetable"); |
1029 | 14.5k | if (buf == NULL) |
1030 | 0 | return gs_rethrow(-1, "insufficient memory for icc table buffer"); |
1031 | 14.5k | pbuf = buf; |
1032 | 14.5k | memcpy(pbuf, &number_entries, sizeof(number_entries)); |
1033 | 14.5k | pbuf += sizeof(number_entries); |
1034 | 14.5k | curr_entry = icc_table->head; |
1035 | 33.2k | for (k = 0; k < number_entries; k++) { |
1036 | 18.7k | memcpy(pbuf, &(curr_entry->serial_data), sizeof(clist_icc_serial_entry_t)); |
1037 | 18.7k | pbuf += sizeof(clist_icc_serial_entry_t); |
1038 | 18.7k | curr_entry = curr_entry->next; |
1039 | 18.7k | } |
1040 | | /* Now go ahead and save the table data */ |
1041 | 14.5k | cmd_write_pseudo_band(cldev, buf, size_data, ICC_TABLE_OFFSET); |
1042 | 14.5k | gs_free_object(cldev->memory, buf, "clist_icc_writetable"); |
1043 | 14.5k | return(0); |
1044 | 14.5k | } |
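/* Hedged summary of the pseudo-band record written above:
 *
 *     [ int number_entries ][ clist_icc_serial_entry_t x number_entries ]
 *
 * where each serial entry carries the profile's hashcode plus the cfile
 * offset and byte count returned by clist_icc_addprofile(). */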
1045 | | |
1046 | | /* This writes the actual data out to the cfile */ |
1047 | | |
1048 | | int64_t |
1049 | | clist_icc_addprofile(gx_device_clist_writer *cldev, cmm_profile_t *iccprofile, int *size) |
1050 | 18.7k | { |
1051 | | |
1052 | 18.7k | clist_file_ptr cfile = cldev->page_info.cfile; |
1053 | 18.7k | int64_t fileposit; |
1054 | 18.7k | #if defined(DEBUG) || defined(PACIFY_VALGRIND) |
1055 | 18.7k | gsicc_serialized_profile_t profile_data = { 0 }; |
1056 | | #else |
1057 | | gsicc_serialized_profile_t profile_data; |
1058 | | #endif |
1059 | 18.7k | int count1, count2; |
1060 | | |
1061 | | /* Get the current position */ |
1062 | 18.7k | fileposit = cldev->page_info.io_procs->ftell(cfile); |
1063 | | /* Get the serialized header */ |
1064 | 18.7k | gsicc_profile_serialize(&profile_data, iccprofile); |
1065 | | /* Write the header */ |
1066 | 18.7k | if_debug1m('l', cldev->memory, "[l]writing icc profile in cfile at pos %"PRId64"\n",fileposit); |
1067 | 18.7k | count1 = cldev->page_info.io_procs->fwrite_chars(&profile_data, GSICC_SERIALIZED_SIZE, cfile); |
1068 | | /* Now write the profile */ |
1069 | 18.7k | count2 = cldev->page_info.io_procs->fwrite_chars(iccprofile->buffer, iccprofile->buffer_size, cfile); |
1070 | | /* Return where we wrote this in the cfile */ |
1071 | 18.7k | *size = count1 + count2; |
1072 | 18.7k | return(fileposit); |
1073 | 18.7k | } |
1074 | | |
1075 | | /* This adds a new entry to the table */ |
1076 | | |
1077 | | int |
1078 | | clist_icc_addentry(gx_device_clist_writer *cdev, int64_t hashcode_in, cmm_profile_t *icc_profile) |
1079 | 67.3k | { |
1080 | | |
1081 | 67.3k | clist_icctable_t *icc_table = cdev->icc_table; |
1082 | 67.3k | clist_icctable_entry_t *entry, *curr_entry; |
1083 | 67.3k | int k; |
1084 | 67.3k | int64_t hashcode; |
1085 | 67.3k | gs_memory_t *stable_mem = cdev->memory->stable_memory; |
1086 | | |
1087 | | /* If the hash code is not valid then compute it now */ |
1088 | 67.3k | if (icc_profile->hash_is_valid == false) { |
1089 | 0 | gsicc_get_icc_buff_hash(icc_profile->buffer, &hashcode, |
1090 | 0 | icc_profile->buffer_size); |
1091 | 0 | icc_profile->hashcode = hashcode; |
1092 | 0 | icc_profile->hash_is_valid = true; |
1093 | 67.3k | } else { |
1094 | 67.3k | hashcode = hashcode_in; |
1095 | 67.3k | } |
1096 | 67.3k | if ( icc_table == NULL ) { |
1097 | 15.0k | entry = (clist_icctable_entry_t *) gs_alloc_struct(stable_mem, |
1098 | 15.0k | clist_icctable_entry_t, &st_clist_icctable_entry, |
1099 | 15.0k | "clist_icc_addentry"); |
1100 | 15.0k | if (entry == NULL) |
1101 | 0 | return gs_rethrow(-1, "insufficient memory to allocate entry in icc table"); |
1102 | 15.0k | #ifdef PACIFY_VALGRIND |
1103 | | /* Avoid uninitialised padding upsetting valgrind when it's written |
1104 | | * into the clist. */ |
1105 | 15.0k | memset(entry, 0, sizeof(*entry)); |
1106 | 15.0k | #endif |
1107 | 15.0k | entry->next = NULL; |
1108 | 15.0k | entry->serial_data.hashcode = hashcode; |
1109 | 15.0k | entry->serial_data.size = -1; |
1110 | 15.0k | entry->serial_data.file_position = -1; |
1111 | 15.0k | entry->icc_profile = icc_profile; |
1112 | 15.0k | entry->render_is_valid = icc_profile->rend_is_valid; |
1113 | 15.0k | gsicc_adjust_profile_rc(icc_profile, 1, "clist_icc_addentry"); |
1114 | 15.0k | icc_table = gs_alloc_struct(stable_mem, clist_icctable_t, |
1115 | 15.0k | &st_clist_icctable, "clist_icc_addentry"); |
1116 | 15.0k | if (icc_table == NULL) |
1117 | 0 | return gs_rethrow(-1, "insufficient memory to allocate icc table"); |
1118 | 15.0k | icc_table->tablesize = 1; |
1119 | 15.0k | icc_table->head = entry; |
1120 | 15.0k | icc_table->final = entry; |
1121 | 15.0k | icc_table->memory = stable_mem; |
1122 | | /* For now, we are just going to put the icc_table itself |
1123 | | at band_range_max + 1. The ICC profiles are written |
1124 | | in the cfile at the current stored file position*/ |
1125 | 15.0k | cdev->icc_table = icc_table; |
1126 | 52.3k | } else { |
1127 | | /* First check if we already have this entry */ |
1128 | 52.3k | curr_entry = icc_table->head; |
1129 | 76.4k | for (k = 0; k < icc_table->tablesize; k++) { |
1130 | 72.1k | if (curr_entry->serial_data.hashcode == hashcode) |
1131 | 48.1k | return 0; /* A hit */ |
1132 | 24.0k | curr_entry = curr_entry->next; |
1133 | 24.0k | } |
1134 | | /* Add a new ICC profile */ |
1135 | 4.21k | entry = |
1136 | 4.21k | (clist_icctable_entry_t *) gs_alloc_struct(icc_table->memory, |
1137 | 4.21k | clist_icctable_entry_t, |
1138 | 4.21k | &st_clist_icctable_entry, |
1139 | 4.21k | "clist_icc_addentry"); |
1140 | 4.21k | if (entry == NULL) |
1141 | 0 | return gs_rethrow(-1, "insufficient memory to allocate entry in icc table"); |
1142 | 4.21k | #ifdef PACIFY_VALGRIND |
1143 | | /* Avoid uninitialised padding upsetting valgrind when it's written |
1144 | | * into the clist. */ |
1145 | 4.21k | memset(entry, 0, sizeof(*entry)); |
1146 | 4.21k | #endif |
1147 | 4.21k | entry->next = NULL; |
1148 | 4.21k | entry->serial_data.hashcode = hashcode; |
1149 | 4.21k | entry->serial_data.size = -1; |
1150 | 4.21k | entry->serial_data.file_position = -1; |
1151 | 4.21k | entry->icc_profile = icc_profile; |
1152 | 4.21k | entry->render_is_valid = icc_profile->rend_is_valid; |
1153 | 4.21k | gsicc_adjust_profile_rc(icc_profile, 1, "clist_icc_addentry"); |
1154 | 4.21k | icc_table->final->next = entry; |
1155 | 4.21k | icc_table->final = entry; |
1156 | 4.21k | icc_table->tablesize++; |
1157 | 4.21k | } |
1158 | 19.2k | return(0); |
1159 | 67.3k | } |
1160 | | |
1161 | | /* This writes out the color_usage_array for maxband+1 */ |
1162 | | int |
1163 | | clist_write_color_usage_array(gx_device_clist_writer *cldev) |
1164 | 690k | { |
1165 | 690k | gx_color_usage_t *color_usage_array; |
1166 | 690k | int i, size_data = cldev->nbands * sizeof(gx_color_usage_t); |
1167 | | |
1168 | | /* Now serialize the table data */ |
1169 | 690k | color_usage_array = (gx_color_usage_t *)gs_alloc_bytes(cldev->memory, size_data, |
1170 | 690k | "clist_write_color_usage_array"); |
1171 | 690k | if (color_usage_array == NULL) |
1172 | 0 | return gs_rethrow(-1, "insufficient memory for color_usage_array"); |
1173 | 23.3M | for (i = 0; i < cldev->nbands; i++) { |
1174 | 22.6M | memcpy(&(color_usage_array[i]), &(cldev->states[i].color_usage), sizeof(gx_color_usage_t)); |
1175 | 22.6M | } |
1176 | | /* Now go ahead and save the table data */ |
1177 | 690k | cmd_write_pseudo_band(cldev, (unsigned char *)color_usage_array, |
1178 | 690k | size_data, COLOR_USAGE_OFFSET); |
1179 | 690k | gs_free_object(cldev->memory, color_usage_array, "clist_write_color_usage_array"); |
1180 | 690k | return(0); |
1181 | 690k | } |
1182 | | |
1183 | | /* This writes out the spot equivalent cmyk values for the page. |
1184 | | These are used for overprint simulation. Read back by the |
1185 | | pdf14 device during the put_image operation */ |
1186 | | int |
1187 | | clist_write_op_equiv_cmyk_colors(gx_device_clist_writer *cldev, |
1188 | | equivalent_cmyk_color_params *op_equiv_cmyk) |
1189 | 0 | { |
1190 | 0 | return cmd_write_pseudo_band(cldev, (unsigned char *)op_equiv_cmyk, |
1191 | 0 | sizeof(equivalent_cmyk_color_params), SPOT_EQUIV_COLORS); |
1192 | 0 | } |
1193 | | |
1194 | | /* Compute color_usage over a Y range while writing clist */ |
1195 | | /* Sets color_usage fields and range_start. */ |
1196 | | /* Returns the height of the range, clamped to dev->height */ |
1197 | | /* NOT expected to be used. */ |
1198 | | int |
1199 | | clist_writer_color_usage(gx_device_clist_writer *cldev, int y, int height, |
1200 | | gx_color_usage_t *color_usage, int *range_start) |
1201 | 0 | { |
1202 | 0 | gx_color_usage_bits or = 0; |
1203 | 0 | bool slow_rop = false; |
1204 | 0 | int i, band_height = cldev->page_info.band_params.BandHeight; |
1205 | 0 | int start = y / band_height, end = (y + height) / band_height; |
1206 | |
1207 | 0 | for (i = start; i < end; ++i) { |
1208 | 0 | or |= cldev->states[i].color_usage.or; |
1209 | 0 | slow_rop |= cldev->states[i].color_usage.slow_rop; |
1210 | 0 | } |
1211 | 0 | color_usage->or = or; |
1212 | 0 | color_usage->slow_rop = slow_rop; |
1213 | 0 | *range_start = start * band_height; |
1214 | 0 | return min(end * band_height, cldev->height) - *range_start; |
1215 | 0 | } |
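/* Worked example (illustrative only): with BandHeight = 100, a call with
 * y = 200, height = 200 ORs together the color_usage of bands 2 and 3,
 * sets *range_start = 200 and returns 200 (clamped to the device height). */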
1216 | | |
1217 | | int |
1218 | | clist_writer_push_no_cropping(gx_device_clist_writer *cdev) |
1219 | 67.3k | { |
1220 | 67.3k | clist_writer_cropping_buffer_t *buf = gs_alloc_struct(cdev->memory, |
1221 | 67.3k | clist_writer_cropping_buffer_t, |
1222 | 67.3k | &st_clist_writer_cropping_buffer, "clist_writer_transparency_push"); |
1223 | | |
1224 | 67.3k | if (buf == NULL) |
1225 | 0 | return_error(gs_error_VMerror); |
1226 | 67.3k | if_debug4m('v', cdev->memory, "[v]push cropping[%d], min=%d, max=%d, buf="PRI_INTPTR"\n", |
1227 | 67.3k | cdev->cropping_level, cdev->cropping_min, cdev->cropping_max, (intptr_t)buf); |
1228 | 67.3k | buf->next = cdev->cropping_stack; |
1229 | 67.3k | cdev->cropping_stack = buf; |
1230 | 67.3k | buf->cropping_min = cdev->cropping_min; |
1231 | 67.3k | buf->cropping_max = cdev->cropping_max; |
1232 | 67.3k | buf->mask_id = cdev->mask_id; |
1233 | 67.3k | buf->temp_mask_id = cdev->temp_mask_id; |
1234 | 67.3k | cdev->cropping_level++; |
1235 | 67.3k | return 0; |
1236 | 67.3k | } |
1237 | | |
1238 | | int |
1239 | | clist_writer_push_cropping(gx_device_clist_writer *cdev, int ry, int rheight) |
1240 | 67.3k | { |
1241 | 67.3k | int code = clist_writer_push_no_cropping(cdev); |
1242 | | |
1243 | 67.3k | if (code < 0) |
1244 | 0 |         return code; |
1245 | 67.3k | cdev->cropping_min = max(cdev->cropping_min, ry); |
1246 | 67.3k | cdev->cropping_max = min(cdev->cropping_max, ry + rheight); |
1247 | 67.3k | return 0; |
1248 | 67.3k | } |
1249 | | |
1250 | | int |
1251 | | clist_writer_pop_cropping(gx_device_clist_writer *cdev) |
1252 | 67.3k | { |
1253 | 67.3k | clist_writer_cropping_buffer_t *buf = cdev->cropping_stack; |
1254 | | |
1255 | 67.3k | if (buf == NULL) |
1256 | 0 |         return_error(gs_error_unregistered); /* Must not happen. */ |
1257 | 67.3k | cdev->cropping_min = buf->cropping_min; |
1258 | 67.3k | cdev->cropping_max = buf->cropping_max; |
1259 | 67.3k | cdev->mask_id = buf->mask_id; |
1260 | 67.3k | cdev->temp_mask_id = buf->temp_mask_id; |
1261 | 67.3k | cdev->cropping_stack = buf->next; |
1262 | 67.3k | cdev->cropping_level--; |
1263 | 67.3k | if_debug4m('v', cdev->memory, "[v]pop cropping[%d] min=%d, max=%d, buf="PRI_INTPTR"\n", |
1264 | 67.3k | cdev->cropping_level, cdev->cropping_min, cdev->cropping_max, (intptr_t)buf); |
1265 | 67.3k | gs_free_object(cdev->memory, buf, "clist_writer_transparency_pop"); |
1266 | 67.3k | return 0; |
1267 | 67.3k | } |
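/* [Editor's note: illustrative sketch, not part of gxclist.c.]  The push/pop
 * pair above keeps a singly linked stack of cropping frames: push saves the
 * current band range and narrows it to the intersection with the new rect,
 * pop restores the saved range and frees the frame.  This standalone version
 * uses malloc/free and hypothetical names (crop_frame_t, crop_state_t). */
#include <stdlib.h>

typedef struct crop_frame_s {
    struct crop_frame_s *next;
    int saved_min, saved_max;
} crop_frame_t;

typedef struct { crop_frame_t *stack; int min, max; } crop_state_t;

static int
crop_push(crop_state_t *cs, int ry, int rheight)
{
    crop_frame_t *f = (crop_frame_t *)malloc(sizeof(*f));

    if (f == NULL)
        return -1;
    f->saved_min = cs->min;
    f->saved_max = cs->max;
    f->next = cs->stack;
    cs->stack = f;
    /* Narrow the active range to the intersection with [ry, ry+rheight). */
    if (ry > cs->min) cs->min = ry;
    if (ry + rheight < cs->max) cs->max = ry + rheight;
    return 0;
}

static int
crop_pop(crop_state_t *cs)
{
    crop_frame_t *f = cs->stack;

    if (f == NULL)
        return -1;              /* unbalanced pop */
    cs->min = f->saved_min;
    cs->max = f->saved_max;
    cs->stack = f->next;
    free(f);
    return 0;
}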
1268 | | |
1269 | | int |
1270 | | clist_writer_check_empty_cropping_stack(gx_device_clist_writer *cdev) |
1271 | 12.6k | { |
1272 | 12.6k | if (cdev->cropping_stack != NULL) { |
1273 | 0 | if_debug1m('v', cdev->memory, "[v]Error: left %d cropping(s)\n", cdev->cropping_level); |
1274 | 0 | return_error(gs_error_unregistered); /* Must not happen */ |
1275 | 0 | } |
1276 | 12.6k | return 0; |
1277 | 12.6k | } |
1278 | | |
1279 | | /* Retrieve total size for cfile and bfile. */ |
1280 | | int clist_data_size(const gx_device_clist *cdev, int select) |
1281 | 627k | { |
1282 | 627k | const gx_band_page_info_t *pinfo = &cdev->common.page_info; |
1283 | 627k | clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile); |
1284 | 627k | const char *fname = (!select ? pinfo->bfname : pinfo->cfname); |
1285 | 627k | int code, size; |
1286 | | |
1287 | 627k | code = pinfo->io_procs->fseek(pfile, 0, SEEK_END, fname); |
1288 | 627k | if (code < 0) |
1289 | 0 | return_error(gs_error_unregistered); /* Must not happen. */ |
1290 | 627k | code = pinfo->io_procs->ftell(pfile); |
1291 | 627k | if (code < 0) |
1292 | 0 | return_error(gs_error_unregistered); /* Must not happen. */ |
1293 | 627k | size = code; |
1294 | 627k | return size; |
1295 | 627k | } |
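/* [Editor's note: illustrative sketch, not part of gxclist.c.]  clist_data_size()
 * above measures a band file by seeking to its end and asking for the
 * resulting offset.  The same idiom with stdio, as a hypothetical helper that
 * returns -1 on failure (and, like the original, leaves the position at EOF): */
#include <stdio.h>

static long
file_size(FILE *fp)
{
    long size;

    if (fseek(fp, 0L, SEEK_END) != 0)
        return -1L;
    size = ftell(fp);
    if (size < 0)
        return -1L;
    return size;
}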
1296 | | |
1297 | | /* Get command list data. */ |
1298 | | int |
1299 | | clist_get_data(const gx_device_clist *cdev, int select, int64_t offset, byte *buf, int length) |
1300 | 311k | { |
1301 | 311k | const gx_band_page_info_t *pinfo = &cdev->common.page_info; |
1302 | 311k | clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile); |
1303 | 311k | const char *fname = (!select ? pinfo->bfname : pinfo->cfname); |
1304 | 311k | int code; |
1305 | | |
1306 | 311k | code = pinfo->io_procs->fseek(pfile, offset, SEEK_SET, fname); |
1307 | 311k | if (code < 0) |
1308 | 0 | return_error(gs_error_unregistered); /* Must not happen. */ |
1309 | | /* This assumes that fread_chars doesn't return prematurely |
1310 | | when the buffer is not fully filled and the end of stream is not reached. */ |
1311 | 311k | return pinfo->io_procs->fread_chars(buf, length, pfile); |
1312 | 311k | } |
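/* [Editor's note: illustrative sketch, not part of gxclist.c.]  clist_get_data()
 * above is a positioned read: seek to an absolute offset, then read up to
 * 'length' bytes.  A stdio equivalent, with a hypothetical name: */
#include <stdio.h>

static int
read_at(FILE *fp, long offset, unsigned char *buf, size_t length)
{
    if (fseek(fp, offset, SEEK_SET) != 0)
        return -1;
    /* As the comment above notes for fread_chars, fread is assumed to return
     * short only at end of file or on error. */
    return (int)fread(buf, 1, length, fp);
}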
1313 | | |
1314 | | /* Put command list data. */ |
1315 | | int |
1316 | | clist_put_data(const gx_device_clist *cdev, int select, int64_t offset, const byte *buf, int length) |
1317 | 247k | { |
1318 | 247k | const gx_band_page_info_t *pinfo = &cdev->common.page_info; |
1319 | 247k | clist_file_ptr pfile = (!select ? pinfo->bfile : pinfo->cfile); |
1320 | 247k | int64_t code; |
1321 | | |
1322 | 247k | code = pinfo->io_procs->ftell(pfile); |
1323 | 247k | if (code < 0) |
1324 | 0 | return_error(gs_error_unregistered); /* Must not happen. */ |
1325 | 247k | if (code != offset) { |
1326 |  |         /* Writes are assumed to be strictly sequential (append-only). */ |
1327 | 0 | return_error(gs_error_unregistered); /* Must not happen. */ |
1328 | 0 | } |
1329 | | /* This assumes that fwrite_chars doesn't return prematurely |
1330 | | when the buffer is not fully written, except with an error. */ |
1331 | 247k | return pinfo->io_procs->fwrite_chars(buf, length, pfile); |
1332 | 247k | } |
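/* [Editor's note: illustrative sketch, not part of gxclist.c.]  clist_put_data()
 * above enforces strictly sequential writing: the current file position must
 * already equal the requested offset, otherwise it is treated as a hard
 * error.  A stdio version of that check, with hypothetical names: */
#include <stdio.h>

static int
append_at(FILE *fp, long expected_offset, const unsigned char *buf, size_t length)
{
    long pos = ftell(fp);

    if (pos < 0)
        return -1;
    if (pos != expected_offset)
        return -1;              /* only consecutive writes are allowed */
    return (int)fwrite(buf, 1, length, fp);
}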
1333 | | |
1334 | | gx_device_clist * |
1335 | | clist_make_accum_device(gs_memory_t *mem, gx_device *target, const char *dname, void *base, int space, |
1336 | | gx_device_buf_procs_t *buf_procs, gx_band_params_t *band_params, |
1337 | | bool use_memory_clist, bool uses_transparency, |
1338 | | gs_pattern1_instance_t *pinst) |
1339 | 128k | { |
1340 | 128k | gx_device_clist *cdev = gs_alloc_struct(mem->stable_memory, gx_device_clist, |
1341 | 128k | &st_device_clist, "clist_make_accum_device"); |
1342 | 128k | gx_device_clist_writer *cwdev = (gx_device_clist_writer *)cdev; |
1343 | | |
1344 | 128k | if (cdev == 0) |
1345 | 0 | return 0; |
1346 | 128k | memset(cdev, 0, sizeof(*cdev)); |
1347 | 128k | cwdev->params_size = sizeof(gx_device_clist); |
1348 | 128k | cwdev->initialize_device_procs = clist_initialize_device_procs; |
1349 | 128k | cwdev->dname = dname; |
1350 | 128k | cwdev->memory = mem->stable_memory; |
1351 | 128k | cwdev->stype = &st_device_clist; |
1352 | 128k | cwdev->stype_is_dynamic = false; |
1353 | 128k | rc_init(cwdev, mem->stable_memory, 1); |
1354 | 128k | cwdev->retained = true; |
1355 | 128k | cwdev->is_open = false; |
1356 | 128k | cwdev->color_info = target->color_info; |
1357 | 128k | cwdev->pinst = pinst; |
1358 | 128k | cwdev->cached_colors = target->cached_colors; |
1359 | 128k | if (pinst != NULL) { |
1360 | 128k | cwdev->width = pinst->size.x; |
1361 | 128k | cwdev->height = pinst->size.y; |
1362 | 128k | cwdev->band_params.BandHeight = pinst->size.y; |
1363 | 128k | } else { |
1364 | 0 | cwdev->width = target->width; |
1365 | 0 | cwdev->height = target->height; |
1366 | 0 | } |
1367 | 128k | cwdev->LeadingEdge = target->LeadingEdge; |
1368 | 128k | cwdev->num_planar_planes = target->num_planar_planes; |
1369 | 128k | cwdev->HWResolution[0] = target->HWResolution[0]; |
1370 | 128k | cwdev->HWResolution[1] = target->HWResolution[1]; |
1371 | 128k | cwdev->icc_cache_cl = NULL; |
1372 | 128k | cwdev->icc_table = NULL; |
1373 | 128k | cwdev->UseCIEColor = target->UseCIEColor; |
1374 | 128k | cwdev->LockSafetyParams = true; |
1375 | 128k | cwdev->initialize_device_procs((gx_device *)cwdev); |
1376 | 128k | gx_device_fill_in_procs((gx_device *)cwdev); |
1377 | 128k | gx_device_copy_color_params((gx_device *)cwdev, target); |
1378 | 128k | rc_assign(cwdev->target, target, "clist_make_accum_device"); |
1379 | 128k | clist_init_io_procs(cdev, use_memory_clist); |
1380 | 128k | cwdev->data = base; |
1381 | 128k | cwdev->data_size = space; |
1382 | 128k | memcpy (&(cwdev->buf_procs), buf_procs, sizeof(gx_device_buf_procs_t)); |
1383 | 128k | cwdev->page_uses_transparency = uses_transparency; |
1384 | 128k | cwdev->band_params.BandWidth = cwdev->width; |
1385 | 128k | cwdev->band_params.BandBufferSpace = 0; |
1386 | 128k | cwdev->do_not_open_or_close_bandfiles = false; |
1387 | 128k | cwdev->bandlist_memory = mem->non_gc_memory; |
1388 | 128k | set_dev_proc(cwdev, get_clipping_box, gx_default_get_clipping_box); |
1389 | 128k | set_dev_proc(cwdev, get_profile, gx_forward_get_profile); |
1390 | 128k | set_dev_proc(cwdev, set_graphics_type_tag, gx_forward_set_graphics_type_tag); |
1391 | 128k | cwdev->graphics_type_tag = target->graphics_type_tag; /* initialize to same as target */ |
1392 | 128k | cwdev->interpolate_control = target->interpolate_control; /* initialize to same as target */ |
1393 | 128k | cwdev->non_strict_bounds = target->non_strict_bounds; /* initialize to same as target */ |
1394 | | |
1395 | | /* to be set by caller: cwdev->finalize = finalize; */ |
1396 | | |
1397 | | /* Fields left zeroed : |
1398 | | int max_fill_band; |
1399 | | dev_proc_dev_spec_op(orig_spec_op); |
1400 | | float MediaSize[2]; |
1401 | | float ImagingBBox[4]; |
1402 | | bool ImagingBBox_set; |
1403 | | float Margins[2]; |
1404 | | float HWMargins[4]; |
1405 | | long PageCount; |
1406 | | long ShowpageCount; |
1407 | | int NumCopies; |
1408 | | bool NumCopies_set; |
1409 | | bool IgnoreNumCopies; |
1410 | | int disable_mask; |
1411 | | gx_page_device_procs page_procs; |
1412 | | |
1413 | | */ |
1414 | 128k | return cdev; |
1415 | 128k | } |
1416 | | |
1417 | | /* GC information */ |
1418 | | #define DEVICE_MUTATED_TO_CLIST(pdev) \ |
1419 | 9.07M | (((gx_device_clist_mutatable *)(pdev))->buffer_space != 0) |
1420 | | |
1421 | | static |
1422 | 8.89M | ENUM_PTRS_WITH(device_clist_mutatable_enum_ptrs, gx_device_clist_mutatable *pdev) |
1423 | 8.89M | if (DEVICE_MUTATED_TO_CLIST(pdev)) |
1424 | 8.89M | ENUM_PREFIX(st_device_clist, 0); |
1425 | 0 | else |
1426 | 0 | ENUM_PREFIX(st_device_forward, 0); |
1427 | 0 | break; |
1428 | 8.89M | ENUM_PTRS_END |
1429 | | static |
1430 | 181k | RELOC_PTRS_WITH(device_clist_mutatable_reloc_ptrs, gx_device_clist_mutatable *pdev) |
1431 | 181k | { |
1432 | 181k | if (DEVICE_MUTATED_TO_CLIST(pdev)) |
1433 | 181k | RELOC_PREFIX(st_device_clist); |
1434 | 0 | else |
1435 | 181k | RELOC_PREFIX(st_device_forward); |
1436 | 181k | } RELOC_PTRS_END |
1437 | | public_st_device_clist_mutatable(); |
1438 | | |
1439 | | int |
1440 | | clist_mutate_to_clist(gx_device_clist_mutatable *pdev, |
1441 | | gs_memory_t *buffer_memory, |
1442 | | byte **the_memory, |
1443 | | const gdev_space_params *space_params, |
1444 | | bool bufferSpace_is_exact, |
1445 | | const gx_device_buf_procs_t *buf_procs, |
1446 | | dev_proc_dev_spec_op(dev_spec_op), |
1447 | | size_t min_buffer_space) |
1448 | 221k | { |
1449 | 221k | gx_device *target = (gx_device *)pdev; |
1450 | 221k | size_t space; |
1451 | 221k | int code; |
1452 | 221k | gx_device_clist *const pclist_dev = (gx_device_clist *)pdev; |
1453 | 221k | gx_device_clist_common * const pcldev = &pclist_dev->common; |
1454 | 221k | bool reallocate = the_memory != NULL && *the_memory != NULL; |
1455 | 221k | byte *base; |
1456 | 221k | bool save_is_open = pdev->is_open; /* Save around temporary failure in open_c loop */ |
1457 | | |
1458 | 221k | while (target->parent != NULL) { |
1459 | 0 | target = target->parent; |
1460 | 0 | gx_update_from_subclass(target); |
1461 | 0 | } |
1462 | | |
1463 | | /* Try to allocate based simply on param-requested buffer size */ |
1464 | | #ifdef DEBUGGING_HACKS |
1465 | | #define BACKTRACE(first_arg)\ |
1466 | | BEGIN\ |
1467 | | ulong *fp_ = (ulong *)&first_arg - 2;\ |
1468 | | for (; fp_ && (fp_[1] & 0xff000000) == 0x08000000; fp_ = (ulong *)*fp_)\ |
1469 | | dmprintf2(buffer_memory, " fp="PRI_INTPTR" ip=0x%lx\n", (intptr_t)fp_, fp_[1]);\ |
1470 | | END |
1471 | | dmputs(buffer_memory, "alloc buffer:\n"); |
1472 | | BACKTRACE(pdev); |
1473 | | #endif /*DEBUGGING_HACKS*/ |
1474 | 221k | for ( space = space_params->BufferSpace; ; ) { |
1475 | 221k | base = (reallocate ? |
1476 | 117k | (byte *)gs_resize_object(buffer_memory, *the_memory, space, |
1477 | 221k | "cmd list buffer") : |
1478 | 221k | gs_alloc_bytes(buffer_memory, space, |
1479 | 221k | "cmd list buffer")); |
1480 | 221k | if (base != NULL) |
1481 | 221k | break; /* Allocation worked! Stop trying. */ |
1482 | 0 | if (bufferSpace_is_exact) { |
1483 | | /* We wanted a specific size. Accept no substitutes. */ |
1484 | 0 | break; |
1485 | 0 | } |
1486 | | /* Let's try again for half the size. */ |
1487 | 0 | if (space == min_buffer_space) |
1488 | 0 | break; /* We already failed at the minimum size. */ |
1489 | 0 | space >>= 1; |
1490 | 0 | if (space < min_buffer_space) |
1491 | 0 | space = min_buffer_space; |
1492 | 0 | } |
1493 | 221k | if (base == NULL) |
1494 | 0 | return_error(gs_error_VMerror); |
1495 | | |
1496 | | /* Try opening the command list, to see if we allocated */ |
1497 | | /* enough buffer space. */ |
1498 | 271k | open_c: |
1499 | 271k | if (the_memory) |
1500 | 271k | *the_memory = base; |
1501 | 271k | pdev->buf = base; |
1502 | 271k | pdev->buffer_space = space; |
1503 | 271k | pclist_dev->common.orig_spec_op = dev_spec_op; |
1504 | 271k | clist_init_io_procs(pclist_dev, pdev->BLS_force_memory); |
1505 | 271k | clist_init_params(pclist_dev, base, space, target, |
1506 | 271k | *buf_procs, |
1507 | 271k | space_params->band, |
1508 | 271k | false, /* do_not_open_or_close_bandfiles */ |
1509 | 271k | (pdev->bandlist_memory == 0 ? pdev->memory->non_gc_memory: |
1510 | 271k | pdev->bandlist_memory), |
1511 | 271k | pdev->clist_disable_mask, |
1512 | 271k | pdev->page_uses_transparency, |
1513 | 271k | pdev->page_uses_overprint); |
1514 | 271k | code = clist_open( (gx_device *)pcldev ); |
1515 | 271k | if (code < 0) { |
1516 | | /* If there wasn't enough room, and we haven't */ |
1517 | | /* already shrunk the buffer, try enlarging it. */ |
1518 | 50.0k | if ( code == gs_error_rangecheck && |
1519 | 50.0k | space >= space_params->BufferSpace && |
1520 | 50.0k | !bufferSpace_is_exact |
1521 | 50.0k | ) { |
1522 | 50.0k | space += space / 8; |
1523 | 50.0k | if (reallocate) { |
1524 | 42.6k | base = gs_resize_object(buffer_memory, |
1525 | 42.6k | *the_memory, space, |
1526 | 42.6k | "cmd list buf(retry open)"); |
1527 | 42.6k | } else { |
1528 | 7.37k | gs_free_object(buffer_memory, base, |
1529 | 7.37k | "cmd list buf(retry open)"); |
1530 | 7.37k | base = gs_alloc_bytes(buffer_memory, space, |
1531 | 7.37k | "cmd list buf(retry open)"); |
1532 | 7.37k | if (the_memory != NULL) |
1533 | 7.37k | *the_memory = base; |
1534 | 7.37k | } |
1535 | 50.0k | if (base != NULL) { |
1536 | 50.0k | pdev->is_open = save_is_open; /* allow for success when we loop */ |
1537 | 50.0k | goto open_c; |
1538 | 50.0k | } |
1539 | 50.0k | } |
1540 | | /* Failure. */ |
1541 | 0 | if (!reallocate) { |
1542 | 0 | gs_free_object(buffer_memory, base, "cmd list buf"); |
1543 | 0 | pdev->buffer_space = 0; |
1544 | 0 | if (the_memory != NULL) |
1545 | 0 | *the_memory = NULL; |
1546 | 0 | pdev->buf = NULL; |
1547 | 0 | } |
1548 | 0 | } |
1549 | 221k | if (code < 0) |
1550 | 0 | pdev->is_open = save_is_open; |
1551 | 221k | return code; |
1552 | 271k | } |
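/* [Editor's note: illustrative sketch, not part of gxclist.c.]  clist_mutate_to_clist()
 * above sizes its buffer in two phases: halve the requested size (down to a
 * floor) until the allocation succeeds, then, if opening the command list
 * still reports the buffer is too small, grow it by one eighth and retry --
 * but only when the size was not shrunk and the caller did not demand an
 * exact size.  This standalone model uses malloc/realloc and a caller-supplied
 * fits() predicate; all names are hypothetical. */
#include <stdlib.h>

static unsigned char *
size_clist_buffer(size_t request, size_t min_space, int exact,
                  int (*fits)(size_t space), size_t *space_out)
{
    size_t space = request;
    unsigned char *base;
    int shrunk;

    /* Phase 1: shrink until the allocation succeeds (or we hit the floor). */
    for (;;) {
        base = (unsigned char *)malloc(space);
        if (base != NULL || exact || space == min_space)
            break;
        space >>= 1;
        if (space < min_space)
            space = min_space;
    }
    if (base == NULL)
        return NULL;
    shrunk = (space < request);
    /* Phase 2: grow by 1/8 while the buffer is still too small, unless the
     * size is exact or we already had to shrink. */
    while (!fits(space)) {
        unsigned char *bigger;

        if (exact || shrunk) {
            free(base);
            return NULL;
        }
        space += space / 8;
        bigger = (unsigned char *)realloc(base, space);
        if (bigger == NULL) {
            free(base);
            return NULL;
        }
        base = bigger;
    }
    *space_out = space;
    return base;
}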