/src/ghostpdl/base/gxclmem.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* Copyright (C) 2001-2022 Artifex Software, Inc. |
2 | | All Rights Reserved. |
3 | | |
4 | | This software is provided AS-IS with no warranty, either express or |
5 | | implied. |
6 | | |
7 | | This software is distributed under license and may not be copied, |
8 | | modified or distributed except as expressly authorized under the terms |
9 | | of the license contained in the file LICENSE in this distribution. |
10 | | |
11 | | Refer to licensing information at http://www.artifex.com or contact |
12 | | Artifex Software, Inc., 1305 Grant Avenue - Suite 200, Novato, |
13 | | CA 94945, U.S.A., +1(415)492-9861, for further information. |
14 | | */ |
15 | | |
16 | | |
17 | | /* RAM-based command list implementation */ |
18 | | #include "memory_.h" |
19 | | #include "gx.h" |
20 | | #include "gserrors.h" |
21 | | #include "gxclmem.h" |
22 | | #include "gssprintf.h" |
23 | | |
24 | | #include "valgrind.h" |
25 | | |
26 | | /* |
27 | | * Based on: memfile.c Version: 1.4 3/21/95 14:59:33 by Ray Johnston. |
28 | | * Copyright assigned to Aladdin Enterprises. |
29 | | */ |
30 | | |
31 | | /***************************************************************************** |
32 | | |
33 | | This package is more or less optimal for use by the clist routines, with |
34 | | a couple of the more likely to change "tuning" parameters given in the |
35 | | two macros below -- NEED_TO_COMPRESS and GET_NUM_RAW_BUFFERS. Usually |
36 | | the NEED_TO_COMPRESS decision will be deferred as long as possible based |
37 | | on some total system free RAM space remaining. |
38 | | |
39 | | The data structures are in "memfile.h", and the primary 'tuning' parameter |
40 | | is MEMFILE_DATA_SIZE. This should not be too small to keep the overhead |
41 | | ratio of the block structures to the clist data small. A value of 16384 |
42 | | is probably in the ballpark. |
43 | | |
44 | | The concept is that a memory based "file" is created initially without |
45 | | compression, with index blocks every MEMFILE_DATA_SIZE of the file. The |
46 | | primary blocks (used by the memfile_fseek logic) for indexing into the |
47 | | file are called 'logical' (LOG_MEMFILE_BLK) and the data in stored in a |
48 | | different block called a 'physical' block (PHYS_MEMFILE_BLK). When the |
49 | | file is not yet compressed, indicated by (f->phys_curr==NULL), then there |
50 | | is one physical block for each logical block. The physical block also has |
51 | | the 'data_limit' set to NULL if the data is not compressed. Thus when a |
52 | | file is not compressed there is one physical block for each logical block. |
53 | | |
54 | | COMPRESSION. |
55 | | |
56 | | When compression is triggered for a file then all of the blocks except |
57 | | the last are compressed. Compression will result in a physical block |
58 | | that holds data for more than one logical block. Each logical block now |
59 | | points to the start of compressed data in a physical block with the |
60 | | 'phys_pdata' pointer. The 'data_limit' pointer in the physical block is |
61 | | where the compression logic stopped storing data (as stream data |
62 | | compressors are allowed to do). The data for the logical block may span |
63 | | to the next physical block. Once physical blocks are compressed, they are |
64 | | chained together using the 'link' field. |
65 | | |
66 | | The 'f->phys_curr' points to the block being filled by compression, with |
67 | | the 'f->wt.ptr' pointing to the last byte filled in the block. These are |
68 | | used during subsequent compression when the last logical block of the |
69 | | file fills the physical block. |
70 | | |
71 | | DECOMPRESSION. |
72 | | |
73 | | During reading the clist, if the logical block points to an uncompressed |
74 | | physical block, then 'memfile_get_pdata' simply sets the 'pdata' and the |
75 | | 'pdata_end' pointers. If the logical block was compressed, then it may |
76 | | still be resident in a cache of decompression buffers. The number of these |
77 | | decompression buffers is not critical -- even one is enough, but having |
78 | | more may prevent decompressing blocks more than once (a cache_miss). The |
79 | | number of decompression buffers, called "raw" buffers, that are attempted |
80 | | to allocate can be changed with the GET_NUM_RAW_BUFFERS macro, but no |
81 | | error occurs if less than that number can be allocated. |
82 | | |
83 | | If the logical block still resides in a decompression cache buffer, then |
84 | | the 'raw_block' will identify the block. If the data for a logical block |
85 | | only exists in compressed form, then the "tail" of the list of decompression |
86 | | buffers is re-used, marking the 'raw_block' of the logical block that was |
87 | | previously associated with this data to NULL. |
88 | | |
89 | | Whichever raw decompression buffer is accessed is moved to the head of the |
90 | | decompression buffer list in order to keep the tail of the list as the |
91 | | "least recently used". |
92 | | |
93 | | There are some DEBUG global static variables used to count the number of |
94 | | cache hits "tot_cache_hits" and the number of times a logical block is |
95 | | decompressed "tot_cache_miss". Note that the actual number of cache miss |
96 | | events is 'f->log_length/MEMFILE_DATA_SIZE - tot_cache_miss' since we |
97 | | assume that every logical block must be decompressed at least once. |
98 | | |
99 | | Empirical results so far indicate that if one cache raw buffer for every |
100 | | 32 logical blocks, then the hit/miss ratio exceeds 99%. Of course, the |
101 | | number of raw buffers should be more than 1 if possible, and in many |
102 | | implementations (single threaded), the memory usage does not increase |
103 | | during the page output step so almost all of memory can be used for |
104 | | these raw buffers to prevent the likelihood of a cache miss. |
105 | | |
106 | | Of course, this is dependent on reasonably efficient clist blocking |
107 | | during writing which is dependent on the data and on the BufferSpace |
108 | | value which determines the number of clist band data buffers available. |
109 | | Empirical testing shows that the overall efficiency is best if the |
110 | | BufferSpace value is 1,000,000 (as in the original Ghostscript source). |
111 | | [Note: I expected to be able to use smaller buffer sizes for some cases, |
112 | | but this resulted in a high level of thrashing...RJJ] |
113 | | |
114 | | LIMITATIONS. |
115 | | |
116 | | The most serious limitation is caused by the way 'memfile_fwrite' decides |
117 | | to free up and re-initialize a file. If memfile_fwrite is called after |
118 | | a seek to any location except the start of the file, then an error is |
119 | | issued since logic is not present to properly free up on a partial file. |
120 | | This is not a problem as used by the 'clist' logic since rewind is used |
121 | | to position to the start of a file when re-using it after an 'erasepage'. |
122 | | |
123 | | Since the 'clist' logic always traverses the clist using fseek's to ever |
124 | | increasing locations, no optimizations of backward seeks were implemented. |
125 | | This would be relatively easy with back chain links or bi-directional |
126 | | "X-OR" pointer information to link the logical block chain. The rewind |
127 | | function is optimal and moves directly to the start of the file. |
128 | | |
129 | | ********************************************************************************/ |
130 | | |
131 | | /* |
132 | | The need to compress should be conditional on the amount of available |
133 | | memory, but we don't have a way to communicate this to these routines. |
134 | | Instead, we simply start compressing when we've allocated more than |
135 | | COMPRESSION_THRESHOLD amount of data. The threshold should be at |
136 | | least as large as the fixed overhead of the compressor plus the |
137 | | decompressor, plus the expected compressed size of a block that size. |
138 | | |
139 | | As a testing measure we have a define TEST_BAND_LIST_COMPRESSION |
140 | | which, if set, will set the threshold to a low value so as to cause |
141 | | compression to trigger. |
142 | | */ |
143 | | static const int64_t COMPRESSION_THRESHOLD = |
144 | | #ifdef TEST_BAND_LIST_COMPRESSION |
145 | | 1024; /* Low value to force compression */ |
146 | | #else |
147 | | 500000000; /* 0.5 Gb for host machines */ |
148 | | #endif |
149 | | |
150 | | #define NEED_TO_COMPRESS(f)\ |
151 | 266k | ((f)->ok_to_compress && (f)->total_space > COMPRESSION_THRESHOLD) |
152 | | |
153 | | /* FOR NOW ALLOCATE 1 raw buffer for every 32 blocks (at least 8, no more than 64) */ |
154 | | #define GET_NUM_RAW_BUFFERS( f ) \ |
155 | 0 | min(64, max(f->log_length/MEMFILE_DATA_SIZE/32, 8)) |
156 | | |
157 | | #define MALLOC(f, siz, cname)\ |
158 | 4.67M | (void *)gs_alloc_bytes((f)->data_memory, siz, cname) |
159 | | #define FREE(f, obj, cname)\ |
160 | 4.67M | do {gs_free_object((f)->data_memory, obj, cname);\ |
161 | 4.67M | (f)->total_space -= sizeof(*(obj));} while (0) |
162 | | |
163 | | /* Structure descriptor for GC */ |
164 | | private_st_MEMFILE(); |
165 | | |
166 | | /* forward references */ |
167 | | static void memfile_free_mem(MEMFILE * f); |
168 | | static int memfile_init_empty(MEMFILE * f); |
169 | | static int memfile_set_memory_warning(clist_file_ptr cf, int bytes_left); |
170 | | static int memfile_fclose(clist_file_ptr cf, const char *fname, bool delete); |
171 | | static int memfile_get_pdata(MEMFILE * f); |
172 | | |
173 | | /************************************************/ |
174 | | /* #define DEBUG /- force statistics -/ */ |
175 | | /************************************************/ |
176 | | |
177 | | #ifdef DEBUG |
178 | | int64_t tot_compressed; |
179 | | int64_t tot_raw; |
180 | | int64_t tot_cache_miss; |
181 | | int64_t tot_cache_hits; |
182 | | int64_t tot_swap_out; |
183 | | |
184 | | /* |
185 | | The following pointers are here only for helping with a dumb debugger |
186 | | that can't inspect local variables! |
187 | | */ |
188 | | byte *decomp_wt_ptr0, *decomp_wt_limit0; |
189 | | const byte *decomp_rd_ptr0, *decomp_rd_limit0; |
190 | | byte *decomp_wt_ptr1, *decomp_wt_limit1; |
191 | | const byte *decomp_rd_ptr1, *decomp_rd_limit1; |
192 | | |
193 | | #endif |
194 | | |
195 | | /* ----------------------------- Memory Allocation --------------------- */ |
196 | | static void * /* allocated memory's address, 0 if failure */ |
197 | | allocateWithReserve( |
198 | | MEMFILE *f, /* file to allocate mem to */ |
199 | | int sizeofBlock, /* size of block to allocate */ |
200 | | int *return_code, /* RET 0 ok, -ve GS-style error, or +1 if OK but low memory */ |
201 | | const char *allocName, /* name to allocate by */ |
202 | | const char *errorMessage /* error message to print */ |
203 | | ) |
204 | 532k | { |
205 | 532k | int code = 0; /* assume success */ |
206 | 532k | void *block = MALLOC(f, sizeofBlock, allocName); |
207 | | |
208 | 532k | if (block == NULL) { |
209 | | /* Try to recover block from reserve */ |
210 | 408 | if (sizeofBlock == sizeof(LOG_MEMFILE_BLK)) { |
211 | 0 | if (f->reserveLogBlockCount > 0) { |
212 | 0 | block = f->reserveLogBlockChain; |
213 | 0 | f->reserveLogBlockChain = f->reserveLogBlockChain->link; |
214 | 0 | --f->reserveLogBlockCount; |
215 | 0 | } |
216 | 408 | } else if (sizeofBlock == sizeof(PHYS_MEMFILE_BLK) || |
217 | 408 | sizeofBlock == sizeof(RAW_BUFFER) |
218 | 408 | ) { |
219 | 408 | if (f->reservePhysBlockCount > 0) { |
220 | 11 | block = f->reservePhysBlockChain; |
221 | 11 | f->reservePhysBlockChain = f->reservePhysBlockChain->link; |
222 | 11 | --f->reservePhysBlockCount; |
223 | 11 | } |
224 | 408 | } |
225 | 408 | if (block != NULL) |
226 | 11 | code = 1; /* successful, but allocated from reserve */ |
227 | 408 | } |
228 | 532k | if (block != NULL) |
229 | 532k | f->total_space += sizeofBlock; |
230 | 397 | else |
231 | 397 | code = gs_note_error(gs_error_VMerror); |
232 | 532k | *return_code = code; |
233 | 532k | return block; |
234 | 532k | } |
235 | | |
236 | | /* ---------------- Open/close/unlink ---------------- */ |
237 | | |
/*
 * Open a memory-based "file" for the clist machinery.
 *
 * fname[0] == 0 requests a brand new scratch file: a MEMFILE is allocated
 * and initialized empty, and its address is encoded back into fname as a
 * 0xff flag byte followed by the %p-printed pointer, for use in later
 * clist_fopen / unlink calls.
 *
 * If fname already begins with the 0xff flag and fmode starts with 'r' or
 * 'a', this is a reopen of an existing memfile: the base instance is
 * reused directly when it is not currently open; otherwise a reader
 * 'clone' is created that shares the physical data blocks but carries its
 * own read position, raw-buffer cache and (if compressed) logical block
 * list plus decompressor state.
 *
 * Returns 0 on success with *pf set to the MEMFILE, or a negative GS
 * error code (*pf left NULL).
 */
static int
memfile_fopen(char fname[gp_file_name_sizeof], const char *fmode,
              clist_file_ptr /*MEMFILE * */ * pf,
              gs_memory_t *mem, gs_memory_t *data_mem, bool ok_to_compress)
{
    MEMFILE *f = NULL;
    int code = 0;

    *pf = NULL;                 /* in case we have an error */

    /* fname[0] == 0 if this is not reopening */
    /* memfile file names begin with a flag byte == 0xff */
    if (fname[0] == '\377' && (fmode[0] == 'r' || fmode[0] == 'a')) {
        MEMFILE *base_f = NULL;

        /* reopening an existing file: decode the base MEMFILE address */
        code = sscanf(fname+1, "%p", &base_f);
        if (code != 1) {
            code = gs_note_error(gs_error_ioerror);
            goto finish;
        }
        /* Reopen an existing file for 'read' */
        if (base_f->is_open == false) {
            /* File is not in use, just re-use it. */
            f = base_f;
            code = 0;
            goto finish;
        } else {
            /* We need to 'clone' this memfile so that each reader instance */
            /* will be able to maintain its own 'state' */
            f = gs_alloc_struct(mem, MEMFILE, &st_MEMFILE,
                                "memfile_fopen_instance(MEMFILE)");
            if (f == NULL) {
                emprintf1(mem,
                          "memfile_open_scratch(%s): gs_alloc_struct failed\n",
                          fname);
                code = gs_note_error(gs_error_VMerror);
                goto finish;
            }
            /* start from a bitwise copy of the base, then detach the
               per-instance state */
            memcpy(f, base_f, sizeof(MEMFILE));
            f->memory = mem;
            f->data_memory = data_mem;
            f->compress_state = 0;              /* Not used by reader instance */
            f->decompress_state = 0;    /* make clean for GC, or alloc'n failure */
            f->reservePhysBlockChain = NULL;
            f->reservePhysBlockCount = 0;
            f->reserveLogBlockChain = NULL;
            f->reserveLogBlockCount = 0;
            f->openlist = base_f->openlist;
            base_f->openlist = f;               /* link this one in to the base memfile */
            f->base_memfile = base_f;
            f->log_curr_pos = 0;
            f->raw_head = NULL;
            f->error_code = 0;

            /* data_limit != NULL on the first phys block means the file is
               compressed (see file-top comments) */
            if (f->log_head->phys_blk->data_limit != NULL) {
                /* The file is compressed, so we need to copy the logical block */
                /* list so that it is unique to this instance, and initialize   */
                /* the decompressor.                                            */
                LOG_MEMFILE_BLK *log_block, *new_log_block;
                int i;
                int num_log_blocks = (f->log_length + MEMFILE_DATA_SIZE - 1) / MEMFILE_DATA_SIZE;
                const stream_template *decompress_template = clist_decompressor_template();

                /* the clone's logical blocks are one contiguous array, not
                   individually allocated chain nodes like the base's */
                new_log_block = MALLOC(f, num_log_blocks * sizeof(LOG_MEMFILE_BLK), "memfile_fopen" );
                if (new_log_block == NULL) {
                    code = gs_note_error(gs_error_VMerror);
                    goto finish;
                }

                /* copy the logical blocks to the new list just allocated */
                for (log_block=f->log_head, i=0; log_block != NULL; log_block=log_block->link, i++) {
                    new_log_block[i].phys_blk = log_block->phys_blk;
                    new_log_block[i].phys_pdata = log_block->phys_pdata;
                    new_log_block[i].raw_block = NULL;  /* not yet in raw cache */
                    new_log_block[i].link = log_block->link == NULL ? NULL : new_log_block + i + 1;
                }
                f->log_head = new_log_block;

                /* NB: don't need compress_state for reading */
                f->decompress_state =
                    gs_alloc_struct(mem, stream_state, decompress_template->stype,
                                    "memfile_open_scratch(decompress_state)");
                if (f->decompress_state == 0) {
                    emprintf1(mem,
                              "memfile_open_scratch(%s): gs_alloc_struct failed\n",
                              fname);
                    code = gs_note_error(gs_error_VMerror);
                    goto finish;
                }
                clist_decompressor_init(f->decompress_state);
                f->decompress_state->memory = mem;
                if (decompress_template->set_defaults)
                    (*decompress_template->set_defaults) (f->decompress_state);
            }
            f->log_curr_blk = f->log_head;
            /* NOTE(review): return value of memfile_get_pdata is ignored
               here -- confirm a failure cannot occur on the initial block. */
            memfile_get_pdata(f);               /* set up the initial block */

            goto finish;
        }
    }
    /* --- creating a brand new scratch memfile --- */
    fname[0] = 0;       /* no file name yet */
    f = gs_alloc_struct(mem, MEMFILE, &st_MEMFILE,
                        "memfile_open_scratch(MEMFILE)");
    if (f == NULL) {
        emprintf1(mem,
                  "memfile_open_scratch(%s): gs_alloc_struct failed\n",
                  fname);
        code = gs_note_error(gs_error_VMerror);
        goto finish;
    }
    f->memory = mem;
    f->data_memory = data_mem;
    /* init an empty file, BEFORE allocating de/compress state */
    f->compress_state = 0;      /* make clean for GC, or alloc'n failure */
    f->decompress_state = 0;
    f->openlist = NULL;
    f->base_memfile = NULL;
    f->total_space = 0;
    f->reservePhysBlockChain = NULL;
    f->reservePhysBlockCount = 0;
    f->reserveLogBlockChain = NULL;
    f->reserveLogBlockCount = 0;
    /* init an empty file */
    if ((code = memfile_init_empty(f)) < 0)
        goto finish;
    if ((code = memfile_set_memory_warning(f, 0)) < 0)
        goto finish;
    /*
     * Disregard the ok_to_compress flag, since the size threshold gives us
     * a much better criterion for deciding when compression is appropriate.
     */
    f->ok_to_compress = /*ok_to_compress */ true;
    f->compress_state = 0;      /* make clean for GC */
    f->decompress_state = 0;
    if (f->ok_to_compress) {
        const stream_template *compress_template = clist_compressor_template();
        const stream_template *decompress_template = clist_decompressor_template();

        f->compress_state =
            gs_alloc_struct(mem, stream_state, compress_template->stype,
                            "memfile_open_scratch(compress_state)");
        f->decompress_state =
            gs_alloc_struct(mem, stream_state, decompress_template->stype,
                            "memfile_open_scratch(decompress_state)");
        if (f->compress_state == 0 || f->decompress_state == 0) {
            emprintf1(mem,
                      "memfile_open_scratch(%s): gs_alloc_struct failed\n",
                      fname);
            code = gs_note_error(gs_error_VMerror);
            goto finish;
        }
        clist_compressor_init(f->compress_state);
        clist_decompressor_init(f->decompress_state);
        f->compress_state->memory = mem;
        f->decompress_state->memory = mem;
        if (compress_template->set_defaults)
            (*compress_template->set_defaults) (f->compress_state);
        if (decompress_template->set_defaults)
            (*decompress_template->set_defaults) (f->decompress_state);
    }
    /* reset the accounting: the blocks allocated above are setup cost,
       not file data */
    f->total_space = 0;

    /* Return the address of this memfile as a string for use in future clist_fopen calls */
    fname[0] = 0xff;    /* a flag that this is a memfile name */
    gs_snprintf(fname+1, gp_file_name_sizeof-1, "%p", f);

#ifdef DEBUG
    tot_compressed = 0;
    tot_raw = 0;
    tot_cache_miss = 0;
    tot_cache_hits = 0;
    tot_swap_out = 0;
#endif

finish:
    /* 'f' shouldn't be NULL unless code < 0, but be careful */
    if (code < 0 || f == NULL) {
        /* return failure, clean up memory before leaving */
        if (f != NULL)
            memfile_fclose((clist_file_ptr)f, fname, true);
        if (code >= 0)
            code = gs_error_ioerror;
    } else {
        /* return success */
        f->is_open = true;
        *pf = f;
    }
    return code;
}
428 | | |
/*
 * Close a memfile, optionally deleting it.
 *
 * delete == false: only meaningful work happens for reader 'clone'
 * instances (base_memfile != NULL): the clone is unlinked from the base
 * memfile's openlist and its private state (cloned logical block list,
 * raw decompression buffers, de/compressor state) is freed.  Base
 * instances are merely marked not-open and kept for possible reopen.
 *
 * delete == true: the whole file and all its storage is freed; this fails
 * with gs_error_invalidfileaccess if reader instances are still open.
 *
 * Returns 0 on success or a negative GS error code.
 */
static int
memfile_fclose(clist_file_ptr cf, const char *fname, bool delete)
{
    MEMFILE *const f = (MEMFILE *)cf;

    f->is_open = false;
    if (!delete) {
        if (f->base_memfile) {
            MEMFILE *prev_f;

            /* Here we need to delete this instance from the 'openlist' */
            /* in case this file was opened for 'read' on a previously  */
            /* written file (base_memfile != NULL)                      */
            for (prev_f = f->base_memfile; prev_f != NULL; prev_f = prev_f->openlist)
                if (prev_f->openlist == f)
                    break;
            if (prev_f == NULL) {
                emprintf1(f->memory,
                          "Could not find %p on memfile openlist\n",
                          f);
                return_error(gs_error_invalidfileaccess);
            }
            prev_f->openlist = f->openlist;     /* link around the one being fclosed */
            /* Now delete this MEMFILE reader instance */
            /* NB: we don't delete 'base' instances until we delete */
            /* If the file is compressed, free the logical blocks, but not */
            /* the phys_blk info (that is still used by the base memfile   */
            if (f->log_head->phys_blk->data_limit != NULL) {
                /* NOTE(review): the clone's log list was allocated as one
                   array in memfile_fopen, yet is walked and freed per-node
                   here -- confirm this matches the allocator's contract. */
                LOG_MEMFILE_BLK *tmpbp, *bp = f->log_head;

                while (bp != NULL) {
                    tmpbp = bp->link;
                    FREE(f, bp, "memfile_free_mem(log_blk)");
                    bp = tmpbp;
                }
                f->log_head = NULL;

                /* Free any internal compressor state. */
                if (f->compressor_initialized) {
                    if (f->decompress_state->templat->release != 0)
                        (*f->decompress_state->templat->release) (f->decompress_state);
                    if (f->compress_state->templat->release != 0)
                        (*f->compress_state->templat->release) (f->compress_state);
                    f->compressor_initialized = false;
                }
                /* free the raw buffers */
                while (f->raw_head != NULL) {
                    RAW_BUFFER *tmpraw = f->raw_head->fwd;

                    FREE(f, f->raw_head, "memfile_free_mem(raw)");
                    f->raw_head = tmpraw;
                }
            }
            /* deallocate the memfile object proper */
            gs_free_object(f->memory, f, "memfile_close_and_unlink(MEMFILE)");
        }
        return 0;
    }

    /* --- delete == true: destroy the file entirely --- */
    /* TODO: If there are open read memfile structures, set them so that  */
    /* future accesses will use the current contents. This may result in  */
    /* leaks if other users of the memfile don't 'fclose with delete=true */
    if (f->openlist != NULL || ((f->base_memfile != NULL) && f->base_memfile->is_open)) {
        /* TODO: do the cleanup rather than just giving an error */
        emprintf1(f->memory,
                  "Attempt to delete a memfile still open for read: "PRI_INTPTR"\n",
                  (intptr_t)f);
        return_error(gs_error_invalidfileaccess);
    } else {
        /* Free the memory used by this memfile */
        memfile_free_mem(f);

        /* Free reserve blocks; don't do it in memfile_free_mem because */
        /* that routine gets called to reinit file                      */
        while (f->reserveLogBlockChain != NULL) {
            LOG_MEMFILE_BLK *block = f->reserveLogBlockChain;

            f->reserveLogBlockChain = block->link;
            FREE(f, block, "memfile_set_block_size");
        }
        while (f->reservePhysBlockChain != NULL) {
            PHYS_MEMFILE_BLK *block = f->reservePhysBlockChain;

            f->reservePhysBlockChain = block->link;
            FREE(f, block, "memfile_set_block_size");
        }

        /* deallocate de/compress state */
        gs_free_object(f->memory, f->decompress_state,
                       "memfile_close_and_unlink(decompress_state)");
        gs_free_object(f->memory, f->compress_state,
                       "memfile_close_and_unlink(compress_state)");

        /* deallocate the memfile object proper */
        gs_free_object(f->memory, f, "memfile_close_and_unlink(MEMFILE)");
        return 0;
    }
}
527 | | |
528 | | static int |
529 | | memfile_unlink(const char *fname) |
530 | 0 | { |
531 | 0 | int code; |
532 | 0 | MEMFILE *f; |
533 | | |
534 | | /* memfile file names begin with a flag byte == 0xff */ |
535 | 0 | if (fname[0] == '\377' && (code = sscanf(fname+1, "%p", &f) == 1)) { |
536 | 0 | return memfile_fclose((clist_file_ptr)f, fname, true); |
537 | 0 | } else |
538 | 0 | return_error(gs_error_invalidfileaccess); |
539 | 0 | } |
540 | | |
541 | | /* ---------------- Writing ---------------- */ |
542 | | |
543 | | /* Pre-alloc enough reserve mem blox to guarantee a write of N bytes will succeed */ |
/*
 * Pre-allocate enough reserve memory blocks to guarantee that a future
 * write of bytes_left bytes will succeed.
 *
 * The required counts are derived from bytes_left: one logical block per
 * MEMFILE_DATA_SIZE chunk, plus one extra physical block when data is
 * pending (for compress_log_blk spill) and one more when the raw read
 * buffers have not yet been allocated.  The reserve chains are then grown
 * or shrunk to exactly the computed sizes; allocateWithReserve draws from
 * these chains when the normal allocator fails.
 *
 * Returns 0 on success, gs_error_VMerror if the reserve cannot be built.
 * Also clears f->error_code (this call is how the user resets it).
 */
static int      /* returns 0 ok, gs_error_VMerror if insufficient */
memfile_set_memory_warning(clist_file_ptr cf, int bytes_left)
{
    MEMFILE *const f = (MEMFILE *)cf;
    int code = 0;
    /*
     * Determine req'd memory block count from bytes_left.
     * Allocate enough phys & log blocks to hold bytes_left
     * + 1 phys blk for compress_log_blk + 1 phys blk for decompress.
     */
    int logNeeded =
        (bytes_left + MEMFILE_DATA_SIZE - 1) / MEMFILE_DATA_SIZE;
    int physNeeded = logNeeded;

    if (bytes_left > 0)
        ++physNeeded;
    if (f->raw_head == NULL)
        ++physNeeded;           /* have yet to allocate read buffers */

    /* Allocate or free memory depending on need */
    while (logNeeded > f->reserveLogBlockCount) {
        LOG_MEMFILE_BLK *block =
            MALLOC( f, sizeof(LOG_MEMFILE_BLK), "memfile_set_block_size" );

        if (block == NULL) {
            code = gs_note_error(gs_error_VMerror);
            goto finish;
        }
        block->link = f->reserveLogBlockChain;
        f->reserveLogBlockChain = block;
        ++f->reserveLogBlockCount;
    }
    while (logNeeded < f->reserveLogBlockCount) {
        LOG_MEMFILE_BLK *block = f->reserveLogBlockChain;

        f->reserveLogBlockChain = block->link;
        FREE(f, block, "memfile_set_block_size");
        --f->reserveLogBlockCount;
    }
    /* Physical reserve blocks are sized to serve either as a data block
       or as a raw decompression buffer, whichever is larger, since
       allocateWithReserve hands them out for both request sizes. */
    while (physNeeded > f->reservePhysBlockCount) {
        PHYS_MEMFILE_BLK *block =
            MALLOC( f,
                    max( sizeof(PHYS_MEMFILE_BLK), sizeof(RAW_BUFFER) ),
                    "memfile_set_block_size");

        if (block == NULL) {
            code = gs_note_error(gs_error_VMerror);
            goto finish;
        }
        block->link = f->reservePhysBlockChain;
        f->reservePhysBlockChain = block;
        ++f->reservePhysBlockCount;
    }
    while (physNeeded < f->reservePhysBlockCount) {
        PHYS_MEMFILE_BLK *block = f->reservePhysBlockChain;

        f->reservePhysBlockChain = block->link;
        /* NOTE(review): FREE subtracts sizeof(*block) from total_space,
           but MALLOC into the reserve above never added it -- confirm the
           intended total_space accounting for reserve blocks. */
        FREE(f, block, "memfile_set_block_size");
        --f->reservePhysBlockCount;
    }
    f->error_code = 0;          /* memfile_set_block_size is how user resets this */
finish:
    return code;
}
608 | | |
/*
 * Compress one logical block's worth of raw data into the current
 * compression output block (f->phys_curr), re-pointing bp at the
 * compressed representation.
 *
 * On entry bp->phys_blk holds MEMFILE_DATA_SIZE raw bytes.  The block is
 * run through f->compress_state; bp->phys_blk / bp->phys_pdata are
 * updated to reference the compressed data's location, and data_limit is
 * set to mark where compressed output ends (non-NULL data_limit is what
 * marks a phys block as compressed elsewhere in this file).  If the
 * output block fills (status == 1), exactly one more physical block is
 * allocated and chained; a source block never spans three output blocks.
 *
 * Returns 0 on success, +1 (low-memory warning) accumulated from
 * allocateWithReserve, or a negative GS error (ioerror on compressor
 * failure, Fatal on the impossible three-block case).
 *
 * Note: the caller owns the old raw phys block and frees it afterwards
 * (see the compress loop in memfile_next_blk).
 */
static int
compress_log_blk(MEMFILE * f, LOG_MEMFILE_BLK * bp)
{
    int status;
    int ecode = 0;              /* accumulate low-memory warnings */
    int code;
    long compressed_size;
    byte *start_ptr;
    PHYS_MEMFILE_BLK *newphys;

    /* compress this block: feed the raw data as the read stream */
    f->rd.ptr = (const byte *)(bp->phys_blk->data) - 1;
    f->rd.limit = f->rd.ptr + MEMFILE_DATA_SIZE;

    bp->phys_blk = f->phys_curr;
    bp->phys_pdata = (char *)(f->wt.ptr) + 1;
    if (f->compress_state->templat->reinit != 0)
        (*f->compress_state->templat->reinit)(f->compress_state);
    compressed_size = 0;

    start_ptr = f->wt.ptr;
    status = (*f->compress_state->templat->process)(f->compress_state,
                                                    &(f->rd), &(f->wt), true);
    bp->phys_blk->data_limit = (char *)(f->wt.ptr);

    if (status == 1) {          /* More output space needed (see strimpl.h) */
        /* allocate another physical block, then compress remainder */
        compressed_size = f->wt.limit - start_ptr;
        newphys =
            allocateWithReserve(f, sizeof(*newphys), &code, "memfile newphys",
                            "compress_log_blk : MALLOC for 'newphys' failed\n");
        if (code < 0)
            return code;
        ecode |= code;          /* accumulate any low-memory warnings */
        newphys->link = NULL;
        bp->phys_blk->link = newphys;
        f->phys_curr = newphys;
        f->wt.ptr = (byte *) (newphys->data) - 1;
        f->wt.limit = f->wt.ptr + MEMFILE_DATA_SIZE;

        start_ptr = f->wt.ptr;
        status =
            (*f->compress_state->templat->process)(f->compress_state,
                                                   &(f->rd), &(f->wt), true);
        if (status != 0) {
            /*
             * You'd think the above line is a bug, but in real life 1 src
             * block never ends up getting split across 3 dest blocks.
             */
            /* CHANGE memfile_set_memory_warning if this assumption changes. */
            emprintf(f->memory,
                     "Compression required more than one full block!\n");
            return_error(gs_error_Fatal);
        }
        newphys->data_limit = (char *)(f->wt.ptr);
    }
    compressed_size += f->wt.ptr - start_ptr;
    /* purely diagnostic: compression is still "successful" even when the
       output did not shrink */
    if (compressed_size > MEMFILE_DATA_SIZE) {
        emprintf2(f->memory,
                  "\nCompression didn't - raw=%d, compressed=%ld\n",
                  MEMFILE_DATA_SIZE,
                  compressed_size);
    }
#ifdef DEBUG
    tot_compressed += compressed_size;
#endif
    return (status < 0 ? gs_note_error(gs_error_ioerror) : ecode);
}                               /* end "compress_log_blk()" */
677 | | |
/* Internal (private) routine to handle end of logical block */
/*
 * Advance the memfile onto a fresh logical block once the current one has
 * been filled by memfile_fwrite_chars().
 *
 * Two regimes, selected by f->phys_curr:
 *   - f->phys_curr == NULL: the file is NOT being compressed. Allocate a new
 *     physical block and a new logical block and link them in. Then, if
 *     NEED_TO_COMPRESS(f) fires, initialize the compressor (once) and
 *     compress every logical block written so far -- except the new, last
 *     block, which always stays raw.
 *   - f->phys_curr != NULL: the file IS being compressed. Compress the block
 *     just filled onto the physical (compressed) chain, then re-use its raw
 *     physical block as the buffer for the new logical block.
 *
 * Returns 0 on success, a negative error code on failure, or a positive
 * low-memory warning OR-ed together from allocateWithReserve() and
 * compress_log_blk() results.
 */
static int      /* ret 0 ok, -ve error, or +ve low-memory warning */
memfile_next_blk(MEMFILE * f)
{
    LOG_MEMFILE_BLK *bp = f->log_curr_blk;
    LOG_MEMFILE_BLK *newbp;
    PHYS_MEMFILE_BLK *newphys, *oldphys;
    int ecode = 0;              /* accumulate low-memory warnings */
    int code;

    if (f->phys_curr == NULL) { /* means NOT compressing */
        /* allocate a new block */
        newphys =
            allocateWithReserve(f, sizeof(*newphys), &code, "memfile newphys",
                                "memfile_next_blk: MALLOC 1 for 'newphys' failed\n");
        if (code < 0)
            return code;
        ecode |= code;          /* accumulate low-mem warnings */
        newphys->link = NULL;
        newphys->data_limit = NULL;     /* raw */

        newbp =
            allocateWithReserve(f, sizeof(*newbp), &code, "memfile newbp",
                                "memfile_next_blk: MALLOC 1 for 'newbp' failed\n");
        if (code < 0) {
            /* Don't leak the physical block allocated just above. */
            FREE(f, newphys, "memfile newphys");
            return code;
        }
        ecode |= code;          /* accumulate low-mem warnings */
        bp->link = newbp;
        newbp->link = NULL;
        newbp->raw_block = NULL;
        f->log_curr_blk = newbp;

        /* check if need to start compressing */
        if (NEED_TO_COMPRESS(f)) {
            if_debug0m(':', f->memory, "[:]Beginning compression\n");
            /* compress the entire file up to this point */
            if (!f->compressor_initialized) {
                /* One-time compressor init; this inner 'code' deliberately
                 * shadows the outer one within this scope. */
                int code = 0;

                if (f->compress_state->templat->init != 0)
                    code = (*f->compress_state->templat->init) (f->compress_state);
                if (code < 0)
                    return_error(gs_error_VMerror);     /****** BOGUS ******/
                f->compressor_initialized = true;
            }
            /* Write into the new physical block we just allocated, */
            /* replace it after the loop (after some blocks are freed) */
            f->phys_curr = newphys;
            f->wt.ptr = (byte *) (newphys->data) - 1;
            f->wt.limit = f->wt.ptr + MEMFILE_DATA_SIZE;
            bp = f->log_head;
            while (bp != newbp) {       /* don't compress last block */
                int code;

                /* Remember the raw block so it can be freed once its
                 * contents are safely on the compressed chain. */
                oldphys = bp->phys_blk;
                if ((code = compress_log_blk(f, bp)) < 0)
                    return code;
                ecode |= code;
                FREE(f, oldphys, "memfile_next_blk(oldphys)");
                bp = bp->link;
            }                   /* end while( ) compress loop */
            /* Allocate a physical block for this (last) logical block */
            /* (the one handed to f->phys_curr above is now part of the
             * compressed chain, so newphys must be replaced). */
            newphys =
                allocateWithReserve(f, sizeof(*newphys), &code,
                                    "memfile newphys",
                                    "memfile_next_blk: MALLOC 2 for 'newphys' failed\n");
            if (code < 0)
                return code;
            ecode |= code;      /* accumulate low-mem warnings */
            newphys->link = NULL;
            newphys->data_limit = NULL; /* raw */

        }                       /* end convert file to compressed */
        newbp->phys_blk = newphys;
        f->pdata = newphys->data;
        f->pdata_end = newphys->data + MEMFILE_DATA_SIZE;
    }                           /* end if NOT compressing */
    /* File IS being compressed */
    else {
        int code;

        oldphys = bp->phys_blk; /* save raw phys block ID */
        /* compresses bp on phys list */
        if ((code = compress_log_blk(f, bp)) < 0)
            return code;
        ecode |= code;
        newbp =
            allocateWithReserve(f, sizeof(*newbp), &code, "memfile newbp",
                                "memfile_next_blk: MALLOC 2 for 'newbp' failed\n");
        if (code < 0)
            return code;
        ecode |= code;
        bp->link = newbp;
        newbp->link = NULL;
        newbp->raw_block = NULL;
        /* Re-use the raw phys block for this new logical blk */
        newbp->phys_blk = oldphys;
        f->pdata = oldphys->data;
        f->pdata_end = f->pdata + MEMFILE_DATA_SIZE;
        f->log_curr_blk = newbp;
    }                           /* end else (when we are compressing) */

    return (ecode);
}
784 | | |
/*
 * clist write entry point: append 'len' bytes to the memory file.
 *
 * Writing at logical position 0 is treated as truncation: the whole file is
 * freed and re-initialized. Errors (and positive low-memory warnings) from
 * memfile_next_blk() are latched in f->error_code for later retrieval via
 * memfile_ferror_code(); a negative error makes this return 0 immediately.
 * Returns the number of characters written (== len on success).
 */
static int      /* returns # of chars actually written */
memfile_fwrite_chars(const void *data, uint len, clist_file_ptr cf)
{
    const char *str = (const char *)data;
    MEMFILE *f = (MEMFILE *) cf;
    uint count = len;
    int ecode;

    /* check if we are writing to the start of the file. If so, then */
    /* free the file memory and re-initialize it (frees memory) */
    if (f->log_curr_pos == 0) {
        int code;

        memfile_free_mem(f);
        if ((code = memfile_init_empty(f)) < 0) {
            f->error_code = code;
            return 0;
        }
    }
    if (f->log_curr_blk->link != 0) {
        /* NOTE(review): mid-file truncation is only reported, not performed;
         * trailing physical blocks are not freed here. */
        emprintf(f->memory,
                 " Write file truncate -- need to free physical blocks.\n");
    }
    /* Copy in chunks, advancing to a fresh block whenever the current
     * physical block's data area fills up. */
    while (count) {
        uint move_count = f->pdata_end - f->pdata;

        if (move_count > count)
            move_count = count;
        memmove(f->pdata, str, move_count);
        f->pdata += move_count;
        str += move_count;
        count -= move_count;
        if (f->pdata == f->pdata_end) {
            if ((ecode = memfile_next_blk(f)) != 0) {
                f->error_code = ecode;
                /* Positive ecode is only a low-memory warning: keep going. */
                if (ecode < 0)
                    return 0;
            }
        }
    }
    f->log_curr_pos += len;
    f->log_length = f->log_curr_pos;    /* truncate length to here */
#ifdef DEBUG
    tot_raw += len;
#endif
    return (len);
}
832 | | |
833 | | /* */ |
834 | | /* Internal routine to set the f->pdata and f->pdata_end pointers */ |
835 | | /* for the current logical block f->log_curr_blk */ |
836 | | /* */ |
837 | | /* If data only exists in compressed form, allocate a raw buffer */ |
838 | | /* and decompress it. */ |
839 | | /* */ |
840 | | |
841 | | static int |
842 | | memfile_get_pdata(MEMFILE * f) |
843 | 26.1M | { |
844 | 26.1M | int code, i, num_raw_buffers, status; |
845 | 26.1M | LOG_MEMFILE_BLK *bp = f->log_curr_blk; |
846 | | |
847 | 26.1M | if (bp->phys_blk->data_limit == NULL) { |
848 | | /* Not compressed, return this data pointer */ |
849 | 26.1M | f->pdata = (bp->phys_blk)->data; |
850 | 26.1M | i = f->log_curr_pos % MEMFILE_DATA_SIZE; /* pos within block */ |
851 | 26.1M | i = f->log_curr_pos - i; /* base of block */ |
852 | 26.1M | if (i + MEMFILE_DATA_SIZE > f->log_length) |
853 | 18.8M | f->pdata_end = f->pdata + f->log_length - i; |
854 | 7.38M | else |
855 | 7.38M | f->pdata_end = f->pdata + MEMFILE_DATA_SIZE; |
856 | 26.1M | } else { |
857 | | |
858 | | /* data was compressed */ |
859 | 0 | if (f->raw_head == NULL) { |
860 | 0 | code = 0; |
861 | | /* need to allocate the raw buffer pool */ |
862 | 0 | num_raw_buffers = GET_NUM_RAW_BUFFERS(f); |
863 | 0 | if (f->reservePhysBlockCount) { |
864 | | /* HACK: allocate reserve block that's been reserved for |
865 | | * decompression. This buffer's block was pre-allocated to make |
866 | | * sure we won't come up short here. Take from chain instead of |
867 | | * allocateWithReserve() since this buf would just be wasted if |
868 | | * allowed to remain preallocated. */ |
869 | 0 | f->raw_head = (RAW_BUFFER *)f->reservePhysBlockChain; |
870 | 0 | f->reservePhysBlockChain = f->reservePhysBlockChain->link; |
871 | 0 | --f->reservePhysBlockCount; |
872 | 0 | } else { |
873 | 0 | f->raw_head = |
874 | 0 | allocateWithReserve(f, sizeof(*f->raw_head), &code, |
875 | 0 | "memfile raw buffer", |
876 | 0 | "memfile_get_pdata: MALLOC for 'raw_head' failed\n"); |
877 | 0 | if (code < 0) |
878 | 0 | return code; |
879 | 0 | } |
880 | 0 | f->raw_head->back = NULL; |
881 | 0 | f->raw_tail = f->raw_head; |
882 | 0 | f->raw_tail->log_blk = NULL; |
883 | 0 | for (i = 0; i < num_raw_buffers; i++) { |
884 | 0 | f->raw_tail->fwd = (RAW_BUFFER *) MALLOC(f, sizeof(RAW_BUFFER), |
885 | 0 | "memfile raw buffer"); |
886 | | /* if MALLOC fails, then just stop allocating */ |
887 | 0 | if (!f->raw_tail->fwd) |
888 | 0 | break; |
889 | 0 | f->total_space += sizeof(RAW_BUFFER); |
890 | 0 | f->raw_tail->fwd->back = f->raw_tail; |
891 | 0 | f->raw_tail = f->raw_tail->fwd; |
892 | 0 | f->raw_tail->log_blk = NULL; |
893 | 0 | } |
894 | 0 | f->raw_tail->fwd = NULL; |
895 | 0 | num_raw_buffers = i + 1; /* if MALLOC failed, then OK */ |
896 | 0 | if_debug1m(':', f->memory, "[:]Number of raw buffers allocated=%d\n", |
897 | 0 | num_raw_buffers); |
898 | 0 | if (f->decompress_state->templat->init != 0) |
899 | 0 | code = (*f->decompress_state->templat->init) |
900 | 0 | (f->decompress_state); |
901 | 0 | if (code < 0) |
902 | 0 | return_error(gs_error_VMerror); |
903 | |
|
904 | 0 | } /* end allocating the raw buffer pool (first time only) */ |
905 | 0 | if (bp->raw_block == NULL) { |
906 | | #ifdef DEBUG |
907 | | tot_cache_miss++; /* count every decompress */ |
908 | | #endif |
909 | | /* find a raw buffer and decompress */ |
910 | 0 | if (f->raw_tail->log_blk != NULL) { |
911 | | /* This block was in use, grab it */ |
912 | | #ifdef DEBUG |
913 | | tot_swap_out++; |
914 | | #endif |
915 | 0 | f->raw_tail->log_blk->raw_block = NULL; /* data no longer here */ |
916 | 0 | f->raw_tail->log_blk = NULL; |
917 | 0 | } |
918 | | /* Use the last raw block in the chain (the oldest) */ |
919 | 0 | f->raw_tail->back->fwd = NULL; /* disconnect from tail */ |
920 | 0 | f->raw_tail->fwd = f->raw_head; /* new head */ |
921 | 0 | f->raw_head->back = f->raw_tail; |
922 | 0 | f->raw_tail = f->raw_tail->back; |
923 | 0 | f->raw_head = f->raw_head->back; |
924 | 0 | f->raw_head->back = NULL; |
925 | 0 | f->raw_head->log_blk = bp; |
926 | | |
927 | | /* Decompress the data into this raw block */ |
928 | | /* Initialize the decompressor */ |
929 | 0 | if (f->decompress_state->templat->reinit != 0) |
930 | 0 | (*f->decompress_state->templat->reinit) (f->decompress_state); |
931 | | /* Set pointers and call the decompress routine */ |
932 | 0 | f->wt.ptr = (byte *) (f->raw_head->data) - 1; |
933 | 0 | f->wt.limit = f->wt.ptr + MEMFILE_DATA_SIZE; |
934 | 0 | f->rd.ptr = (const byte *)(bp->phys_pdata) - 1; |
935 | 0 | f->rd.limit = (const byte *)bp->phys_blk->data_limit; |
936 | | #ifdef DEBUG |
937 | | decomp_wt_ptr0 = f->wt.ptr; |
938 | | decomp_wt_limit0 = f->wt.limit; |
939 | | decomp_rd_ptr0 = f->rd.ptr; |
940 | | decomp_rd_limit0 = f->rd.limit; |
941 | | #endif |
942 | 0 | status = (*f->decompress_state->templat->process) |
943 | 0 | (f->decompress_state, &(f->rd), &(f->wt), true); |
944 | 0 | if (status == 0) { /* More input data needed */ |
945 | | /* switch to next block and continue decompress */ |
946 | 0 | int back_up = 0; /* adjust pointer backwards */ |
947 | |
|
948 | 0 | if (f->rd.ptr != f->rd.limit) { |
949 | | /* transfer remainder bytes from the previous block */ |
950 | 0 | back_up = f->rd.limit - f->rd.ptr; |
951 | 0 | for (i = 0; i < back_up; i++) |
952 | 0 | *(bp->phys_blk->link->data - back_up + i) = *++f->rd.ptr; |
953 | 0 | } |
954 | 0 | f->rd.ptr = (const byte *)bp->phys_blk->link->data - back_up - 1; |
955 | 0 | f->rd.limit = (const byte *)bp->phys_blk->link->data_limit; |
956 | | #ifdef DEBUG |
957 | | decomp_wt_ptr1 = f->wt.ptr; |
958 | | decomp_wt_limit1 = f->wt.limit; |
959 | | decomp_rd_ptr1 = f->rd.ptr; |
960 | | decomp_rd_limit1 = f->rd.limit; |
961 | | #endif |
962 | 0 | status = (*f->decompress_state->templat->process) |
963 | 0 | (f->decompress_state, &(f->rd), &(f->wt), true); |
964 | 0 | if (status == 0) { |
965 | 0 | emprintf(f->memory, |
966 | 0 | "Decompression required more than one full block!\n"); |
967 | 0 | return_error(gs_error_Fatal); |
968 | 0 | } |
969 | 0 | } |
970 | 0 | bp->raw_block = f->raw_head; /* point to raw block */ |
971 | 0 | } |
972 | | /* end if( raw_block == NULL ) meaning need to decompress data */ |
973 | 0 | else { |
974 | | /* data exists in the raw data cache, if not raw_head, move it */ |
975 | 0 | if (bp->raw_block != f->raw_head) { |
976 | | /* move to raw_head */ |
977 | | /* prev.fwd = this.fwd */ |
978 | 0 | bp->raw_block->back->fwd = bp->raw_block->fwd; |
979 | 0 | if (bp->raw_block->fwd != NULL) |
980 | | /* next.back = this.back */ |
981 | 0 | bp->raw_block->fwd->back = bp->raw_block->back; |
982 | 0 | else |
983 | 0 | f->raw_tail = bp->raw_block->back; /* tail = prev */ |
984 | 0 | f->raw_head->back = bp->raw_block; /* head.back = this */ |
985 | 0 | bp->raw_block->fwd = f->raw_head; /* this.fwd = orig head */ |
986 | 0 | f->raw_head = bp->raw_block; /* head = this */ |
987 | 0 | f->raw_head->back = NULL; /* this.back = NULL */ |
988 | | #ifdef DEBUG |
989 | | tot_cache_hits++; /* counting here prevents repeats since */ |
990 | | /* won't count if already at head */ |
991 | | #endif |
992 | 0 | } |
993 | 0 | } |
994 | 0 | f->pdata = bp->raw_block->data; |
995 | 0 | f->pdata_end = f->pdata + MEMFILE_DATA_SIZE; |
996 | | /* NOTE: last block is never compressed, so a compressed block */ |
997 | | /* is always full size. */ |
998 | 0 | } /* end else (when data was compressed) */ |
999 | | |
1000 | 26.1M | return 0; |
1001 | 26.1M | } |
1002 | | |
1003 | | /* ---------------- Reading ---------------- */ |
1004 | | |
1005 | | static int |
1006 | | memfile_fread_chars(void *data, uint len, clist_file_ptr cf) |
1007 | 220M | { |
1008 | 220M | char *str = (char *)data; |
1009 | 220M | MEMFILE *f = (MEMFILE *) cf; |
1010 | 220M | uint count = len, move_count; |
1011 | 220M | int64_t num_read; |
1012 | | |
1013 | 220M | num_read = f->log_length - f->log_curr_pos; |
1014 | 220M | if ((int64_t)count > num_read) |
1015 | 1 | count = (int)num_read; |
1016 | 220M | num_read = count; |
1017 | | |
1018 | 442M | while (count) { |
1019 | 221M | f->log_curr_pos++; /* move into next byte */ |
1020 | 221M | if (f->pdata == f->pdata_end) { |
1021 | 967k | f->log_curr_blk = (f->log_curr_blk)->link; |
1022 | 967k | memfile_get_pdata(f); |
1023 | 967k | } |
1024 | 221M | move_count = f->pdata_end - f->pdata; |
1025 | 221M | if (move_count > count) |
1026 | 218M | move_count = count; |
1027 | 221M | f->log_curr_pos += move_count - 1; /* new position */ |
1028 | 221M | memmove(str, f->pdata, move_count); |
1029 | 221M | str += move_count; |
1030 | 221M | f->pdata += move_count; |
1031 | 221M | count -= move_count; |
1032 | 221M | } |
1033 | | |
1034 | 220M | return (num_read); |
1035 | 220M | } |
1036 | | |
1037 | | /* ---------------- Position/status ---------------- */ |
1038 | | |
1039 | | static int |
1040 | | memfile_ferror_code(clist_file_ptr cf) |
1041 | 41.2M | { |
1042 | 41.2M | return (((MEMFILE *) cf)->error_code); /* errors stored here */ |
1043 | 41.2M | } |
1044 | | |
1045 | | static int64_t |
1046 | | memfile_ftell(clist_file_ptr cf) |
1047 | 17.8M | { |
1048 | 17.8M | return (((MEMFILE *) cf)->log_curr_pos); |
1049 | 17.8M | } |
1050 | | |
1051 | | static int |
1052 | | memfile_rewind(clist_file_ptr cf, bool discard_data, const char *ignore_fname) |
1053 | 1.27M | { |
1054 | 1.27M | MEMFILE *f = (MEMFILE *) cf; |
1055 | | |
1056 | 1.27M | if (discard_data) { |
1057 | | /* This affects the memfile data, not just the MEMFILE * access struct */ |
1058 | | /* Check first to make sure that we have exclusive access */ |
1059 | 790k | if (f->openlist != NULL || f->base_memfile != NULL) { |
1060 | | /* TODO: Move the data so it is still connected to other open files */ |
1061 | 0 | emprintf1(f->memory, |
1062 | 0 | "memfile_rewind("PRI_INTPTR") with discard_data=true failed: ", |
1063 | 0 | (intptr_t)f); |
1064 | 0 | f->error_code = gs_note_error(gs_error_ioerror); |
1065 | 0 | return f->error_code; |
1066 | 0 | } |
1067 | 790k | memfile_free_mem(f); |
1068 | | /* We have to call memfile_init_empty to preserve invariants. */ |
1069 | 790k | memfile_init_empty(f); |
1070 | 790k | } else { |
1071 | 487k | f->log_curr_blk = f->log_head; |
1072 | 487k | f->log_curr_pos = 0; |
1073 | 487k | memfile_get_pdata(f); |
1074 | 487k | } |
1075 | 1.27M | return 0; |
1076 | 1.27M | } |
1077 | | |
1078 | | static int |
1079 | | memfile_fseek(clist_file_ptr cf, int64_t offset, int mode, const char *ignore_fname) |
1080 | 24.7M | { |
1081 | 24.7M | MEMFILE *f = (MEMFILE *) cf; |
1082 | 24.7M | int64_t i, block_num, new_pos; |
1083 | | |
1084 | 24.7M | switch (mode) { |
1085 | 24.2M | case SEEK_SET: /* offset from the beginning of the file */ |
1086 | 24.2M | new_pos = offset; |
1087 | 24.2M | break; |
1088 | | |
1089 | 0 | case SEEK_CUR: /* offset from the current position in the file */ |
1090 | 0 | new_pos = offset + f->log_curr_pos; |
1091 | 0 | break; |
1092 | | |
1093 | 471k | case SEEK_END: /* offset back from the end of the file */ |
1094 | 471k | new_pos = f->log_length - offset; |
1095 | 471k | break; |
1096 | | |
1097 | 0 | default: |
1098 | 0 | return (-1); |
1099 | 24.7M | } |
1100 | 24.7M | if (new_pos < 0 || new_pos > f->log_length) |
1101 | 0 | return -1; |
1102 | 24.7M | if ((f->pdata == f->pdata_end) && (f->log_curr_blk->link != NULL)) { |
1103 | | /* log_curr_blk is actually one block behind log_curr_pos */ |
1104 | 134 | f->log_curr_blk = f->log_curr_blk->link; |
1105 | 134 | } |
1106 | 24.7M | block_num = new_pos / MEMFILE_DATA_SIZE; |
1107 | 24.7M | i = f->log_curr_pos / MEMFILE_DATA_SIZE; |
1108 | 24.7M | if (block_num < i) { /* if moving backwards, start at beginning */ |
1109 | 259k | f->log_curr_blk = f->log_head; |
1110 | 259k | i = 0; |
1111 | 259k | } |
1112 | 34.6M | for (; i < block_num; i++) { |
1113 | 9.94M | f->log_curr_blk = f->log_curr_blk->link; |
1114 | 9.94M | } |
1115 | 24.7M | f->log_curr_pos = new_pos; |
1116 | 24.7M | memfile_get_pdata(f); /* pointers to start of block */ |
1117 | 24.7M | f->pdata += new_pos - (block_num * MEMFILE_DATA_SIZE); |
1118 | | |
1119 | 24.7M | return 0; /* return "normal" status */ |
1120 | 24.7M | } |
1121 | | |
1122 | | /* ---------------- Internal routines ---------------- */ |
1123 | | |
/*
 * Free all storage owned by the memfile: the logical block chain, the
 * physical blocks (raw or compressed), any compressor state, and the raw
 * decompression buffer pool. Leaves f->log_head == NULL; callers normally
 * follow with memfile_init_empty() to restore the invariants.
 */
static void
memfile_free_mem(MEMFILE * f)
{
    LOG_MEMFILE_BLK *bp, *tmpbp;

#ifdef DEBUG
    /* output some diagnostics about the effectiveness */
    if (tot_raw > 100) {
        if (tot_raw > 0xFFFFFFFF)
            if_debug4m(':', f->memory, "[:]tot_raw=%lu%0lu, tot_compressed=%lu%0lu\n",
                       tot_raw >> 32, tot_raw & 0xFFFFFFFF,
                       tot_compressed >> 32, tot_compressed & 0xFFFFFFFF);
        else
            if_debug2m(':', f->memory, "[:]tot_raw=%lu, tot_compressed=%lu\n",
                       tot_raw, tot_compressed);
    }
    if (tot_cache_hits != 0) {
        if_debug3m(':', f->memory, "[:]Cache hits=%lu, cache misses=%lu, swapouts=%lu\n",
                   tot_cache_hits,
                   (long)(tot_cache_miss - (f->log_length / MEMFILE_DATA_SIZE)),
                   tot_swap_out);
    }
    tot_raw = 0;
    tot_compressed = 0;
    tot_cache_hits = 0;
    tot_cache_miss = 0;
    tot_swap_out = 0;
#endif

    /* Free up memory that was allocated for the memfile */
    bp = f->log_head;

    if (bp != NULL) {
        /* Null out phys_blk pointers to compressed data, because several
         * logical blocks may share one compressed physical block; the
         * compressed chain is freed separately below. */
        PHYS_MEMFILE_BLK *pphys = bp->phys_blk;

        {
            for (tmpbp = bp; tmpbp != NULL; tmpbp = tmpbp->link)
                if (tmpbp->phys_blk->data_limit != NULL)
                    tmpbp->phys_blk = 0;
        }
        /* Free the physical blocks that make up the compressed data */
        if (pphys->data_limit != NULL) {
            /* the data was compressed, free the chain of blocks */
            while (pphys != NULL) {
                PHYS_MEMFILE_BLK *tmpphys = pphys->link;

                FREE(f, pphys, "memfile_free_mem(pphys)");
                pphys = tmpphys;
            }
        }
    }
    /* Now free the logical blocks, and any uncompressed physical blocks
     * (the phys_blk pointers left non-NULL by the loop above). */
    while (bp != NULL) {
        if (bp->phys_blk != NULL) {
            FREE(f, bp->phys_blk, "memfile_free_mem(phys_blk)");
        }
        tmpbp = bp->link;
        FREE(f, bp, "memfile_free_mem(log_blk)");
        bp = tmpbp;
    }

    f->log_head = NULL;

    /* Free any internal compressor state. */
    if (f->compressor_initialized) {
        if (f->decompress_state->templat->release != 0)
            (*f->decompress_state->templat->release) (f->decompress_state);
        if (f->compress_state->templat->release != 0)
            (*f->compress_state->templat->release) (f->compress_state);
        f->compressor_initialized = false;
    }
    /* free the raw buffers */
    while (f->raw_head != NULL) {
        RAW_BUFFER *tmpraw = f->raw_head->fwd;

        FREE(f, f->raw_head, "memfile_free_mem(raw)");
        f->raw_head = tmpraw;
    }
}
1204 | | |
1205 | | static int |
1206 | | memfile_init_empty(MEMFILE * f) |
1207 | 1.92M | { |
1208 | 1.92M | PHYS_MEMFILE_BLK *pphys; |
1209 | 1.92M | LOG_MEMFILE_BLK *plog; |
1210 | | |
1211 | | /* Zero out key fields so that allocation failure will be unwindable */ |
1212 | 1.92M | f->phys_curr = NULL; /* flag as file not compressed */ |
1213 | 1.92M | f->log_head = NULL; |
1214 | 1.92M | f->log_curr_blk = NULL; |
1215 | 1.92M | f->log_curr_pos = 0; |
1216 | 1.92M | f->log_length = 0; |
1217 | 1.92M | f->raw_head = NULL; |
1218 | 1.92M | f->compressor_initialized = false; |
1219 | 1.92M | f->total_space = 0; |
1220 | | |
1221 | | /* File empty - get a physical mem block (includes the buffer area) */ |
1222 | 1.92M | pphys = MALLOC(f, sizeof(*pphys), "memfile pphys"); |
1223 | 1.92M | if (!pphys) { |
1224 | 0 | emprintf(f->memory, "memfile_init_empty: MALLOC for 'pphys' failed\n"); |
1225 | 0 | return_error(gs_error_VMerror); |
1226 | 0 | } |
1227 | 1.92M | f->total_space += sizeof(*pphys); |
1228 | 1.92M | pphys->data_limit = NULL; /* raw data for now */ |
1229 | | |
1230 | | /* Get logical mem block to go with physical one */ |
1231 | 1.92M | plog = (LOG_MEMFILE_BLK *)MALLOC( f, sizeof(*plog), "memfile_init_empty" ); |
1232 | 1.92M | if (plog == NULL) { |
1233 | 0 | FREE(f, pphys, "memfile_init_empty"); |
1234 | 0 | emprintf(f->memory, |
1235 | 0 | "memfile_init_empty: MALLOC for log_curr_blk failed\n"); |
1236 | 0 | return_error(gs_error_VMerror); |
1237 | 0 | } |
1238 | 1.92M | f->total_space += sizeof(*plog); |
1239 | 1.92M | f->log_head = f->log_curr_blk = plog; |
1240 | 1.92M | f->log_curr_blk->link = NULL; |
1241 | 1.92M | f->log_curr_blk->phys_blk = pphys; |
1242 | 1.92M | f->log_curr_blk->phys_pdata = NULL; |
1243 | 1.92M | f->log_curr_blk->raw_block = NULL; |
1244 | | |
1245 | 1.92M | f->pdata = pphys->data; |
1246 | 1.92M | f->pdata_end = f->pdata + MEMFILE_DATA_SIZE; |
1247 | | |
1248 | 1.92M | f->error_code = 0; |
1249 | | |
1250 | 1.92M | return 0; |
1251 | 1.92M | } |
1252 | | |
/* Procedure vector exposing this RAM-based implementation through the
 * generic clist file interface. */
clist_io_procs_t clist_io_procs_memory = {
    memfile_fopen,
    memfile_fclose,
    memfile_unlink,
    memfile_fwrite_chars,
    memfile_fread_chars,
    memfile_set_memory_warning,
    memfile_ferror_code,
    memfile_ftell,
    memfile_rewind,
    memfile_fseek,
};
1265 | | |
1266 | | init_proc(gs_gxclmem_init); |
1267 | | int |
1268 | | gs_gxclmem_init(gs_memory_t *mem) |
1269 | 89.2k | { |
1270 | 89.2k | gs_lib_ctx_core_t *core = mem->gs_lib_ctx->core; |
1271 | 89.2k | #ifdef PACIFY_VALGRIND |
1272 | 89.2k | VALGRIND_HG_DISABLE_CHECKING(&core->clist_io_procs_memory, sizeof(core->clist_io_procs_memory)); |
1273 | 89.2k | #endif |
1274 | 89.2k | core->clist_io_procs_memory = &clist_io_procs_memory; |
1275 | 89.2k | return 0; |
1276 | 89.2k | } |