/src/libjpeg-turbo.2.1.x/jmemmgr.c
Line  | Count  | Source  | 
1  |  | /*  | 
2  |  |  * jmemmgr.c  | 
3  |  |  *  | 
4  |  |  * This file was part of the Independent JPEG Group's software:  | 
5  |  |  * Copyright (C) 1991-1997, Thomas G. Lane.  | 
6  |  |  * libjpeg-turbo Modifications:  | 
7  |  |  * Copyright (C) 2016, 2021-2022, D. R. Commander.  | 
8  |  |  * For conditions of distribution and use, see the accompanying README.ijg  | 
9  |  |  * file.  | 
10  |  |  *  | 
11  |  |  * This file contains the JPEG system-independent memory management  | 
12  |  |  * routines.  This code is usable across a wide variety of machines; most  | 
13  |  |  * of the system dependencies have been isolated in a separate file.  | 
14  |  |  * The major functions provided here are:  | 
15  |  |  *   * pool-based allocation and freeing of memory;  | 
16  |  |  *   * policy decisions about how to divide available memory among the  | 
17  |  |  *     virtual arrays;  | 
18  |  |  *   * control logic for swapping virtual arrays between main memory and  | 
19  |  |  *     backing storage.  | 
20  |  |  * The separate system-dependent file provides the actual backing-storage  | 
21  |  |  * access code, and it contains the policy decision about how much total  | 
22  |  |  * main memory to use.  | 
23  |  |  * This file is system-dependent in the sense that some of its functions  | 
24  |  |  * are unnecessary in some systems.  For example, if there is enough virtual  | 
25  |  |  * memory so that backing storage will never be used, much of the virtual  | 
26  |  |  * array control logic could be removed.  (Of course, if you have that much  | 
27  |  |  * memory then you shouldn't care about a little bit of unused code...)  | 
28  |  |  */  | 
29  |  |  | 
30  |  | #define JPEG_INTERNALS  | 
31  |  | #define AM_MEMORY_MANAGER       /* we define jvirt_Xarray_control structs */  | 
32  |  | #include "jinclude.h"  | 
33  |  | #include "jpeglib.h"  | 
34  |  | #include "jmemsys.h"            /* import the system-dependent declarations */  | 
35  |  | #if !defined(_MSC_VER) || _MSC_VER > 1600  | 
36  |  | #include <stdint.h>  | 
37  |  | #endif  | 
38  |  | #include <limits.h>  | 
39  |  |  | 
40  |  |  | 
41  |  | LOCAL(size_t)  | 
42  |  | round_up_pow2(size_t a, size_t b)  | 
43  |  | /* a rounded up to the next multiple of b, i.e. ceil(a/b)*b */  | 
44  |  | /* Assumes a >= 0, b > 0, and b is a power of 2 */  | 
45  | 332k  | { | 
46  | 332k  |   return ((a + b - 1) & (~(b - 1)));  | 
47  | 332k  | }  | 
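/* [Editorial note]  A worked instance of the mask identity above, assuming b
 * is a power of two as required:
 *
 *   round_up_pow2(100, 32):  100 + 31 = 131   (binary 1000 0011)
 *                            ~(32 - 1)        (mask clearing the low 5 bits)
 *                            131 & ~31 = 128  = ceil(100/32) * 32
 *
 * An exact multiple is left unchanged: round_up_pow2(96, 32) == 96, since
 * 96 + 31 = 127 and masking off the low 5 bits gives back 96.
 */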
48  |  |  | 
49  |  |  | 
50  |  | /*  | 
51  |  |  * Some important notes:  | 
52  |  |  *   The allocation routines provided here must never return NULL.  | 
53  |  |  *   They should exit to error_exit if unsuccessful.  | 
54  |  |  *  | 
55  |  |  *   It's not a good idea to try to merge the sarray and barray routines,  | 
56  |  |  *   even though they are textually almost the same, because samples are  | 
57  |  |  *   usually stored as bytes while coefficients are shorts or ints.  Thus,  | 
58  |  |  *   in machines where byte pointers have a different representation from  | 
59  |  |  *   word pointers, the resulting machine code could not be the same.  | 
60  |  |  */  | 
61  |  |  | 
62  |  |  | 
63  |  | /*  | 
64  |  |  * Many machines require storage alignment: longs must start on 4-byte  | 
65  |  |  * boundaries, doubles on 8-byte boundaries, etc.  On such machines, malloc()  | 
66  |  |  * always returns pointers that are multiples of the worst-case alignment  | 
67  |  |  * requirement, and we had better do so too.  | 
68  |  |  * There isn't any really portable way to determine the worst-case alignment  | 
69  |  |  * requirement.  This module assumes that the alignment requirement is  | 
70  |  |  * multiples of ALIGN_SIZE.  | 
71  |  |  * By default, we define ALIGN_SIZE as the maximum of sizeof(double) and  | 
72  |  |  * sizeof(void *).  This is necessary on some workstations (where doubles  | 
73  |  |  * really do need 8-byte alignment) and will work fine on nearly everything.  | 
74  |  |  * We use the maximum of sizeof(double) and sizeof(void *) since sizeof(double)  | 
75  |  |  * may be insufficient, for example, on CHERI-enabled platforms with 16-byte  | 
76  |  |  * pointers and a 16-byte alignment requirement.  If your machine has lesser  | 
77  |  |  * alignment needs, you can save a few bytes by making ALIGN_SIZE smaller.  | 
78  |  |  * The only place I know of where this will NOT work is certain Macintosh  | 
79  |  |  * 680x0 compilers that define double as a 10-byte IEEE extended float.  | 
80  |  |  * Doing 10-byte alignment is counterproductive because longwords won't be  | 
81  |  |  * aligned well.  Put "#define ALIGN_SIZE 4" in jconfig.h if you have  | 
82  |  |  * such a compiler.  | 
83  |  |  */  | 
84  |  |  | 
85  |  | #ifndef ALIGN_SIZE              /* so can override from jconfig.h */  | 
86  |  | #ifndef WITH_SIMD  | 
87  |  | #define ALIGN_SIZE  MAX(sizeof(void *), sizeof(double))  | 
88  |  | #else  | 
89  | 1.86M  | #define ALIGN_SIZE  32 /* Most of the SIMD instructions we support require  | 
90  |  |                           16-byte (128-bit) alignment, but AVX2 requires  | 
91  |  |                           32-byte alignment. */  | 
92  |  | #endif  | 
93  |  | #endif  | 
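/* [Editorial sketch, not part of the library]  A compile-time guard one could
 * add under C11 to catch a bad ALIGN_SIZE override from jconfig.h early; it
 * mirrors the runtime power-of-two test in jinit_memory_mgr() below.  For
 * concreteness, the non-SIMD default evaluates to 8 on a typical LP64 build
 * and to 16 on CHERI targets with 16-byte pointers.
 */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
_Static_assert((ALIGN_SIZE & (ALIGN_SIZE - 1)) == 0,
               "ALIGN_SIZE must be a power of 2");
#endif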
94  |  |  | 
95  |  | /*  | 
96  |  |  * We allocate objects from "pools", where each pool is gotten with a single  | 
97  |  |  * request to jpeg_get_small() or jpeg_get_large().  There is no per-object  | 
98  |  |  * overhead within a pool, except for alignment padding.  Each pool has a  | 
99  |  |  * header with a link to the next pool of the same class.  | 
100  |  |  * Small and large pool headers are identical.  | 
101  |  |  */  | 
102  |  |  | 
103  |  | typedef struct small_pool_struct *small_pool_ptr;  | 
104  |  |  | 
105  |  | typedef struct small_pool_struct { | 
106  |  |   small_pool_ptr next;          /* next in list of pools */  | 
107  |  |   size_t bytes_used;            /* how many bytes already used within pool */  | 
108  |  |   size_t bytes_left;            /* bytes still available in this pool */  | 
109  |  | } small_pool_hdr;  | 
110  |  |  | 
111  |  | typedef struct large_pool_struct *large_pool_ptr;  | 
112  |  |  | 
113  |  | typedef struct large_pool_struct { | 
114  |  |   large_pool_ptr next;          /* next in list of pools */  | 
115  |  |   size_t bytes_used;            /* how many bytes already used within pool */  | 
116  |  |   size_t bytes_left;            /* bytes still available in this pool */  | 
117  |  | } large_pool_hdr;  | 
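/* [Editorial note]  How alloc_small() below carves up one such pool.  The
 * header sits at the front of the block obtained from jpeg_get_small(),
 * followed by at most ALIGN_SIZE - 1 bytes of padding and then the objects,
 * packed back to back (each already rounded up to a multiple of ALIGN_SIZE):
 *
 *   |<------------------ block from jpeg_get_small() ------------------>|
 *   +----------------+---------+---------------------------+------------+
 *   | small_pool_hdr | padding | objects handed out so far | free space |
 *   | next/used/left | to      | (bytes_used in total)     | (bytes_left|
 *   |                | ALIGN   |                           |  bytes)    |
 *   +----------------+---------+---------------------------+------------+
 */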
118  |  |  | 
119  |  | /*  | 
120  |  |  * Here is the full definition of a memory manager object.  | 
121  |  |  */  | 
122  |  |  | 
123  |  | typedef struct { | 
124  |  |   struct jpeg_memory_mgr pub;   /* public fields */  | 
125  |  |  | 
126  |  |   /* Each pool identifier (lifetime class) names a linked list of pools. */  | 
127  |  |   small_pool_ptr small_list[JPOOL_NUMPOOLS];  | 
128  |  |   large_pool_ptr large_list[JPOOL_NUMPOOLS];  | 
129  |  |  | 
130  |  |   /* Since we only have one lifetime class of virtual arrays, only one  | 
131  |  |    * linked list is necessary (for each datatype).  Note that the virtual  | 
132  |  |    * array control blocks being linked together are actually stored somewhere  | 
133  |  |    * in the small-pool list.  | 
134  |  |    */  | 
135  |  |   jvirt_sarray_ptr virt_sarray_list;  | 
136  |  |   jvirt_barray_ptr virt_barray_list;  | 
137  |  |  | 
138  |  |   /* This counts total space obtained from jpeg_get_small/large */  | 
139  |  |   size_t total_space_allocated;  | 
140  |  |  | 
141  |  |   /* alloc_sarray and alloc_barray set this value for use by virtual  | 
142  |  |    * array routines.  | 
143  |  |    */  | 
144  |  |   JDIMENSION last_rowsperchunk; /* from most recent alloc_sarray/barray */  | 
145  |  | } my_memory_mgr;  | 
146  |  |  | 
147  |  | typedef my_memory_mgr *my_mem_ptr;  | 
148  |  |  | 
149  |  |  | 
150  |  | /*  | 
151  |  |  * The control blocks for virtual arrays.  | 
152  |  |  * Note that these blocks are allocated in the "small" pool area.  | 
153  |  |  * System-dependent info for the associated backing store (if any) is hidden  | 
154  |  |  * inside the backing_store_info struct.  | 
155  |  |  */  | 
156  |  |  | 
157  |  | struct jvirt_sarray_control { | 
158  |  |   JSAMPARRAY mem_buffer;        /* => the in-memory buffer */  | 
159  |  |   JDIMENSION rows_in_array;     /* total virtual array height */  | 
160  |  |   JDIMENSION samplesperrow;     /* width of array (and of memory buffer) */  | 
161  |  |   JDIMENSION maxaccess;         /* max rows accessed by access_virt_sarray */  | 
162  |  |   JDIMENSION rows_in_mem;       /* height of memory buffer */  | 
163  |  |   JDIMENSION rowsperchunk;      /* allocation chunk size in mem_buffer */  | 
164  |  |   JDIMENSION cur_start_row;     /* first logical row # in the buffer */  | 
165  |  |   JDIMENSION first_undef_row;   /* row # of first uninitialized row */  | 
166  |  |   boolean pre_zero;             /* pre-zero mode requested? */  | 
167  |  |   boolean dirty;                /* do current buffer contents need to be written? */  | 
168  |  |   boolean b_s_open;             /* is backing-store data valid? */  | 
169  |  |   jvirt_sarray_ptr next;        /* link to next virtual sarray control block */  | 
170  |  |   backing_store_info b_s_info;  /* System-dependent control info */  | 
171  |  | };  | 
172  |  |  | 
173  |  | struct jvirt_barray_control { | 
174  |  |   JBLOCKARRAY mem_buffer;       /* => the in-memory buffer */  | 
175  |  |   JDIMENSION rows_in_array;     /* total virtual array height */  | 
176  |  |   JDIMENSION blocksperrow;      /* width of array (and of memory buffer) */  | 
177  |  |   JDIMENSION maxaccess;         /* max rows accessed by access_virt_barray */  | 
178  |  |   JDIMENSION rows_in_mem;       /* height of memory buffer */  | 
179  |  |   JDIMENSION rowsperchunk;      /* allocation chunk size in mem_buffer */  | 
180  |  |   JDIMENSION cur_start_row;     /* first logical row # in the buffer */  | 
181  |  |   JDIMENSION first_undef_row;   /* row # of first uninitialized row */  | 
182  |  |   boolean pre_zero;             /* pre-zero mode requested? */  | 
183  |  |   boolean dirty;                /* do current buffer contents need to be written? */  | 
184  |  |   boolean b_s_open;             /* is backing-store data valid? */  | 
185  |  |   jvirt_barray_ptr next;        /* link to next virtual barray control block */  | 
186  |  |   backing_store_info b_s_info;  /* System-dependent control info */  | 
187  |  | };  | 
188  |  |  | 
189  |  |  | 
190  |  | #ifdef MEM_STATS                /* optional extra stuff for statistics */  | 
191  |  |  | 
192  |  | LOCAL(void)  | 
193  |  | print_mem_stats(j_common_ptr cinfo, int pool_id)  | 
194  |  | { | 
195  |  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
196  |  |   small_pool_ptr shdr_ptr;  | 
197  |  |   large_pool_ptr lhdr_ptr;  | 
198  |  |  | 
199  |  |   /* Since this is only a debugging stub, we can cheat a little by using  | 
200  |  |    * fprintf directly rather than going through the trace message code.  | 
201  |  |    * This is helpful because the message parm array can't handle longs.  | 
202  |  |    */  | 
203  |  |   fprintf(stderr, "Freeing pool %d, total space = %ld\n",  | 
204  |  |           pool_id, (long)mem->total_space_allocated);  | 
205  |  |  | 
206  |  |   for (lhdr_ptr = mem->large_list[pool_id]; lhdr_ptr != NULL;  | 
207  |  |        lhdr_ptr = lhdr_ptr->next) { | 
208  |  |     fprintf(stderr, "  Large chunk used %ld\n", (long)lhdr_ptr->bytes_used);  | 
209  |  |   }  | 
210  |  |  | 
211  |  |   for (shdr_ptr = mem->small_list[pool_id]; shdr_ptr != NULL;  | 
212  |  |        shdr_ptr = shdr_ptr->next) { | 
213  |  |     fprintf(stderr, "  Small chunk used %ld free %ld\n",  | 
214  |  |             (long)shdr_ptr->bytes_used, (long)shdr_ptr->bytes_left);  | 
215  |  |   }  | 
216  |  | }  | 
217  |  |  | 
218  |  | #endif /* MEM_STATS */  | 
219  |  |  | 
220  |  |  | 
221  |  | LOCAL(void)  | 
222  |  | out_of_memory(j_common_ptr cinfo, int which)  | 
223  |  | /* Report an out-of-memory error and stop execution */  | 
224  |  | /* If we compiled MEM_STATS support, report alloc requests before dying */  | 
225  | 0  | { | 
226  |  | #ifdef MEM_STATS  | 
227  |  |   cinfo->err->trace_level = 2;  /* force self_destruct to report stats */  | 
228  |  | #endif  | 
229  | 0  |   ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, which);  | 
230  | 0  | }  | 
231  |  |  | 
232  |  |  | 
233  |  | /*  | 
234  |  |  * Allocation of "small" objects.  | 
235  |  |  *  | 
236  |  |  * For these, we use pooled storage.  When a new pool must be created,  | 
237  |  |  * we try to get enough space for the current request plus a "slop" factor,  | 
238  |  |  * where the slop will be the amount of leftover space in the new pool.  | 
239  |  |  * The speed vs. space tradeoff is largely determined by the slop values.  | 
240  |  |  * A different slop value is provided for each pool class (lifetime),  | 
241  |  |  * and we also distinguish the first pool of a class from later ones.  | 
242  |  |  * NOTE: the values given work fairly well on both 16- and 32-bit-int  | 
243  |  |  * machines, but may be too small if longs are 64 bits or more.  | 
244  |  |  *  | 
245  |  |  * Since we do not know what alignment malloc() gives us, we have to  | 
246  |  |  * allocate ALIGN_SIZE-1 extra space per pool to have room for alignment  | 
247  |  |  * adjustment.  | 
248  |  |  */  | 
249  |  |  | 
250  |  | static const size_t first_pool_slop[JPOOL_NUMPOOLS] = { | 
251  |  |   1600,                         /* first PERMANENT pool */  | 
252  |  |   16000                         /* first IMAGE pool */  | 
253  |  | };  | 
254  |  |  | 
255  |  | static const size_t extra_pool_slop[JPOOL_NUMPOOLS] = { | 
256  |  |   0,                            /* additional PERMANENT pools */  | 
257  |  |   5000                          /* additional IMAGE pools */  | 
258  |  | };  | 
259  |  |  | 
260  | 0  | #define MIN_SLOP  50            /* greater than 0 to avoid futile looping */  | 
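/* [Editorial arithmetic, assuming the SIMD value ALIGN_SIZE = 32]  The very
 * first IMAGE-pool request of, say, 120 bytes rounds up to 128, so
 * alloc_small() asks jpeg_get_small() for
 *
 *   min_request + slop = (sizeof(small_pool_hdr) + 128 + 31) + 16000
 *
 * bytes, leaving about 16 KB of headroom for later small objects of the same
 * lifetime.  Later IMAGE pools get only extra_pool_slop[JPOOL_IMAGE] = 5000
 * bytes of headroom, and PERMANENT pools after the first get none.
 */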
261  |  |  | 
262  |  |  | 
263  |  | METHODDEF(void *)  | 
264  |  | alloc_small(j_common_ptr cinfo, int pool_id, size_t sizeofobject)  | 
265  |  | /* Allocate a "small" object */  | 
266  | 226k  | { | 
267  | 226k  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
268  | 226k  |   small_pool_ptr hdr_ptr, prev_hdr_ptr;  | 
269  | 226k  |   char *data_ptr;  | 
270  | 226k  |   size_t min_request, slop;  | 
271  |  |  | 
272  |  |   /*  | 
273  |  |    * Round up the requested size to a multiple of ALIGN_SIZE in order  | 
274  |  |    * to assure alignment for the next object allocated in the same pool  | 
275  |  |    * and so that algorithms can straddle outside the proper area up  | 
276  |  |    * to the next alignment.  | 
277  |  |    */  | 
278  | 226k  |   if (sizeofobject > MAX_ALLOC_CHUNK) { | 
279  |  |     /* This prevents overflow/wrap-around in round_up_pow2() if sizeofobject  | 
280  |  |        is close to SIZE_MAX. */  | 
281  | 0  |     out_of_memory(cinfo, 7);  | 
282  | 0  |   }  | 
283  | 226k  |   sizeofobject = round_up_pow2(sizeofobject, ALIGN_SIZE);  | 
284  |  |  | 
285  |  |   /* Check for unsatisfiable request (do now to ensure no overflow below) */  | 
286  | 226k  |   if ((sizeof(small_pool_hdr) + sizeofobject + ALIGN_SIZE - 1) >  | 
287  | 226k  |       MAX_ALLOC_CHUNK)  | 
288  | 0  |     out_of_memory(cinfo, 1);    /* request exceeds malloc's ability */  | 
289  |  |  | 
290  |  |   /* See if space is available in any existing pool */  | 
291  | 226k  |   if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)  | 
292  | 0  |     ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */  | 
293  | 226k  |   prev_hdr_ptr = NULL;  | 
294  | 226k  |   hdr_ptr = mem->small_list[pool_id];  | 
295  | 239k  |   while (hdr_ptr != NULL) { | 
296  | 185k  |     if (hdr_ptr->bytes_left >= sizeofobject)  | 
297  | 172k  |       break;                    /* found pool with enough space */  | 
298  | 12.7k  |     prev_hdr_ptr = hdr_ptr;  | 
299  | 12.7k  |     hdr_ptr = hdr_ptr->next;  | 
300  | 12.7k  |   }  | 
301  |  |  | 
302  |  |   /* Time to make a new pool? */  | 
303  | 226k  |   if (hdr_ptr == NULL) { | 
304  |  |     /* min_request is what we need now, slop is what will be leftover */  | 
305  | 54.2k  |     min_request = sizeof(small_pool_hdr) + sizeofobject + ALIGN_SIZE - 1;  | 
306  | 54.2k  |     if (prev_hdr_ptr == NULL)   /* first pool in class? */  | 
307  | 47.8k  |       slop = first_pool_slop[pool_id];  | 
308  | 6.41k  |     else  | 
309  | 6.41k  |       slop = extra_pool_slop[pool_id];  | 
310  |  |     /* Don't ask for more than MAX_ALLOC_CHUNK */  | 
311  | 54.2k  |     if (slop > (size_t)(MAX_ALLOC_CHUNK - min_request))  | 
312  | 0  |       slop = (size_t)(MAX_ALLOC_CHUNK - min_request);  | 
313  |  |     /* Try to get space, if fail reduce slop and try again */  | 
314  | 54.2k  |     for (;;) { | 
315  | 54.2k  |       hdr_ptr = (small_pool_ptr)jpeg_get_small(cinfo, min_request + slop);  | 
316  | 54.2k  |       if (hdr_ptr != NULL)  | 
317  | 54.2k  |         break;  | 
318  | 0  |       slop /= 2;  | 
319  | 0  |       if (slop < MIN_SLOP)      /* give up when it gets real small */  | 
320  | 0  |         out_of_memory(cinfo, 2); /* jpeg_get_small failed */  | 
321  | 0  |     }  | 
322  | 54.2k  |     mem->total_space_allocated += min_request + slop;  | 
323  |  |     /* Success, initialize the new pool header and add to end of list */  | 
324  | 54.2k  |     hdr_ptr->next = NULL;  | 
325  | 54.2k  |     hdr_ptr->bytes_used = 0;  | 
326  | 54.2k  |     hdr_ptr->bytes_left = sizeofobject + slop;  | 
327  | 54.2k  |     if (prev_hdr_ptr == NULL)   /* first pool in class? */  | 
328  | 47.8k  |       mem->small_list[pool_id] = hdr_ptr;  | 
329  | 6.41k  |     else  | 
330  | 6.41k  |       prev_hdr_ptr->next = hdr_ptr;  | 
331  | 54.2k  |   }  | 
332  |  |  | 
333  |  |   /* OK, allocate the object from the current pool */  | 
334  | 226k  |   data_ptr = (char *)hdr_ptr; /* point to first data byte in pool... */  | 
335  | 226k  |   data_ptr += sizeof(small_pool_hdr); /* ...by skipping the header... */  | 
336  | 226k  |   if ((size_t)data_ptr % ALIGN_SIZE) /* ...and adjust for alignment */  | 
337  | 226k  |     data_ptr += ALIGN_SIZE - (size_t)data_ptr % ALIGN_SIZE;  | 
338  | 226k  |   data_ptr += hdr_ptr->bytes_used; /* point to place for object */  | 
339  | 226k  |   hdr_ptr->bytes_used += sizeofobject;  | 
340  | 226k  |   hdr_ptr->bytes_left -= sizeofobject;  | 
341  |  |  | 
342  | 226k  |   return (void *)data_ptr;  | 
343  | 226k  | }  | 
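/* [Editorial usage sketch, not part of the library]  Client modules reach
 * alloc_small() through the method pointer installed by jinit_memory_mgr()
 * below.  A hypothetical helper that allocates a 256-entry scratch table with
 * image lifetime would look like this; the table needs no explicit free,
 * because it vanishes when free_pool() releases the IMAGE pool.
 */
static int *
example_alloc_scratch_table(j_common_ptr cinfo)
{
  return (int *)(*cinfo->mem->alloc_small)(cinfo, JPOOL_IMAGE,
                                           256 * sizeof(int));
}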
344  |  |  | 
345  |  |  | 
346  |  | /*  | 
347  |  |  * Allocation of "large" objects.  | 
348  |  |  *  | 
349  |  |  * The external semantics of these are the same as "small" objects.  However,  | 
350  |  |  * the pool management heuristics are quite different.  We assume that each  | 
351  |  |  * request is large enough that it may as well be passed directly to  | 
352  |  |  * jpeg_get_large; the pool management just links everything together  | 
353  |  |  * so that we can free it all on demand.  | 
354  |  |  * Note: the major use of "large" objects is in JSAMPARRAY and JBLOCKARRAY  | 
355  |  |  * structures.  The routines that create these structures (see below)  | 
356  |  |  * deliberately bunch rows together to ensure a large request size.  | 
357  |  |  */  | 
358  |  |  | 
359  |  | METHODDEF(void *)  | 
360  |  | alloc_large(j_common_ptr cinfo, int pool_id, size_t sizeofobject)  | 
361  |  | /* Allocate a "large" object */  | 
362  | 57.3k  | { | 
363  | 57.3k  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
364  | 57.3k  |   large_pool_ptr hdr_ptr;  | 
365  | 57.3k  |   char *data_ptr;  | 
366  |  |  | 
367  |  |   /*  | 
368  |  |    * Round up the requested size to a multiple of ALIGN_SIZE so that  | 
369  |  |    * algorithms can straddle outside the proper area up to the next  | 
370  |  |    * alignment.  | 
371  |  |    */  | 
372  | 57.3k  |   if (sizeofobject > MAX_ALLOC_CHUNK) { | 
373  |  |     /* This prevents overflow/wrap-around in round_up_pow2() if sizeofobject  | 
374  |  |        is close to SIZE_MAX. */  | 
375  | 0  |     out_of_memory(cinfo, 8);  | 
376  | 0  |   }  | 
377  | 57.3k  |   sizeofobject = round_up_pow2(sizeofobject, ALIGN_SIZE);  | 
378  |  |  | 
379  |  |   /* Check for unsatisfiable request (do now to ensure no overflow below) */  | 
380  | 57.3k  |   if ((sizeof(large_pool_hdr) + sizeofobject + ALIGN_SIZE - 1) >  | 
381  | 57.3k  |       MAX_ALLOC_CHUNK)  | 
382  | 0  |     out_of_memory(cinfo, 3);    /* request exceeds malloc's ability */  | 
383  |  |  | 
384  |  |   /* Always make a new pool */  | 
385  | 57.3k  |   if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)  | 
386  | 0  |     ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */  | 
387  |  |  | 
388  | 57.3k  |   hdr_ptr = (large_pool_ptr)jpeg_get_large(cinfo, sizeofobject +  | 
389  | 57.3k  |                                            sizeof(large_pool_hdr) +  | 
390  | 57.3k  |                                            ALIGN_SIZE - 1);  | 
391  | 57.3k  |   if (hdr_ptr == NULL)  | 
392  | 0  |     out_of_memory(cinfo, 4);    /* jpeg_get_large failed */  | 
393  | 57.3k  |   mem->total_space_allocated += sizeofobject + sizeof(large_pool_hdr) +  | 
394  | 57.3k  |                                 ALIGN_SIZE - 1;  | 
395  |  |  | 
396  |  |   /* Success, initialize the new pool header and add to list */  | 
397  | 57.3k  |   hdr_ptr->next = mem->large_list[pool_id];  | 
398  |  |   /* We maintain space counts in each pool header for statistical purposes,  | 
399  |  |    * even though they are not needed for allocation.  | 
400  |  |    */  | 
401  | 57.3k  |   hdr_ptr->bytes_used = sizeofobject;  | 
402  | 57.3k  |   hdr_ptr->bytes_left = 0;  | 
403  | 57.3k  |   mem->large_list[pool_id] = hdr_ptr;  | 
404  |  |  | 
405  | 57.3k  |   data_ptr = (char *)hdr_ptr; /* point to first data byte in pool... */  | 
406  | 57.3k  |   data_ptr += sizeof(small_pool_hdr); /* ...by skipping the header... */  | 
407  | 57.3k  |   if ((size_t)data_ptr % ALIGN_SIZE) /* ...and adjust for alignment */  | 
408  | 57.3k  |     data_ptr += ALIGN_SIZE - (size_t)data_ptr % ALIGN_SIZE;  | 
409  |  |  | 
410  | 57.3k  |   return (void *)data_ptr;  | 
411  | 57.3k  | }  | 
412  |  |  | 
413  |  |  | 
414  |  | /*  | 
415  |  |  * Creation of 2-D sample arrays.  | 
416  |  |  *  | 
417  |  |  * To minimize allocation overhead and to allow I/O of large contiguous  | 
418  |  |  * blocks, we allocate the sample rows in groups of as many rows as possible  | 
419  |  |  * without exceeding MAX_ALLOC_CHUNK total bytes per allocation request.  | 
420  |  |  * NB: the virtual array control routines, later in this file, know about  | 
421  |  |  * this chunking of rows.  The rowsperchunk value is left in the mem manager  | 
422  |  |  * object so that it can be saved away if this sarray is the workspace for  | 
423  |  |  * a virtual array.  | 
424  |  |  *  | 
425  |  |  * Since we are often upsampling with a factor 2, we align the size (not  | 
426  |  |  * the start) to 2 * ALIGN_SIZE so that the upsampling routines don't have  | 
427  |  |  * to be as careful about size.  | 
428  |  |  */  | 
429  |  |  | 
430  |  | METHODDEF(JSAMPARRAY)  | 
431  |  | alloc_sarray(j_common_ptr cinfo, int pool_id, JDIMENSION samplesperrow,  | 
432  |  |              JDIMENSION numrows)  | 
433  |  | /* Allocate a 2-D sample array */  | 
434  | 48.8k  | { | 
435  | 48.8k  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
436  | 48.8k  |   JSAMPARRAY result;  | 
437  | 48.8k  |   JSAMPROW workspace;  | 
438  | 48.8k  |   JDIMENSION rowsperchunk, currow, i;  | 
439  | 48.8k  |   long ltemp;  | 
440  |  |  | 
441  |  |   /* Make sure each row is properly aligned */  | 
442  | 48.8k  |   if ((ALIGN_SIZE % sizeof(JSAMPLE)) != 0)  | 
443  | 0  |     out_of_memory(cinfo, 5);    /* safety check */  | 
444  |  |  | 
445  | 48.8k  |   if (samplesperrow > MAX_ALLOC_CHUNK) { | 
446  |  |     /* This prevents overflow/wrap-around in round_up_pow2() if samplesperrow  | 
447  |  |        is close to SIZE_MAX. */  | 
448  | 0  |     out_of_memory(cinfo, 9);  | 
449  | 0  |   }  | 
450  | 48.8k  |   samplesperrow = (JDIMENSION)round_up_pow2(samplesperrow, (2 * ALIGN_SIZE) /  | 
451  | 48.8k  |                                                            sizeof(JSAMPLE));  | 
452  |  |  | 
453  |  |   /* Calculate max # of rows allowed in one allocation chunk */  | 
454  | 48.8k  |   ltemp = (MAX_ALLOC_CHUNK - sizeof(large_pool_hdr)) /  | 
455  | 48.8k  |           ((long)samplesperrow * sizeof(JSAMPLE));  | 
456  | 48.8k  |   if (ltemp <= 0)  | 
457  | 0  |     ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);  | 
458  | 48.8k  |   if (ltemp < (long)numrows)  | 
459  | 0  |     rowsperchunk = (JDIMENSION)ltemp;  | 
460  | 48.8k  |   else  | 
461  | 48.8k  |     rowsperchunk = numrows;  | 
462  | 48.8k  |   mem->last_rowsperchunk = rowsperchunk;  | 
463  |  |  | 
464  |  |   /* Get space for row pointers (small object) */  | 
465  | 48.8k  |   result = (JSAMPARRAY)alloc_small(cinfo, pool_id,  | 
466  | 48.8k  |                                    (size_t)(numrows * sizeof(JSAMPROW)));  | 
467  |  |  | 
468  |  |   /* Get the rows themselves (large objects) */  | 
469  | 48.8k  |   currow = 0;  | 
470  | 97.6k  |   while (currow < numrows) { | 
471  | 48.8k  |     rowsperchunk = MIN(rowsperchunk, numrows - currow);  | 
472  | 48.8k  |     workspace = (JSAMPROW)alloc_large(cinfo, pool_id,  | 
473  | 48.8k  |       (size_t)((size_t)rowsperchunk * (size_t)samplesperrow *  | 
474  | 48.8k  |                sizeof(JSAMPLE)));  | 
475  | 252k  |     for (i = rowsperchunk; i > 0; i--) { | 
476  | 203k  |       result[currow++] = workspace;  | 
477  | 203k  |       workspace += samplesperrow;  | 
478  | 203k  |     }  | 
479  | 48.8k  |   }  | 
480  |  |  | 
481  | 48.8k  |   return result;  | 
482  | 48.8k  | }  | 
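/* [Editorial usage sketch, not part of the library]  The classic use of
 * alloc_sarray() is a strip buffer one scanline high, as in the library's
 * example code; "dinfo" is assumed to be a decompression object on which
 * jpeg_start_decompress() has already run, so the output_* fields are valid.
 */
static JSAMPARRAY
example_alloc_scanline_buffer(j_decompress_ptr dinfo)
{
  JDIMENSION row_stride = dinfo->output_width * dinfo->output_components;

  return (*dinfo->mem->alloc_sarray)((j_common_ptr)dinfo, JPOOL_IMAGE,
                                     row_stride, (JDIMENSION)1);
}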
483  |  |  | 
484  |  |  | 
485  |  | /*  | 
486  |  |  * Creation of 2-D coefficient-block arrays.  | 
487  |  |  * This is essentially the same as the code for sample arrays, above.  | 
488  |  |  */  | 
489  |  |  | 
490  |  | METHODDEF(JBLOCKARRAY)  | 
491  |  | alloc_barray(j_common_ptr cinfo, int pool_id, JDIMENSION blocksperrow,  | 
492  |  |              JDIMENSION numrows)  | 
493  |  | /* Allocate a 2-D coefficient-block array */  | 
494  | 2.89k  | { | 
495  | 2.89k  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
496  | 2.89k  |   JBLOCKARRAY result;  | 
497  | 2.89k  |   JBLOCKROW workspace;  | 
498  | 2.89k  |   JDIMENSION rowsperchunk, currow, i;  | 
499  | 2.89k  |   long ltemp;  | 
500  |  |  | 
501  |  |   /* Make sure each row is properly aligned */  | 
502  | 2.89k  |   if ((sizeof(JBLOCK) % ALIGN_SIZE) != 0)  | 
503  | 0  |     out_of_memory(cinfo, 6);    /* safety check */  | 
504  |  |  | 
505  |  |   /* Calculate max # of rows allowed in one allocation chunk */  | 
506  | 2.89k  |   ltemp = (MAX_ALLOC_CHUNK - sizeof(large_pool_hdr)) /  | 
507  | 2.89k  |           ((long)blocksperrow * sizeof(JBLOCK));  | 
508  | 2.89k  |   if (ltemp <= 0)  | 
509  | 0  |     ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);  | 
510  | 2.89k  |   if (ltemp < (long)numrows)  | 
511  | 0  |     rowsperchunk = (JDIMENSION)ltemp;  | 
512  | 2.89k  |   else  | 
513  | 2.89k  |     rowsperchunk = numrows;  | 
514  | 2.89k  |   mem->last_rowsperchunk = rowsperchunk;  | 
515  |  |  | 
516  |  |   /* Get space for row pointers (small object) */  | 
517  | 2.89k  |   result = (JBLOCKARRAY)alloc_small(cinfo, pool_id,  | 
518  | 2.89k  |                                     (size_t)(numrows * sizeof(JBLOCKROW)));  | 
519  |  |  | 
520  |  |   /* Get the rows themselves (large objects) */  | 
521  | 2.89k  |   currow = 0;  | 
522  | 5.79k  |   while (currow < numrows) { | 
523  | 2.89k  |     rowsperchunk = MIN(rowsperchunk, numrows - currow);  | 
524  | 2.89k  |     workspace = (JBLOCKROW)alloc_large(cinfo, pool_id,  | 
525  | 2.89k  |         (size_t)((size_t)rowsperchunk * (size_t)blocksperrow *  | 
526  | 2.89k  |                   sizeof(JBLOCK)));  | 
527  | 1.06M  |     for (i = rowsperchunk; i > 0; i--) { | 
528  | 1.06M  |       result[currow++] = workspace;  | 
529  | 1.06M  |       workspace += blocksperrow;  | 
530  | 1.06M  |     }  | 
531  | 2.89k  |   }  | 
532  |  |  | 
533  | 2.89k  |   return result;  | 
534  | 2.89k  | }  | 
535  |  |  | 
536  |  |  | 
537  |  | /*  | 
538  |  |  * About virtual array management:  | 
539  |  |  *  | 
540  |  |  * The above "normal" array routines are only used to allocate strip buffers  | 
541  |  |  * (as wide as the image, but just a few rows high).  Full-image-sized buffers  | 
542  |  |  * are handled as "virtual" arrays.  The array is still accessed a strip at a  | 
543  |  |  * time, but the memory manager must save the whole array for repeated  | 
544  |  |  * accesses.  The intended implementation is that there is a strip buffer in  | 
545  |  |  * memory (as high as is possible given the desired memory limit), plus a  | 
546  |  |  * backing file that holds the rest of the array.  | 
547  |  |  *  | 
548  |  |  * The request_virt_array routines are told the total size of the image and  | 
549  |  |  * the maximum number of rows that will be accessed at once.  The in-memory  | 
550  |  |  * buffer must be at least as large as the maxaccess value.  | 
551  |  |  *  | 
552  |  |  * The request routines create control blocks but not the in-memory buffers.  | 
553  |  |  * That is postponed until realize_virt_arrays is called.  At that time the  | 
554  |  |  * total amount of space needed is known (approximately, anyway), so free  | 
555  |  |  * memory can be divided up fairly.  | 
556  |  |  *  | 
557  |  |  * The access_virt_array routines are responsible for making a specific strip  | 
558  |  |  * area accessible (after reading or writing the backing file, if necessary).  | 
559  |  |  * Note that the access routines are told whether the caller intends to modify  | 
560  |  |  * the accessed strip; during a read-only pass this saves having to rewrite  | 
561  |  |  * data to disk.  The access routines are also responsible for pre-zeroing  | 
562  |  |  * any newly accessed rows, if pre-zeroing was requested.  | 
563  |  |  *  | 
564  |  |  * In current usage, the access requests are usually for nonoverlapping  | 
565  |  |  * strips; that is, successive access start_row numbers differ by exactly  | 
566  |  |  * num_rows = maxaccess.  This means we can get good performance with simple  | 
567  |  |  * buffer dump/reload logic, by making the in-memory buffer be a multiple  | 
568  |  |  * of the access height; then there will never be accesses across bufferload  | 
569  |  |  * boundaries.  The code will still work with overlapping access requests,  | 
570  |  |  * but it doesn't handle bufferload overlaps very efficiently.  | 
571  |  |  */  | 
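/* [Editorial sketch, not part of the library]  The request -> realize ->
 * access life cycle described above, condensed into one hypothetical helper.
 * The dimensions are plain parameters here; in the library they would come
 * from a component's size and sampling factors.
 */
static void
example_virt_barray_lifecycle(j_common_ptr cinfo, JDIMENSION blocks_per_row,
                              JDIMENSION total_rows,
                              JDIMENSION rows_per_access)
{
  jvirt_barray_ptr coef;
  JBLOCKARRAY window;

  /* 1. Setup pass: describe the full-image array and the access pattern. */
  coef = (*cinfo->mem->request_virt_barray)(cinfo, JPOOL_IMAGE,
                                            TRUE /* pre_zero */,
                                            blocks_per_row, total_rows,
                                            rows_per_access);

  /* 2. After all requests are in: create the in-memory buffers (and backing
   * store, if the whole image does not fit) in a single pass.
   */
  (*cinfo->mem->realize_virt_arrays)(cinfo);

  /* 3. Per strip: map one rows_per_access-high window; writable on the
   * first (writing) pass, read-only on later passes.
   */
  window = (*cinfo->mem->access_virt_barray)(cinfo, coef, 0, rows_per_access,
                                             TRUE /* writable */);
  (void)window;
}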
572  |  |  | 
573  |  |  | 
574  |  | METHODDEF(jvirt_sarray_ptr)  | 
575  |  | request_virt_sarray(j_common_ptr cinfo, int pool_id, boolean pre_zero,  | 
576  |  |                     JDIMENSION samplesperrow, JDIMENSION numrows,  | 
577  |  |                     JDIMENSION maxaccess)  | 
578  |  | /* Request a virtual 2-D sample array */  | 
579  | 0  | { | 
580  | 0  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
581  | 0  |   jvirt_sarray_ptr result;  | 
582  |  |  | 
583  |  |   /* Only IMAGE-lifetime virtual arrays are currently supported */  | 
584  | 0  |   if (pool_id != JPOOL_IMAGE)  | 
585  | 0  |     ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */  | 
586  |  |  | 
587  |  |   /* get control block */  | 
588  | 0  |   result = (jvirt_sarray_ptr)alloc_small(cinfo, pool_id,  | 
589  | 0  |                                          sizeof(struct jvirt_sarray_control));  | 
590  |  |  | 
591  | 0  |   result->mem_buffer = NULL;    /* marks array not yet realized */  | 
592  | 0  |   result->rows_in_array = numrows;  | 
593  | 0  |   result->samplesperrow = samplesperrow;  | 
594  | 0  |   result->maxaccess = maxaccess;  | 
595  | 0  |   result->pre_zero = pre_zero;  | 
596  | 0  |   result->b_s_open = FALSE;     /* no associated backing-store object */  | 
597  | 0  |   result->next = mem->virt_sarray_list; /* add to list of virtual arrays */  | 
598  | 0  |   mem->virt_sarray_list = result;  | 
599  |  |  | 
600  | 0  |   return result;  | 
601  | 0  | }  | 
602  |  |  | 
603  |  |  | 
604  |  | METHODDEF(jvirt_barray_ptr)  | 
605  |  | request_virt_barray(j_common_ptr cinfo, int pool_id, boolean pre_zero,  | 
606  |  |                     JDIMENSION blocksperrow, JDIMENSION numrows,  | 
607  |  |                     JDIMENSION maxaccess)  | 
608  |  | /* Request a virtual 2-D coefficient-block array */  | 
609  | 2.89k  | { | 
610  | 2.89k  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
611  | 2.89k  |   jvirt_barray_ptr result;  | 
612  |  |  | 
613  |  |   /* Only IMAGE-lifetime virtual arrays are currently supported */  | 
614  | 2.89k  |   if (pool_id != JPOOL_IMAGE)  | 
615  | 0  |     ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */  | 
616  |  |  | 
617  |  |   /* get control block */  | 
618  | 2.89k  |   result = (jvirt_barray_ptr)alloc_small(cinfo, pool_id,  | 
619  | 2.89k  |                                          sizeof(struct jvirt_barray_control));  | 
620  |  |  | 
621  | 2.89k  |   result->mem_buffer = NULL;    /* marks array not yet realized */  | 
622  | 2.89k  |   result->rows_in_array = numrows;  | 
623  | 2.89k  |   result->blocksperrow = blocksperrow;  | 
624  | 2.89k  |   result->maxaccess = maxaccess;  | 
625  | 2.89k  |   result->pre_zero = pre_zero;  | 
626  | 2.89k  |   result->b_s_open = FALSE;     /* no associated backing-store object */  | 
627  | 2.89k  |   result->next = mem->virt_barray_list; /* add to list of virtual arrays */  | 
628  | 2.89k  |   mem->virt_barray_list = result;  | 
629  |  |  | 
630  | 2.89k  |   return result;  | 
631  | 2.89k  | }  | 
632  |  |  | 
633  |  |  | 
634  |  | METHODDEF(void)  | 
635  |  | realize_virt_arrays(j_common_ptr cinfo)  | 
636  |  | /* Allocate the in-memory buffers for any unrealized virtual arrays */  | 
637  | 18.9k  | { | 
638  | 18.9k  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
639  | 18.9k  |   size_t space_per_minheight, maximum_space, avail_mem;  | 
640  | 18.9k  |   size_t minheights, max_minheights;  | 
641  | 18.9k  |   jvirt_sarray_ptr sptr;  | 
642  | 18.9k  |   jvirt_barray_ptr bptr;  | 
643  |  |  | 
644  |  |   /* Compute the minimum space needed (maxaccess rows in each buffer)  | 
645  |  |    * and the maximum space needed (full image height in each buffer).  | 
646  |  |    * These may be of use to the system-dependent jpeg_mem_available routine.  | 
647  |  |    */  | 
648  | 18.9k  |   space_per_minheight = 0;  | 
649  | 18.9k  |   maximum_space = 0;  | 
650  | 18.9k  |   for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) { | 
651  | 0  |     if (sptr->mem_buffer == NULL) { /* if not realized yet */ | 
652  | 0  |       size_t new_space = (long)sptr->rows_in_array *  | 
653  | 0  |                          (long)sptr->samplesperrow * sizeof(JSAMPLE);  | 
654  |  |  | 
655  | 0  |       space_per_minheight += (long)sptr->maxaccess *  | 
656  | 0  |                              (long)sptr->samplesperrow * sizeof(JSAMPLE);  | 
657  | 0  |       if (SIZE_MAX - maximum_space < new_space)  | 
658  | 0  |         out_of_memory(cinfo, 10);  | 
659  | 0  |       maximum_space += new_space;  | 
660  | 0  |     }  | 
661  | 0  |   }  | 
662  | 21.8k  |   for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) { | 
663  | 2.89k  |     if (bptr->mem_buffer == NULL) { /* if not realized yet */ | 
664  | 2.89k  |       size_t new_space = (long)bptr->rows_in_array *  | 
665  | 2.89k  |                          (long)bptr->blocksperrow * sizeof(JBLOCK);  | 
666  |  |  | 
667  | 2.89k  |       space_per_minheight += (long)bptr->maxaccess *  | 
668  | 2.89k  |                              (long)bptr->blocksperrow * sizeof(JBLOCK);  | 
669  | 2.89k  |       if (SIZE_MAX - maximum_space < new_space)  | 
670  | 0  |         out_of_memory(cinfo, 11);  | 
671  | 2.89k  |       maximum_space += new_space;  | 
672  | 2.89k  |     }  | 
673  | 2.89k  |   }  | 
674  |  |  | 
675  | 18.9k  |   if (space_per_minheight <= 0)  | 
676  | 18.0k  |     return;                     /* no unrealized arrays, no work */  | 
677  |  |  | 
678  |  |   /* Determine amount of memory to actually use; this is system-dependent. */  | 
679  | 965  |   avail_mem = jpeg_mem_available(cinfo, space_per_minheight, maximum_space,  | 
680  | 965  |                                  mem->total_space_allocated);  | 
681  |  |  | 
682  |  |   /* If the maximum space needed is available, make all the buffers full  | 
683  |  |    * height; otherwise parcel it out with the same number of minheights  | 
684  |  |    * in each buffer.  | 
685  |  |    */  | 
686  | 965  |   if (avail_mem >= maximum_space)  | 
687  | 965  |     max_minheights = 1000000000L;  | 
688  | 0  |   else { | 
689  | 0  |     max_minheights = avail_mem / space_per_minheight;  | 
690  |  |     /* If there doesn't seem to be enough space, try to get the minimum  | 
691  |  |      * anyway.  This allows a "stub" implementation of jpeg_mem_available().  | 
692  |  |      */  | 
693  | 0  |     if (max_minheights <= 0)  | 
694  | 0  |       max_minheights = 1;  | 
695  | 0  |   }  | 
696  |  |  | 
697  |  |   /* Allocate the in-memory buffers and initialize backing store as needed. */  | 
698  |  |  | 
699  | 965  |   for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) { | 
700  | 0  |     if (sptr->mem_buffer == NULL) { /* if not realized yet */ | 
701  | 0  |       minheights = ((long)sptr->rows_in_array - 1L) / sptr->maxaccess + 1L;  | 
702  | 0  |       if (minheights <= max_minheights) { | 
703  |  |         /* This buffer fits in memory */  | 
704  | 0  |         sptr->rows_in_mem = sptr->rows_in_array;  | 
705  | 0  |       } else { | 
706  |  |         /* It doesn't fit in memory, create backing store. */  | 
707  | 0  |         sptr->rows_in_mem = (JDIMENSION)(max_minheights * sptr->maxaccess);  | 
708  | 0  |         jpeg_open_backing_store(cinfo, &sptr->b_s_info,  | 
709  | 0  |                                 (long)sptr->rows_in_array *  | 
710  | 0  |                                 (long)sptr->samplesperrow *  | 
711  | 0  |                                 (long)sizeof(JSAMPLE));  | 
712  | 0  |         sptr->b_s_open = TRUE;  | 
713  | 0  |       }  | 
714  | 0  |       sptr->mem_buffer = alloc_sarray(cinfo, JPOOL_IMAGE,  | 
715  | 0  |                                       sptr->samplesperrow, sptr->rows_in_mem);  | 
716  | 0  |       sptr->rowsperchunk = mem->last_rowsperchunk;  | 
717  | 0  |       sptr->cur_start_row = 0;  | 
718  | 0  |       sptr->first_undef_row = 0;  | 
719  | 0  |       sptr->dirty = FALSE;  | 
720  | 0  |     }  | 
721  | 0  |   }  | 
722  |  |  | 
723  | 3.86k  |   for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) { | 
724  | 2.89k  |     if (bptr->mem_buffer == NULL) { /* if not realized yet */ | 
725  | 2.89k  |       minheights = ((long)bptr->rows_in_array - 1L) / bptr->maxaccess + 1L;  | 
726  | 2.89k  |       if (minheights <= max_minheights) { | 
727  |  |         /* This buffer fits in memory */  | 
728  | 2.89k  |         bptr->rows_in_mem = bptr->rows_in_array;  | 
729  | 2.89k  |       } else { | 
730  |  |         /* It doesn't fit in memory, create backing store. */  | 
731  | 0  |         bptr->rows_in_mem = (JDIMENSION)(max_minheights * bptr->maxaccess);  | 
732  | 0  |         jpeg_open_backing_store(cinfo, &bptr->b_s_info,  | 
733  | 0  |                                 (long)bptr->rows_in_array *  | 
734  | 0  |                                 (long)bptr->blocksperrow *  | 
735  | 0  |                                 (long)sizeof(JBLOCK));  | 
736  | 0  |         bptr->b_s_open = TRUE;  | 
737  | 0  |       }  | 
738  | 2.89k  |       bptr->mem_buffer = alloc_barray(cinfo, JPOOL_IMAGE,  | 
739  | 2.89k  |                                       bptr->blocksperrow, bptr->rows_in_mem);  | 
740  | 2.89k  |       bptr->rowsperchunk = mem->last_rowsperchunk;  | 
741  | 2.89k  |       bptr->cur_start_row = 0;  | 
742  | 2.89k  |       bptr->first_undef_row = 0;  | 
743  | 2.89k  |       bptr->dirty = FALSE;  | 
744  | 2.89k  |     }  | 
745  | 2.89k  |   }  | 
746  | 965  | }  | 
747  |  |  | 
748  |  |  | 
749  |  | LOCAL(void)  | 
750  |  | do_sarray_io(j_common_ptr cinfo, jvirt_sarray_ptr ptr, boolean writing)  | 
751  |  | /* Do backing store read or write of a virtual sample array */  | 
752  | 0  | { | 
753  | 0  |   long bytesperrow, file_offset, byte_count, rows, thisrow, i;  | 
754  |  | 
  | 
755  | 0  |   bytesperrow = (long)ptr->samplesperrow * sizeof(JSAMPLE);  | 
756  | 0  |   file_offset = ptr->cur_start_row * bytesperrow;  | 
757  |  |   /* Loop to read or write each allocation chunk in mem_buffer */  | 
758  | 0  |   for (i = 0; i < (long)ptr->rows_in_mem; i += ptr->rowsperchunk) { | 
759  |  |     /* One chunk, but check for short chunk at end of buffer */  | 
760  | 0  |     rows = MIN((long)ptr->rowsperchunk, (long)ptr->rows_in_mem - i);  | 
761  |  |     /* Transfer no more than is currently defined */  | 
762  | 0  |     thisrow = (long)ptr->cur_start_row + i;  | 
763  | 0  |     rows = MIN(rows, (long)ptr->first_undef_row - thisrow);  | 
764  |  |     /* Transfer no more than fits in file */  | 
765  | 0  |     rows = MIN(rows, (long)ptr->rows_in_array - thisrow);  | 
766  | 0  |     if (rows <= 0)              /* this chunk might be past end of file! */  | 
767  | 0  |       break;  | 
768  | 0  |     byte_count = rows * bytesperrow;  | 
769  | 0  |     if (writing)  | 
770  | 0  |       (*ptr->b_s_info.write_backing_store) (cinfo, &ptr->b_s_info,  | 
771  | 0  |                                             (void *)ptr->mem_buffer[i],  | 
772  | 0  |                                             file_offset, byte_count);  | 
773  | 0  |     else  | 
774  | 0  |       (*ptr->b_s_info.read_backing_store) (cinfo, &ptr->b_s_info,  | 
775  | 0  |                                            (void *)ptr->mem_buffer[i],  | 
776  | 0  |                                            file_offset, byte_count);  | 
777  | 0  |     file_offset += byte_count;  | 
778  | 0  |   }  | 
779  | 0  | }  | 
780  |  |  | 
781  |  |  | 
782  |  | LOCAL(void)  | 
783  |  | do_barray_io(j_common_ptr cinfo, jvirt_barray_ptr ptr, boolean writing)  | 
784  |  | /* Do backing store read or write of a virtual coefficient-block array */  | 
785  | 0  | { | 
786  | 0  |   long bytesperrow, file_offset, byte_count, rows, thisrow, i;  | 
787  |  |  | 
788  | 0  |   bytesperrow = (long)ptr->blocksperrow * sizeof(JBLOCK);  | 
789  | 0  |   file_offset = ptr->cur_start_row * bytesperrow;  | 
790  |  |   /* Loop to read or write each allocation chunk in mem_buffer */  | 
791  | 0  |   for (i = 0; i < (long)ptr->rows_in_mem; i += ptr->rowsperchunk) { | 
792  |  |     /* One chunk, but check for short chunk at end of buffer */  | 
793  | 0  |     rows = MIN((long)ptr->rowsperchunk, (long)ptr->rows_in_mem - i);  | 
794  |  |     /* Transfer no more than is currently defined */  | 
795  | 0  |     thisrow = (long)ptr->cur_start_row + i;  | 
796  | 0  |     rows = MIN(rows, (long)ptr->first_undef_row - thisrow);  | 
797  |  |     /* Transfer no more than fits in file */  | 
798  | 0  |     rows = MIN(rows, (long)ptr->rows_in_array - thisrow);  | 
799  | 0  |     if (rows <= 0)              /* this chunk might be past end of file! */  | 
800  | 0  |       break;  | 
801  | 0  |     byte_count = rows * bytesperrow;  | 
802  | 0  |     if (writing)  | 
803  | 0  |       (*ptr->b_s_info.write_backing_store) (cinfo, &ptr->b_s_info,  | 
804  | 0  |                                             (void *)ptr->mem_buffer[i],  | 
805  | 0  |                                             file_offset, byte_count);  | 
806  | 0  |     else  | 
807  | 0  |       (*ptr->b_s_info.read_backing_store) (cinfo, &ptr->b_s_info,  | 
808  | 0  |                                            (void *)ptr->mem_buffer[i],  | 
809  | 0  |                                            file_offset, byte_count);  | 
810  | 0  |     file_offset += byte_count;  | 
811  | 0  |   }  | 
812  | 0  | }  | 
813  |  |  | 
814  |  |  | 
815  |  | METHODDEF(JSAMPARRAY)  | 
816  |  | access_virt_sarray(j_common_ptr cinfo, jvirt_sarray_ptr ptr,  | 
817  |  |                    JDIMENSION start_row, JDIMENSION num_rows, boolean writable)  | 
818  |  | /* Access the part of a virtual sample array starting at start_row */  | 
819  |  | /* and extending for num_rows rows.  writable is true if  */  | 
820  |  | /* caller intends to modify the accessed area. */  | 
821  | 0  | { | 
822  | 0  |   JDIMENSION end_row = start_row + num_rows;  | 
823  | 0  |   JDIMENSION undef_row;  | 
824  |  |  | 
825  |  |   /* debugging check */  | 
826  | 0  |   if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess ||  | 
827  | 0  |       ptr->mem_buffer == NULL)  | 
828  | 0  |     ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);  | 
829  |  |  | 
830  |  |   /* Make the desired part of the virtual array accessible */  | 
831  | 0  |   if (start_row < ptr->cur_start_row ||  | 
832  | 0  |       end_row > ptr->cur_start_row + ptr->rows_in_mem) { | 
833  | 0  |     if (!ptr->b_s_open)  | 
834  | 0  |       ERREXIT(cinfo, JERR_VIRTUAL_BUG);  | 
835  |  |     /* Flush old buffer contents if necessary */  | 
836  | 0  |     if (ptr->dirty) { | 
837  | 0  |       do_sarray_io(cinfo, ptr, TRUE);  | 
838  | 0  |       ptr->dirty = FALSE;  | 
839  | 0  |     }  | 
840  |  |     /* Decide what part of virtual array to access.  | 
841  |  |      * Algorithm: if target address > current window, assume forward scan,  | 
842  |  |      * load starting at target address.  If target address < current window,  | 
843  |  |      * assume backward scan, load so that target area is top of window.  | 
844  |  |      * Note that when switching from forward write to forward read, will have  | 
845  |  |      * start_row = 0, so the limiting case applies and we load from 0 anyway.  | 
846  |  |      */  | 
847  | 0  |     if (start_row > ptr->cur_start_row) { | 
848  | 0  |       ptr->cur_start_row = start_row;  | 
849  | 0  |     } else { | 
850  |  |       /* use long arithmetic here to avoid overflow & unsigned problems */  | 
851  | 0  |       long ltemp;  | 
852  |  | 
  | 
853  | 0  |       ltemp = (long)end_row - (long)ptr->rows_in_mem;  | 
854  | 0  |       if (ltemp < 0)  | 
855  | 0  |         ltemp = 0;              /* don't fall off front end of file */  | 
856  | 0  |       ptr->cur_start_row = (JDIMENSION)ltemp;  | 
857  | 0  |     }  | 
858  |  |     /* Read in the selected part of the array.  | 
859  |  |      * During the initial write pass, we will do no actual read  | 
860  |  |      * because the selected part is all undefined.  | 
861  |  |      */  | 
862  | 0  |     do_sarray_io(cinfo, ptr, FALSE);  | 
863  | 0  |   }  | 
864  |  |   /* Ensure the accessed part of the array is defined; prezero if needed.  | 
865  |  |    * To improve locality of access, we only prezero the part of the array  | 
866  |  |    * that the caller is about to access, not the entire in-memory array.  | 
867  |  |    */  | 
868  | 0  |   if (ptr->first_undef_row < end_row) { | 
869  | 0  |     if (ptr->first_undef_row < start_row) { | 
870  | 0  |       if (writable)             /* writer skipped over a section of array */  | 
871  | 0  |         ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);  | 
872  | 0  |       undef_row = start_row;    /* but reader is allowed to read ahead */  | 
873  | 0  |     } else { | 
874  | 0  |       undef_row = ptr->first_undef_row;  | 
875  | 0  |     }  | 
876  | 0  |     if (writable)  | 
877  | 0  |       ptr->first_undef_row = end_row;  | 
878  | 0  |     if (ptr->pre_zero) { | 
879  | 0  |       size_t bytesperrow = (size_t)ptr->samplesperrow * sizeof(JSAMPLE);  | 
880  | 0  |       undef_row -= ptr->cur_start_row; /* make indexes relative to buffer */  | 
881  | 0  |       end_row -= ptr->cur_start_row;  | 
882  | 0  |       while (undef_row < end_row) { | 
883  | 0  |         jzero_far((void *)ptr->mem_buffer[undef_row], bytesperrow);  | 
884  | 0  |         undef_row++;  | 
885  | 0  |       }  | 
886  | 0  |     } else { | 
887  | 0  |       if (!writable)            /* reader looking at undefined data */  | 
888  | 0  |         ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);  | 
889  | 0  |     }  | 
890  | 0  |   }  | 
891  |  |   /* Flag the buffer dirty if caller will write in it */  | 
892  | 0  |   if (writable)  | 
893  | 0  |     ptr->dirty = TRUE;  | 
894  |  |   /* Return address of proper part of the buffer */  | 
895  | 0  |   return ptr->mem_buffer + (start_row - ptr->cur_start_row);  | 
896  | 0  | }  | 
897  |  |  | 
898  |  |  | 
899  |  | METHODDEF(JBLOCKARRAY)  | 
900  |  | access_virt_barray(j_common_ptr cinfo, jvirt_barray_ptr ptr,  | 
901  |  |                    JDIMENSION start_row, JDIMENSION num_rows, boolean writable)  | 
902  |  | /* Access the part of a virtual block array starting at start_row */  | 
903  |  | /* and extending for num_rows rows.  writable is true if  */  | 
904  |  | /* caller intends to modify the accessed area. */  | 
905  | 9.92M  | { | 
906  | 9.92M  |   JDIMENSION end_row = start_row + num_rows;  | 
907  | 9.92M  |   JDIMENSION undef_row;  | 
908  |  |  | 
909  |  |   /* debugging check */  | 
910  | 9.92M  |   if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess ||  | 
911  | 9.92M  |       ptr->mem_buffer == NULL)  | 
912  | 0  |     ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);  | 
913  |  |  | 
914  |  |   /* Make the desired part of the virtual array accessible */  | 
915  | 9.92M  |   if (start_row < ptr->cur_start_row ||  | 
916  | 9.92M  |       end_row > ptr->cur_start_row + ptr->rows_in_mem) { | 
917  | 0  |     if (!ptr->b_s_open)  | 
918  | 0  |       ERREXIT(cinfo, JERR_VIRTUAL_BUG);  | 
919  |  |     /* Flush old buffer contents if necessary */  | 
920  | 0  |     if (ptr->dirty) { | 
921  | 0  |       do_barray_io(cinfo, ptr, TRUE);  | 
922  | 0  |       ptr->dirty = FALSE;  | 
923  | 0  |     }  | 
924  |  |     /* Decide what part of virtual array to access.  | 
925  |  |      * Algorithm: if target address > current window, assume forward scan,  | 
926  |  |      * load starting at target address.  If target address < current window,  | 
927  |  |      * assume backward scan, load so that target area is top of window.  | 
928  |  |      * Note that when switching from forward write to forward read, will have  | 
929  |  |      * start_row = 0, so the limiting case applies and we load from 0 anyway.  | 
930  |  |      */  | 
931  | 0  |     if (start_row > ptr->cur_start_row) { | 
932  | 0  |       ptr->cur_start_row = start_row;  | 
933  | 0  |     } else { | 
934  |  |       /* use long arithmetic here to avoid overflow & unsigned problems */  | 
935  | 0  |       long ltemp;  | 
936  |  |  | 
937  | 0  |       ltemp = (long)end_row - (long)ptr->rows_in_mem;  | 
938  | 0  |       if (ltemp < 0)  | 
939  | 0  |         ltemp = 0;              /* don't fall off front end of file */  | 
940  | 0  |       ptr->cur_start_row = (JDIMENSION)ltemp;  | 
941  | 0  |     }  | 
942  |  |     /* Read in the selected part of the array.  | 
943  |  |      * During the initial write pass, we will do no actual read  | 
944  |  |      * because the selected part is all undefined.  | 
945  |  |      */  | 
946  | 0  |     do_barray_io(cinfo, ptr, FALSE);  | 
947  | 0  |   }  | 
948  |  |   /* Ensure the accessed part of the array is defined; prezero if needed.  | 
949  |  |    * To improve locality of access, we only prezero the part of the array  | 
950  |  |    * that the caller is about to access, not the entire in-memory array.  | 
951  |  |    */  | 
952  | 9.92M  |   if (ptr->first_undef_row < end_row) { | 
953  | 1.06M  |     if (ptr->first_undef_row < start_row) { | 
954  | 0  |       if (writable)             /* writer skipped over a section of array */  | 
955  | 0  |         ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);  | 
956  | 0  |       undef_row = start_row;    /* but reader is allowed to read ahead */  | 
957  | 1.06M  |     } else { | 
958  | 1.06M  |       undef_row = ptr->first_undef_row;  | 
959  | 1.06M  |     }  | 
960  | 1.06M  |     if (writable)  | 
961  | 1.06M  |       ptr->first_undef_row = end_row;  | 
962  | 1.06M  |     if (ptr->pre_zero) { | 
963  | 0  |       size_t bytesperrow = (size_t)ptr->blocksperrow * sizeof(JBLOCK);  | 
964  | 0  |       undef_row -= ptr->cur_start_row; /* make indexes relative to buffer */  | 
965  | 0  |       end_row -= ptr->cur_start_row;  | 
966  | 0  |       while (undef_row < end_row) { | 
967  | 0  |         jzero_far((void *)ptr->mem_buffer[undef_row], bytesperrow);  | 
968  | 0  |         undef_row++;  | 
969  | 0  |       }  | 
970  | 1.06M  |     } else { | 
971  | 1.06M  |       if (!writable)            /* reader looking at undefined data */  | 
972  | 0  |         ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);  | 
973  | 1.06M  |     }  | 
974  | 1.06M  |   }  | 
975  |  |   /* Flag the buffer dirty if caller will write in it */  | 
976  | 9.92M  |   if (writable)  | 
977  | 1.06M  |     ptr->dirty = TRUE;  | 
978  |  |   /* Return address of proper part of the buffer */  | 
979  | 9.92M  |   return ptr->mem_buffer + (start_row - ptr->cur_start_row);  | 
980  | 9.92M  | }  | 
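/* [Editorial arithmetic]  One concrete window move for the backward-scan
 * branch above: suppose rows_in_mem = 64, the current window starts at
 * cur_start_row = 128, and the caller asks for start_row = 100 with
 * num_rows = 8.  Then end_row = 108, ltemp = 108 - 64 = 44, and the buffer
 * is reloaded to cover rows 44..107, so the requested strip 100..107 sits at
 * the top (end) of the new window, ready for a continued backward scan.
 */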
981  |  |  | 
982  |  |  | 
983  |  | /*  | 
984  |  |  * Release all objects belonging to a specified pool.  | 
985  |  |  */  | 
986  |  |  | 
987  |  | METHODDEF(void)  | 
988  |  | free_pool(j_common_ptr cinfo, int pool_id)  | 
989  | 50.7k  | { | 
990  | 50.7k  |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem;  | 
991  | 50.7k  |   small_pool_ptr shdr_ptr;  | 
992  | 50.7k  |   large_pool_ptr lhdr_ptr;  | 
993  | 50.7k  |   size_t space_freed;  | 
994  |  |  | 
995  | 50.7k  |   if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)  | 
996  | 0  |     ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */  | 
997  |  |  | 
998  |  | #ifdef MEM_STATS  | 
999  |  |   if (cinfo->err->trace_level > 1)  | 
1000  |  |     print_mem_stats(cinfo, pool_id); /* print pool's memory usage statistics */  | 
1001  |  | #endif  | 
1002  |  |  | 
1003  |  |   /* If freeing IMAGE pool, close any virtual arrays first */  | 
1004  | 50.7k  |   if (pool_id == JPOOL_IMAGE) { | 
1005  | 28.6k  |     jvirt_sarray_ptr sptr;  | 
1006  | 28.6k  |     jvirt_barray_ptr bptr;  | 
1007  |  |  | 
1008  | 28.6k  |     for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) { | 
1009  | 0  |       if (sptr->b_s_open) {     /* there may be no backing store */ | 
1010  | 0  |         sptr->b_s_open = FALSE; /* prevent recursive close if error */  | 
1011  | 0  |         (*sptr->b_s_info.close_backing_store) (cinfo, &sptr->b_s_info);  | 
1012  | 0  |       }  | 
1013  | 0  |     }  | 
1014  | 28.6k  |     mem->virt_sarray_list = NULL;  | 
1015  | 31.5k  |     for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) { | 
1016  | 2.89k  |       if (bptr->b_s_open) {     /* there may be no backing store */ | 
1017  | 0  |         bptr->b_s_open = FALSE; /* prevent recursive close if error */  | 
1018  | 0  |         (*bptr->b_s_info.close_backing_store) (cinfo, &bptr->b_s_info);  | 
1019  | 0  |       }  | 
1020  | 2.89k  |     }  | 
1021  | 28.6k  |     mem->virt_barray_list = NULL;  | 
1022  | 28.6k  |   }  | 
1023  |  |  | 
1024  |  |   /* Release large objects */  | 
1025  | 50.7k  |   lhdr_ptr = mem->large_list[pool_id];  | 
1026  | 50.7k  |   mem->large_list[pool_id] = NULL;  | 
1027  |  |  | 
1028  | 108k  |   while (lhdr_ptr != NULL) { | 
1029  | 57.3k  |     large_pool_ptr next_lhdr_ptr = lhdr_ptr->next;  | 
1030  | 57.3k  |     space_freed = lhdr_ptr->bytes_used +  | 
1031  | 57.3k  |                   lhdr_ptr->bytes_left +  | 
1032  | 57.3k  |                   sizeof(large_pool_hdr) + ALIGN_SIZE - 1;  | 
1033  | 57.3k  |     jpeg_free_large(cinfo, (void *)lhdr_ptr, space_freed);  | 
1034  | 57.3k  |     mem->total_space_allocated -= space_freed;  | 
1035  | 57.3k  |     lhdr_ptr = next_lhdr_ptr;  | 
1036  | 57.3k  |   }  | 
1037  |  |  | 
1038  |  |   /* Release small objects */  | 
1039  | 50.7k  |   shdr_ptr = mem->small_list[pool_id];  | 
1040  | 50.7k  |   mem->small_list[pool_id] = NULL;  | 
1041  |  |  | 
1042  | 104k  |   while (shdr_ptr != NULL) { | 
1043  | 54.2k  |     small_pool_ptr next_shdr_ptr = shdr_ptr->next;  | 
1044  | 54.2k  |     space_freed = shdr_ptr->bytes_used + shdr_ptr->bytes_left +  | 
1045  | 54.2k  |                   sizeof(small_pool_hdr) + ALIGN_SIZE - 1;  | 
1046  | 54.2k  |     jpeg_free_small(cinfo, (void *)shdr_ptr, space_freed);  | 
1047  | 54.2k  |     mem->total_space_allocated -= space_freed;  | 
1048  | 54.2k  |     shdr_ptr = next_shdr_ptr;  | 
1049  | 54.2k  |   }  | 
1050  | 50.7k  | }  | 
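/* [Editorial usage sketch, not part of the library]  Applications normally
 * reach this routine indirectly: jpeg_abort() and the finish/destroy calls
 * release the IMAGE pool, and jpeg_destroy() frees everything via
 * self_destruct() below.  A direct call that discards all per-image workspace
 * while keeping PERMANENT-pool objects alive would be:
 */
static void
example_discard_image_workspace(j_common_ptr cinfo)
{
  (*cinfo->mem->free_pool)(cinfo, JPOOL_IMAGE);
}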
1051  |  |  | 
1052  |  |  | 
1053  |  | /*  | 
1054  |  |  * Close up shop entirely.  | 
1055  |  |  * Note that this cannot be called unless cinfo->mem is non-NULL.  | 
1056  |  |  */  | 
1057  |  |  | 
1058  |  | METHODDEF(void)  | 
1059  |  | self_destruct(j_common_ptr cinfo)  | 
1060  | 22.0k  | { | 
1061  | 22.0k  |   int pool;  | 
1062  |  |  | 
1063  |  |   /* Close all backing store, release all memory.  | 
1064  |  |    * Releasing pools in reverse order might help avoid fragmentation  | 
1065  |  |    * with some (brain-damaged) malloc libraries.  | 
1066  |  |    */  | 
1067  | 66.1k  |   for (pool = JPOOL_NUMPOOLS - 1; pool >= JPOOL_PERMANENT; pool--) { | 
1068  | 44.1k  |     free_pool(cinfo, pool);  | 
1069  | 44.1k  |   }  | 
1070  |  |  | 
1071  |  |   /* Release the memory manager control block too. */  | 
1072  | 22.0k  |   jpeg_free_small(cinfo, (void *)cinfo->mem, sizeof(my_memory_mgr));  | 
1073  | 22.0k  |   cinfo->mem = NULL;            /* ensures I will be called only once */  | 
1074  |  |  | 
1075  | 22.0k  |   jpeg_mem_term(cinfo);         /* system-dependent cleanup */  | 
1076  | 22.0k  | }  | 
1077  |  |  | 
1078  |  |  | 
1079  |  | /*  | 
1080  |  |  * Memory manager initialization.  | 
1081  |  |  * When this is called, only the error manager pointer is valid in cinfo!  | 
1082  |  |  */  | 
1083  |  |  | 
1084  |  | GLOBAL(void)  | 
1085  |  | jinit_memory_mgr(j_common_ptr cinfo)  | 
1086  | 22.0k  | { | 
1087  | 22.0k  |   my_mem_ptr mem;  | 
1088  | 22.0k  |   long max_to_use;  | 
1089  | 22.0k  |   int pool;  | 
1090  | 22.0k  |   size_t test_mac;  | 
1091  |  |  | 
1092  | 22.0k  |   cinfo->mem = NULL;            /* for safety if init fails */  | 
1093  |  |  | 
1094  |  |   /* Check for configuration errors.  | 
1095  |  |    * ALIGN_SIZE should be a power of 2; otherwise, it probably  | 
1096  |  |    * doesn't reflect any real hardware alignment requirement.  | 
1097  |  |    * The test is a little tricky: for X>0, X and X-1 have no one-bits  | 
1098  |  |    * in common if and only if X is a power of 2, i.e., has only one one-bit.  | 
1099  |  |    * Some compilers may give an "unreachable code" warning here; ignore it.  | 
1100  |  |    */  | 
1101  | 22.0k  |   if ((ALIGN_SIZE & (ALIGN_SIZE - 1)) != 0)  | 
1102  | 0  |     ERREXIT(cinfo, JERR_BAD_ALIGN_TYPE);  | 
1103  |  |   /* MAX_ALLOC_CHUNK must be representable as type size_t, and must be  | 
1104  |  |    * a multiple of ALIGN_SIZE.  | 
1105  |  |    * Again, an "unreachable code" warning may be ignored here.  | 
1106  |  |    * But a "constant too large" warning means you need to fix MAX_ALLOC_CHUNK.  | 
1107  |  |    */  | 
1108  | 22.0k  |   test_mac = (size_t)MAX_ALLOC_CHUNK;  | 
1109  | 22.0k  |   if ((long)test_mac != MAX_ALLOC_CHUNK ||  | 
1110  | 22.0k  |       (MAX_ALLOC_CHUNK % ALIGN_SIZE) != 0)  | 
1111  | 0  |     ERREXIT(cinfo, JERR_BAD_ALLOC_CHUNK);  | 
1112  |  |  | 
1113  | 22.0k  |   max_to_use = jpeg_mem_init(cinfo); /* system-dependent initialization */  | 
1114  |  |  | 
1115  |  |   /* Attempt to allocate memory manager's control block */  | 
1116  | 22.0k  |   mem = (my_mem_ptr)jpeg_get_small(cinfo, sizeof(my_memory_mgr));  | 
1117  |  |  | 
1118  | 22.0k  |   if (mem == NULL) { | 
1119  | 0  |     jpeg_mem_term(cinfo);       /* system-dependent cleanup */  | 
1120  | 0  |     ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 0);  | 
1121  | 0  |   }  | 
1122  |  |  | 
1123  |  |   /* OK, fill in the method pointers */  | 
1124  | 22.0k  |   mem->pub.alloc_small = alloc_small;  | 
1125  | 22.0k  |   mem->pub.alloc_large = alloc_large;  | 
1126  | 22.0k  |   mem->pub.alloc_sarray = alloc_sarray;  | 
1127  | 22.0k  |   mem->pub.alloc_barray = alloc_barray;  | 
1128  | 22.0k  |   mem->pub.request_virt_sarray = request_virt_sarray;  | 
1129  | 22.0k  |   mem->pub.request_virt_barray = request_virt_barray;  | 
1130  | 22.0k  |   mem->pub.realize_virt_arrays = realize_virt_arrays;  | 
1131  | 22.0k  |   mem->pub.access_virt_sarray = access_virt_sarray;  | 
1132  | 22.0k  |   mem->pub.access_virt_barray = access_virt_barray;  | 
1133  | 22.0k  |   mem->pub.free_pool = free_pool;  | 
1134  | 22.0k  |   mem->pub.self_destruct = self_destruct;  | 
1135  |  |  | 
1136  |  |   /* Make MAX_ALLOC_CHUNK accessible to other modules */  | 
1137  | 22.0k  |   mem->pub.max_alloc_chunk = MAX_ALLOC_CHUNK;  | 
1138  |  |  | 
1139  |  |   /* Initialize working state */  | 
1140  | 22.0k  |   mem->pub.max_memory_to_use = max_to_use;  | 
1141  |  |  | 
1142  | 66.1k  |   for (pool = JPOOL_NUMPOOLS - 1; pool >= JPOOL_PERMANENT; pool--) { | 
1143  | 44.1k  |     mem->small_list[pool] = NULL;  | 
1144  | 44.1k  |     mem->large_list[pool] = NULL;  | 
1145  | 44.1k  |   }  | 
1146  | 22.0k  |   mem->virt_sarray_list = NULL;  | 
1147  | 22.0k  |   mem->virt_barray_list = NULL;  | 
1148  |  |  | 
1149  | 22.0k  |   mem->total_space_allocated = sizeof(my_memory_mgr);  | 
1150  |  |  | 
1151  |  |   /* Declare ourselves open for business */  | 
1152  | 22.0k  |   cinfo->mem = &mem->pub;  | 
1153  |  |  | 
1154  |  |   /* Check for an environment variable JPEGMEM; if found, override the  | 
1155  |  |    * default max_memory setting from jpeg_mem_init.  Note that the  | 
1156  |  |    * surrounding application may again override this value.  | 
1157  |  |    * If your system doesn't support getenv(), define NO_GETENV to disable  | 
1158  |  |    * this feature.  | 
1159  |  |    */  | 
1160  | 22.0k  | #ifndef NO_GETENV  | 
1161  | 22.0k  |   { | 
1162  | 22.0k  |     char memenv[30] = { 0 }; | 
1163  |  |  | 
1164  | 22.0k  |     if (!GETENV_S(memenv, 30, "JPEGMEM") && strlen(memenv) > 0) { | 
1165  | 0  |       char ch = 'x';  | 
1166  |  |  | 
1167  |  | #ifdef _MSC_VER  | 
1168  |  |       if (sscanf_s(memenv, "%ld%c", &max_to_use, &ch, 1) > 0) { | 
1169  |  | #else  | 
1170  | 0  |       if (sscanf(memenv, "%ld%c", &max_to_use, &ch) > 0) { | 
1171  | 0  | #endif  | 
1172  | 0  |         if (ch == 'm' || ch == 'M')  | 
1173  | 0  |           max_to_use *= 1000L;  | 
1174  | 0  |         mem->pub.max_memory_to_use = max_to_use * 1000L;  | 
1175  | 0  |       }  | 
1176  | 0  |     }  | 
1177  | 22.0k  |   }  | 
1178  | 22.0k  | #endif  | 
1179  |  |  | 
1180  | 22.0k  | }  |