/src/libjpeg-turbo.2.0.x/jmemmgr.c
| Line | Count | Source (jump to first uncovered line) | 
| 1 |  | /* | 
| 2 |  |  * jmemmgr.c | 
| 3 |  |  * | 
| 4 |  |  * This file was part of the Independent JPEG Group's software: | 
| 5 |  |  * Copyright (C) 1991-1997, Thomas G. Lane. | 
| 6 |  |  * libjpeg-turbo Modifications: | 
| 7 |  |  * Copyright (C) 2016, 2021, D. R. Commander. | 
| 8 |  |  * For conditions of distribution and use, see the accompanying README.ijg | 
| 9 |  |  * file. | 
| 10 |  |  * | 
| 11 |  |  * This file contains the JPEG system-independent memory management | 
| 12 |  |  * routines.  This code is usable across a wide variety of machines; most | 
| 13 |  |  * of the system dependencies have been isolated in a separate file. | 
| 14 |  |  * The major functions provided here are: | 
| 15 |  |  *   * pool-based allocation and freeing of memory; | 
| 16 |  |  *   * policy decisions about how to divide available memory among the | 
| 17 |  |  *     virtual arrays; | 
| 18 |  |  *   * control logic for swapping virtual arrays between main memory and | 
| 19 |  |  *     backing storage. | 
| 20 |  |  * The separate system-dependent file provides the actual backing-storage | 
| 21 |  |  * access code, and it contains the policy decision about how much total | 
| 22 |  |  * main memory to use. | 
| 23 |  |  * This file is system-dependent in the sense that some of its functions | 
| 24 |  |  * are unnecessary in some systems.  For example, if there is enough virtual | 
| 25 |  |  * memory so that backing storage will never be used, much of the virtual | 
| 26 |  |  * array control logic could be removed.  (Of course, if you have that much | 
| 27 |  |  * memory then you shouldn't care about a little bit of unused code...) | 
| 28 |  |  */ | 
| 29 |  |  | 
| 30 |  | #define JPEG_INTERNALS | 
| 31 |  | #define AM_MEMORY_MANAGER       /* we define jvirt_Xarray_control structs */ | 
| 32 |  | #include "jinclude.h" | 
| 33 |  | #include "jpeglib.h" | 
| 34 |  | #include "jmemsys.h"            /* import the system-dependent declarations */ | 
| 35 |  | #if !defined(_MSC_VER) || _MSC_VER > 1600 | 
| 36 |  | #include <stdint.h> | 
| 37 |  | #endif | 
| 38 |  | #include <limits.h> | 
| 39 |  |  | 
| 40 |  | #ifndef NO_GETENV | 
| 41 |  | #ifndef HAVE_STDLIB_H           /* <stdlib.h> should declare getenv() */ | 
| 42 |  | extern char *getenv(const char *name); | 
| 43 |  | #endif | 
| 44 |  | #endif | 
| 45 |  |  | 
| 46 |  |  | 
| 47 |  | LOCAL(size_t) | 
| 48 |  | round_up_pow2(size_t a, size_t b) | 
| 49 |  | /* a rounded up to the next multiple of b, i.e. ceil(a/b)*b */ | 
| 50 |  | /* Assumes a >= 0, b > 0, and b is a power of 2 */ | 
| 51 | 311k | { | 
| 52 | 311k |   return ((a + b - 1) & (~(b - 1))); | 
| 53 | 311k | } | 
| 54 |  |  | 
| 55 |  |  | 
| 56 |  | /* | 
| 57 |  |  * Some important notes: | 
| 58 |  |  *   The allocation routines provided here must never return NULL. | 
| 59 |  |  *   They should exit to error_exit if unsuccessful. | 
| 60 |  |  * | 
| 61 |  |  *   It's not a good idea to try to merge the sarray and barray routines, | 
| 62 |  |  *   even though they are textually almost the same, because samples are | 
| 63 |  |  *   usually stored as bytes while coefficients are shorts or ints.  Thus, | 
| 64 |  |  *   in machines where byte pointers have a different representation from | 
| 65 |  |  *   word pointers, the resulting machine code could not be the same. | 
| 66 |  |  */ | 
| 67 |  |  | 
| 68 |  |  | 
| 69 |  | /* | 
| 70 |  |  * Many machines require storage alignment: longs must start on 4-byte | 
| 71 |  |  * boundaries, doubles on 8-byte boundaries, etc.  On such machines, malloc() | 
| 72 |  |  * always returns pointers that are multiples of the worst-case alignment | 
| 73 |  |  * requirement, and we had better do so too. | 
| 74 |  |  * There isn't any really portable way to determine the worst-case alignment | 
| 75 |  |  * requirement.  This module assumes that the alignment requirement is | 
| 76 |  |  * multiples of ALIGN_SIZE. | 
| 77 |  |  * By default, we define ALIGN_SIZE as sizeof(double).  This is necessary on | 
| 78 |  |  * some workstations (where doubles really do need 8-byte alignment) and will | 
| 79 |  |  * work fine on nearly everything.  If your machine has lesser alignment needs, | 
| 80 |  |  * you can save a few bytes by making ALIGN_SIZE smaller. | 
| 81 |  |  * The only place I know of where this will NOT work is certain Macintosh | 
| 82 |  |  * 680x0 compilers that define double as a 10-byte IEEE extended float. | 
| 83 |  |  * Doing 10-byte alignment is counterproductive because longwords won't be | 
| 84 |  |  * aligned well.  Put "#define ALIGN_SIZE 4" in jconfig.h if you have | 
| 85 |  |  * such a compiler. | 
| 86 |  |  */ | 
| 87 |  |  | 
| 88 |  | #ifndef ALIGN_SIZE              /* so can override from jconfig.h */ | 
| 89 |  | #ifndef WITH_SIMD | 
| 90 |  | #define ALIGN_SIZE  sizeof(double) | 
| 91 |  | #else | 
| 92 | 1.68M | #define ALIGN_SIZE  32 /* Most of the SIMD instructions we support require | 
| 93 |  |                           16-byte (128-bit) alignment, but AVX2 requires | 
| 94 |  |                           32-byte alignment. */ | 
| 95 |  | #endif | 
| 96 |  | #endif | 
| 97 |  |  | 
| 98 |  | /* | 
| 99 |  |  * We allocate objects from "pools", where each pool is gotten with a single | 
| 100 |  |  * request to jpeg_get_small() or jpeg_get_large().  There is no per-object | 
| 101 |  |  * overhead within a pool, except for alignment padding.  Each pool has a | 
| 102 |  |  * header with a link to the next pool of the same class. | 
| 103 |  |  * Small and large pool headers are identical. | 
| 104 |  |  */ | 
| 105 |  |  | 
| 106 |  | typedef struct small_pool_struct *small_pool_ptr; | 
| 107 |  |  | 
| 108 |  | typedef struct small_pool_struct { | 
| 109 |  |   small_pool_ptr next;          /* next in list of pools */ | 
| 110 |  |   size_t bytes_used;            /* how many bytes already used within pool */ | 
| 111 |  |   size_t bytes_left;            /* bytes still available in this pool */ | 
| 112 |  | } small_pool_hdr; | 
| 113 |  |  | 
| 114 |  | typedef struct large_pool_struct *large_pool_ptr; | 
| 115 |  |  | 
| 116 |  | typedef struct large_pool_struct { | 
| 117 |  |   large_pool_ptr next;          /* next in list of pools */ | 
| 118 |  |   size_t bytes_used;            /* how many bytes already used within pool */ | 
| 119 |  |   size_t bytes_left;            /* bytes still available in this pool */ | 
| 120 |  | } large_pool_hdr; | 
| 121 |  |  | 
| 122 |  | /* | 
| 123 |  |  * Here is the full definition of a memory manager object. | 
| 124 |  |  */ | 
| 125 |  |  | 
| 126 |  | typedef struct { | 
| 127 |  |   struct jpeg_memory_mgr pub;   /* public fields */ | 
| 128 |  |  | 
| 129 |  |   /* Each pool identifier (lifetime class) names a linked list of pools. */ | 
| 130 |  |   small_pool_ptr small_list[JPOOL_NUMPOOLS]; | 
| 131 |  |   large_pool_ptr large_list[JPOOL_NUMPOOLS]; | 
| 132 |  |  | 
| 133 |  |   /* Since we only have one lifetime class of virtual arrays, only one | 
| 134 |  |    * linked list is necessary (for each datatype).  Note that the virtual | 
| 135 |  |    * array control blocks being linked together are actually stored somewhere | 
| 136 |  |    * in the small-pool list. | 
| 137 |  |    */ | 
| 138 |  |   jvirt_sarray_ptr virt_sarray_list; | 
| 139 |  |   jvirt_barray_ptr virt_barray_list; | 
| 140 |  |  | 
| 141 |  |   /* This counts total space obtained from jpeg_get_small/large */ | 
| 142 |  |   size_t total_space_allocated; | 
| 143 |  |  | 
| 144 |  |   /* alloc_sarray and alloc_barray set this value for use by virtual | 
| 145 |  |    * array routines. | 
| 146 |  |    */ | 
| 147 |  |   JDIMENSION last_rowsperchunk; /* from most recent alloc_sarray/barray */ | 
| 148 |  | } my_memory_mgr; | 
| 149 |  |  | 
| 150 |  | typedef my_memory_mgr *my_mem_ptr; | 
| 151 |  |  | 
| 152 |  |  | 
| 153 |  | /* | 
| 154 |  |  * The control blocks for virtual arrays. | 
| 155 |  |  * Note that these blocks are allocated in the "small" pool area. | 
| 156 |  |  * System-dependent info for the associated backing store (if any) is hidden | 
| 157 |  |  * inside the backing_store_info struct. | 
| 158 |  |  */ | 
| 159 |  |  | 
| 160 |  | struct jvirt_sarray_control { | 
| 161 |  |   JSAMPARRAY mem_buffer;        /* => the in-memory buffer */ | 
| 162 |  |   JDIMENSION rows_in_array;     /* total virtual array height */ | 
| 163 |  |   JDIMENSION samplesperrow;     /* width of array (and of memory buffer) */ | 
| 164 |  |   JDIMENSION maxaccess;         /* max rows accessed by access_virt_sarray */ | 
| 165 |  |   JDIMENSION rows_in_mem;       /* height of memory buffer */ | 
| 166 |  |   JDIMENSION rowsperchunk;      /* allocation chunk size in mem_buffer */ | 
| 167 |  |   JDIMENSION cur_start_row;     /* first logical row # in the buffer */ | 
| 168 |  |   JDIMENSION first_undef_row;   /* row # of first uninitialized row */ | 
| 169 |  |   boolean pre_zero;             /* pre-zero mode requested? */ | 
| 170 |  |   boolean dirty;                /* do current buffer contents need written? */ | 
| 171 |  |   boolean b_s_open;             /* is backing-store data valid? */ | 
| 172 |  |   jvirt_sarray_ptr next;        /* link to next virtual sarray control block */ | 
| 173 |  |   backing_store_info b_s_info;  /* System-dependent control info */ | 
| 174 |  | }; | 
| 175 |  |  | 
| 176 |  | struct jvirt_barray_control { | 
| 177 |  |   JBLOCKARRAY mem_buffer;       /* => the in-memory buffer */ | 
| 178 |  |   JDIMENSION rows_in_array;     /* total virtual array height */ | 
| 179 |  |   JDIMENSION blocksperrow;      /* width of array (and of memory buffer) */ | 
| 180 |  |   JDIMENSION maxaccess;         /* max rows accessed by access_virt_barray */ | 
| 181 |  |   JDIMENSION rows_in_mem;       /* height of memory buffer */ | 
| 182 |  |   JDIMENSION rowsperchunk;      /* allocation chunk size in mem_buffer */ | 
| 183 |  |   JDIMENSION cur_start_row;     /* first logical row # in the buffer */ | 
| 184 |  |   JDIMENSION first_undef_row;   /* row # of first uninitialized row */ | 
| 185 |  |   boolean pre_zero;             /* pre-zero mode requested? */ | 
| 186 |  |   boolean dirty;                /* do current buffer contents need written? */ | 
| 187 |  |   boolean b_s_open;             /* is backing-store data valid? */ | 
| 188 |  |   jvirt_barray_ptr next;        /* link to next virtual barray control block */ | 
| 189 |  |   backing_store_info b_s_info;  /* System-dependent control info */ | 
| 190 |  | }; | 
| 191 |  |  | 
| 192 |  |  | 
#ifdef MEM_STATS                /* optional extra stuff for statistics */

LOCAL(void)
print_mem_stats(j_common_ptr cinfo, int pool_id)
/* Dump usage statistics for the given pool class to stderr.
 * Debugging aid only; compiled in only when MEM_STATS is defined.
 */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  small_pool_ptr shdr_ptr;
  large_pool_ptr lhdr_ptr;

  /* Since this is only a debugging stub, we can cheat a little by using
   * fprintf directly rather than going through the trace message code.
   * This is helpful because message parm array can't handle longs.
   */
  /* Cast to long: total_space_allocated is size_t, and passing it uncast
   * to %ld is a format/argument type mismatch (undefined behavior; an
   * actual size mismatch on LLP64 platforms such as 64-bit Windows).
   */
  fprintf(stderr, "Freeing pool %d, total space = %ld\n",
          pool_id, (long)mem->total_space_allocated);

  for (lhdr_ptr = mem->large_list[pool_id]; lhdr_ptr != NULL;
       lhdr_ptr = lhdr_ptr->next) {
    fprintf(stderr, "  Large chunk used %ld\n", (long)lhdr_ptr->bytes_used);
  }

  for (shdr_ptr = mem->small_list[pool_id]; shdr_ptr != NULL;
       shdr_ptr = shdr_ptr->next) {
    fprintf(stderr, "  Small chunk used %ld free %ld\n",
            (long)shdr_ptr->bytes_used, (long)shdr_ptr->bytes_left);
  }
}

#endif /* MEM_STATS */
| 222 |  |  | 
| 223 |  |  | 
LOCAL(void)
out_of_memory(j_common_ptr cinfo, int which)
/* Report an out-of-memory error and stop execution */
/* If we compiled MEM_STATS support, report alloc requests before dying */
/* `which` is a small integer identifying the failing call site; it is
 * passed through to the JERR_OUT_OF_MEMORY message for diagnosis.
 * This function does not return: ERREXIT1 invokes the error manager's
 * error_exit, which by convention never comes back.
 */
{
#ifdef MEM_STATS
  cinfo->err->trace_level = 2;  /* force self_destruct to report stats */
#endif
  ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, which);
}
| 234 |  |  | 
| 235 |  |  | 
| 236 |  | /* | 
| 237 |  |  * Allocation of "small" objects. | 
| 238 |  |  * | 
| 239 |  |  * For these, we use pooled storage.  When a new pool must be created, | 
| 240 |  |  * we try to get enough space for the current request plus a "slop" factor, | 
| 241 |  |  * where the slop will be the amount of leftover space in the new pool. | 
| 242 |  |  * The speed vs. space tradeoff is largely determined by the slop values. | 
| 243 |  |  * A different slop value is provided for each pool class (lifetime), | 
| 244 |  |  * and we also distinguish the first pool of a class from later ones. | 
| 245 |  |  * NOTE: the values given work fairly well on both 16- and 32-bit-int | 
| 246 |  |  * machines, but may be too small if longs are 64 bits or more. | 
| 247 |  |  * | 
| 248 |  |  * Since we do not know what alignment malloc() gives us, we have to | 
| 249 |  |  * allocate ALIGN_SIZE-1 extra space per pool to have room for alignment | 
| 250 |  |  * adjustment. | 
| 251 |  |  */ | 
| 252 |  |  | 
| 253 |  | static const size_t first_pool_slop[JPOOL_NUMPOOLS] = { | 
| 254 |  |   1600,                         /* first PERMANENT pool */ | 
| 255 |  |   16000                         /* first IMAGE pool */ | 
| 256 |  | }; | 
| 257 |  |  | 
| 258 |  | static const size_t extra_pool_slop[JPOOL_NUMPOOLS] = { | 
| 259 |  |   0,                            /* additional PERMANENT pools */ | 
| 260 |  |   5000                          /* additional IMAGE pools */ | 
| 261 |  | }; | 
| 262 |  |  | 
| 263 | 0 | #define MIN_SLOP  50            /* greater than 0 to avoid futile looping */ | 
| 264 |  |  | 
| 265 |  |  | 
METHODDEF(void *)
alloc_small(j_common_ptr cinfo, int pool_id, size_t sizeofobject)
/* Allocate a "small" object */
/* Returns an ALIGN_SIZE-aligned pointer from the pool class pool_id.
 * Never returns NULL: any failure exits through out_of_memory()/ERREXIT1.
 * Objects are carved sequentially out of pools; individual objects are
 * never freed — the whole pool class is released at once by free_pool.
 */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  small_pool_ptr hdr_ptr, prev_hdr_ptr;
  char *data_ptr;
  size_t min_request, slop;

  /*
   * Round up the requested size to a multiple of ALIGN_SIZE in order
   * to assure alignment for the next object allocated in the same pool
   * and so that algorithms can straddle outside the proper area up
   * to the next alignment.
   */
  if (sizeofobject > MAX_ALLOC_CHUNK) {
    /* This prevents overflow/wrap-around in round_up_pow2() if sizeofobject
       is close to SIZE_MAX. */
    out_of_memory(cinfo, 7);
  }
  sizeofobject = round_up_pow2(sizeofobject, ALIGN_SIZE);

  /* Check for unsatisfiable request (do now to ensure no overflow below) */
  if ((sizeof(small_pool_hdr) + sizeofobject + ALIGN_SIZE - 1) >
      MAX_ALLOC_CHUNK)
    out_of_memory(cinfo, 1);    /* request exceeds malloc's ability */

  /* See if space is available in any existing pool */
  if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */
  prev_hdr_ptr = NULL;
  hdr_ptr = mem->small_list[pool_id];
  /* First-fit scan; if no pool fits, prev_hdr_ptr ends up at the list
   * tail so the new pool can be appended below.
   */
  while (hdr_ptr != NULL) {
    if (hdr_ptr->bytes_left >= sizeofobject)
      break;                    /* found pool with enough space */
    prev_hdr_ptr = hdr_ptr;
    hdr_ptr = hdr_ptr->next;
  }

  /* Time to make a new pool? */
  if (hdr_ptr == NULL) {
    /* min_request is what we need now, slop is what will be leftover */
    /* ALIGN_SIZE - 1 extra covers the worst-case alignment adjustment of
     * the first data byte, since malloc's alignment is unknown.
     */
    min_request = sizeof(small_pool_hdr) + sizeofobject + ALIGN_SIZE - 1;
    if (prev_hdr_ptr == NULL)   /* first pool in class? */
      slop = first_pool_slop[pool_id];
    else
      slop = extra_pool_slop[pool_id];
    /* Don't ask for more than MAX_ALLOC_CHUNK */
    if (slop > (size_t)(MAX_ALLOC_CHUNK - min_request))
      slop = (size_t)(MAX_ALLOC_CHUNK - min_request);
    /* Try to get space, if fail reduce slop and try again */
    for (;;) {
      hdr_ptr = (small_pool_ptr)jpeg_get_small(cinfo, min_request + slop);
      if (hdr_ptr != NULL)
        break;
      slop /= 2;
      if (slop < MIN_SLOP)      /* give up when it gets real small */
        out_of_memory(cinfo, 2); /* jpeg_get_small failed */
    }
    mem->total_space_allocated += min_request + slop;
    /* Success, initialize the new pool header and add to end of list */
    hdr_ptr->next = NULL;
    hdr_ptr->bytes_used = 0;
    /* bytes_left counts usable payload only (the ALIGN_SIZE-1 padding
     * allowance is deliberately excluded).
     */
    hdr_ptr->bytes_left = sizeofobject + slop;
    if (prev_hdr_ptr == NULL)   /* first pool in class? */
      mem->small_list[pool_id] = hdr_ptr;
    else
      prev_hdr_ptr->next = hdr_ptr;
  }

  /* OK, allocate the object from the current pool */
  data_ptr = (char *)hdr_ptr; /* point to first data byte in pool... */
  data_ptr += sizeof(small_pool_hdr); /* ...by skipping the header... */
  if ((size_t)data_ptr % ALIGN_SIZE) /* ...and adjust for alignment */
    data_ptr += ALIGN_SIZE - (size_t)data_ptr % ALIGN_SIZE;
  data_ptr += hdr_ptr->bytes_used; /* point to place for object */
  hdr_ptr->bytes_used += sizeofobject;
  hdr_ptr->bytes_left -= sizeofobject;

  return (void *)data_ptr;
}
| 347 |  |  | 
| 348 |  |  | 
| 349 |  | /* | 
| 350 |  |  * Allocation of "large" objects. | 
| 351 |  |  * | 
| 352 |  |  * The external semantics of these are the same as "small" objects.  However, | 
| 353 |  |  * the pool management heuristics are quite different.  We assume that each | 
| 354 |  |  * request is large enough that it may as well be passed directly to | 
| 355 |  |  * jpeg_get_large; the pool management just links everything together | 
| 356 |  |  * so that we can free it all on demand. | 
| 357 |  |  * Note: the major use of "large" objects is in JSAMPARRAY and JBLOCKARRAY | 
| 358 |  |  * structures.  The routines that create these structures (see below) | 
| 359 |  |  * deliberately bunch rows together to ensure a large request size. | 
| 360 |  |  */ | 
| 361 |  |  | 
| 362 |  | METHODDEF(void *) | 
| 363 |  | alloc_large(j_common_ptr cinfo, int pool_id, size_t sizeofobject) | 
| 364 |  | /* Allocate a "large" object */ | 
| 365 | 42.1k | { | 
| 366 | 42.1k |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem; | 
| 367 | 42.1k |   large_pool_ptr hdr_ptr; | 
| 368 | 42.1k |   char *data_ptr; | 
| 369 |  |  | 
| 370 |  |   /* | 
| 371 |  |    * Round up the requested size to a multiple of ALIGN_SIZE so that | 
| 372 |  |    * algorithms can straddle outside the proper area up to the next | 
| 373 |  |    * alignment. | 
| 374 |  |    */ | 
| 375 | 42.1k |   if (sizeofobject > MAX_ALLOC_CHUNK) { | 
| 376 |  |     /* This prevents overflow/wrap-around in round_up_pow2() if sizeofobject | 
| 377 |  |        is close to SIZE_MAX. */ | 
| 378 | 0 |     out_of_memory(cinfo, 8); | 
| 379 | 0 |   } | 
| 380 | 42.1k |   sizeofobject = round_up_pow2(sizeofobject, ALIGN_SIZE); | 
| 381 |  |  | 
| 382 |  |   /* Check for unsatisfiable request (do now to ensure no overflow below) */ | 
| 383 | 42.1k |   if ((sizeof(large_pool_hdr) + sizeofobject + ALIGN_SIZE - 1) > | 
| 384 | 42.1k |       MAX_ALLOC_CHUNK) | 
| 385 | 0 |     out_of_memory(cinfo, 3);    /* request exceeds malloc's ability */ | 
| 386 |  |  | 
| 387 |  |   /* Always make a new pool */ | 
| 388 | 42.1k |   if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS) | 
| 389 | 0 |     ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ | 
| 390 |  |  | 
| 391 | 42.1k |   hdr_ptr = (large_pool_ptr)jpeg_get_large(cinfo, sizeofobject + | 
| 392 | 42.1k |                                            sizeof(large_pool_hdr) + | 
| 393 | 42.1k |                                            ALIGN_SIZE - 1); | 
| 394 | 42.1k |   if (hdr_ptr == NULL) | 
| 395 | 0 |     out_of_memory(cinfo, 4);    /* jpeg_get_large failed */ | 
| 396 | 42.1k |   mem->total_space_allocated += sizeofobject + sizeof(large_pool_hdr) + | 
| 397 | 42.1k |                                 ALIGN_SIZE - 1; | 
| 398 |  |  | 
| 399 |  |   /* Success, initialize the new pool header and add to list */ | 
| 400 | 42.1k |   hdr_ptr->next = mem->large_list[pool_id]; | 
| 401 |  |   /* We maintain space counts in each pool header for statistical purposes, | 
| 402 |  |    * even though they are not needed for allocation. | 
| 403 |  |    */ | 
| 404 | 42.1k |   hdr_ptr->bytes_used = sizeofobject; | 
| 405 | 42.1k |   hdr_ptr->bytes_left = 0; | 
| 406 | 42.1k |   mem->large_list[pool_id] = hdr_ptr; | 
| 407 |  |  | 
| 408 | 42.1k |   data_ptr = (char *)hdr_ptr; /* point to first data byte in pool... */ | 
| 409 | 42.1k |   data_ptr += sizeof(small_pool_hdr); /* ...by skipping the header... */ | 
| 410 | 42.1k |   if ((size_t)data_ptr % ALIGN_SIZE) /* ...and adjust for alignment */ | 
| 411 | 42.1k |     data_ptr += ALIGN_SIZE - (size_t)data_ptr % ALIGN_SIZE; | 
| 412 |  |  | 
| 413 | 42.1k |   return (void *)data_ptr; | 
| 414 | 42.1k | } | 
| 415 |  |  | 
| 416 |  |  | 
| 417 |  | /* | 
| 418 |  |  * Creation of 2-D sample arrays. | 
| 419 |  |  * | 
| 420 |  |  * To minimize allocation overhead and to allow I/O of large contiguous | 
| 421 |  |  * blocks, we allocate the sample rows in groups of as many rows as possible | 
| 422 |  |  * without exceeding MAX_ALLOC_CHUNK total bytes per allocation request. | 
| 423 |  |  * NB: the virtual array control routines, later in this file, know about | 
| 424 |  |  * this chunking of rows.  The rowsperchunk value is left in the mem manager | 
| 425 |  |  * object so that it can be saved away if this sarray is the workspace for | 
| 426 |  |  * a virtual array. | 
| 427 |  |  * | 
| 428 |  |  * Since we are often upsampling with a factor 2, we align the size (not | 
| 429 |  |  * the start) to 2 * ALIGN_SIZE so that the upsampling routines don't have | 
| 430 |  |  * to be as careful about size. | 
| 431 |  |  */ | 
| 432 |  |  | 
METHODDEF(JSAMPARRAY)
alloc_sarray(j_common_ptr cinfo, int pool_id, JDIMENSION samplesperrow,
             JDIMENSION numrows)
/* Allocate a 2-D sample array */
/* Returns an array of numrows row pointers (a small object) whose rows
 * point into one or more large chunks of samplesperrow samples each.
 * Rows are bunched into as few large allocations as possible; the
 * chunking factor is left in mem->last_rowsperchunk for the virtual
 * array code.  Never returns NULL.
 */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  JSAMPARRAY result;
  JSAMPROW workspace;
  JDIMENSION rowsperchunk, currow, i;
  long ltemp;

  /* Make sure each row is properly aligned */
  if ((ALIGN_SIZE % sizeof(JSAMPLE)) != 0)
    out_of_memory(cinfo, 5);    /* safety check */

  if (samplesperrow > MAX_ALLOC_CHUNK) {
    /* This prevents overflow/wrap-around in round_up_pow2() if
       samplesperrow is close to SIZE_MAX. */
    out_of_memory(cinfo, 9);
  }
  /* Row width is padded to 2*ALIGN_SIZE so 2x upsampling routines may
   * write slightly past the nominal row end without special-casing.
   */
  samplesperrow = (JDIMENSION)round_up_pow2(samplesperrow, (2 * ALIGN_SIZE) /
                                                           sizeof(JSAMPLE));

  /* Calculate max # of rows allowed in one allocation chunk */
  ltemp = (MAX_ALLOC_CHUNK - sizeof(large_pool_hdr)) /
          ((long)samplesperrow * sizeof(JSAMPLE));
  if (ltemp <= 0)
    ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);
  if (ltemp < (long)numrows)
    rowsperchunk = (JDIMENSION)ltemp;
  else
    rowsperchunk = numrows;
  mem->last_rowsperchunk = rowsperchunk;

  /* Get space for row pointers (small object) */
  result = (JSAMPARRAY)alloc_small(cinfo, pool_id,
                                   (size_t)(numrows * sizeof(JSAMPROW)));

  /* Get the rows themselves (large objects) */
  currow = 0;
  while (currow < numrows) {
    /* Last chunk may cover fewer rows than the full chunking factor */
    rowsperchunk = MIN(rowsperchunk, numrows - currow);
    workspace = (JSAMPROW)alloc_large(cinfo, pool_id,
      (size_t)((size_t)rowsperchunk * (size_t)samplesperrow *
               sizeof(JSAMPLE)));
    for (i = rowsperchunk; i > 0; i--) {
      result[currow++] = workspace;
      workspace += samplesperrow;
    }
  }

  return result;
}
| 486 |  |  | 
| 487 |  |  | 
| 488 |  | /* | 
| 489 |  |  * Creation of 2-D coefficient-block arrays. | 
| 490 |  |  * This is essentially the same as the code for sample arrays, above. | 
| 491 |  |  */ | 
| 492 |  |  | 
METHODDEF(JBLOCKARRAY)
alloc_barray(j_common_ptr cinfo, int pool_id, JDIMENSION blocksperrow,
             JDIMENSION numrows)
/* Allocate a 2-D coefficient-block array */
/* Same scheme as alloc_sarray: numrows row pointers (small object) into
 * chunked large allocations of blocksperrow JBLOCKs per row.  No width
 * rounding is needed here because the check below guarantees each
 * JBLOCK-sized row element is already a multiple of ALIGN_SIZE.
 * Never returns NULL.
 */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  JBLOCKARRAY result;
  JBLOCKROW workspace;
  JDIMENSION rowsperchunk, currow, i;
  long ltemp;

  /* Make sure each row is properly aligned */
  if ((sizeof(JBLOCK) % ALIGN_SIZE) != 0)
    out_of_memory(cinfo, 6);    /* safety check */

  /* Calculate max # of rows allowed in one allocation chunk */
  ltemp = (MAX_ALLOC_CHUNK - sizeof(large_pool_hdr)) /
          ((long)blocksperrow * sizeof(JBLOCK));
  if (ltemp <= 0)
    ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);
  if (ltemp < (long)numrows)
    rowsperchunk = (JDIMENSION)ltemp;
  else
    rowsperchunk = numrows;
  mem->last_rowsperchunk = rowsperchunk;

  /* Get space for row pointers (small object) */
  result = (JBLOCKARRAY)alloc_small(cinfo, pool_id,
                                    (size_t)(numrows * sizeof(JBLOCKROW)));

  /* Get the rows themselves (large objects) */
  currow = 0;
  while (currow < numrows) {
    /* Last chunk may cover fewer rows than the full chunking factor */
    rowsperchunk = MIN(rowsperchunk, numrows - currow);
    workspace = (JBLOCKROW)alloc_large(cinfo, pool_id,
        (size_t)((size_t)rowsperchunk * (size_t)blocksperrow *
                  sizeof(JBLOCK)));
    for (i = rowsperchunk; i > 0; i--) {
      result[currow++] = workspace;
      workspace += blocksperrow;
    }
  }

  return result;
}
| 538 |  |  | 
| 539 |  |  | 
| 540 |  | /* | 
| 541 |  |  * About virtual array management: | 
| 542 |  |  * | 
| 543 |  |  * The above "normal" array routines are only used to allocate strip buffers | 
| 544 |  |  * (as wide as the image, but just a few rows high).  Full-image-sized buffers | 
| 545 |  |  * are handled as "virtual" arrays.  The array is still accessed a strip at a | 
| 546 |  |  * time, but the memory manager must save the whole array for repeated | 
| 547 |  |  * accesses.  The intended implementation is that there is a strip buffer in | 
| 548 |  |  * memory (as high as is possible given the desired memory limit), plus a | 
| 549 |  |  * backing file that holds the rest of the array. | 
| 550 |  |  * | 
| 551 |  |  * The request_virt_array routines are told the total size of the image and | 
| 552 |  |  * the maximum number of rows that will be accessed at once.  The in-memory | 
| 553 |  |  * buffer must be at least as large as the maxaccess value. | 
| 554 |  |  * | 
| 555 |  |  * The request routines create control blocks but not the in-memory buffers. | 
| 556 |  |  * That is postponed until realize_virt_arrays is called.  At that time the | 
| 557 |  |  * total amount of space needed is known (approximately, anyway), so free | 
| 558 |  |  * memory can be divided up fairly. | 
| 559 |  |  * | 
| 560 |  |  * The access_virt_array routines are responsible for making a specific strip | 
| 561 |  |  * area accessible (after reading or writing the backing file, if necessary). | 
| 562 |  |  * Note that the access routines are told whether the caller intends to modify | 
| 563 |  |  * the accessed strip; during a read-only pass this saves having to rewrite | 
| 564 |  |  * data to disk.  The access routines are also responsible for pre-zeroing | 
| 565 |  |  * any newly accessed rows, if pre-zeroing was requested. | 
| 566 |  |  * | 
| 567 |  |  * In current usage, the access requests are usually for nonoverlapping | 
| 568 |  |  * strips; that is, successive access start_row numbers differ by exactly | 
| 569 |  |  * num_rows = maxaccess.  This means we can get good performance with simple | 
| 570 |  |  * buffer dump/reload logic, by making the in-memory buffer be a multiple | 
| 571 |  |  * of the access height; then there will never be accesses across bufferload | 
| 572 |  |  * boundaries.  The code will still work with overlapping access requests, | 
| 573 |  |  * but it doesn't handle bufferload overlaps very efficiently. | 
| 574 |  |  */ | 
| 575 |  |  | 
| 576 |  |  | 
| 577 |  | METHODDEF(jvirt_sarray_ptr) | 
| 578 |  | request_virt_sarray(j_common_ptr cinfo, int pool_id, boolean pre_zero, | 
| 579 |  |                     JDIMENSION samplesperrow, JDIMENSION numrows, | 
| 580 |  |                     JDIMENSION maxaccess) | 
| 581 |  | /* Request a virtual 2-D sample array */ | 
| 582 | 2.77k | { | 
| 583 | 2.77k |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem; | 
| 584 | 2.77k |   jvirt_sarray_ptr result; | 
| 585 |  |  | 
| 586 |  |   /* Only IMAGE-lifetime virtual arrays are currently supported */ | 
| 587 | 2.77k |   if (pool_id != JPOOL_IMAGE) | 
| 588 | 0 |     ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ | 
| 589 |  |  | 
| 590 |  |   /* get control block */ | 
| 591 | 2.77k |   result = (jvirt_sarray_ptr)alloc_small(cinfo, pool_id, | 
| 592 | 2.77k |                                          sizeof(struct jvirt_sarray_control)); | 
| 593 |  |  | 
| 594 | 2.77k |   result->mem_buffer = NULL;    /* marks array not yet realized */ | 
| 595 | 2.77k |   result->rows_in_array = numrows; | 
| 596 | 2.77k |   result->samplesperrow = samplesperrow; | 
| 597 | 2.77k |   result->maxaccess = maxaccess; | 
| 598 | 2.77k |   result->pre_zero = pre_zero; | 
| 599 | 2.77k |   result->b_s_open = FALSE;     /* no associated backing-store object */ | 
| 600 | 2.77k |   result->next = mem->virt_sarray_list; /* add to list of virtual arrays */ | 
| 601 | 2.77k |   mem->virt_sarray_list = result; | 
| 602 |  |  | 
| 603 | 2.77k |   return result; | 
| 604 | 2.77k | } | 
| 605 |  |  | 
| 606 |  |  | 
| 607 |  | METHODDEF(jvirt_barray_ptr) | 
| 608 |  | request_virt_barray(j_common_ptr cinfo, int pool_id, boolean pre_zero, | 
| 609 |  |                     JDIMENSION blocksperrow, JDIMENSION numrows, | 
| 610 |  |                     JDIMENSION maxaccess) | 
| 611 |  | /* Request a virtual 2-D coefficient-block array */ | 
| 612 | 6.97k | { | 
| 613 | 6.97k |   my_mem_ptr mem = (my_mem_ptr)cinfo->mem; | 
| 614 | 6.97k |   jvirt_barray_ptr result; | 
| 615 |  |  | 
| 616 |  |   /* Only IMAGE-lifetime virtual arrays are currently supported */ | 
| 617 | 6.97k |   if (pool_id != JPOOL_IMAGE) | 
| 618 | 0 |     ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */ | 
| 619 |  |  | 
| 620 |  |   /* get control block */ | 
| 621 | 6.97k |   result = (jvirt_barray_ptr)alloc_small(cinfo, pool_id, | 
| 622 | 6.97k |                                          sizeof(struct jvirt_barray_control)); | 
| 623 |  |  | 
| 624 | 6.97k |   result->mem_buffer = NULL;    /* marks array not yet realized */ | 
| 625 | 6.97k |   result->rows_in_array = numrows; | 
| 626 | 6.97k |   result->blocksperrow = blocksperrow; | 
| 627 | 6.97k |   result->maxaccess = maxaccess; | 
| 628 | 6.97k |   result->pre_zero = pre_zero; | 
| 629 | 6.97k |   result->b_s_open = FALSE;     /* no associated backing-store object */ | 
| 630 | 6.97k |   result->next = mem->virt_barray_list; /* add to list of virtual arrays */ | 
| 631 | 6.97k |   mem->virt_barray_list = result; | 
| 632 |  |  | 
| 633 | 6.97k |   return result; | 
| 634 | 6.97k | } | 
| 635 |  |  | 
| 636 |  |  | 
METHODDEF(void)
realize_virt_arrays(j_common_ptr cinfo)
/* Allocate the in-memory buffers for any unrealized virtual arrays.
 * Decides, per array, whether the whole array fits in memory or whether a
 * backing-store file must be opened and only a window kept in memory.
 */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  size_t space_per_minheight, maximum_space, avail_mem;
  size_t minheights, max_minheights;
  jvirt_sarray_ptr sptr;
  jvirt_barray_ptr bptr;

  /* Compute the minimum space needed (maxaccess rows in each buffer)
   * and the maximum space needed (full image height in each buffer).
   * These may be of use to the system-dependent jpeg_mem_available routine.
   */
  space_per_minheight = 0;
  maximum_space = 0;
  for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
    if (sptr->mem_buffer == NULL) { /* if not realized yet */
      size_t new_space = (long)sptr->rows_in_array *
                         (long)sptr->samplesperrow * sizeof(JSAMPLE);

      space_per_minheight += (long)sptr->maxaccess *
                             (long)sptr->samplesperrow * sizeof(JSAMPLE);
      /* Guard against size_t overflow of the running total before adding */
      if (SIZE_MAX - maximum_space < new_space)
        out_of_memory(cinfo, 10);
      maximum_space += new_space;
    }
  }
  for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
    if (bptr->mem_buffer == NULL) { /* if not realized yet */
      size_t new_space = (long)bptr->rows_in_array *
                         (long)bptr->blocksperrow * sizeof(JBLOCK);

      space_per_minheight += (long)bptr->maxaccess *
                             (long)bptr->blocksperrow * sizeof(JBLOCK);
      /* Guard against size_t overflow of the running total before adding */
      if (SIZE_MAX - maximum_space < new_space)
        out_of_memory(cinfo, 11);
      maximum_space += new_space;
    }
  }

  /* space_per_minheight is unsigned, so <= 0 is effectively == 0 here */
  if (space_per_minheight <= 0)
    return;                     /* no unrealized arrays, no work */

  /* Determine amount of memory to actually use; this is system-dependent. */
  avail_mem = jpeg_mem_available(cinfo, space_per_minheight, maximum_space,
                                 mem->total_space_allocated);

  /* If the maximum space needed is available, make all the buffers full
   * height; otherwise parcel it out with the same number of minheights
   * in each buffer.
   */
  if (avail_mem >= maximum_space)
    max_minheights = 1000000000L; /* effectively "unlimited" minheights */
  else {
    max_minheights = avail_mem / space_per_minheight;
    /* If there doesn't seem to be enough space, try to get the minimum
     * anyway.  This allows a "stub" implementation of jpeg_mem_available().
     */
    if (max_minheights <= 0)
      max_minheights = 1;
  }

  /* Allocate the in-memory buffers and initialize backing store as needed. */

  for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
    if (sptr->mem_buffer == NULL) { /* if not realized yet */
      /* minheights = ceil(rows_in_array / maxaccess) */
      minheights = ((long)sptr->rows_in_array - 1L) / sptr->maxaccess + 1L;
      if (minheights <= max_minheights) {
        /* This buffer fits in memory */
        sptr->rows_in_mem = sptr->rows_in_array;
      } else {
        /* It doesn't fit in memory, create backing store. */
        sptr->rows_in_mem = (JDIMENSION)(max_minheights * sptr->maxaccess);
        jpeg_open_backing_store(cinfo, &sptr->b_s_info,
                                (long)sptr->rows_in_array *
                                (long)sptr->samplesperrow *
                                (long)sizeof(JSAMPLE));
        sptr->b_s_open = TRUE;
      }
      sptr->mem_buffer = alloc_sarray(cinfo, JPOOL_IMAGE,
                                      sptr->samplesperrow, sptr->rows_in_mem);
      /* alloc_sarray records its chunking in last_rowsperchunk; remember it
       * so the backing-store I/O routines can walk the chunks later.
       */
      sptr->rowsperchunk = mem->last_rowsperchunk;
      sptr->cur_start_row = 0;
      sptr->first_undef_row = 0;
      sptr->dirty = FALSE;
    }
  }

  for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
    if (bptr->mem_buffer == NULL) { /* if not realized yet */
      /* minheights = ceil(rows_in_array / maxaccess) */
      minheights = ((long)bptr->rows_in_array - 1L) / bptr->maxaccess + 1L;
      if (minheights <= max_minheights) {
        /* This buffer fits in memory */
        bptr->rows_in_mem = bptr->rows_in_array;
      } else {
        /* It doesn't fit in memory, create backing store. */
        bptr->rows_in_mem = (JDIMENSION)(max_minheights * bptr->maxaccess);
        jpeg_open_backing_store(cinfo, &bptr->b_s_info,
                                (long)bptr->rows_in_array *
                                (long)bptr->blocksperrow *
                                (long)sizeof(JBLOCK));
        bptr->b_s_open = TRUE;
      }
      bptr->mem_buffer = alloc_barray(cinfo, JPOOL_IMAGE,
                                      bptr->blocksperrow, bptr->rows_in_mem);
      /* Remember the chunking chosen by alloc_barray (see note above) */
      bptr->rowsperchunk = mem->last_rowsperchunk;
      bptr->cur_start_row = 0;
      bptr->first_undef_row = 0;
      bptr->dirty = FALSE;
    }
  }
}
| 750 |  |  | 
| 751 |  |  | 
LOCAL(void)
do_sarray_io(j_common_ptr cinfo, jvirt_sarray_ptr ptr, boolean writing)
/* Do backing store read or write of a virtual sample array.
 * Transfers the in-memory window (starting at cur_start_row) to/from the
 * backing file, one allocation chunk at a time.
 */
{
  long bytesperrow, file_offset, byte_count, rows, thisrow, i;

  bytesperrow = (long)ptr->samplesperrow * sizeof(JSAMPLE);
  /* File offset of the first row of the current in-memory window */
  file_offset = ptr->cur_start_row * bytesperrow;
  /* Loop to read or write each allocation chunk in mem_buffer */
  for (i = 0; i < (long)ptr->rows_in_mem; i += ptr->rowsperchunk) {
    /* One chunk, but check for short chunk at end of buffer */
    rows = MIN((long)ptr->rowsperchunk, (long)ptr->rows_in_mem - i);
    /* Transfer no more than is currently defined */
    thisrow = (long)ptr->cur_start_row + i;
    rows = MIN(rows, (long)ptr->first_undef_row - thisrow);
    /* Transfer no more than fits in file */
    rows = MIN(rows, (long)ptr->rows_in_array - thisrow);
    if (rows <= 0)              /* this chunk might be past end of file! */
      break;
    byte_count = rows * bytesperrow;
    /* mem_buffer[i] is the start of this chunk; chunks are contiguous
     * runs of rowsperchunk rows, so one call moves the whole chunk.
     */
    if (writing)
      (*ptr->b_s_info.write_backing_store) (cinfo, &ptr->b_s_info,
                                            (void *)ptr->mem_buffer[i],
                                            file_offset, byte_count);
    else
      (*ptr->b_s_info.read_backing_store) (cinfo, &ptr->b_s_info,
                                           (void *)ptr->mem_buffer[i],
                                           file_offset, byte_count);
    file_offset += byte_count;
  }
}
| 783 |  |  | 
| 784 |  |  | 
LOCAL(void)
do_barray_io(j_common_ptr cinfo, jvirt_barray_ptr ptr, boolean writing)
/* Do backing store read or write of a virtual coefficient-block array.
 * Mirror of do_sarray_io, but rows are JBLOCK rows rather than sample rows.
 */
{
  long bytesperrow, file_offset, byte_count, rows, thisrow, i;

  bytesperrow = (long)ptr->blocksperrow * sizeof(JBLOCK);
  /* File offset of the first row of the current in-memory window */
  file_offset = ptr->cur_start_row * bytesperrow;
  /* Loop to read or write each allocation chunk in mem_buffer */
  for (i = 0; i < (long)ptr->rows_in_mem; i += ptr->rowsperchunk) {
    /* One chunk, but check for short chunk at end of buffer */
    rows = MIN((long)ptr->rowsperchunk, (long)ptr->rows_in_mem - i);
    /* Transfer no more than is currently defined */
    thisrow = (long)ptr->cur_start_row + i;
    rows = MIN(rows, (long)ptr->first_undef_row - thisrow);
    /* Transfer no more than fits in file */
    rows = MIN(rows, (long)ptr->rows_in_array - thisrow);
    if (rows <= 0)              /* this chunk might be past end of file! */
      break;
    byte_count = rows * bytesperrow;
    /* mem_buffer[i] is the start of this chunk; chunks are contiguous
     * runs of rowsperchunk rows, so one call moves the whole chunk.
     */
    if (writing)
      (*ptr->b_s_info.write_backing_store) (cinfo, &ptr->b_s_info,
                                            (void *)ptr->mem_buffer[i],
                                            file_offset, byte_count);
    else
      (*ptr->b_s_info.read_backing_store) (cinfo, &ptr->b_s_info,
                                           (void *)ptr->mem_buffer[i],
                                           file_offset, byte_count);
    file_offset += byte_count;
  }
}
| 816 |  |  | 
| 817 |  |  | 
METHODDEF(JSAMPARRAY)
access_virt_sarray(j_common_ptr cinfo, jvirt_sarray_ptr ptr,
                   JDIMENSION start_row, JDIMENSION num_rows, boolean writable)
/* Access the part of a virtual sample array starting at start_row */
/* and extending for num_rows rows.  writable is true if  */
/* caller intends to modify the accessed area. */
/* Returns a pointer into mem_buffer positioned so that index 0 of the */
/* returned JSAMPARRAY corresponds to start_row. */
{
  JDIMENSION end_row = start_row + num_rows; /* one past last requested row */
  JDIMENSION undef_row;

  /* debugging check: request must lie within the array, must not exceed the
   * maxaccess promised at request time, and the array must be realized
   */
  if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess ||
      ptr->mem_buffer == NULL)
    ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);

  /* Make the desired part of the virtual array accessible */
  if (start_row < ptr->cur_start_row ||
      end_row > ptr->cur_start_row + ptr->rows_in_mem) {
    /* Requested rows are outside the in-memory window; this is only legal
     * if a backing store exists to page from.
     */
    if (!ptr->b_s_open)
      ERREXIT(cinfo, JERR_VIRTUAL_BUG);
    /* Flush old buffer contents if necessary */
    if (ptr->dirty) {
      do_sarray_io(cinfo, ptr, TRUE);
      ptr->dirty = FALSE;
    }
    /* Decide what part of virtual array to access.
     * Algorithm: if target address > current window, assume forward scan,
     * load starting at target address.  If target address < current window,
     * assume backward scan, load so that target area is top of window.
     * Note that when switching from forward write to forward read, will have
     * start_row = 0, so the limiting case applies and we load from 0 anyway.
     */
    if (start_row > ptr->cur_start_row) {
      ptr->cur_start_row = start_row;
    } else {
      /* use long arithmetic here to avoid overflow & unsigned problems */
      long ltemp;

      ltemp = (long)end_row - (long)ptr->rows_in_mem;
      if (ltemp < 0)
        ltemp = 0;              /* don't fall off front end of file */
      ptr->cur_start_row = (JDIMENSION)ltemp;
    }
    /* Read in the selected part of the array.
     * During the initial write pass, we will do no actual read
     * because the selected part is all undefined.
     */
    do_sarray_io(cinfo, ptr, FALSE);
  }
  /* Ensure the accessed part of the array is defined; prezero if needed.
   * To improve locality of access, we only prezero the part of the array
   * that the caller is about to access, not the entire in-memory array.
   */
  if (ptr->first_undef_row < end_row) {
    if (ptr->first_undef_row < start_row) {
      if (writable)             /* writer skipped over a section of array */
        ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
      undef_row = start_row;    /* but reader is allowed to read ahead */
    } else {
      undef_row = ptr->first_undef_row;
    }
    if (writable)
      ptr->first_undef_row = end_row; /* caller promises to define these rows */
    if (ptr->pre_zero) {
      size_t bytesperrow = (size_t)ptr->samplesperrow * sizeof(JSAMPLE);
      undef_row -= ptr->cur_start_row; /* make indexes relative to buffer */
      end_row -= ptr->cur_start_row;
      while (undef_row < end_row) {
        jzero_far((void *)ptr->mem_buffer[undef_row], bytesperrow);
        undef_row++;
      }
    } else {
      if (!writable)            /* reader looking at undefined data */
        ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
    }
  }
  /* Flag the buffer dirty if caller will write in it */
  if (writable)
    ptr->dirty = TRUE;
  /* Return address of proper part of the buffer */
  return ptr->mem_buffer + (start_row - ptr->cur_start_row);
}
| 900 |  |  | 
| 901 |  |  | 
METHODDEF(JBLOCKARRAY)
access_virt_barray(j_common_ptr cinfo, jvirt_barray_ptr ptr,
                   JDIMENSION start_row, JDIMENSION num_rows, boolean writable)
/* Access the part of a virtual block array starting at start_row */
/* and extending for num_rows rows.  writable is true if  */
/* caller intends to modify the accessed area. */
/* Mirror of access_virt_sarray, operating on JBLOCK rows.  Returns a */
/* pointer into mem_buffer positioned so index 0 corresponds to start_row. */
{
  JDIMENSION end_row = start_row + num_rows; /* one past last requested row */
  JDIMENSION undef_row;

  /* debugging check: request must lie within the array, must not exceed the
   * maxaccess promised at request time, and the array must be realized
   */
  if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess ||
      ptr->mem_buffer == NULL)
    ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);

  /* Make the desired part of the virtual array accessible */
  if (start_row < ptr->cur_start_row ||
      end_row > ptr->cur_start_row + ptr->rows_in_mem) {
    /* Requested rows are outside the in-memory window; this is only legal
     * if a backing store exists to page from.
     */
    if (!ptr->b_s_open)
      ERREXIT(cinfo, JERR_VIRTUAL_BUG);
    /* Flush old buffer contents if necessary */
    if (ptr->dirty) {
      do_barray_io(cinfo, ptr, TRUE);
      ptr->dirty = FALSE;
    }
    /* Decide what part of virtual array to access.
     * Algorithm: if target address > current window, assume forward scan,
     * load starting at target address.  If target address < current window,
     * assume backward scan, load so that target area is top of window.
     * Note that when switching from forward write to forward read, will have
     * start_row = 0, so the limiting case applies and we load from 0 anyway.
     */
    if (start_row > ptr->cur_start_row) {
      ptr->cur_start_row = start_row;
    } else {
      /* use long arithmetic here to avoid overflow & unsigned problems */
      long ltemp;

      ltemp = (long)end_row - (long)ptr->rows_in_mem;
      if (ltemp < 0)
        ltemp = 0;              /* don't fall off front end of file */
      ptr->cur_start_row = (JDIMENSION)ltemp;
    }
    /* Read in the selected part of the array.
     * During the initial write pass, we will do no actual read
     * because the selected part is all undefined.
     */
    do_barray_io(cinfo, ptr, FALSE);
  }
  /* Ensure the accessed part of the array is defined; prezero if needed.
   * To improve locality of access, we only prezero the part of the array
   * that the caller is about to access, not the entire in-memory array.
   */
  if (ptr->first_undef_row < end_row) {
    if (ptr->first_undef_row < start_row) {
      if (writable)             /* writer skipped over a section of array */
        ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
      undef_row = start_row;    /* but reader is allowed to read ahead */
    } else {
      undef_row = ptr->first_undef_row;
    }
    if (writable)
      ptr->first_undef_row = end_row; /* caller promises to define these rows */
    if (ptr->pre_zero) {
      size_t bytesperrow = (size_t)ptr->blocksperrow * sizeof(JBLOCK);
      undef_row -= ptr->cur_start_row; /* make indexes relative to buffer */
      end_row -= ptr->cur_start_row;
      while (undef_row < end_row) {
        jzero_far((void *)ptr->mem_buffer[undef_row], bytesperrow);
        undef_row++;
      }
    } else {
      if (!writable)            /* reader looking at undefined data */
        ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
    }
  }
  /* Flag the buffer dirty if caller will write in it */
  if (writable)
    ptr->dirty = TRUE;
  /* Return address of proper part of the buffer */
  return ptr->mem_buffer + (start_row - ptr->cur_start_row);
}
| 984 |  |  | 
| 985 |  |  | 
| 986 |  | /* | 
| 987 |  |  * Release all objects belonging to a specified pool. | 
| 988 |  |  */ | 
| 989 |  |  | 
METHODDEF(void)
free_pool(j_common_ptr cinfo, int pool_id)
/* Release all objects belonging to the specified pool: for the IMAGE pool,
 * first close any virtual-array backing stores, then free all large-object
 * chunks, then all small-object chunks.
 */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  small_pool_ptr shdr_ptr;
  large_pool_ptr lhdr_ptr;
  size_t space_freed;

  if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */

#ifdef MEM_STATS
  if (cinfo->err->trace_level > 1)
    print_mem_stats(cinfo, pool_id); /* print pool's memory usage statistics */
#endif

  /* If freeing IMAGE pool, close any virtual arrays first.
   * The control blocks themselves live in the pool and are freed below.
   */
  if (pool_id == JPOOL_IMAGE) {
    jvirt_sarray_ptr sptr;
    jvirt_barray_ptr bptr;

    for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
      if (sptr->b_s_open) {     /* there may be no backing store */
        sptr->b_s_open = FALSE; /* prevent recursive close if error */
        (*sptr->b_s_info.close_backing_store) (cinfo, &sptr->b_s_info);
      }
    }
    mem->virt_sarray_list = NULL;
    for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
      if (bptr->b_s_open) {     /* there may be no backing store */
        bptr->b_s_open = FALSE; /* prevent recursive close if error */
        (*bptr->b_s_info.close_backing_store) (cinfo, &bptr->b_s_info);
      }
    }
    mem->virt_barray_list = NULL;
  }

  /* Release large objects.  Detach the list head first so the pool is
   * consistent even if a free below errors out.
   */
  lhdr_ptr = mem->large_list[pool_id];
  mem->large_list[pool_id] = NULL;

  while (lhdr_ptr != NULL) {
    large_pool_ptr next_lhdr_ptr = lhdr_ptr->next; /* save before freeing */
    /* space_freed mirrors the accounting done at allocation time */
    space_freed = lhdr_ptr->bytes_used +
                  lhdr_ptr->bytes_left +
                  sizeof(large_pool_hdr) + ALIGN_SIZE - 1;
    jpeg_free_large(cinfo, (void *)lhdr_ptr, space_freed);
    mem->total_space_allocated -= space_freed;
    lhdr_ptr = next_lhdr_ptr;
  }

  /* Release small objects */
  shdr_ptr = mem->small_list[pool_id];
  mem->small_list[pool_id] = NULL;

  while (shdr_ptr != NULL) {
    small_pool_ptr next_shdr_ptr = shdr_ptr->next; /* save before freeing */
    space_freed = shdr_ptr->bytes_used + shdr_ptr->bytes_left +
                  sizeof(small_pool_hdr) + ALIGN_SIZE - 1;
    jpeg_free_small(cinfo, (void *)shdr_ptr, space_freed);
    mem->total_space_allocated -= space_freed;
    shdr_ptr = next_shdr_ptr;
  }
}
| 1054 |  |  | 
| 1055 |  |  | 
| 1056 |  | /* | 
| 1057 |  |  * Close up shop entirely. | 
| 1058 |  |  * Note that this cannot be called unless cinfo->mem is non-NULL. | 
| 1059 |  |  */ | 
| 1060 |  |  | 
| 1061 |  | METHODDEF(void) | 
| 1062 |  | self_destruct(j_common_ptr cinfo) | 
| 1063 | 12.3k | { | 
| 1064 | 12.3k |   int pool; | 
| 1065 |  |  | 
| 1066 |  |   /* Close all backing store, release all memory. | 
| 1067 |  |    * Releasing pools in reverse order might help avoid fragmentation | 
| 1068 |  |    * with some (brain-damaged) malloc libraries. | 
| 1069 |  |    */ | 
| 1070 | 37.1k |   for (pool = JPOOL_NUMPOOLS - 1; pool >= JPOOL_PERMANENT; pool--) { | 
| 1071 | 24.7k |     free_pool(cinfo, pool); | 
| 1072 | 24.7k |   } | 
| 1073 |  |  | 
| 1074 |  |   /* Release the memory manager control block too. */ | 
| 1075 | 12.3k |   jpeg_free_small(cinfo, (void *)cinfo->mem, sizeof(my_memory_mgr)); | 
| 1076 | 12.3k |   cinfo->mem = NULL;            /* ensures I will be called only once */ | 
| 1077 |  |  | 
| 1078 | 12.3k |   jpeg_mem_term(cinfo);         /* system-dependent cleanup */ | 
| 1079 | 12.3k | } | 
| 1080 |  |  | 
| 1081 |  |  | 
| 1082 |  | /* | 
| 1083 |  |  * Memory manager initialization. | 
| 1084 |  |  * When this is called, only the error manager pointer is valid in cinfo! | 
| 1085 |  |  */ | 
| 1086 |  |  | 
GLOBAL(void)
jinit_memory_mgr(j_common_ptr cinfo)
/* Memory manager initialization: sanity-check compile-time configuration,
 * allocate and populate the my_memory_mgr control block, and apply the
 * optional JPEGMEM environment override for the memory limit.
 */
{
  my_mem_ptr mem;
  long max_to_use;
  int pool;
  size_t test_mac;

  cinfo->mem = NULL;            /* for safety if init fails */

  /* Check for configuration errors.
   * sizeof(ALIGN_TYPE) should be a power of 2; otherwise, it probably
   * doesn't reflect any real hardware alignment requirement.
   * The test is a little tricky: for X>0, X and X-1 have no one-bits
   * in common if and only if X is a power of 2, ie has only one one-bit.
   * Some compilers may give an "unreachable code" warning here; ignore it.
   */
  if ((ALIGN_SIZE & (ALIGN_SIZE - 1)) != 0)
    ERREXIT(cinfo, JERR_BAD_ALIGN_TYPE);
  /* MAX_ALLOC_CHUNK must be representable as type size_t, and must be
   * a multiple of ALIGN_SIZE.
   * Again, an "unreachable code" warning may be ignored here.
   * But a "constant too large" warning means you need to fix MAX_ALLOC_CHUNK.
   */
  test_mac = (size_t)MAX_ALLOC_CHUNK;
  if ((long)test_mac != MAX_ALLOC_CHUNK ||
      (MAX_ALLOC_CHUNK % ALIGN_SIZE) != 0)
    ERREXIT(cinfo, JERR_BAD_ALLOC_CHUNK);

  max_to_use = jpeg_mem_init(cinfo); /* system-dependent initialization */

  /* Attempt to allocate memory manager's control block */
  mem = (my_mem_ptr)jpeg_get_small(cinfo, sizeof(my_memory_mgr));

  if (mem == NULL) {
    jpeg_mem_term(cinfo);       /* system-dependent cleanup */
    ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 0);
  }

  /* OK, fill in the method pointers */
  mem->pub.alloc_small = alloc_small;
  mem->pub.alloc_large = alloc_large;
  mem->pub.alloc_sarray = alloc_sarray;
  mem->pub.alloc_barray = alloc_barray;
  mem->pub.request_virt_sarray = request_virt_sarray;
  mem->pub.request_virt_barray = request_virt_barray;
  mem->pub.realize_virt_arrays = realize_virt_arrays;
  mem->pub.access_virt_sarray = access_virt_sarray;
  mem->pub.access_virt_barray = access_virt_barray;
  mem->pub.free_pool = free_pool;
  mem->pub.self_destruct = self_destruct;

  /* Make MAX_ALLOC_CHUNK accessible to other modules */
  mem->pub.max_alloc_chunk = MAX_ALLOC_CHUNK;

  /* Initialize working state */
  mem->pub.max_memory_to_use = max_to_use;

  /* Empty all pools and virtual-array lists */
  for (pool = JPOOL_NUMPOOLS - 1; pool >= JPOOL_PERMANENT; pool--) {
    mem->small_list[pool] = NULL;
    mem->large_list[pool] = NULL;
  }
  mem->virt_sarray_list = NULL;
  mem->virt_barray_list = NULL;

  /* Account for the control block itself in the running total */
  mem->total_space_allocated = sizeof(my_memory_mgr);

  /* Declare ourselves open for business */
  cinfo->mem = &mem->pub;

  /* Check for an environment variable JPEGMEM; if found, override the
   * default max_memory setting from jpeg_mem_init.  Note that the
   * surrounding application may again override this value.
   * If your system doesn't support getenv(), define NO_GETENV to disable
   * this feature.
   */
#ifndef NO_GETENV
  {
    char *memenv;

    if ((memenv = getenv("JPEGMEM")) != NULL) {
      char ch = 'x';            /* stays 'x' if no suffix char was parsed */

      /* Accept "<number>" (thousands of bytes) or "<number>m"/"<number>M"
       * (millions of bytes); the value is scaled by 1000 below either way.
       */
      if (sscanf(memenv, "%ld%c", &max_to_use, &ch) > 0) {
        if (ch == 'm' || ch == 'M')
          max_to_use *= 1000L;
        mem->pub.max_memory_to_use = max_to_use * 1000L;
      }
    }
  }
#endif

}
| 1179 | 12.3k | } |