/src/postgres/src/backend/utils/mmgr/generation.c
Line | Count | Source |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * generation.c |
4 | | * Generational allocator definitions. |
5 | | * |
6 | | * Generation is a custom MemoryContext implementation designed for cases of |
7 | | * chunks with similar lifespan. |
8 | | * |
9 | | * Portions Copyright (c) 2017-2025, PostgreSQL Global Development Group |
10 | | * |
11 | | * IDENTIFICATION |
12 | | * src/backend/utils/mmgr/generation.c |
13 | | * |
14 | | * |
15 | | * This memory context is based on the assumption that the chunks are freed |
16 | | * roughly in the same order as they were allocated (FIFO), or in groups with |
17 | | * similar lifespan (generations - hence the name of the context). This is |
18 | | * typical for various queue-like use cases, i.e. when tuples are constructed, |
19 | | * processed and then thrown away. |
20 | | * |
21 | | * The memory context uses a very simple approach to free space management. |
22 | | * Instead of a complex global freelist, each block tracks the number |
23 | | * of allocated and freed chunks. The block is classed as empty when the |
24 | | * number of free chunks is equal to the number of allocated chunks. When |
25 | | * this occurs, instead of freeing the block, we try to "recycle" it, i.e. |
26 | | * reuse it for new allocations. This is done by setting the block in the |
27 | | * context's 'freeblock' field. If the freeblock field is already occupied |
28 | | * by another free block we simply return the newly empty block to malloc. |
29 | | * |
30 | | * This approach to free blocks requires fewer malloc/free calls for truly |
31 | | * first allocated, first free'd allocation patterns. |
32 | | * |
33 | | *------------------------------------------------------------------------- |
34 | | */ |
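A minimal sketch of the FIFO pattern described above, using the standard
backend MemoryContext API (the context name, loop bound, and chunk size are
illustrative, not taken from this file):

	MemoryContext gen = GenerationContextCreate(CurrentMemoryContext,
												"demo queue",
												ALLOCSET_DEFAULT_SIZES);

	for (int i = 0; i < 10000; i++)
	{
		/* allocate in roughly the same order the chunks will be freed */
		char	   *item = MemoryContextAlloc(gen, 64);

		/* ... build and process one queue entry in 'item' ... */

		pfree(item);			/* freed in (near) allocation order */
	}

	MemoryContextDelete(gen);	/* releases all blocks and the header */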
35 | | |
36 | | #include "postgres.h" |
37 | | |
38 | | #include "lib/ilist.h" |
39 | | #include "port/pg_bitutils.h" |
40 | | #include "utils/memdebug.h" |
41 | | #include "utils/memutils.h" |
42 | | #include "utils/memutils_internal.h" |
43 | | #include "utils/memutils_memorychunk.h" |
44 | | |
45 | | |
46 | 0 | #define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock)) |
47 | 0 | #define Generation_CHUNKHDRSZ sizeof(MemoryChunk) |
48 | | |
49 | 0 | #define Generation_CHUNK_FRACTION 8 |
50 | | |
51 | | typedef struct GenerationBlock GenerationBlock; /* forward reference */ |
52 | | |
53 | | typedef void *GenerationPointer; |
54 | | |
55 | | /* |
56 | | * GenerationContext is a simple memory context that does not reuse allocated |
57 | | * chunks; a block is freed once all chunks on it have been freed. |
58 | | */ |
59 | | typedef struct GenerationContext |
60 | | { |
61 | | MemoryContextData header; /* Standard memory-context fields */ |
62 | | |
63 | | /* Generational context parameters */ |
64 | | uint32 initBlockSize; /* initial block size */ |
65 | | uint32 maxBlockSize; /* maximum block size */ |
66 | | uint32 nextBlockSize; /* next block size to allocate */ |
67 | | uint32 allocChunkLimit; /* effective chunk size limit */ |
68 | | |
69 | | GenerationBlock *block; /* current (most recently allocated) block */ |
70 | | GenerationBlock *freeblock; /* pointer to an empty block that's being |
71 | | * recycled, or NULL if there's no such block. */ |
72 | | dlist_head blocks; /* list of blocks */ |
73 | | } GenerationContext; |
74 | | |
75 | | /* |
76 | | * GenerationBlock |
77 | | * GenerationBlock is the unit of memory that is obtained by generation.c |
78 | | * from malloc(). It contains zero or more MemoryChunks, which are the |
79 | | * units requested by palloc() and freed by pfree(). MemoryChunks cannot |
80 | | * be returned to malloc() individually, instead pfree() updates the free |
81 | | * counter of the block and when all chunks in a block are free the whole |
82 | | * block can be returned to malloc(). |
83 | | * |
84 | | * GenerationBlock is the header data for a block --- the usable space |
85 | | * within the block begins at the next alignment boundary. |
86 | | */ |
87 | | struct GenerationBlock |
88 | | { |
89 | | dlist_node node; /* doubly-linked list of blocks */ |
90 | | GenerationContext *context; /* pointer back to the owning context */ |
91 | | Size blksize; /* allocated size of this block */ |
92 | | int nchunks; /* number of chunks in the block */ |
93 | | int nfree; /* number of free chunks */ |
94 | | char *freeptr; /* start of free space in this block */ |
95 | | char *endptr; /* end of space in this block */ |
96 | | }; |
97 | | |
98 | | /* |
99 | | * GenerationIsValid |
100 | | * True iff set is a valid generation context. |
101 | | */ |
102 | | #define GenerationIsValid(set) \ |
103 | 0 | (PointerIsValid(set) && IsA(set, GenerationContext)) |
104 | | |
105 | | /* |
106 | | * GenerationBlockIsValid |
107 | | * True iff block is a valid block of a generation context. |
108 | | */ |
109 | | #define GenerationBlockIsValid(block) \ |
110 | 0 | (PointerIsValid(block) && GenerationIsValid((block)->context)) |
111 | | |
112 | | /* |
113 | | * GenerationBlockIsEmpty |
114 | | * True iff block contains no chunks |
115 | | */ |
116 | | #define GenerationBlockIsEmpty(b) ((b)->nchunks == 0) |
117 | | |
118 | | /* |
119 | | * We always store external chunks on a dedicated block. This makes fetching |
120 | | * the block from an external chunk easy since it's always the first and only |
121 | | * chunk on the block. |
122 | | */ |
123 | | #define ExternalChunkGetBlock(chunk) \ |
124 | 0 | (GenerationBlock *) ((char *) chunk - Generation_BLOCKHDRSZ) |
125 | | |
126 | | /* Obtain the keeper block for a generation context */ |
127 | | #define KeeperBlock(set) \ |
128 | 0 | ((GenerationBlock *) (((char *) set) + \ |
129 | 0 | MAXALIGN(sizeof(GenerationContext)))) |
130 | | |
131 | | /* Check if the block is the keeper block of the given generation context */ |
132 | 0 | #define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set))) |
133 | | |
134 | | /* Inlined helper functions */ |
135 | | static inline void GenerationBlockInit(GenerationContext *context, |
136 | | GenerationBlock *block, |
137 | | Size blksize); |
138 | | static inline void GenerationBlockMarkEmpty(GenerationBlock *block); |
139 | | static inline Size GenerationBlockFreeBytes(GenerationBlock *block); |
140 | | static inline void GenerationBlockFree(GenerationContext *set, |
141 | | GenerationBlock *block); |
142 | | |
143 | | |
144 | | /* |
145 | | * Public routines |
146 | | */ |
147 | | |
148 | | |
149 | | /* |
150 | | * GenerationContextCreate |
151 | | * Create a new Generation context. |
152 | | * |
153 | | * parent: parent context, or NULL if top-level context |
154 | | * name: name of context (must be statically allocated) |
155 | | * minContextSize: minimum context size |
156 | | * initBlockSize: initial allocation block size |
157 | | * maxBlockSize: maximum allocation block size |
158 | | */ |
159 | | MemoryContext |
160 | | GenerationContextCreate(MemoryContext parent, |
161 | | const char *name, |
162 | | Size minContextSize, |
163 | | Size initBlockSize, |
164 | | Size maxBlockSize) |
165 | 0 | { |
166 | 0 | Size firstBlockSize; |
167 | 0 | Size allocSize; |
168 | 0 | GenerationContext *set; |
169 | 0 | GenerationBlock *block; |
170 | | |
171 | | /* ensure MemoryChunk's size is properly maxaligned */ |
172 | 0 | StaticAssertDecl(Generation_CHUNKHDRSZ == MAXALIGN(Generation_CHUNKHDRSZ), |
173 | 0 | "sizeof(MemoryChunk) is not maxaligned"); |
174 | | |
175 | | /* |
176 | | * First, validate allocation parameters. Asserts seem sufficient because |
177 | | * nobody varies their parameters at runtime. We somewhat arbitrarily |
178 | | * enforce a minimum 1K block size. We restrict the maximum block size to |
179 | | * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in |
180 | | * regards to addressing the offset between the chunk and the block that |
181 | | * the chunk is stored on. We would be unable to store the offset between |
182 | | * the chunk and block for any chunks that were beyond |
183 | | * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be |
184 | | * larger than this. |
185 | | */ |
186 | 0 | Assert(initBlockSize == MAXALIGN(initBlockSize) && |
187 | 0 | initBlockSize >= 1024); |
188 | 0 | Assert(maxBlockSize == MAXALIGN(maxBlockSize) && |
189 | 0 | maxBlockSize >= initBlockSize && |
190 | 0 | AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */ |
191 | 0 | Assert(minContextSize == 0 || |
192 | 0 | (minContextSize == MAXALIGN(minContextSize) && |
193 | 0 | minContextSize >= 1024 && |
194 | 0 | minContextSize <= maxBlockSize)); |
195 | 0 | Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET); |
196 | | |
197 | | /* Determine size of initial block */ |
198 | 0 | allocSize = MAXALIGN(sizeof(GenerationContext)) + |
199 | 0 | Generation_BLOCKHDRSZ + Generation_CHUNKHDRSZ; |
200 | 0 | if (minContextSize != 0) |
201 | 0 | allocSize = Max(allocSize, minContextSize); |
202 | 0 | else |
203 | 0 | allocSize = Max(allocSize, initBlockSize); |
204 | | |
205 | | /* |
206 | | * Allocate the initial block. Unlike other generation.c blocks, it |
207 | | * starts with the context header and its block header follows that. |
208 | | */ |
209 | 0 | set = (GenerationContext *) malloc(allocSize); |
210 | 0 | if (set == NULL) |
211 | 0 | { |
212 | 0 | MemoryContextStats(TopMemoryContext); |
213 | 0 | ereport(ERROR, |
214 | 0 | (errcode(ERRCODE_OUT_OF_MEMORY), |
215 | 0 | errmsg("out of memory"), |
216 | 0 | errdetail("Failed while creating memory context \"%s\".", |
217 | 0 | name))); |
218 | 0 | } |
219 | | |
220 | | /* |
221 | | * Avoid writing code that can fail between here and MemoryContextCreate; |
222 | | * we'd leak the header if we ereport in this stretch. |
223 | | */ |
224 | 0 | dlist_init(&set->blocks); |
225 | | |
226 | | /* Fill in the initial block's block header */ |
227 | 0 | block = KeeperBlock(set); |
228 | | /* determine the block size and initialize it */ |
229 | 0 | firstBlockSize = allocSize - MAXALIGN(sizeof(GenerationContext)); |
230 | 0 | GenerationBlockInit(set, block, firstBlockSize); |
231 | | |
232 | | /* add it to the doubly-linked list of blocks */ |
233 | 0 | dlist_push_head(&set->blocks, &block->node); |
234 | | |
235 | | /* use it as the current allocation block */ |
236 | 0 | set->block = block; |
237 | | |
238 | | /* No free block, yet */ |
239 | 0 | set->freeblock = NULL; |
240 | | |
241 | | /* Fill in GenerationContext-specific header fields */ |
242 | 0 | set->initBlockSize = (uint32) initBlockSize; |
243 | 0 | set->maxBlockSize = (uint32) maxBlockSize; |
244 | 0 | set->nextBlockSize = (uint32) initBlockSize; |
245 | | |
246 | | /* |
247 | | * Compute the allocation chunk size limit for this context. |
248 | | * |
249 | | * Limit the maximum size a non-dedicated chunk can be so that we can fit |
250 | | * at least Generation_CHUNK_FRACTION of chunks this big onto the maximum |
251 | | * sized block. We must further limit this value so that it's no more |
252 | | * than MEMORYCHUNK_MAX_VALUE. We're unable to have non-external chunks |
253 | | * larger than that value as we store the chunk size in the MemoryChunk |
254 | | * 'value' field in the call to MemoryChunkSetHdrMask(). |
255 | | */ |
256 | 0 | set->allocChunkLimit = Min(maxBlockSize, MEMORYCHUNK_MAX_VALUE); |
257 | 0 | while ((Size) (set->allocChunkLimit + Generation_CHUNKHDRSZ) > |
258 | 0 | (Size) ((Size) (maxBlockSize - Generation_BLOCKHDRSZ) / Generation_CHUNK_FRACTION)) |
259 | 0 | set->allocChunkLimit >>= 1; |
260 | | |
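A worked example of the loop above (illustrative, not from the original file;
assumes a typical 64-bit build with a 16-byte chunk header and a block header
well under 1kB). With maxBlockSize = 8MB:

	budget = (8388608 - Generation_BLOCKHDRSZ) / 8  ~ 1048569 bytes
	8MB, 4MB, 2MB, 1MB all fail:  e.g. 1048576 + 16 > budget  -> halve
	512kB passes:                      524288 + 16 <= budget  -> stop

so allocChunkLimit settles at 524288, and any request whose chunk would be
larger than that gets a dedicated block via GenerationAllocLarge().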
261 | | /* Finally, do the type-independent part of context creation */ |
262 | 0 | MemoryContextCreate((MemoryContext) set, |
263 | 0 | T_GenerationContext, |
264 | 0 | MCTX_GENERATION_ID, |
265 | 0 | parent, |
266 | 0 | name); |
267 | |
268 | 0 | ((MemoryContext) set)->mem_allocated = firstBlockSize; |
269 | |
270 | 0 | return (MemoryContext) set; |
271 | 0 | } |
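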
272 | | |
273 | | /* |
274 | | * GenerationReset |
275 | | * Frees all memory which is allocated in the given set. |
276 | | * |
277 | | * The initial "keeper" block (which shares a malloc chunk with the context |
278 | | * header) is not given back to the operating system though. In this way, we |
279 | | * don't thrash malloc() when a context is repeatedly reset after small |
280 | | * allocations. |
281 | | */ |
282 | | void |
283 | | GenerationReset(MemoryContext context) |
284 | 0 | { |
285 | 0 | GenerationContext *set = (GenerationContext *) context; |
286 | 0 | dlist_mutable_iter miter; |
287 | |
288 | 0 | Assert(GenerationIsValid(set)); |
289 | |
290 | | #ifdef MEMORY_CONTEXT_CHECKING |
291 | | /* Check for corruption and leaks before freeing */ |
292 | | GenerationCheck(context); |
293 | | #endif |
294 | | |
295 | | /* |
296 | | * NULLify the free block pointer. We must do this before calling |
297 | | * GenerationBlockFree as that function never expects to free the |
298 | | * freeblock. |
299 | | */ |
300 | 0 | set->freeblock = NULL; |
301 | |
302 | 0 | dlist_foreach_modify(miter, &set->blocks) |
303 | 0 | { |
304 | 0 | GenerationBlock *block = dlist_container(GenerationBlock, node, miter.cur); |
305 | |
306 | 0 | if (IsKeeperBlock(set, block)) |
307 | 0 | GenerationBlockMarkEmpty(block); |
308 | 0 | else |
309 | 0 | GenerationBlockFree(set, block); |
310 | 0 | } |
311 | | |
312 | | /* set it so new allocations make use of the keeper block */ |
313 | 0 | set->block = KeeperBlock(set); |
314 | | |
315 | | /* Reset block size allocation sequence, too */ |
316 | 0 | set->nextBlockSize = set->initBlockSize; |
317 | | |
318 | | /* Ensure there is only 1 item in the dlist */ |
319 | 0 | Assert(!dlist_is_empty(&set->blocks)); |
320 | 0 | Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks))); |
321 | 0 | } |
322 | | |
323 | | /* |
324 | | * GenerationDelete |
325 | | * Free all memory which is allocated in the given context. |
326 | | */ |
327 | | void |
328 | | GenerationDelete(MemoryContext context) |
329 | 0 | { |
330 | | /* Reset to release all releasable GenerationBlocks */ |
331 | 0 | GenerationReset(context); |
332 | | /* And free the context header and keeper block */ |
333 | 0 | free(context); |
334 | 0 | } |
335 | | |
336 | | /* |
337 | | * Helper for GenerationAlloc() that allocates an entire block for the chunk. |
338 | | * |
339 | | * GenerationAlloc()'s comment explains why this is separate. |
340 | | */ |
341 | | pg_noinline |
342 | | static void * |
343 | | GenerationAllocLarge(MemoryContext context, Size size, int flags) |
344 | 0 | { |
345 | 0 | GenerationContext *set = (GenerationContext *) context; |
346 | 0 | GenerationBlock *block; |
347 | 0 | MemoryChunk *chunk; |
348 | 0 | Size chunk_size; |
349 | 0 | Size required_size; |
350 | 0 | Size blksize; |
351 | | |
352 | | /* validate 'size' is within the limits for the given 'flags' */ |
353 | 0 | MemoryContextCheckSize(context, size, flags); |
354 | |
355 | | #ifdef MEMORY_CONTEXT_CHECKING |
356 | | /* ensure there's always space for the sentinel byte */ |
357 | | chunk_size = MAXALIGN(size + 1); |
358 | | #else |
359 | 0 | chunk_size = MAXALIGN(size); |
360 | 0 | #endif |
361 | 0 | required_size = chunk_size + Generation_CHUNKHDRSZ; |
362 | 0 | blksize = required_size + Generation_BLOCKHDRSZ; |
363 | |
364 | 0 | block = (GenerationBlock *) malloc(blksize); |
365 | 0 | if (block == NULL) |
366 | 0 | return MemoryContextAllocationFailure(context, size, flags); |
367 | | |
368 | 0 | context->mem_allocated += blksize; |
369 | | |
370 | | /* block with a single (used) chunk */ |
371 | 0 | block->context = set; |
372 | 0 | block->blksize = blksize; |
373 | 0 | block->nchunks = 1; |
374 | 0 | block->nfree = 0; |
375 | | |
376 | | /* the block is completely full */ |
377 | 0 | block->freeptr = block->endptr = ((char *) block) + blksize; |
378 | |
379 | 0 | chunk = (MemoryChunk *) (((char *) block) + Generation_BLOCKHDRSZ); |
380 | | |
381 | | /* mark the MemoryChunk as externally managed */ |
382 | 0 | MemoryChunkSetHdrMaskExternal(chunk, MCTX_GENERATION_ID); |
383 | |
384 | | #ifdef MEMORY_CONTEXT_CHECKING |
385 | | chunk->requested_size = size; |
386 | | /* set mark to catch clobber of "unused" space */ |
387 | | Assert(size < chunk_size); |
388 | | set_sentinel(MemoryChunkGetPointer(chunk), size); |
389 | | #endif |
390 | | #ifdef RANDOMIZE_ALLOCATED_MEMORY |
391 | | /* fill the allocated space with junk */ |
392 | | randomize_mem((char *) MemoryChunkGetPointer(chunk), size); |
393 | | #endif |
394 | | |
395 | | /* add the block to the list of allocated blocks */ |
396 | 0 | dlist_push_head(&set->blocks, &block->node); |
397 | | |
398 | | /* Ensure any padding bytes are marked NOACCESS. */ |
399 | 0 | VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size, |
400 | 0 | chunk_size - size); |
401 | | |
402 | | /* Disallow access to the chunk header. */ |
403 | 0 | VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ); |
404 | |
405 | 0 | return MemoryChunkGetPointer(chunk); |
406 | 0 | } |
407 | | |
408 | | /* |
409 | | * Small helper for allocating a new chunk from a block, to avoid duplicating |
410 | | * the code between GenerationAlloc() and GenerationAllocFromNewBlock(). |
411 | | */ |
412 | | static inline void * |
413 | | GenerationAllocChunkFromBlock(MemoryContext context, GenerationBlock *block, |
414 | | Size size, Size chunk_size) |
415 | 0 | { |
416 | 0 | MemoryChunk *chunk = (MemoryChunk *) (block->freeptr); |
417 | | |
418 | | /* validate we've been given a block with enough free space */ |
419 | 0 | Assert(block != NULL); |
420 | 0 | Assert((block->endptr - block->freeptr) >= |
421 | 0 | Generation_CHUNKHDRSZ + chunk_size); |
422 | | |
423 | | /* Prepare to initialize the chunk header. */ |
424 | 0 | VALGRIND_MAKE_MEM_UNDEFINED(chunk, Generation_CHUNKHDRSZ); |
425 | |
426 | 0 | block->nchunks += 1; |
427 | 0 | block->freeptr += (Generation_CHUNKHDRSZ + chunk_size); |
428 | |
429 | 0 | Assert(block->freeptr <= block->endptr); |
430 | |
431 | 0 | MemoryChunkSetHdrMask(chunk, block, chunk_size, MCTX_GENERATION_ID); |
432 | | #ifdef MEMORY_CONTEXT_CHECKING |
433 | | chunk->requested_size = size; |
434 | | /* set mark to catch clobber of "unused" space */ |
435 | | Assert(size < chunk_size); |
436 | | set_sentinel(MemoryChunkGetPointer(chunk), size); |
437 | | #endif |
438 | | #ifdef RANDOMIZE_ALLOCATED_MEMORY |
439 | | /* fill the allocated space with junk */ |
440 | | randomize_mem((char *) MemoryChunkGetPointer(chunk), size); |
441 | | #endif |
442 | | |
443 | | /* Ensure any padding bytes are marked NOACCESS. */ |
444 | 0 | VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size, |
445 | 0 | chunk_size - size); |
446 | | |
447 | | /* Disallow access to the chunk header. */ |
448 | 0 | VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ); |
449 | |
450 | 0 | return MemoryChunkGetPointer(chunk); |
451 | 0 | } |
452 | | |
453 | | /* |
454 | | * Helper for GenerationAlloc() that allocates a new block and returns a chunk |
455 | | * allocated from it. |
456 | | * |
457 | | * GenerationAlloc()'s comment explains why this is separate. |
458 | | */ |
459 | | pg_noinline |
460 | | static void * |
461 | | GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags, |
462 | | Size chunk_size) |
463 | 0 | { |
464 | 0 | GenerationContext *set = (GenerationContext *) context; |
465 | 0 | GenerationBlock *block; |
466 | 0 | Size blksize; |
467 | 0 | Size required_size; |
468 | | |
469 | | /* |
470 | | * The first such block has size initBlockSize, and we double the space in |
471 | | * each succeeding block, but not more than maxBlockSize. |
472 | | */ |
473 | 0 | blksize = set->nextBlockSize; |
474 | 0 | set->nextBlockSize <<= 1; |
475 | 0 | if (set->nextBlockSize > set->maxBlockSize) |
476 | 0 | set->nextBlockSize = set->maxBlockSize; |
477 | | |
478 | | /* we'll need space for the chunk, chunk hdr and block hdr */ |
479 | 0 | required_size = chunk_size + Generation_CHUNKHDRSZ + Generation_BLOCKHDRSZ; |
480 | | |
481 | | /* round the size up to the next power of 2 */ |
482 | 0 | if (blksize < required_size) |
483 | 0 | blksize = pg_nextpower2_size_t(required_size); |
484 | |
485 | 0 | block = (GenerationBlock *) malloc(blksize); |
486 | |
487 | 0 | if (block == NULL) |
488 | 0 | return MemoryContextAllocationFailure(context, size, flags); |
489 | | |
490 | 0 | context->mem_allocated += blksize; |
491 | | |
492 | | /* initialize the new block */ |
493 | 0 | GenerationBlockInit(set, block, blksize); |
494 | | |
495 | | /* add it to the doubly-linked list of blocks */ |
496 | 0 | dlist_push_head(&set->blocks, &block->node); |
497 | | |
498 | | /* make this the current block */ |
499 | 0 | set->block = block; |
500 | |
501 | 0 | return GenerationAllocChunkFromBlock(context, block, size, chunk_size); |
502 | 0 | } |
503 | | |
504 | | /* |
505 | | * GenerationAlloc |
506 | | * Returns a pointer to allocated memory of given size or raises an ERROR |
507 | | * on allocation failure, or returns NULL when flags contains |
508 | | * MCXT_ALLOC_NO_OOM. |
509 | | * |
510 | | * No request may exceed: |
511 | | * MAXALIGN_DOWN(SIZE_MAX) - Generation_BLOCKHDRSZ - Generation_CHUNKHDRSZ |
512 | | * All callers use a much-lower limit. |
513 | | * |
514 | | * Note: when using valgrind, it doesn't matter how the returned allocation |
515 | | * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will |
516 | | * return space that is marked NOACCESS - GenerationRealloc has to beware! |
517 | | * |
518 | | * This function should only contain the most common code paths. Everything |
519 | | * else should be in pg_noinline helper functions, thus avoiding the overhead |
520 | | * of creating a stack frame for the common cases. Allocating memory is often |
521 | | * a bottleneck in many workloads, so avoiding stack frame setup is |
522 | | * worthwhile. Helper functions should always directly return the newly |
523 | | * allocated memory so that we can just return that address directly as a tail |
524 | | * call. |
525 | | */ |
526 | | void * |
527 | | GenerationAlloc(MemoryContext context, Size size, int flags) |
528 | 0 | { |
529 | 0 | GenerationContext *set = (GenerationContext *) context; |
530 | 0 | GenerationBlock *block; |
531 | 0 | Size chunk_size; |
532 | 0 | Size required_size; |
533 | |
534 | 0 | Assert(GenerationIsValid(set)); |
535 | |
536 | | #ifdef MEMORY_CONTEXT_CHECKING |
537 | | /* ensure there's always space for the sentinel byte */ |
538 | | chunk_size = MAXALIGN(size + 1); |
539 | | #else |
540 | 0 | chunk_size = MAXALIGN(size); |
541 | 0 | #endif |
542 | | |
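A small worked example of the sizing above (illustrative; assumes 8-byte
MAXALIGN, as on common 64-bit builds) for a request of size = 24:

	without MEMORY_CONTEXT_CHECKING:  chunk_size = MAXALIGN(24)     = 24
	with MEMORY_CONTEXT_CHECKING:     chunk_size = MAXALIGN(24 + 1) = 32

In the checking build, set_sentinel() plants a canary byte at pointer + 24;
sentinel_ok() in GenerationFree()/GenerationCheck() later verifies it to
detect writes past the requested size.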
543 | | /* |
544 | | * If requested size exceeds maximum for chunks we hand the request off to |
545 | | * GenerationAllocLarge(). |
546 | | */ |
547 | 0 | if (chunk_size > set->allocChunkLimit) |
548 | 0 | return GenerationAllocLarge(context, size, flags); |
549 | | |
550 | 0 | required_size = chunk_size + Generation_CHUNKHDRSZ; |
551 | | |
552 | | /* |
553 | | * Not an oversized chunk. We try to first make use of the current block, |
554 | | * but if there's not enough space in it, instead of allocating a new |
555 | | * block, we look to see if the empty freeblock has enough space. We |
556 | | * don't try reusing the keeper block. If it's become empty we'll reuse |
557 | | * that again only if the context is reset. |
558 | | * |
559 | | * We only try reusing the freeblock if we've no space for this allocation |
560 | | * on the current block. When a freeblock exists, we'll switch to it the |
561 | | * first time we can't fit an allocation in the current block. We |
562 | | * avoid ping-ponging between the two as we need to be careful not to |
563 | | * fragment differently sized consecutive allocations between several |
564 | | * blocks. Going between the two could cause fragmentation for FIFO |
565 | | * workloads, which generation is meant to be good at. |
566 | | */ |
567 | 0 | block = set->block; |
568 | |
569 | 0 | if (unlikely(GenerationBlockFreeBytes(block) < required_size)) |
570 | 0 | { |
571 | 0 | GenerationBlock *freeblock = set->freeblock; |
572 | | |
573 | | /* freeblock, if set, must be empty */ |
574 | 0 | Assert(freeblock == NULL || GenerationBlockIsEmpty(freeblock)); |
575 | | |
576 | | /* check if we have a freeblock and if it's big enough */ |
577 | 0 | if (freeblock != NULL && |
578 | 0 | GenerationBlockFreeBytes(freeblock) >= required_size) |
579 | 0 | { |
580 | | /* make the freeblock the current block */ |
581 | 0 | set->freeblock = NULL; |
582 | 0 | set->block = freeblock; |
583 | |
584 | 0 | return GenerationAllocChunkFromBlock(context, |
585 | 0 | freeblock, |
586 | 0 | size, |
587 | 0 | chunk_size); |
588 | 0 | } |
589 | 0 | else |
590 | 0 | { |
591 | | /* |
592 | | * No freeblock, or it's not big enough for this allocation. Make |
593 | | * a new block. |
594 | | */ |
595 | 0 | return GenerationAllocFromNewBlock(context, size, flags, chunk_size); |
596 | 0 | } |
597 | 0 | } |
598 | | |
599 | | /* The current block has space, so just allocate chunk there. */ |
600 | 0 | return GenerationAllocChunkFromBlock(context, block, size, chunk_size); |
601 | 0 | } |
602 | | |
603 | | /* |
604 | | * GenerationBlockInit |
605 | | * Initializes 'block' assuming 'blksize'. Does not update the context's |
606 | | * mem_allocated field. |
607 | | */ |
608 | | static inline void |
609 | | GenerationBlockInit(GenerationContext *context, GenerationBlock *block, |
610 | | Size blksize) |
611 | 0 | { |
612 | 0 | block->context = context; |
613 | 0 | block->blksize = blksize; |
614 | 0 | block->nchunks = 0; |
615 | 0 | block->nfree = 0; |
616 | |
617 | 0 | block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ; |
618 | 0 | block->endptr = ((char *) block) + blksize; |
619 | | |
620 | | /* Mark unallocated space NOACCESS. */ |
621 | 0 | VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, |
622 | 0 | blksize - Generation_BLOCKHDRSZ); |
623 | 0 | } |
624 | | |
625 | | /* |
626 | | * GenerationBlockMarkEmpty |
627 | | * Set a block as empty. Does not free the block. |
628 | | */ |
629 | | static inline void |
630 | | GenerationBlockMarkEmpty(GenerationBlock *block) |
631 | 0 | { |
632 | | #if defined(USE_VALGRIND) || defined(CLOBBER_FREED_MEMORY) |
633 | | char *datastart = ((char *) block) + Generation_BLOCKHDRSZ; |
634 | | #endif |
635 | |
|
636 | | #ifdef CLOBBER_FREED_MEMORY |
637 | | wipe_mem(datastart, block->freeptr - datastart); |
638 | | #else |
639 | | /* wipe_mem() would have done this */ |
640 | 0 | VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart); |
641 | 0 | #endif |
642 | | |
643 | | /* Reset the block, but don't return it to malloc */ |
644 | 0 | block->nchunks = 0; |
645 | 0 | block->nfree = 0; |
646 | 0 | block->freeptr = ((char *) block) + Generation_BLOCKHDRSZ; |
647 | 0 | } |
648 | | |
649 | | /* |
650 | | * GenerationBlockFreeBytes |
651 | | * Returns the number of bytes free in 'block' |
652 | | */ |
653 | | static inline Size |
654 | | GenerationBlockFreeBytes(GenerationBlock *block) |
655 | 0 | { |
656 | 0 | return (block->endptr - block->freeptr); |
657 | 0 | } |
658 | | |
659 | | /* |
660 | | * GenerationBlockFree |
661 | | * Remove 'block' from 'set' and release the memory consumed by it. |
662 | | */ |
663 | | static inline void |
664 | | GenerationBlockFree(GenerationContext *set, GenerationBlock *block) |
665 | 0 | { |
666 | | /* Make sure nobody tries to free the keeper block */ |
667 | 0 | Assert(!IsKeeperBlock(set, block)); |
668 | | /* We shouldn't be freeing the freeblock either */ |
669 | 0 | Assert(block != set->freeblock); |
670 | | |
671 | | /* release the block from the list of blocks */ |
672 | 0 | dlist_delete(&block->node); |
673 | |
674 | 0 | ((MemoryContext) set)->mem_allocated -= block->blksize; |
675 | |
676 | | #ifdef CLOBBER_FREED_MEMORY |
677 | | wipe_mem(block, block->blksize); |
678 | | #endif |
679 | |
680 | 0 | free(block); |
681 | 0 | } |
682 | | |
683 | | /* |
684 | | * GenerationFree |
685 | | * Update number of chunks in the block, and consider freeing the block |
686 | | * if it's become empty. |
687 | | */ |
688 | | void |
689 | | GenerationFree(void *pointer) |
690 | 0 | { |
691 | 0 | MemoryChunk *chunk = PointerGetMemoryChunk(pointer); |
692 | 0 | GenerationBlock *block; |
693 | 0 | GenerationContext *set; |
694 | | #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \ |
695 | | || defined(CLOBBER_FREED_MEMORY) |
696 | | Size chunksize; |
697 | | #endif |
698 | | |
699 | | /* Allow access to the chunk header. */ |
700 | 0 | VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ); |
701 | |
702 | 0 | if (MemoryChunkIsExternal(chunk)) |
703 | 0 | { |
704 | 0 | block = ExternalChunkGetBlock(chunk); |
705 | | |
706 | | /* |
707 | | * Try to verify that we have a sane block pointer: the block header |
708 | | * should reference a generation context. |
709 | | */ |
710 | 0 | if (!GenerationBlockIsValid(block)) |
711 | 0 | elog(ERROR, "could not find block containing chunk %p", chunk); |
712 | |
713 | | #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \ |
714 | | || defined(CLOBBER_FREED_MEMORY) |
715 | | chunksize = block->endptr - (char *) pointer; |
716 | | #endif |
717 | 0 | } |
718 | 0 | else |
719 | 0 | { |
720 | 0 | block = MemoryChunkGetBlock(chunk); |
721 | | |
722 | | /* |
723 | | * In this path, for speed reasons we just Assert that the referenced |
724 | | * block is good. Future field experience may show that this Assert |
725 | | * had better become a regular runtime test-and-elog check. |
726 | | */ |
727 | 0 | Assert(GenerationBlockIsValid(block)); |
728 | |
729 | | #if (defined(MEMORY_CONTEXT_CHECKING) && defined(USE_ASSERT_CHECKING)) \ |
730 | | || defined(CLOBBER_FREED_MEMORY) |
731 | | chunksize = MemoryChunkGetValue(chunk); |
732 | | #endif |
733 | 0 | } |
734 | | |
735 | | #ifdef MEMORY_CONTEXT_CHECKING |
736 | | /* Test for someone scribbling on unused space in chunk */ |
737 | | Assert(chunk->requested_size < chunksize); |
738 | | if (!sentinel_ok(pointer, chunk->requested_size)) |
739 | | elog(WARNING, "detected write past chunk end in %s %p", |
740 | | ((MemoryContext) block->context)->name, chunk); |
741 | | #endif |
742 | | |
743 | | #ifdef CLOBBER_FREED_MEMORY |
744 | | wipe_mem(pointer, chunksize); |
745 | | #endif |
746 | | |
747 | | #ifdef MEMORY_CONTEXT_CHECKING |
748 | | /* Reset requested_size to InvalidAllocSize in freed chunks */ |
749 | | chunk->requested_size = InvalidAllocSize; |
750 | | #endif |
751 | | |
752 | 0 | block->nfree += 1; |
753 | |
754 | 0 | Assert(block->nchunks > 0); |
755 | 0 | Assert(block->nfree <= block->nchunks); |
756 | 0 | Assert(block != block->context->freeblock); |
757 | | |
758 | | /* If there are still allocated chunks in the block, we're done. */ |
759 | 0 | if (likely(block->nfree < block->nchunks)) |
760 | 0 | return; |
761 | | |
762 | 0 | set = block->context; |
763 | | |
764 | | /*----------------------- |
765 | | * The block this allocation was on has now become completely empty of |
766 | | * chunks. In the general case, we can now return the memory for this |
767 | | * block back to malloc. However, there are cases where we don't want to |
768 | | * do that: |
769 | | * |
770 | | * 1) If it's the keeper block. This block was malloc'd in the same |
771 | | * allocation as the context itself and can't be free'd without |
772 | | * freeing the context. |
773 | | * 2) If it's the current block. We could free this, but doing so would |
774 | | * leave us nothing to set the current block to, so we just mark the |
775 | | * block as empty so new allocations can reuse it again. |
776 | | * 3) If we have no "freeblock" set, then we save a single block for |
777 | | * future allocations to avoid having to malloc a new block again. |
778 | | * This is useful for FIFO workloads as it avoids continual |
779 | | * free/malloc cycles. |
780 | | */ |
781 | 0 | if (IsKeeperBlock(set, block) || set->block == block) |
782 | 0 | GenerationBlockMarkEmpty(block); /* case 1 and 2 */ |
783 | 0 | else if (set->freeblock == NULL) |
784 | 0 | { |
785 | | /* case 3 */ |
786 | 0 | GenerationBlockMarkEmpty(block); |
787 | 0 | set->freeblock = block; |
788 | 0 | } |
789 | 0 | else |
790 | 0 | GenerationBlockFree(set, block); /* Otherwise, free it */ |
791 | 0 | } |
792 | | |
793 | | /* |
794 | | * GenerationRealloc |
795 | | * When handling repalloc, we simply allocate a new chunk, copy the data |
796 | | * and discard the old one. The only exception is when the new size fits |
797 | | * into the old chunk - in that case we just update the chunk header. |
798 | | */ |
799 | | void * |
800 | | GenerationRealloc(void *pointer, Size size, int flags) |
801 | 0 | { |
802 | 0 | MemoryChunk *chunk = PointerGetMemoryChunk(pointer); |
803 | 0 | GenerationContext *set; |
804 | 0 | GenerationBlock *block; |
805 | 0 | GenerationPointer newPointer; |
806 | 0 | Size oldsize; |
807 | | |
808 | | /* Allow access to the chunk header. */ |
809 | 0 | VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ); |
810 | |
811 | 0 | if (MemoryChunkIsExternal(chunk)) |
812 | 0 | { |
813 | 0 | block = ExternalChunkGetBlock(chunk); |
814 | | |
815 | | /* |
816 | | * Try to verify that we have a sane block pointer: the block header |
817 | | * should reference a generation context. |
818 | | */ |
819 | 0 | if (!GenerationBlockIsValid(block)) |
820 | 0 | elog(ERROR, "could not find block containing chunk %p", chunk); |
821 | | |
822 | 0 | oldsize = block->endptr - (char *) pointer; |
823 | 0 | } |
824 | 0 | else |
825 | 0 | { |
826 | 0 | block = MemoryChunkGetBlock(chunk); |
827 | | |
828 | | /* |
829 | | * In this path, for speed reasons we just Assert that the referenced |
830 | | * block is good. Future field experience may show that this Assert |
831 | | * had better become a regular runtime test-and-elog check. |
832 | | */ |
833 | 0 | Assert(GenerationBlockIsValid(block)); |
834 | |
835 | 0 | oldsize = MemoryChunkGetValue(chunk); |
836 | 0 | } |
837 | | |
838 | 0 | set = block->context; |
839 | |
840 | | #ifdef MEMORY_CONTEXT_CHECKING |
841 | | /* Test for someone scribbling on unused space in chunk */ |
842 | | Assert(chunk->requested_size < oldsize); |
843 | | if (!sentinel_ok(pointer, chunk->requested_size)) |
844 | | elog(WARNING, "detected write past chunk end in %s %p", |
845 | | ((MemoryContext) set)->name, chunk); |
846 | | #endif |
847 | | |
848 | | /* |
849 | | * Maybe the allocated area is already big enough. (In particular, we always |
850 | | * fall out here if the requested size is a decrease.) |
851 | | * |
852 | | * This memory context does not use power-of-2 chunk sizing and instead |
853 | | * carves the chunks to be as small as possible, so most repalloc() calls |
854 | | * will end up in the palloc/memcpy/pfree branch. |
855 | | * |
856 | | * XXX Perhaps we should annotate this condition with unlikely()? |
857 | | */ |
858 | | #ifdef MEMORY_CONTEXT_CHECKING |
859 | | /* With MEMORY_CONTEXT_CHECKING, we need an extra byte for the sentinel */ |
860 | | if (oldsize > size) |
861 | | #else |
862 | 0 | if (oldsize >= size) |
863 | 0 | #endif |
864 | 0 | { |
865 | | #ifdef MEMORY_CONTEXT_CHECKING |
866 | | Size oldrequest = chunk->requested_size; |
867 | | |
868 | | #ifdef RANDOMIZE_ALLOCATED_MEMORY |
869 | | /* We can only fill the extra space if we know the prior request */ |
870 | | if (size > oldrequest) |
871 | | randomize_mem((char *) pointer + oldrequest, |
872 | | size - oldrequest); |
873 | | #endif |
874 | | |
875 | | chunk->requested_size = size; |
876 | | |
877 | | /* |
878 | | * If this is an increase, mark any newly-available part UNDEFINED. |
879 | | * Otherwise, mark the obsolete part NOACCESS. |
880 | | */ |
881 | | if (size > oldrequest) |
882 | | VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest, |
883 | | size - oldrequest); |
884 | | else |
885 | | VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, |
886 | | oldsize - size); |
887 | | |
888 | | /* set mark to catch clobber of "unused" space */ |
889 | | set_sentinel(pointer, size); |
890 | | #else /* !MEMORY_CONTEXT_CHECKING */ |
891 | | |
892 | | /* |
893 | | * We don't have the information to determine whether we're growing |
894 | | * the old request or shrinking it, so we conservatively mark the |
895 | | * entire new allocation DEFINED. |
896 | | */ |
897 | 0 | VALGRIND_MAKE_MEM_NOACCESS(pointer, oldsize); |
898 | 0 | VALGRIND_MAKE_MEM_DEFINED(pointer, size); |
899 | 0 | #endif |
900 | | |
901 | | /* Disallow access to the chunk header. */ |
902 | 0 | VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ); |
903 | |
904 | 0 | return pointer; |
905 | 0 | } |
906 | | |
907 | | /* allocate new chunk (this also checks size is valid) */ |
908 | 0 | newPointer = GenerationAlloc((MemoryContext) set, size, flags); |
909 | | |
910 | | /* leave immediately if request was not completed */ |
911 | 0 | if (newPointer == NULL) |
912 | 0 | { |
913 | | /* Disallow access to the chunk header. */ |
914 | 0 | VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ); |
915 | 0 | return MemoryContextAllocationFailure((MemoryContext) set, size, flags); |
916 | 0 | } |
917 | | |
918 | | /* |
919 | | * GenerationAlloc() may have returned a region that is still NOACCESS. |
920 | | * Change it to UNDEFINED for the moment; memcpy() will then transfer |
921 | | * definedness from the old allocation to the new. If we know the old |
922 | | * allocation, copy just that much. Otherwise, make the entire old chunk |
923 | | * defined to avoid errors as we copy the currently-NOACCESS trailing |
924 | | * bytes. |
925 | | */ |
926 | 0 | VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size); |
927 | | #ifdef MEMORY_CONTEXT_CHECKING |
928 | | oldsize = chunk->requested_size; |
929 | | #else |
930 | 0 | VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize); |
931 | 0 | #endif |
932 | | |
933 | | /* transfer existing data (certain to fit) */ |
934 | 0 | memcpy(newPointer, pointer, oldsize); |
935 | | |
936 | | /* free old chunk */ |
937 | 0 | GenerationFree(pointer); |
938 | |
939 | 0 | return newPointer; |
940 | 0 | } |
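A short usage sketch of the policy above ('gen' is a hypothetical
GenerationContext created earlier; sizes are illustrative). Because this
allocator carves chunks to the exact MAXALIGN'd request rather than a power
of 2, growing an allocation almost always takes the copy path:

	char	   *p = MemoryContextAlloc(gen, 100);	/* 104-byte chunk on a
													 * non-checking build */

	p = repalloc(p, 50);	/* fits in the old chunk: header updated, same p */
	p = repalloc(p, 200);	/* too big: new chunk + memcpy() + pfree(old) */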
941 | | |
942 | | /* |
943 | | * GenerationGetChunkContext |
944 | | * Return the MemoryContext that 'pointer' belongs to. |
945 | | */ |
946 | | MemoryContext |
947 | | GenerationGetChunkContext(void *pointer) |
948 | 0 | { |
949 | 0 | MemoryChunk *chunk = PointerGetMemoryChunk(pointer); |
950 | 0 | GenerationBlock *block; |
951 | | |
952 | | /* Allow access to the chunk header. */ |
953 | 0 | VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ); |
954 | |
955 | 0 | if (MemoryChunkIsExternal(chunk)) |
956 | 0 | block = ExternalChunkGetBlock(chunk); |
957 | 0 | else |
958 | 0 | block = (GenerationBlock *) MemoryChunkGetBlock(chunk); |
959 | | |
960 | | /* Disallow access to the chunk header. */ |
961 | 0 | VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ); |
962 | |
963 | 0 | Assert(GenerationBlockIsValid(block)); |
964 | 0 | return &block->context->header; |
965 | 0 | } |
966 | | |
967 | | /* |
968 | | * GenerationGetChunkSpace |
969 | | * Given a currently-allocated chunk, determine the total space |
970 | | * it occupies (including all memory-allocation overhead). |
971 | | */ |
972 | | Size |
973 | | GenerationGetChunkSpace(void *pointer) |
974 | 0 | { |
975 | 0 | MemoryChunk *chunk = PointerGetMemoryChunk(pointer); |
976 | 0 | Size chunksize; |
977 | | |
978 | | /* Allow access to the chunk header. */ |
979 | 0 | VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ); |
980 | |
981 | 0 | if (MemoryChunkIsExternal(chunk)) |
982 | 0 | { |
983 | 0 | GenerationBlock *block = ExternalChunkGetBlock(chunk); |
984 | |
985 | 0 | Assert(GenerationBlockIsValid(block)); |
986 | 0 | chunksize = block->endptr - (char *) pointer; |
987 | 0 | } |
988 | 0 | else |
989 | 0 | chunksize = MemoryChunkGetValue(chunk); |
990 | | |
991 | | /* Disallow access to the chunk header. */ |
992 | 0 | VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ); |
993 | |
994 | 0 | return Generation_CHUNKHDRSZ + chunksize; |
995 | 0 | } |
996 | | |
997 | | /* |
998 | | * GenerationIsEmpty |
999 | | * Is a GenerationContext empty of any allocated space? |
1000 | | */ |
1001 | | bool |
1002 | | GenerationIsEmpty(MemoryContext context) |
1003 | 0 | { |
1004 | 0 | GenerationContext *set = (GenerationContext *) context; |
1005 | 0 | dlist_iter iter; |
1006 | |
1007 | 0 | Assert(GenerationIsValid(set)); |
1008 | |
1009 | 0 | dlist_foreach(iter, &set->blocks) |
1010 | 0 | { |
1011 | 0 | GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur); |
1012 | |
1013 | 0 | if (block->nchunks > 0) |
1014 | 0 | return false; |
1015 | 0 | } |
1016 | | |
1017 | 0 | return true; |
1018 | 0 | } |
1019 | | |
1020 | | /* |
1021 | | * GenerationStats |
1022 | | * Compute stats about memory consumption of a Generation context. |
1023 | | * |
1024 | | * printfunc: if not NULL, pass a human-readable stats string to this. |
1025 | | * passthru: pass this pointer through to printfunc. |
1026 | | * totals: if not NULL, add stats about this context into *totals. |
1027 | | * print_to_stderr: print stats to stderr if true, elog otherwise. |
1028 | | * |
1029 | | * XXX freespace only accounts for empty space at the end of the block, not |
1030 | | * space of freed chunks (which is unknown). |
1031 | | */ |
1032 | | void |
1033 | | GenerationStats(MemoryContext context, |
1034 | | MemoryStatsPrintFunc printfunc, void *passthru, |
1035 | | MemoryContextCounters *totals, bool print_to_stderr) |
1036 | 0 | { |
1037 | 0 | GenerationContext *set = (GenerationContext *) context; |
1038 | 0 | Size nblocks = 0; |
1039 | 0 | Size nchunks = 0; |
1040 | 0 | Size nfreechunks = 0; |
1041 | 0 | Size totalspace; |
1042 | 0 | Size freespace = 0; |
1043 | 0 | dlist_iter iter; |
1044 | |
1045 | 0 | Assert(GenerationIsValid(set)); |
1046 | | |
1047 | | /* Include context header in totalspace */ |
1048 | 0 | totalspace = MAXALIGN(sizeof(GenerationContext)); |
1049 | |
1050 | 0 | dlist_foreach(iter, &set->blocks) |
1051 | 0 | { |
1052 | 0 | GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur); |
1053 | |
1054 | 0 | nblocks++; |
1055 | 0 | nchunks += block->nchunks; |
1056 | 0 | nfreechunks += block->nfree; |
1057 | 0 | totalspace += block->blksize; |
1058 | 0 | freespace += (block->endptr - block->freeptr); |
1059 | 0 | } |
1060 | |
1061 | 0 | if (printfunc) |
1062 | 0 | { |
1063 | 0 | char stats_string[200]; |
1064 | |
1065 | 0 | snprintf(stats_string, sizeof(stats_string), |
1066 | 0 | "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used", |
1067 | 0 | totalspace, nblocks, nchunks, freespace, |
1068 | 0 | nfreechunks, totalspace - freespace); |
1069 | 0 | printfunc(context, passthru, stats_string, print_to_stderr); |
1070 | 0 | } |
1071 | |
1072 | 0 | if (totals) |
1073 | 0 | { |
1074 | 0 | totals->nblocks += nblocks; |
1075 | 0 | totals->freechunks += nfreechunks; |
1076 | 0 | totals->totalspace += totalspace; |
1077 | 0 | totals->freespace += freespace; |
1078 | 0 | } |
1079 | 0 | } |
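For reference, the stats_string built by the snprintf() format above reads
like this (numbers invented for illustration):

	32768 total in 3 blocks (120 chunks); 2048 free (10 chunks); 30720 used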
1080 | | |
1081 | | |
1082 | | #ifdef MEMORY_CONTEXT_CHECKING |
1083 | | |
1084 | | /* |
1085 | | * GenerationCheck |
1086 | | * Walk through chunks and check consistency of memory. |
1087 | | * |
1088 | | * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll |
1089 | | * find yourself in an infinite loop when trouble occurs, because this |
1090 | | * routine will be entered again when elog cleanup tries to release memory! |
1091 | | */ |
1092 | | void |
1093 | | GenerationCheck(MemoryContext context) |
1094 | | { |
1095 | | GenerationContext *gen = (GenerationContext *) context; |
1096 | | const char *name = context->name; |
1097 | | dlist_iter iter; |
1098 | | Size total_allocated = 0; |
1099 | | |
1100 | | /* walk all blocks in this context */ |
1101 | | dlist_foreach(iter, &gen->blocks) |
1102 | | { |
1103 | | GenerationBlock *block = dlist_container(GenerationBlock, node, iter.cur); |
1104 | | int nfree, |
1105 | | nchunks; |
1106 | | char *ptr; |
1107 | | bool has_external_chunk = false; |
1108 | | |
1109 | | total_allocated += block->blksize; |
1110 | | |
1111 | | /* |
1112 | | * nfree > nchunks is surely wrong. Equality is allowed as the block |
1113 | | * might be completely empty if it's the freeblock. |
1114 | | */ |
1115 | | if (block->nfree > block->nchunks) |
1116 | | elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p exceeds %d allocated", |
1117 | | name, block->nfree, block, block->nchunks); |
1118 | | |
1119 | | /* check block belongs to the correct context */ |
1120 | | if (block->context != gen) |
1121 | | elog(WARNING, "problem in Generation %s: bogus context link in block %p", |
1122 | | name, block); |
1123 | | |
1124 | | /* Now walk through the chunks and count them. */ |
1125 | | nfree = 0; |
1126 | | nchunks = 0; |
1127 | | ptr = ((char *) block) + Generation_BLOCKHDRSZ; |
1128 | | |
1129 | | while (ptr < block->freeptr) |
1130 | | { |
1131 | | MemoryChunk *chunk = (MemoryChunk *) ptr; |
1132 | | GenerationBlock *chunkblock; |
1133 | | Size chunksize; |
1134 | | |
1135 | | /* Allow access to the chunk header. */ |
1136 | | VALGRIND_MAKE_MEM_DEFINED(chunk, Generation_CHUNKHDRSZ); |
1137 | | |
1138 | | if (MemoryChunkIsExternal(chunk)) |
1139 | | { |
1140 | | chunkblock = ExternalChunkGetBlock(chunk); |
1141 | | chunksize = block->endptr - (char *) MemoryChunkGetPointer(chunk); |
1142 | | has_external_chunk = true; |
1143 | | } |
1144 | | else |
1145 | | { |
1146 | | chunkblock = MemoryChunkGetBlock(chunk); |
1147 | | chunksize = MemoryChunkGetValue(chunk); |
1148 | | } |
1149 | | |
1150 | | /* move to the next chunk */ |
1151 | | ptr += (chunksize + Generation_CHUNKHDRSZ); |
1152 | | |
1153 | | nchunks += 1; |
1154 | | |
1155 | | /* chunks have both block and context pointers, so check both */ |
1156 | | if (chunkblock != block) |
1157 | | elog(WARNING, "problem in Generation %s: bogus block link in block %p, chunk %p", |
1158 | | name, block, chunk); |
1159 | | |
1160 | | |
1161 | | /* is chunk allocated? */ |
1162 | | if (chunk->requested_size != InvalidAllocSize) |
1163 | | { |
1164 | | /* now make sure the chunk size is correct */ |
1165 | | if (chunksize < chunk->requested_size || |
1166 | | chunksize != MAXALIGN(chunksize)) |
1167 | | elog(WARNING, "problem in Generation %s: bogus chunk size in block %p, chunk %p", |
1168 | | name, block, chunk); |
1169 | | |
1170 | | /* check sentinel */ |
1171 | | Assert(chunk->requested_size < chunksize); |
1172 | | if (!sentinel_ok(chunk, Generation_CHUNKHDRSZ + chunk->requested_size)) |
1173 | | elog(WARNING, "problem in Generation %s: detected write past chunk end in block %p, chunk %p", |
1174 | | name, block, chunk); |
1175 | | } |
1176 | | else |
1177 | | nfree += 1; |
1178 | | |
1179 | | /* if chunk is allocated, disallow access to the chunk header */ |
1180 | | if (chunk->requested_size != InvalidAllocSize) |
1181 | | VALGRIND_MAKE_MEM_NOACCESS(chunk, Generation_CHUNKHDRSZ); |
1182 | | } |
1183 | | |
1184 | | /* |
1185 | | * Make sure we got the expected number of allocated and free chunks |
1186 | | * (as tracked in the block header). |
1187 | | */ |
1188 | | if (nchunks != block->nchunks) |
1189 | | elog(WARNING, "problem in Generation %s: number of allocated chunks %d in block %p does not match header %d", |
1190 | | name, nchunks, block, block->nchunks); |
1191 | | |
1192 | | if (nfree != block->nfree) |
1193 | | elog(WARNING, "problem in Generation %s: number of free chunks %d in block %p does not match header %d", |
1194 | | name, nfree, block, block->nfree); |
1195 | | |
1196 | | if (has_external_chunk && nchunks > 1) |
1197 | | elog(WARNING, "problem in Generation %s: external chunk on non-dedicated block %p", |
1198 | | name, block); |
1199 | | |
1200 | | } |
1201 | | |
1202 | | Assert(total_allocated == context->mem_allocated); |
1203 | | } |
1204 | | |
1205 | | #endif /* MEMORY_CONTEXT_CHECKING */ |