/src/postgres/src/backend/utils/mmgr/bump.c
Line | Count | Source |
1 | | /*------------------------------------------------------------------------- |
2 | | * |
3 | | * bump.c |
4 | | * Bump allocator definitions. |
5 | | * |
6 | | * Bump is a MemoryContext implementation designed for memory usages which |
7 | | * require allocating a large number of chunks, none of which ever need to be |
8 | | * pfree'd or realloc'd. Chunks allocated by this context have no chunk header |
9 | | * and operations which ordinarily require looking at the chunk header cannot |
10 | | * be performed. For example, pfree, realloc, GetMemoryChunkSpace and |
11 | | * GetMemoryChunkContext are not possible with bump allocated chunks. The |
12 | | * only way to release memory allocated by this context type is to reset or |
13 | | * delete the context. |
14 | | * |
15 | | * Portions Copyright (c) 2024-2025, PostgreSQL Global Development Group |
16 | | * |
17 | | * IDENTIFICATION |
18 | | * src/backend/utils/mmgr/bump.c |
19 | | * |
20 | | * |
21 | | * Bump is best suited to cases which require a large number of short-lived |
22 | | * chunks where performance matters. Because bump allocated chunks don't |
23 | | * have a chunk header, more chunks can fit on each block. This means we |
24 | | * can do more with less memory and fewer cache lines. The reason it's best |
25 | | * suited for short-lived usages of memory is that ideally, pointers to bump |
26 | | * allocated chunks won't be visible to a large amount of code. The more |
27 | | * code that operates on memory allocated by this allocator, the more chances |
28 | | * that some code will try to perform a pfree or one of the other operations |
29 | | * which are made impossible due to the lack of chunk header. In order to |
30 | | * detect accidental usage of the various disallowed operations, we do add a |
31 | | * MemoryChunk chunk header in MEMORY_CONTEXT_CHECKING builds and have the |
32 | | * various disallowed functions raise an ERROR. |
33 | | * |
34 | | * Allocations are MAXALIGNed. |
35 | | * |
36 | | *------------------------------------------------------------------------- |
37 | | */ |
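A minimal usage sketch of the API described above, illustrative only and not part of bump.c; the context name and the ALLOCSET_DEFAULT_SIZES parameters are just one plausible choice for the create call.

    /* illustrative usage sketch; names here are examples, not bump.c code */
    MemoryContext bumpcxt;
    MemoryContext oldcxt;
    char       *chunk;

    /* create a bump context as a child of the current context */
    bumpcxt = BumpContextCreate(CurrentMemoryContext, "bump example",
                                ALLOCSET_DEFAULT_SIZES);

    /* allocate from it; these chunks carry no header in non-assert builds */
    oldcxt = MemoryContextSwitchTo(bumpcxt);
    chunk = palloc(64);
    /* ... use 'chunk'; pfree(chunk) and repalloc(chunk, n) are not allowed ... */
    MemoryContextSwitchTo(oldcxt);

    /* the only ways to release the memory: reset or delete the whole context */
    MemoryContextReset(bumpcxt);
    MemoryContextDelete(bumpcxt);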
38 | | |
39 | | #include "postgres.h" |
40 | | |
41 | | #include "lib/ilist.h" |
42 | | #include "port/pg_bitutils.h" |
43 | | #include "utils/memdebug.h" |
44 | | #include "utils/memutils.h" |
45 | | #include "utils/memutils_memorychunk.h" |
46 | | #include "utils/memutils_internal.h" |
47 | | |
48 | 0 | #define Bump_BLOCKHDRSZ MAXALIGN(sizeof(BumpBlock)) |
49 | | #define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(BumpContext)) + \ |
50 | | Bump_BLOCKHDRSZ) |
51 | | |
52 | | /* No chunk header unless built with MEMORY_CONTEXT_CHECKING */ |
53 | | #ifdef MEMORY_CONTEXT_CHECKING |
54 | | #define Bump_CHUNKHDRSZ sizeof(MemoryChunk) |
55 | | #else |
56 | 0 | #define Bump_CHUNKHDRSZ 0 |
57 | | #endif |
58 | | |
59 | 0 | #define Bump_CHUNK_FRACTION 8 |
60 | | |
61 | | /* The keeper block is allocated in the same allocation as the set */ |
62 | 0 | #define KeeperBlock(set) ((BumpBlock *) ((char *) (set) + \ |
63 | 0 | MAXALIGN(sizeof(BumpContext)))) |
64 | 0 | #define IsKeeperBlock(set, blk) (KeeperBlock(set) == (blk)) |
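A sketch of the initial-allocation layout these macros assume, illustrative only; exact header sizes depend on MAXALIGN and on whether MEMORY_CONTEXT_CHECKING is enabled.

    /*
     * set (the pointer returned by malloc() in BumpContextCreate)
     * |
     * v
     * +-------------+--------------------------+---------------------------+
     * | BumpContext | keeper BumpBlock header  | chunk space (bump area)   |
     * +-------------+--------------------------+---------------------------+
     *               ^
     *               KeeperBlock(set) == (char *) set + MAXALIGN(sizeof(BumpContext))
     */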
65 | | |
66 | | typedef struct BumpBlock BumpBlock; /* forward reference */ |
67 | | |
68 | | typedef struct BumpContext |
69 | | { |
70 | | MemoryContextData header; /* Standard memory-context fields */ |
71 | | |
72 | | /* Bump context parameters */ |
73 | | uint32 initBlockSize; /* initial block size */ |
74 | | uint32 maxBlockSize; /* maximum block size */ |
75 | | uint32 nextBlockSize; /* next block size to allocate */ |
76 | | uint32 allocChunkLimit; /* effective chunk size limit */ |
77 | | |
78 | | dlist_head blocks; /* list of blocks with the block currently |
79 | | * being filled at the head */ |
80 | | } BumpContext; |
81 | | |
82 | | /* |
83 | | * BumpBlock |
84 | | * BumpBlock is the unit of memory that is obtained by bump.c from |
85 | | * malloc(). It contains zero or more allocations, which are the |
86 | | * units requested by palloc(). |
87 | | */ |
88 | | struct BumpBlock |
89 | | { |
90 | | dlist_node node; /* doubly-linked list of blocks */ |
91 | | #ifdef MEMORY_CONTEXT_CHECKING |
92 | | BumpContext *context; /* pointer back to the owning context */ |
93 | | #endif |
94 | | char *freeptr; /* start of free space in this block */ |
95 | | char *endptr; /* end of space in this block */ |
96 | | }; |
97 | | |
98 | | /* |
99 | | * BumpIsValid |
100 | | * True iff set is a valid bump context. |
101 | | */ |
102 | | #define BumpIsValid(set) \ |
103 | | ((set) && IsA(set, BumpContext)) |
104 | | |
105 | | /* |
106 | | * We always store external chunks on a dedicated block. This makes fetching |
107 | | * the block from an external chunk easy since it's always the first and only |
108 | | * chunk on the block. |
109 | | */ |
110 | | #define ExternalChunkGetBlock(chunk) \ |
111 | | (BumpBlock *) ((char *) chunk - Bump_BLOCKHDRSZ) |
112 | | |
113 | | /* Inlined helper functions */ |
114 | | static inline void BumpBlockInit(BumpContext *context, BumpBlock *block, |
115 | | Size blksize); |
116 | | static inline bool BumpBlockIsEmpty(BumpBlock *block); |
117 | | static inline void BumpBlockMarkEmpty(BumpBlock *block); |
118 | | static inline Size BumpBlockFreeBytes(BumpBlock *block); |
119 | | static inline void BumpBlockFree(BumpContext *set, BumpBlock *block); |
120 | | |
121 | | |
122 | | /* |
123 | | * BumpContextCreate |
124 | | * Create a new Bump context. |
125 | | * |
126 | | * parent: parent context, or NULL if top-level context |
127 | | * name: name of context (must be statically allocated) |
128 | | * minContextSize: minimum context size |
129 | | * initBlockSize: initial allocation block size |
130 | | * maxBlockSize: maximum allocation block size |
131 | | */ |
132 | | MemoryContext |
133 | | BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize, |
134 | | Size initBlockSize, Size maxBlockSize) |
135 | 0 | { |
136 | 0 | Size firstBlockSize; |
137 | 0 | Size allocSize; |
138 | 0 | BumpContext *set; |
139 | 0 | BumpBlock *block; |
140 | | |
141 | | /* ensure MemoryChunk's size is properly maxaligned */ |
142 | 0 | StaticAssertDecl(Bump_CHUNKHDRSZ == MAXALIGN(Bump_CHUNKHDRSZ), |
143 | 0 | "sizeof(MemoryChunk) is not maxaligned"); |
144 | | |
145 | | /* |
146 | | * First, validate allocation parameters. Asserts seem sufficient because |
147 | | * nobody varies their parameters at runtime. We somewhat arbitrarily |
148 | | * enforce a minimum 1K block size. We restrict the maximum block size to |
149 | | * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in |
150 | | * regards to addressing the offset between the chunk and the block that |
151 | | * the chunk is stored on. We would be unable to store the offset between |
152 | | * the chunk and block for any chunks that were beyond |
153 | | * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be |
154 | | * larger than this. |
155 | | */ |
156 | 0 | Assert(initBlockSize == MAXALIGN(initBlockSize) && |
157 | 0 | initBlockSize >= 1024); |
158 | 0 | Assert(maxBlockSize == MAXALIGN(maxBlockSize) && |
159 | 0 | maxBlockSize >= initBlockSize && |
160 | 0 | AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */ |
161 | 0 | Assert(minContextSize == 0 || |
162 | 0 | (minContextSize == MAXALIGN(minContextSize) && |
163 | 0 | minContextSize >= 1024 && |
164 | 0 | minContextSize <= maxBlockSize)); |
165 | 0 | Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET); |
166 | | |
167 | | /* Determine size of initial block */ |
168 | 0 | allocSize = MAXALIGN(sizeof(BumpContext)) + Bump_BLOCKHDRSZ + |
169 | 0 | Bump_CHUNKHDRSZ; |
170 | 0 | if (minContextSize != 0) |
171 | 0 | allocSize = Max(allocSize, minContextSize); |
172 | 0 | else |
173 | 0 | allocSize = Max(allocSize, initBlockSize); |
174 | | |
175 | | /* |
176 | | * Allocate the initial block. Unlike other bump.c blocks, it starts with |
177 | | * the context header and its block header follows that. |
178 | | */ |
179 | 0 | set = (BumpContext *) malloc(allocSize); |
180 | 0 | if (set == NULL) |
181 | 0 | { |
182 | 0 | MemoryContextStats(TopMemoryContext); |
183 | 0 | ereport(ERROR, |
184 | 0 | (errcode(ERRCODE_OUT_OF_MEMORY), |
185 | 0 | errmsg("out of memory"), |
186 | 0 | errdetail("Failed while creating memory context \"%s\".", |
187 | 0 | name))); |
188 | 0 | } |
189 | | |
190 | | /* |
191 | | * Avoid writing code that can fail between here and MemoryContextCreate; |
192 | | * we'd leak the header and initial block if we ereport in this stretch. |
193 | | */ |
194 | | |
195 | | /* See comments about Valgrind interactions in aset.c */ |
196 | 0 | VALGRIND_CREATE_MEMPOOL(set, 0, false); |
197 | | /* This vchunk covers the BumpContext and the keeper block header */ |
198 | 0 | VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ); |
199 | |
200 | 0 | dlist_init(&set->blocks); |
201 | | |
202 | | /* Fill in the initial block's block header */ |
203 | 0 | block = KeeperBlock(set); |
204 | | /* determine the block size and initialize it */ |
205 | 0 | firstBlockSize = allocSize - MAXALIGN(sizeof(BumpContext)); |
206 | 0 | BumpBlockInit(set, block, firstBlockSize); |
207 | | |
208 | | /* add it to the doubly-linked list of blocks */ |
209 | 0 | dlist_push_head(&set->blocks, &block->node); |
210 | | |
211 | | /* |
212 | | * Fill in BumpContext-specific header fields. The Asserts above should |
213 | | * ensure that these all fit inside a uint32. |
214 | | */ |
215 | 0 | set->initBlockSize = (uint32) initBlockSize; |
216 | 0 | set->maxBlockSize = (uint32) maxBlockSize; |
217 | 0 | set->nextBlockSize = (uint32) initBlockSize; |
218 | | |
219 | | /* |
220 | | * Compute the allocation chunk size limit for this context. |
221 | | * |
222 | | * Limit the maximum size a non-dedicated chunk can be so that we can fit |
223 | | * at least Bump_CHUNK_FRACTION of chunks this big onto the maximum sized |
224 | | * block. We must further limit this value so that it's no more than |
225 | | * MEMORYCHUNK_MAX_VALUE. We're unable to have non-external chunks larger |
226 | | * than that value as we store the chunk size in the MemoryChunk 'value' |
227 | | * field in the call to MemoryChunkSetHdrMask(). |
228 | | */ |
229 | 0 | set->allocChunkLimit = Min(maxBlockSize, MEMORYCHUNK_MAX_VALUE); |
230 | 0 | while ((Size) (set->allocChunkLimit + Bump_CHUNKHDRSZ) > |
231 | 0 | (Size) ((Size) (maxBlockSize - Bump_BLOCKHDRSZ) / Bump_CHUNK_FRACTION)) |
232 | 0 | set->allocChunkLimit >>= 1; |
233 | | |
234 | | /* Finally, do the type-independent part of context creation */ |
235 | 0 | MemoryContextCreate((MemoryContext) set, T_BumpContext, MCTX_BUMP_ID, |
236 | 0 | parent, name); |
237 | |
238 | 0 | ((MemoryContext) set)->mem_allocated = allocSize; |
239 | |
240 | 0 | return (MemoryContext) set; |
241 | 0 | } |
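To make the allocChunkLimit computation in BumpContextCreate() concrete, here is one worked case; it assumes a 64-bit non-assert build (Bump_BLOCKHDRSZ of 32 bytes, Bump_CHUNKHDRSZ of 0) and the common 8MB maxBlockSize, so the exact numbers are indicative rather than guaranteed.

    /*
     * threshold       = (maxBlockSize - Bump_BLOCKHDRSZ) / Bump_CHUNK_FRACTION
     *                 = (8388608 - 32) / 8 = 1048572
     * allocChunkLimit starts at Min(8388608, MEMORYCHUNK_MAX_VALUE) = 8388608
     * and is halved while it exceeds the threshold:
     *                   8388608 -> 4194304 -> 2097152 -> 1048576 -> 524288
     *
     * So requests whose MAXALIGNed size exceeds 512kB take the
     * BumpAllocLarge() path and get their own dedicated block.
     */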
242 | | |
243 | | /* |
244 | | * BumpReset |
245 | | * Frees all memory which is allocated in the given set. |
246 | | * |
247 | | * The code simply frees all the blocks in the context apart from the keeper |
248 | | * block. |
249 | | */ |
250 | | void |
251 | | BumpReset(MemoryContext context) |
252 | 0 | { |
253 | 0 | BumpContext *set = (BumpContext *) context; |
254 | 0 | dlist_mutable_iter miter; |
255 | |
256 | 0 | Assert(BumpIsValid(set)); |
257 | |
258 | | #ifdef MEMORY_CONTEXT_CHECKING |
259 | | /* Check for corruption and leaks before freeing */ |
260 | | BumpCheck(context); |
261 | | #endif |
262 | |
263 | 0 | dlist_foreach_modify(miter, &set->blocks) |
264 | 0 | { |
265 | 0 | BumpBlock *block = dlist_container(BumpBlock, node, miter.cur); |
266 | |
267 | 0 | if (IsKeeperBlock(set, block)) |
268 | 0 | BumpBlockMarkEmpty(block); |
269 | 0 | else |
270 | 0 | BumpBlockFree(set, block); |
271 | 0 | } |
272 | | |
273 | | /* |
274 | | * Instruct Valgrind to throw away all the vchunks associated with this |
275 | | * context, except for the one covering the BumpContext and keeper-block |
276 | | * header. This gets rid of the vchunks for whatever user data is getting |
277 | | * discarded by the context reset. |
278 | | */ |
279 | 0 | VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ); |
280 | | |
281 | | /* Reset block size allocation sequence, too */ |
282 | 0 | set->nextBlockSize = set->initBlockSize; |
283 | | |
284 | | /* Ensure there is only 1 item in the dlist */ |
285 | 0 | Assert(!dlist_is_empty(&set->blocks)); |
286 | 0 | Assert(!dlist_has_next(&set->blocks, dlist_head_node(&set->blocks))); |
287 | 0 | } |
288 | | |
289 | | /* |
290 | | * BumpDelete |
291 | | * Free all memory which is allocated in the given context. |
292 | | */ |
293 | | void |
294 | | BumpDelete(MemoryContext context) |
295 | 0 | { |
296 | | /* Reset to release all releasable BumpBlocks */ |
297 | 0 | BumpReset(context); |
298 | | |
299 | | /* Destroy the vpool -- see notes in aset.c */ |
300 | 0 | VALGRIND_DESTROY_MEMPOOL(context); |
301 | | |
302 | | /* And free the context header and keeper block */ |
303 | 0 | free(context); |
304 | 0 | } |
305 | | |
306 | | /* |
307 | | * Helper for BumpAlloc() that allocates an entire block for the chunk. |
308 | | * |
309 | | * BumpAlloc()'s comment explains why this is separate. |
310 | | */ |
311 | | pg_noinline |
312 | | static void * |
313 | | BumpAllocLarge(MemoryContext context, Size size, int flags) |
314 | 0 | { |
315 | 0 | BumpContext *set = (BumpContext *) context; |
316 | 0 | BumpBlock *block; |
317 | | #ifdef MEMORY_CONTEXT_CHECKING |
318 | | MemoryChunk *chunk; |
319 | | #endif |
320 | 0 | Size chunk_size; |
321 | 0 | Size required_size; |
322 | 0 | Size blksize; |
323 | | |
324 | | /* validate 'size' is within the limits for the given 'flags' */ |
325 | 0 | MemoryContextCheckSize(context, size, flags); |
326 | |
327 | | #ifdef MEMORY_CONTEXT_CHECKING |
328 | | /* ensure there's always space for the sentinel byte */ |
329 | | chunk_size = MAXALIGN(size + 1); |
330 | | #else |
331 | 0 | chunk_size = MAXALIGN(size); |
332 | 0 | #endif |
333 | |
334 | 0 | required_size = chunk_size + Bump_CHUNKHDRSZ; |
335 | 0 | blksize = required_size + Bump_BLOCKHDRSZ; |
336 | |
337 | 0 | block = (BumpBlock *) malloc(blksize); |
338 | 0 | if (block == NULL) |
339 | 0 | return MemoryContextAllocationFailure(context, size, flags); |
340 | | |
341 | | /* Make a vchunk covering the new block's header */ |
342 | 0 | VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ); |
343 | |
344 | 0 | context->mem_allocated += blksize; |
345 | | |
346 | | /* the block is completely full */ |
347 | 0 | block->freeptr = block->endptr = ((char *) block) + blksize; |
348 | |
349 | | #ifdef MEMORY_CONTEXT_CHECKING |
350 | | /* block with a single (used) chunk */ |
351 | | block->context = set; |
352 | | |
353 | | chunk = (MemoryChunk *) (((char *) block) + Bump_BLOCKHDRSZ); |
354 | | |
355 | | /* mark the MemoryChunk as externally managed */ |
356 | | MemoryChunkSetHdrMaskExternal(chunk, MCTX_BUMP_ID); |
357 | | |
358 | | chunk->requested_size = size; |
359 | | /* set mark to catch clobber of "unused" space */ |
360 | | Assert(size < chunk_size); |
361 | | set_sentinel(MemoryChunkGetPointer(chunk), size); |
362 | | #endif |
363 | | #ifdef RANDOMIZE_ALLOCATED_MEMORY |
364 | | /* fill the allocated space with junk */ |
365 | | randomize_mem((char *) MemoryChunkGetPointer(chunk), size); |
366 | | #endif |
367 | | |
368 | | /* |
369 | | * Add the block to the tail of allocated blocks list. The current block |
370 | | * is left at the head of the list as it may still have space for |
371 | | * non-large allocations. |
372 | | */ |
373 | 0 | dlist_push_tail(&set->blocks, &block->node); |
374 | |
375 | | #ifdef MEMORY_CONTEXT_CHECKING |
376 | | /* Ensure any padding bytes are marked NOACCESS. */ |
377 | | VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size, |
378 | | chunk_size - size); |
379 | | |
380 | | /* Disallow access to the chunk header. */ |
381 | | VALGRIND_MAKE_MEM_NOACCESS(chunk, Bump_CHUNKHDRSZ); |
382 | | |
383 | | return MemoryChunkGetPointer(chunk); |
384 | | #else |
385 | 0 | return (void *) (((char *) block) + Bump_BLOCKHDRSZ); |
386 | 0 | #endif |
387 | 0 | } |
388 | | |
389 | | /* |
390 | | * Small helper for allocating a new chunk from a block, to avoid duplicating |
391 | | * the code between BumpAlloc() and BumpAllocFromNewBlock(). |
392 | | */ |
393 | | static inline void * |
394 | | BumpAllocChunkFromBlock(MemoryContext context, BumpBlock *block, Size size, |
395 | | Size chunk_size) |
396 | 0 | { |
397 | | #ifdef MEMORY_CONTEXT_CHECKING |
398 | | MemoryChunk *chunk; |
399 | | #else |
400 | 0 | void *ptr; |
401 | 0 | #endif |
402 | | |
403 | | /* validate we've been given a block with enough free space */ |
404 | 0 | Assert(block != NULL); |
405 | 0 | Assert((block->endptr - block->freeptr) >= Bump_CHUNKHDRSZ + chunk_size); |
406 | |
407 | | #ifdef MEMORY_CONTEXT_CHECKING |
408 | | chunk = (MemoryChunk *) block->freeptr; |
409 | | #else |
410 | 0 | ptr = (void *) block->freeptr; |
411 | 0 | #endif |
412 | | |
413 | | /* point the freeptr beyond this chunk */ |
414 | 0 | block->freeptr += (Bump_CHUNKHDRSZ + chunk_size); |
415 | 0 | Assert(block->freeptr <= block->endptr); |
416 | |
417 | | #ifdef MEMORY_CONTEXT_CHECKING |
418 | | /* Prepare to initialize the chunk header. */ |
419 | | VALGRIND_MAKE_MEM_UNDEFINED(chunk, Bump_CHUNKHDRSZ); |
420 | | |
421 | | MemoryChunkSetHdrMask(chunk, block, chunk_size, MCTX_BUMP_ID); |
422 | | chunk->requested_size = size; |
423 | | /* set mark to catch clobber of "unused" space */ |
424 | | Assert(size < chunk_size); |
425 | | set_sentinel(MemoryChunkGetPointer(chunk), size); |
426 | | |
427 | | #ifdef RANDOMIZE_ALLOCATED_MEMORY |
428 | | /* fill the allocated space with junk */ |
429 | | randomize_mem((char *) MemoryChunkGetPointer(chunk), size); |
430 | | #endif |
431 | | |
432 | | /* Ensure any padding bytes are marked NOACCESS. */ |
433 | | VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size, |
434 | | chunk_size - size); |
435 | | |
436 | | /* Disallow access to the chunk header. */ |
437 | | VALGRIND_MAKE_MEM_NOACCESS(chunk, Bump_CHUNKHDRSZ); |
438 | | |
439 | | return MemoryChunkGetPointer(chunk); |
440 | | #else |
441 | 0 | return ptr; |
442 | 0 | #endif /* MEMORY_CONTEXT_CHECKING */ |
443 | 0 | } |
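Stripped of the MEMORY_CONTEXT_CHECKING and Valgrind scaffolding, the helper above reduces to a plain pointer bump. A sketch of that non-assert fast path (the function name is made up for illustration):

    static inline void *
    bump_alloc_sketch(BumpBlock *block, Size chunk_size)
    {
        void       *ptr = (void *) block->freeptr;

        /* Bump_CHUNKHDRSZ is 0 in non-assert builds */
        block->freeptr += chunk_size;
        Assert(block->freeptr <= block->endptr);
        return ptr;
    }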
444 | | |
445 | | /* |
446 | | * Helper for BumpAlloc() that allocates a new block and returns a chunk |
447 | | * allocated from it. |
448 | | * |
449 | | * BumpAlloc()'s comment explains why this is separate. |
450 | | */ |
451 | | pg_noinline |
452 | | static void * |
453 | | BumpAllocFromNewBlock(MemoryContext context, Size size, int flags, |
454 | | Size chunk_size) |
455 | 0 | { |
456 | 0 | BumpContext *set = (BumpContext *) context; |
457 | 0 | BumpBlock *block; |
458 | 0 | Size blksize; |
459 | 0 | Size required_size; |
460 | | |
461 | | /* |
462 | | * The first such block has size initBlockSize, and we double the space in |
463 | | * each succeeding block, but not more than maxBlockSize. |
464 | | */ |
465 | 0 | blksize = set->nextBlockSize; |
466 | 0 | set->nextBlockSize <<= 1; |
467 | 0 | if (set->nextBlockSize > set->maxBlockSize) |
468 | 0 | set->nextBlockSize = set->maxBlockSize; |
469 | | |
470 | | /* we'll need space for the chunk, chunk hdr and block hdr */ |
471 | 0 | required_size = chunk_size + Bump_CHUNKHDRSZ + Bump_BLOCKHDRSZ; |
472 | | /* round the size up to the next power of 2 */ |
473 | 0 | if (blksize < required_size) |
474 | 0 | blksize = pg_nextpower2_size_t(required_size); |
475 | |
476 | 0 | block = (BumpBlock *) malloc(blksize); |
477 | |
478 | 0 | if (block == NULL) |
479 | 0 | return MemoryContextAllocationFailure(context, size, flags); |
480 | | |
481 | | /* Make a vchunk covering the new block's header */ |
482 | 0 | VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ); |
483 | |
484 | 0 | context->mem_allocated += blksize; |
485 | | |
486 | | /* initialize the new block */ |
487 | 0 | BumpBlockInit(set, block, blksize); |
488 | | |
489 | | /* add it to the doubly-linked list of blocks */ |
490 | 0 | dlist_push_head(&set->blocks, &block->node); |
491 | |
492 | 0 | return BumpAllocChunkFromBlock(context, block, size, chunk_size); |
493 | 0 | } |
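As a concrete illustration of the growth policy in this function (assuming the common 8kB initBlockSize and 8MB maxBlockSize):

    /*
     * Block sizes malloc'd by successive calls to this function:
     *     8kB -> 16kB -> 32kB -> ... -> 4MB -> 8MB -> 8MB -> ...
     *
     * If one chunk plus its headers does not fit in the planned size, the
     * block is instead sized to pg_nextpower2_size_t(required_size); the
     * doubling sequence for later blocks is unaffected.
     */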
494 | | |
495 | | /* |
496 | | * BumpAlloc |
497 | | * Returns a pointer to allocated memory of given size or raises an ERROR |
498 | | * on allocation failure, or returns NULL when flags contains |
499 | | * MCXT_ALLOC_NO_OOM. |
500 | | * |
501 | | * No request may exceed: |
502 | | * MAXALIGN_DOWN(SIZE_MAX) - Bump_BLOCKHDRSZ - Bump_CHUNKHDRSZ |
503 | | * All callers use a much-lower limit. |
504 | | * |
505 | | * |
506 | | * Note: when using valgrind, it doesn't matter how the returned allocation |
507 | | * is marked, as mcxt.c will set it to UNDEFINED. |
508 | | * This function should only contain the most common code paths. Everything |
509 | | * else should be in pg_noinline helper functions, thus avoiding the overhead |
510 | | * of creating a stack frame for the common cases. Allocating memory is often |
511 | | * a bottleneck in many workloads, so avoiding stack frame setup is |
512 | | * worthwhile. Helper functions should always directly return the newly |
513 | | * allocated memory so that we can just return that address directly as a tail |
514 | | * call. |
515 | | */ |
516 | | void * |
517 | | BumpAlloc(MemoryContext context, Size size, int flags) |
518 | 0 | { |
519 | 0 | BumpContext *set = (BumpContext *) context; |
520 | 0 | BumpBlock *block; |
521 | 0 | Size chunk_size; |
522 | 0 | Size required_size; |
523 | |
524 | 0 | Assert(BumpIsValid(set)); |
525 | |
526 | | #ifdef MEMORY_CONTEXT_CHECKING |
527 | | /* ensure there's always space for the sentinel byte */ |
528 | | chunk_size = MAXALIGN(size + 1); |
529 | | #else |
530 | 0 | chunk_size = MAXALIGN(size); |
531 | 0 | #endif |
532 | | |
533 | | /* |
534 | | * If requested size exceeds maximum for chunks we hand the request off to |
535 | | * BumpAllocLarge(). |
536 | | */ |
537 | 0 | if (chunk_size > set->allocChunkLimit) |
538 | 0 | return BumpAllocLarge(context, size, flags); |
539 | | |
540 | 0 | required_size = chunk_size + Bump_CHUNKHDRSZ; |
541 | | |
542 | | /* |
543 | | * Not an oversized chunk. We try to first make use of the latest block, |
544 | | * but if there's not enough space in it we must allocate a new block. |
545 | | */ |
546 | 0 | block = dlist_container(BumpBlock, node, dlist_head_node(&set->blocks)); |
547 | |
548 | 0 | if (BumpBlockFreeBytes(block) < required_size) |
549 | 0 | return BumpAllocFromNewBlock(context, size, flags, chunk_size); |
550 | | |
551 | | /* The current block has space, so just allocate chunk there. */ |
552 | 0 | return BumpAllocChunkFromBlock(context, block, size, chunk_size); |
553 | 0 | } |
554 | | |
555 | | /* |
556 | | * BumpBlockInit |
557 | | * Initializes 'block' assuming 'blksize'. Does not update the context's |
558 | | * mem_allocated field. |
559 | | */ |
560 | | static inline void |
561 | | BumpBlockInit(BumpContext *context, BumpBlock *block, Size blksize) |
562 | 0 | { |
563 | | #ifdef MEMORY_CONTEXT_CHECKING |
564 | | block->context = context; |
565 | | #endif |
566 | 0 | block->freeptr = ((char *) block) + Bump_BLOCKHDRSZ; |
567 | 0 | block->endptr = ((char *) block) + blksize; |
568 | | |
569 | | /* Mark unallocated space NOACCESS. */ |
570 | 0 | VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, blksize - Bump_BLOCKHDRSZ); |
571 | 0 | } |
572 | | |
573 | | /* |
574 | | * BumpBlockIsEmpty |
575 | | * Returns true iff 'block' contains no chunks |
576 | | */ |
577 | | static inline bool |
578 | | BumpBlockIsEmpty(BumpBlock *block) |
579 | 0 | { |
580 | | /* it's empty if the freeptr has not moved */ |
581 | 0 | return (block->freeptr == ((char *) block + Bump_BLOCKHDRSZ)); |
582 | 0 | } |
583 | | |
584 | | /* |
585 | | * BumpBlockMarkEmpty |
586 | | * Set a block as empty. Does not free the block. |
587 | | */ |
588 | | static inline void |
589 | | BumpBlockMarkEmpty(BumpBlock *block) |
590 | 0 | { |
591 | | #if defined(USE_VALGRIND) || defined(CLOBBER_FREED_MEMORY) |
592 | | char *datastart = ((char *) block) + Bump_BLOCKHDRSZ; |
593 | | #endif |
594 | |
595 | | #ifdef CLOBBER_FREED_MEMORY |
596 | | wipe_mem(datastart, block->freeptr - datastart); |
597 | | #else |
598 | | /* wipe_mem() would have done this */ |
599 | 0 | VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart); |
600 | 0 | #endif |
601 | | |
602 | | /* Reset the block, but don't return it to malloc */ |
603 | 0 | block->freeptr = ((char *) block) + Bump_BLOCKHDRSZ; |
604 | 0 | } |
605 | | |
606 | | /* |
607 | | * BumpBlockFreeBytes |
608 | | * Returns the number of bytes free in 'block' |
609 | | */ |
610 | | static inline Size |
611 | | BumpBlockFreeBytes(BumpBlock *block) |
612 | 0 | { |
613 | 0 | return (block->endptr - block->freeptr); |
614 | 0 | } |
615 | | |
616 | | /* |
617 | | * BumpBlockFree |
618 | | * Remove 'block' from 'set' and release the memory consumed by it. |
619 | | */ |
620 | | static inline void |
621 | | BumpBlockFree(BumpContext *set, BumpBlock *block) |
622 | 0 | { |
623 | | /* Make sure nobody tries to free the keeper block */ |
624 | 0 | Assert(!IsKeeperBlock(set, block)); |
625 | | |
626 | | /* release the block from the list of blocks */ |
627 | 0 | dlist_delete(&block->node); |
628 | |
629 | 0 | ((MemoryContext) set)->mem_allocated -= ((char *) block->endptr - (char *) block); |
630 | |
631 | | #ifdef CLOBBER_FREED_MEMORY |
632 | | wipe_mem(block, ((char *) block->endptr - (char *) block)); |
633 | | #endif |
634 | | |
635 | | /* As in aset.c, free block-header vchunks explicitly */ |
636 | 0 | VALGRIND_MEMPOOL_FREE(set, block); |
637 | |
638 | 0 | free(block); |
639 | 0 | } |
640 | | |
641 | | /* |
642 | | * BumpFree |
643 | | * Unsupported. |
644 | | */ |
645 | | void |
646 | | BumpFree(void *pointer) |
647 | 0 | { |
648 | 0 | elog(ERROR, "%s is not supported by the bump memory allocator", "pfree"); |
649 | 0 | } |
650 | | |
651 | | /* |
652 | | * BumpRealloc |
653 | | * Unsupported. |
654 | | */ |
655 | | void * |
656 | | BumpRealloc(void *pointer, Size size, int flags) |
657 | 0 | { |
658 | 0 | elog(ERROR, "%s is not supported by the bump memory allocator", "realloc"); |
659 | 0 | return NULL; /* keep compiler quiet */ |
660 | 0 | } |
661 | | |
662 | | /* |
663 | | * BumpGetChunkContext |
664 | | * Unsupported. |
665 | | */ |
666 | | MemoryContext |
667 | | BumpGetChunkContext(void *pointer) |
668 | 0 | { |
669 | 0 | elog(ERROR, "%s is not supported by the bump memory allocator", "GetMemoryChunkContext"); |
670 | 0 | return NULL; /* keep compiler quiet */ |
671 | 0 | } |
672 | | |
673 | | /* |
674 | | * BumpGetChunkSpace |
675 | | * Unsupported. |
676 | | */ |
677 | | Size |
678 | | BumpGetChunkSpace(void *pointer) |
679 | 0 | { |
680 | 0 | elog(ERROR, "%s is not supported by the bump memory allocator", "GetMemoryChunkSpace"); |
681 | 0 | return 0; /* keep compiler quiet */ |
682 | 0 | } |
683 | | |
684 | | /* |
685 | | * BumpIsEmpty |
686 | | * Is a BumpContext empty of any allocated space? |
687 | | */ |
688 | | bool |
689 | | BumpIsEmpty(MemoryContext context) |
690 | 0 | { |
691 | 0 | BumpContext *set = (BumpContext *) context; |
692 | 0 | dlist_iter iter; |
693 | |
694 | 0 | Assert(BumpIsValid(set)); |
695 | |
696 | 0 | dlist_foreach(iter, &set->blocks) |
697 | 0 | { |
698 | 0 | BumpBlock *block = dlist_container(BumpBlock, node, iter.cur); |
699 | |
700 | 0 | if (!BumpBlockIsEmpty(block)) |
701 | 0 | return false; |
702 | 0 | } |
703 | | |
704 | 0 | return true; |
705 | 0 | } |
706 | | |
707 | | /* |
708 | | * BumpStats |
709 | | * Compute stats about memory consumption of a Bump context. |
710 | | * |
711 | | * printfunc: if not NULL, pass a human-readable stats string to this. |
712 | | * passthru: pass this pointer through to printfunc. |
713 | | * totals: if not NULL, add stats about this context into *totals. |
714 | | * print_to_stderr: print stats to stderr if true, elog otherwise. |
715 | | */ |
716 | | void |
717 | | BumpStats(MemoryContext context, MemoryStatsPrintFunc printfunc, |
718 | | void *passthru, MemoryContextCounters *totals, bool print_to_stderr) |
719 | 0 | { |
720 | 0 | BumpContext *set = (BumpContext *) context; |
721 | 0 | Size nblocks = 0; |
722 | 0 | Size totalspace = 0; |
723 | 0 | Size freespace = 0; |
724 | 0 | dlist_iter iter; |
725 | |
726 | 0 | Assert(BumpIsValid(set)); |
727 | |
728 | 0 | dlist_foreach(iter, &set->blocks) |
729 | 0 | { |
730 | 0 | BumpBlock *block = dlist_container(BumpBlock, node, iter.cur); |
731 | |
732 | 0 | nblocks++; |
733 | 0 | totalspace += (block->endptr - (char *) block); |
734 | 0 | freespace += (block->endptr - block->freeptr); |
735 | 0 | } |
736 | |
737 | 0 | if (printfunc) |
738 | 0 | { |
739 | 0 | char stats_string[200]; |
740 | |
741 | 0 | snprintf(stats_string, sizeof(stats_string), |
742 | 0 | "%zu total in %zu blocks; %zu free; %zu used", |
743 | 0 | totalspace, nblocks, freespace, totalspace - freespace); |
744 | 0 | printfunc(context, passthru, stats_string, print_to_stderr); |
745 | 0 | } |
746 | |
747 | 0 | if (totals) |
748 | 0 | { |
749 | 0 | totals->nblocks += nblocks; |
750 | 0 | totals->totalspace += totalspace; |
751 | 0 | totals->freespace += freespace; |
752 | 0 | } |
753 | 0 | } |
754 | | |
755 | | |
756 | | #ifdef MEMORY_CONTEXT_CHECKING |
757 | | |
758 | | /* |
759 | | * BumpCheck |
760 | | * Walk through chunks and check consistency of memory. |
761 | | * |
762 | | * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll |
763 | | * find yourself in an infinite loop when trouble occurs, because this |
764 | | * routine will be entered again when elog cleanup tries to release memory! |
765 | | */ |
766 | | void |
767 | | BumpCheck(MemoryContext context) |
768 | | { |
769 | | BumpContext *bump = (BumpContext *) context; |
770 | | const char *name = context->name; |
771 | | dlist_iter iter; |
772 | | Size total_allocated = 0; |
773 | | |
774 | | /* walk all blocks in this context */ |
775 | | dlist_foreach(iter, &bump->blocks) |
776 | | { |
777 | | BumpBlock *block = dlist_container(BumpBlock, node, iter.cur); |
778 | | int nchunks; |
779 | | char *ptr; |
780 | | bool has_external_chunk = false; |
781 | | |
782 | | if (IsKeeperBlock(bump, block)) |
783 | | total_allocated += block->endptr - (char *) bump; |
784 | | else |
785 | | total_allocated += block->endptr - (char *) block; |
786 | | |
787 | | /* check block belongs to the correct context */ |
788 | | if (block->context != bump) |
789 | | elog(WARNING, "problem in Bump %s: bogus context link in block %p", |
790 | | name, block); |
791 | | |
792 | | /* now walk through the chunks and count them */ |
793 | | nchunks = 0; |
794 | | ptr = ((char *) block) + Bump_BLOCKHDRSZ; |
795 | | |
796 | | while (ptr < block->freeptr) |
797 | | { |
798 | | MemoryChunk *chunk = (MemoryChunk *) ptr; |
799 | | BumpBlock *chunkblock; |
800 | | Size chunksize; |
801 | | |
802 | | /* allow access to the chunk header */ |
803 | | VALGRIND_MAKE_MEM_DEFINED(chunk, Bump_CHUNKHDRSZ); |
804 | | |
805 | | if (MemoryChunkIsExternal(chunk)) |
806 | | { |
807 | | chunkblock = ExternalChunkGetBlock(chunk); |
808 | | chunksize = block->endptr - (char *) MemoryChunkGetPointer(chunk); |
809 | | has_external_chunk = true; |
810 | | } |
811 | | else |
812 | | { |
813 | | chunkblock = MemoryChunkGetBlock(chunk); |
814 | | chunksize = MemoryChunkGetValue(chunk); |
815 | | } |
816 | | |
817 | | /* move to the next chunk */ |
818 | | ptr += (chunksize + Bump_CHUNKHDRSZ); |
819 | | |
820 | | nchunks += 1; |
821 | | |
822 | | /* chunks have both block and context pointers, so check both */ |
823 | | if (chunkblock != block) |
824 | | elog(WARNING, "problem in Bump %s: bogus block link in block %p, chunk %p", |
825 | | name, block, chunk); |
826 | | } |
827 | | |
828 | | if (has_external_chunk && nchunks > 1) |
829 | | elog(WARNING, "problem in Bump %s: external chunk on non-dedicated block %p", |
830 | | name, block); |
831 | | |
832 | | } |
833 | | |
834 | | Assert(total_allocated == context->mem_allocated); |
835 | | } |
836 | | |
837 | | #endif /* MEMORY_CONTEXT_CHECKING */ |