Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * aset.c
4 : : * Allocation set definitions.
5 : : *
6 : : * AllocSet is our standard implementation of the abstract MemoryContext
7 : : * type.
8 : : *
9 : : *
10 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
11 : : * Portions Copyright (c) 1994, Regents of the University of California
12 : : *
13 : : * IDENTIFICATION
14 : : * src/backend/utils/mmgr/aset.c
15 : : *
16 : : * NOTE:
17 : : * This is a new (Feb. 05, 1999) implementation of the allocation set
18 : : * routines. AllocSet...() does not use OrderedSet...() any more.
19 : : * Instead it manages allocations in a block pool by itself, combining
20 : : * many small allocations in a few bigger blocks. AllocSetFree() normally
21 : : * doesn't really free() memory. It just adds the freed area to a
22 : : * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 : : * at once on AllocSetReset(), which happens when the memory context gets
24 : : * destroyed.
25 : : * Jan Wieck
26 : : *
27 : : * Performance improvement from Tom Lane, 8/99: for extremely large request
28 : : * sizes, we do want to be able to give the memory back to free() as soon
29 : : * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 : : * freelist entries that might never be usable. This is especially needed
31 : : * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 : : * the previous instances of the block were guaranteed to be wasted until
33 : : * AllocSetReset() under the old way.
34 : : *
35 : : * Further improvement 12/00: as the code stood, request sizes in the
36 : : * midrange between "small" and "large" were handled very inefficiently,
37 : : * because any sufficiently large free chunk would be used to satisfy a
38 : : * request, even if it was much larger than necessary. This led to more
39 : : * and more wasted space in allocated chunks over time. To fix, get rid
40 : : * of the midrange behavior: we now handle only "small" power-of-2-size
41 : : * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 : : * the number of freelists to change the small/large boundary.
43 : : *
44 : : *-------------------------------------------------------------------------
45 : : */
46 : :
47 : : #include "postgres.h"
48 : :
49 : : #include "port/pg_bitutils.h"
50 : : #include "utils/memdebug.h"
51 : : #include "utils/memutils.h"
52 : : #include "utils/memutils_internal.h"
53 : : #include "utils/memutils_memorychunk.h"
54 : :
55 : : /*--------------------
56 : : * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 : : * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58 : : *
59 : : * Note that all chunks in the freelists have power-of-2 sizes. This
60 : : * improves recyclability: we may waste some space, but the wasted space
61 : : * should stay pretty constant as requests are made and released.
62 : : *
63 : : * A request too large for the last freelist is handled by allocating a
64 : : * dedicated block from malloc(). The block still has a block header and
65 : : * chunk header, but when the chunk is freed we'll return the whole block
66 : : * to malloc(), not put it on our freelists.
67 : : *
68 : : * CAUTION: ALLOC_MINBITS must be large enough so that
69 : : * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 : : * or we may fail to align the smallest chunks adequately.
71 : : * 8-byte alignment is enough on all currently known machines. This 8-byte
72 : : * minimum also allows us to store a pointer to the next freelist item within
73 : : * the chunk of memory itself.
74 : : *
75 : : * With the current parameters, request sizes up to 8K are treated as chunks,
76 : : * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77 : : * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78 : : * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79 : : * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80 : : *--------------------
81 : : */
82 : :
83 : : #define ALLOC_MINBITS 3 /* smallest chunk size is 8 bytes */
84 : : #define ALLOCSET_NUM_FREELISTS 11
85 : : #define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
86 : : /* Size of largest chunk that we use a fixed size for */
87 : : #define ALLOC_CHUNK_FRACTION 4
88 : : /* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
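/*
 * Worked example (a sketch derived from the constants above): with
 * ALLOC_MINBITS = 3 and ALLOCSET_NUM_FREELISTS = 11, freelist 0 holds
 * 8-byte chunks, freelist 10 holds 8192-byte chunks, and ALLOC_CHUNK_LIMIT
 * therefore comes out to 8K; any larger request is handed to malloc() as a
 * dedicated block.
 */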
89 : :
90 : : /*--------------------
91 : : * The first block allocated for an allocset has size initBlockSize.
92 : : * Each time we have to allocate another block, we double the block size
93 : : * (if possible, and without exceeding maxBlockSize), so as to reduce
94 : : * the bookkeeping load on malloc().
95 : : *
96 : : * Blocks allocated to hold oversize chunks do not follow this rule, however;
97 : : * they are just however big they need to be to hold that single chunk.
98 : : *
99 : : * Also, if a minContextSize is specified, the first block has that size,
100 : : * and then initBlockSize is used for the next one.
101 : : *--------------------
102 : : */
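/*
 * For example (a sketch assuming the ALLOCSET_DEFAULT_SIZES parameters in
 * memutils.h, i.e. an 8kB initBlockSize and an 8MB maxBlockSize): successive
 * blocks are malloc'd at 8kB, 16kB, 32kB, ... doubling each time, until the
 * 8MB ceiling is reached, after which every further block is 8MB.
 */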
103 : :
104 : : #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
105 : : #define ALLOC_CHUNKHDRSZ sizeof(MemoryChunk)
106 : : #define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(AllocSetContext)) + \
107 : : ALLOC_BLOCKHDRSZ)
108 : :
109 : : typedef struct AllocBlockData *AllocBlock; /* forward reference */
110 : :
111 : : /*
112 : : * AllocPointer
113 : : * Aligned pointer which may be a member of an allocation set.
114 : : */
115 : : typedef void *AllocPointer;
116 : :
117 : : /*
118 : : * AllocFreeListLink
119 : : * When pfreeing memory, if we maintain a freelist for the given chunk's
120 : : * size then we use an AllocFreeListLink to point to the current item in
121 : : * the AllocSetContext's freelist and then set the given freelist element
122 : : * to point to the chunk being freed.
123 : : */
124 : : typedef struct AllocFreeListLink
125 : : {
126 : : MemoryChunk *next;
127 : : } AllocFreeListLink;
128 : :
129 : : /*
130 : : * Obtain an AllocFreeListLink for the given chunk. Allocation sizes are
131 : : * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
132 : : * itself to store the freelist link.
133 : : */
134 : : #define GetFreeListLink(chkptr) \
135 : : (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
136 : :
137 : : /* Validate a freelist index retrieved from a chunk header */
138 : : #define FreeListIdxIsValid(fidx) \
139 : : ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
140 : :
141 : : /* Determine the size of the chunk based on the freelist index */
142 : : #define GetChunkSizeFromFreeListIdx(fidx) \
143 : : ((((Size) 1) << ALLOC_MINBITS) << (fidx))
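/* For instance, fidx = 4 yields ((Size) 1 << 3) << 4 = 128 bytes. */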
144 : :
145 : : /*
146 : : * AllocSetContext is our standard implementation of MemoryContext.
147 : : *
148 : : * Note: header.isReset means there is nothing for AllocSetReset to do.
149 : : * This is different from the aset being physically empty (empty blocks list)
150 : : * because we will still have a keeper block. It's also different from the set
151 : : * being logically empty, because we don't attempt to detect pfree'ing the
152 : : * last active chunk.
153 : : */
154 : : typedef struct AllocSetContext
155 : : {
156 : : MemoryContextData header; /* Standard memory-context fields */
157 : : /* Info about storage allocated in this context: */
158 : : AllocBlock blocks; /* head of list of blocks in this set */
159 : : MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
160 : : /* Allocation parameters for this context: */
161 : : uint32 initBlockSize; /* initial block size */
162 : : uint32 maxBlockSize; /* maximum block size */
163 : : uint32 nextBlockSize; /* next block size to allocate */
164 : : uint32 allocChunkLimit; /* effective chunk size limit */
165 : : /* freelist this context could be put in, or -1 if not a candidate: */
166 : : int freeListIndex; /* index in context_freelists[], or -1 */
167 : : } AllocSetContext;
168 : :
169 : : typedef AllocSetContext *AllocSet;
170 : :
171 : : /*
172 : : * AllocBlock
173 : : * An AllocBlock is the unit of memory that is obtained by aset.c
174 : : * from malloc(). It contains one or more MemoryChunks, which are
175 : : * the units requested by palloc() and freed by pfree(). MemoryChunks
176 : : * cannot be returned to malloc() individually; instead they are put
177 : : * on freelists by pfree() and re-used by the next palloc() that has
178 : : * a matching request size.
179 : : *
180 : : * AllocBlockData is the header data for a block --- the usable space
181 : : * within the block begins at the next alignment boundary.
182 : : */
183 : : typedef struct AllocBlockData
184 : : {
185 : : AllocSet aset; /* aset that owns this block */
186 : : AllocBlock prev; /* prev block in aset's blocks list, if any */
187 : : AllocBlock next; /* next block in aset's blocks list, if any */
188 : : char *freeptr; /* start of free space in this block */
189 : : char *endptr; /* end of space in this block */
190 : : } AllocBlockData;
191 : :
192 : : /*
193 : : * AllocPointerIsValid
194 : : * True iff pointer is valid allocation pointer.
195 : : */
196 : : #define AllocPointerIsValid(pointer) PointerIsValid(pointer)
197 : :
198 : : /*
199 : : * AllocSetIsValid
200 : : * True iff set is valid allocation set.
201 : : */
202 : : #define AllocSetIsValid(set) \
203 : : (PointerIsValid(set) && IsA(set, AllocSetContext))
204 : :
205 : : /*
206 : : * AllocBlockIsValid
207 : : * True iff block is valid block of allocation set.
208 : : */
209 : : #define AllocBlockIsValid(block) \
210 : : (PointerIsValid(block) && AllocSetIsValid((block)->aset))
211 : :
212 : : /*
213 : : * We always store external chunks on a dedicated block. This makes fetching
214 : : * the block from an external chunk easy since it's always the first and only
215 : : * chunk on the block.
216 : : */
217 : : #define ExternalChunkGetBlock(chunk) \
218 : : (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
219 : :
220 : : /*
221 : : * Rather than repeatedly creating and deleting memory contexts, we keep some
222 : : * freed contexts in freelists so that we can hand them out again with little
223 : : * work. Before putting a context in a freelist, we reset it so that it has
224 : : * only its initial malloc chunk and no others. To be a candidate for a
225 : : * freelist, a context must have the same minContextSize/initBlockSize as
226 : : * other contexts in the list; but its maxBlockSize is irrelevant since that
227 : : * doesn't affect the size of the initial chunk.
228 : : *
229 : : * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
230 : : * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
231 : : * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
232 : : *
233 : : * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
234 : : * hopes of improving locality of reference. But if there get to be too
235 : : * many contexts in the list, we'd prefer to drop the most-recently-created
236 : : * contexts in hopes of keeping the process memory map compact.
237 : : * We approximate that by simply deleting all existing entries when the list
238 : : * overflows, on the assumption that queries that allocate a lot of contexts
239 : : * will probably free them in more or less reverse order of allocation.
240 : : *
241 : : * Contexts in a freelist are chained via their nextchild pointers.
242 : : */
243 : : #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
244 : :
245 : : /* Obtain the keeper block for an allocation set */
246 : : #define KeeperBlock(set) \
247 : : ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))
248 : :
249 : : /* Check if the block is the keeper block of the given allocation set */
250 : : #define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
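/*
 * Layout of the initial malloc chunk (a sketch): the AllocSetContext header
 * is at the start, KeeperBlock(set) (an AllocBlockData header) begins at the
 * next MAXALIGN boundary, and the keeper block's chunk space runs from there
 * to set + firstBlockSize (its endptr).
 */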
251 : :
252 : : typedef struct AllocSetFreeList
253 : : {
254 : : int num_free; /* current list length */
255 : : AllocSetContext *first_free; /* list header */
256 : : } AllocSetFreeList;
257 : :
258 : : /* context_freelists[0] is for default params, [1] for small params */
259 : : static AllocSetFreeList context_freelists[2] =
260 : : {
261 : : {
262 : : 0, NULL
263 : : },
264 : : {
265 : : 0, NULL
266 : : }
267 : : };
268 : :
269 : :
270 : : /* ----------
271 : : * AllocSetFreeIndex -
272 : : *
273 : : * Depending on the size of an allocation compute which freechunk
274 : : * list of the alloc set it belongs to. Caller must have verified
275 : : * that size <= ALLOC_CHUNK_LIMIT.
276 : : * ----------
277 : : */
278 : : static inline int
9709 JanWieck@Yahoo.com 279 :CBC 520162078 : AllocSetFreeIndex(Size size)
280 : : {
281 : : int idx;
282 : :
5891 tgl@sss.pgh.pa.us 283 [ + + ]: 520162078 : if (size > (1 << ALLOC_MINBITS))
284 : : {
285 : : /*----------
286 : : * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
287 : : * This is the same as
288 : : * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
289 : : * or equivalently
290 : : * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
291 : : *
292 : : * However, for platforms without intrinsic support, we duplicate the
293 : : * logic here, allowing an additional optimization. It's reasonable
294 : : * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
295 : : * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
296 : : * the last two bytes.
297 : : *
298 : : * Yes, this function is enough of a hot-spot to make it worth this
299 : : * much trouble.
300 : : *----------
301 : : */
302 : : #ifdef HAVE_BITSCAN_REVERSE
941 john.naylor@postgres 303 : 452550472 : idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
304 : : #else
305 : : uint32 t,
306 : : tsize;
307 : :
308 : : /* Statically assert that we only have a 16-bit input value. */
309 : : StaticAssertDecl(ALLOC_CHUNK_LIMIT < (1 << 16),
310 : : "ALLOC_CHUNK_LIMIT must be less than 64kB");
311 : :
312 : : tsize = size - 1;
313 : : t = tsize >> 8;
314 : : idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
315 : : idx -= ALLOC_MINBITS - 1;
316 : : #endif
317 : :
9045 tgl@sss.pgh.pa.us 318 [ - + ]: 452550472 : Assert(idx < ALLOCSET_NUM_FREELISTS);
319 : : }
320 : : else
5891 321 : 67611606 : idx = 0;
322 : :
9709 JanWieck@Yahoo.com 323 : 520162078 : return idx;
324 : : }
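/*
 * Worked examples (a sketch): for size = 100, (size - 1) = 99 has its
 * leftmost one bit at position 6, so idx = 6 - ALLOC_MINBITS + 1 = 4, i.e.
 * the 128-byte freelist; a 64-byte request gives idx = 5 - 3 + 1 = 3, the
 * 64-byte freelist; any request of 8 bytes or less maps to freelist 0.
 */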
325 : :
326 : :
327 : : /*
328 : : * Public routines
329 : : */
330 : :
331 : :
332 : : /*
333 : : * AllocSetContextCreateInternal
334 : : * Create a new AllocSet context.
335 : : *
336 : : * parent: parent context, or NULL if top-level context
337 : : * name: name of context (must be statically allocated)
338 : : * minContextSize: minimum context size
339 : : * initBlockSize: initial allocation block size
340 : : * maxBlockSize: maximum allocation block size
341 : : *
342 : : * Most callers should abstract the context size parameters using a macro
343 : : * such as ALLOCSET_DEFAULT_SIZES.
344 : : *
345 : : * Note: don't call this directly; go through the wrapper macro
346 : : * AllocSetContextCreate.
347 : : */
348 : : MemoryContext
2521 tgl@sss.pgh.pa.us 349 : 6025553 : AllocSetContextCreateInternal(MemoryContext parent,
350 : : const char *name,
351 : : Size minContextSize,
352 : : Size initBlockSize,
353 : : Size maxBlockSize)
354 : : {
355 : : int freeListIndex;
356 : : Size firstBlockSize;
357 : : AllocSet set;
358 : : AllocBlock block;
359 : :
360 : : /* ensure MemoryChunk's size is properly maxaligned */
361 : : StaticAssertDecl(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
362 : : "sizeof(MemoryChunk) is not maxaligned");
363 : : /* check we have enough space to store the freelist link */
364 : : StaticAssertDecl(sizeof(AllocFreeListLink) <= (1 << ALLOC_MINBITS),
365 : : "sizeof(AllocFreeListLink) larger than minimum allocation size");
366 : :
367 : : /*
368 : : * First, validate allocation parameters. Once these were regular runtime
369 : : * tests and elog's, but in practice Asserts seem sufficient because
370 : : * nobody varies their parameters at runtime. We somewhat arbitrarily
371 : : * enforce a minimum 1K block size. We restrict the maximum block size to
372 : : * MEMORYCHUNK_MAX_BLOCKOFFSET, as MemoryChunks are limited to this with
373 : : * regard to addressing the offset between the chunk and the block that
374 : : * the chunk is stored on. We would be unable to store the offset between
375 : : * the chunk and block for any chunks that were beyond
376 : : * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
377 : : * larger than this.
378 : : */
2824 379 [ + - - + ]: 6025553 : Assert(initBlockSize == MAXALIGN(initBlockSize) &&
380 : : initBlockSize >= 1024);
381 [ + - + - : 6025553 : Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
- + ]
382 : : maxBlockSize >= initBlockSize &&
383 : : AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
384 [ + + + - : 6025553 : Assert(minContextSize == 0 ||
+ - - + ]
385 : : (minContextSize == MAXALIGN(minContextSize) &&
386 : : minContextSize >= 1024 &&
387 : : minContextSize <= maxBlockSize));
1104 drowley@postgresql.o 388 [ - + ]: 6025553 : Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
389 : :
390 : : /*
391 : : * Check whether the parameters match either available freelist. We do
392 : : * not need to demand a match of maxBlockSize.
393 : : */
2720 tgl@sss.pgh.pa.us 394 [ + + + + ]: 6025553 : if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
395 : : initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
2824 396 : 3998682 : freeListIndex = 0;
2720 397 [ + + + - ]: 2026871 : else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
398 : : initBlockSize == ALLOCSET_SMALL_INITSIZE)
2824 399 : 2010127 : freeListIndex = 1;
400 : : else
401 : 16744 : freeListIndex = -1;
402 : :
403 : : /*
404 : : * If a suitable freelist entry exists, just recycle that context.
405 : : */
406 [ + + ]: 6025553 : if (freeListIndex >= 0)
407 : : {
408 : 6008809 : AllocSetFreeList *freelist = &context_freelists[freeListIndex];
409 : :
410 [ + + ]: 6008809 : if (freelist->first_free != NULL)
411 : : {
412 : : /* Remove entry from freelist */
413 : 4293101 : set = freelist->first_free;
414 : 4293101 : freelist->first_free = (AllocSet) set->header.nextchild;
415 : 4293101 : freelist->num_free--;
416 : :
417 : : /* Update its maxBlockSize; everything else should be OK */
418 : 4293101 : set->maxBlockSize = maxBlockSize;
419 : :
420 : : /* Reinitialize its header, installing correct name and parent */
421 : 4293101 : MemoryContextCreate((MemoryContext) set,
422 : : T_AllocSetContext,
423 : : MCTX_ASET_ID,
424 : : parent,
425 : : name);
426 : :
1997 jdavis@postgresql.or 427 : 4293101 : ((MemoryContext) set)->mem_allocated =
782 drowley@postgresql.o 428 : 4293101 : KeeperBlock(set)->endptr - ((char *) set);
429 : :
2824 tgl@sss.pgh.pa.us 430 : 4293101 : return (MemoryContext) set;
431 : : }
432 : : }
433 : :
434 : : /* Determine size of initial block */
2720 435 : 1732452 : firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
436 : : ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
2824 437 [ + + ]: 1732452 : if (minContextSize != 0)
438 : 16744 : firstBlockSize = Max(firstBlockSize, minContextSize);
439 : : else
440 : 1715708 : firstBlockSize = Max(firstBlockSize, initBlockSize);
441 : :
442 : : /*
443 : : * Allocate the initial block. Unlike other aset.c blocks, it starts with
444 : : * the context header and its block header follows that.
445 : : */
446 : 1732452 : set = (AllocSet) malloc(firstBlockSize);
447 [ - + ]: 1732452 : if (set == NULL)
448 : : {
2824 tgl@sss.pgh.pa.us 449 [ # # ]:UBC 0 : if (TopMemoryContext)
450 : 0 : MemoryContextStats(TopMemoryContext);
451 [ # # ]: 0 : ereport(ERROR,
452 : : (errcode(ERRCODE_OUT_OF_MEMORY),
453 : : errmsg("out of memory"),
454 : : errdetail("Failed while creating memory context \"%s\".",
455 : : name)));
456 : : }
457 : :
458 : : /*
459 : : * Avoid writing code that can fail between here and MemoryContextCreate;
460 : : * we'd leak the header/initial block if we ereport in this stretch.
461 : : */
462 : :
463 : : /* Create a vpool associated with the context */
464 : : VALGRIND_CREATE_MEMPOOL(set, 0, false);
465 : :
466 : : /*
467 : : * Create a vchunk covering both the AllocSetContext struct and the keeper
468 : : * block's header. (Perhaps it would be more sensible for these to be two
469 : : * separate vchunks, but doing that seems to tickle bugs in some versions
470 : : * of Valgrind.) We must have these vchunks, and also a vchunk for each
471 : : * subsequently-added block header, so that Valgrind considers the
472 : : * pointers within them while checking for leaked memory. Note that
473 : : * Valgrind doesn't distinguish between these vchunks and those created by
474 : : * mcxt.c for the user-accessible-data chunks we allocate.
475 : : */
476 : : VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
477 : :
478 : : /* Fill in the initial block's block header */
782 drowley@postgresql.o 479 :CBC 1732452 : block = KeeperBlock(set);
2824 tgl@sss.pgh.pa.us 480 : 1732452 : block->aset = set;
481 : 1732452 : block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
482 : 1732452 : block->endptr = ((char *) set) + firstBlockSize;
483 : 1732452 : block->prev = NULL;
484 : 1732452 : block->next = NULL;
485 : :
486 : : /* Mark unallocated space NOACCESS; leave the block header alone. */
487 : : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
488 : :
489 : : /* Remember block as part of block list */
490 : 1732452 : set->blocks = block;
491 : :
492 : : /* Finish filling in aset-specific parts of the context header */
493 [ + - + - : 20789424 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
+ - + + ]
494 : :
782 drowley@postgresql.o 495 : 1732452 : set->initBlockSize = (uint32) initBlockSize;
496 : 1732452 : set->maxBlockSize = (uint32) maxBlockSize;
497 : 1732452 : set->nextBlockSize = (uint32) initBlockSize;
2824 tgl@sss.pgh.pa.us 498 : 1732452 : set->freeListIndex = freeListIndex;
499 : :
500 : : /*
501 : : * Compute the allocation chunk size limit for this context. It can't be
502 : : * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
503 : : * If maxBlockSize is small then requests exceeding the maxBlockSize, or
504 : : * even a significant fraction of it, should be treated as large chunks
505 : : * too. For the typical case of maxBlockSize a power of 2, the chunk size
506 : : * limit will be at most 1/8th maxBlockSize, so that given a stream of
507 : : * requests that are all the maximum chunk size we will waste at most
508 : : * 1/8th of the allocated space.
509 : : *
510 : : * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
511 : : */
512 : : StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
513 : : "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
514 : :
515 : : /*
516 : : * Determine the maximum size that a chunk can be before we allocate an
517 : : * entire AllocBlock dedicated for that chunk. We set the absolute limit
518 : : * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
519 : : * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
520 : : * sized block. (We opt to keep allocChunkLimit a power-of-2 value
521 : : * primarily for legacy reasons rather than calculating it so that exactly
522 : : * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
523 : : */
3850 jdavis@postgresql.or 524 : 1732452 : set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
525 : 1732452 : while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
5241 tgl@sss.pgh.pa.us 526 [ + + ]: 5708994 : (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
3850 jdavis@postgresql.or 527 : 3976542 : set->allocChunkLimit >>= 1;
528 : :
529 : : /* Finally, do the type-independent part of context creation */
2824 tgl@sss.pgh.pa.us 530 : 1732452 : MemoryContextCreate((MemoryContext) set,
531 : : T_AllocSetContext,
532 : : MCTX_ASET_ID,
533 : : parent,
534 : : name);
535 : :
1997 jdavis@postgresql.or 536 : 1732452 : ((MemoryContext) set)->mem_allocated = firstBlockSize;
537 : :
3850 538 : 1732452 : return (MemoryContext) set;
539 : : }
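/*
 * Typical usage (a sketch; callers go through the AllocSetContextCreate
 * wrapper macro, supplying the size parameters via one of the
 * ALLOCSET_*_SIZES macros from memutils.h):
 *
 *     cxt = AllocSetContextCreate(CurrentMemoryContext,
 *                                 "example context",
 *                                 ALLOCSET_DEFAULT_SIZES);
 *     ...
 *     MemoryContextDelete(cxt);
 */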
540 : :
541 : : /*
542 : : * AllocSetReset
543 : : * Frees all memory which is allocated in the given set.
544 : : *
545 : : * Actually, this routine has some discretion about what to do.
546 : : * It should mark all allocated chunks freed, but it need not necessarily
547 : : * give back all the resources the set owns. Our actual implementation is
548 : : * that we give back all but the "keeper" block (which we must keep, since
549 : : * it shares a malloc chunk with the context header). In this way, we don't
550 : : * thrash malloc() when a context is repeatedly reset after small allocations,
551 : : * which is typical behavior for per-tuple contexts.
552 : : */
553 : : void
9201 tgl@sss.pgh.pa.us 554 : 25628141 : AllocSetReset(MemoryContext context)
555 : : {
556 : 25628141 : AllocSet set = (AllocSet) context;
557 : : AllocBlock block;
558 : : Size keepersize PG_USED_FOR_ASSERTS_ONLY;
559 : :
1044 peter@eisentraut.org 560 [ + - - + ]: 25628141 : Assert(AllocSetIsValid(set));
561 : :
562 : : #ifdef MEMORY_CONTEXT_CHECKING
563 : : /* Check for corruption and leaks before freeing */
9045 tgl@sss.pgh.pa.us 564 : 25628141 : AllocSetCheck(context);
565 : : #endif
566 : :
567 : : /* Remember keeper block size for Assert below */
782 drowley@postgresql.o 568 : 25628141 : keepersize = KeeperBlock(set)->endptr - ((char *) set);
569 : :
570 : : /* Clear chunk freelists */
7420 tgl@sss.pgh.pa.us 571 [ + - + - : 307537692 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
+ - + + ]
572 : :
7310 573 : 25628141 : block = set->blocks;
574 : :
575 : : /* New blocks list will be just the keeper block */
782 drowley@postgresql.o 576 : 25628141 : set->blocks = KeeperBlock(set);
577 : :
7310 tgl@sss.pgh.pa.us 578 [ + + ]: 56191740 : while (block != NULL)
579 : : {
9201 580 : 30563599 : AllocBlock next = block->next;
581 : :
782 drowley@postgresql.o 582 [ + + ]: 30563599 : if (IsKeeperBlock(set, block))
583 : : {
584 : : /* Reset the block, but don't return it to malloc */
8934 bruce@momjian.us 585 : 25628141 : char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
586 : :
587 : : #ifdef CLOBBER_FREED_MEMORY
4455 noah@leadboat.com 588 : 25628141 : wipe_mem(datastart, block->freeptr - datastart);
589 : : #else
590 : : /* wipe_mem() would have done this */
591 : : VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
592 : : #endif
9187 tgl@sss.pgh.pa.us 593 : 25628141 : block->freeptr = datastart;
3104 594 : 25628141 : block->prev = NULL;
9187 595 : 25628141 : block->next = NULL;
596 : : }
597 : : else
598 : : {
599 : : /* Normal case, release the block */
1941 600 : 4935458 : context->mem_allocated -= block->endptr - ((char *) block);
601 : :
602 : : #ifdef CLOBBER_FREED_MEMORY
4455 noah@leadboat.com 603 : 4935458 : wipe_mem(block, block->freeptr - ((char *) block));
604 : : #endif
605 : :
606 : : /*
607 : : * We need to free the block header's vchunk explicitly, although
608 : : * the user-data vchunks within will go away in the TRIM below.
609 : : * Otherwise Valgrind complains about leaked allocations.
610 : : */
611 : : VALGRIND_MEMPOOL_FREE(set, block);
612 : :
9201 tgl@sss.pgh.pa.us 613 : 4935458 : free(block);
614 : : }
9709 JanWieck@Yahoo.com 615 : 30563599 : block = next;
616 : : }
617 : :
1997 jdavis@postgresql.or 618 [ - + ]: 25628141 : Assert(context->mem_allocated == keepersize);
619 : :
620 : : /*
621 : : * Instruct Valgrind to throw away all the vchunks associated with this
622 : : * context, except for the one covering the AllocSetContext and
623 : : * keeper-block header. This gets rid of the vchunks for whatever user
624 : : * data is getting discarded by the context reset.
625 : : */
626 : : VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
627 : :
628 : : /* Reset block size allocation sequence, too */
6877 tgl@sss.pgh.pa.us 629 : 25628141 : set->nextBlockSize = set->initBlockSize;
10651 scrappy@hub.org 630 : 25628141 : }
631 : :
632 : : /*
633 : : * AllocSetDelete
634 : : * Frees all memory which is allocated in the given set,
635 : : * in preparation for deletion of the set.
636 : : *
637 : : * Unlike AllocSetReset, this *must* free all resources of the set.
638 : : */
639 : : void
9201 tgl@sss.pgh.pa.us 640 : 4527818 : AllocSetDelete(MemoryContext context)
641 : : {
642 : 4527818 : AllocSet set = (AllocSet) context;
643 : 4527818 : AllocBlock block = set->blocks;
644 : : Size keepersize PG_USED_FOR_ASSERTS_ONLY;
645 : :
1044 peter@eisentraut.org 646 [ + - - + ]: 4527818 : Assert(AllocSetIsValid(set));
647 : :
648 : : #ifdef MEMORY_CONTEXT_CHECKING
649 : : /* Check for corruption and leaks before freeing */
9045 tgl@sss.pgh.pa.us 650 : 4527818 : AllocSetCheck(context);
651 : : #endif
652 : :
653 : : /* Remember keeper block size for Assert below */
782 drowley@postgresql.o 654 : 4527818 : keepersize = KeeperBlock(set)->endptr - ((char *) set);
655 : :
656 : : /*
657 : : * If the context is a candidate for a freelist, put it into that freelist
658 : : * instead of destroying it.
659 : : */
2824 tgl@sss.pgh.pa.us 660 [ + - ]: 4527818 : if (set->freeListIndex >= 0)
661 : : {
662 : 4527818 : AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
663 : :
664 : : /*
665 : : * Reset the context, if it needs it, so that we aren't hanging on to
666 : : * more than the initial malloc chunk.
667 : : */
668 [ + + ]: 4527818 : if (!context->isReset)
669 : 2812769 : MemoryContextResetOnly(context);
670 : :
671 : : /*
672 : : * If the freelist is full, just discard what's already in it. See
673 : : * comments with context_freelists[].
674 : : */
675 [ + + ]: 4527818 : if (freelist->num_free >= MAX_FREE_CONTEXTS)
676 : : {
677 [ + + ]: 31310 : while (freelist->first_free != NULL)
678 : : {
679 : 31000 : AllocSetContext *oldset = freelist->first_free;
680 : :
681 : 31000 : freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
682 : 31000 : freelist->num_free--;
683 : :
684 : : /* Destroy the context's vpool --- see notes below */
685 : : VALGRIND_DESTROY_MEMPOOL(oldset);
686 : :
687 : : /* All that remains is to free the header/initial block */
688 : 31000 : free(oldset);
689 : : }
690 [ - + ]: 310 : Assert(freelist->num_free == 0);
691 : : }
692 : :
693 : : /* Now add the just-deleted context to the freelist. */
694 : 4527818 : set->header.nextchild = (MemoryContext) freelist->first_free;
695 : 4527818 : freelist->first_free = set;
696 : 4527818 : freelist->num_free++;
697 : :
698 : 4527818 : return;
699 : : }
700 : :
701 : : /* Free all blocks, except the keeper which is part of context header */
9201 tgl@sss.pgh.pa.us 702 [ # # ]:UBC 0 : while (block != NULL)
703 : : {
704 : 0 : AllocBlock next = block->next;
705 : :
782 drowley@postgresql.o 706 [ # # ]: 0 : if (!IsKeeperBlock(set, block))
1997 jdavis@postgresql.or 707 : 0 : context->mem_allocated -= block->endptr - ((char *) block);
708 : :
709 : : #ifdef CLOBBER_FREED_MEMORY
4455 noah@leadboat.com 710 : 0 : wipe_mem(block, block->freeptr - ((char *) block));
711 : : #endif
712 : :
782 drowley@postgresql.o 713 [ # # ]: 0 : if (!IsKeeperBlock(set, block))
714 : : {
715 : : /* As in AllocSetReset, free block-header vchunks explicitly */
716 : : VALGRIND_MEMPOOL_FREE(set, block);
2824 tgl@sss.pgh.pa.us 717 : 0 : free(block);
718 : : }
719 : :
9201 720 : 0 : block = next;
721 : : }
722 : :
1997 jdavis@postgresql.or 723 [ # # ]: 0 : Assert(context->mem_allocated == keepersize);
724 : :
725 : : /*
726 : : * Destroy the vpool. We don't seem to need to explicitly free the
727 : : * initial block's header vchunk, nor any user-data vchunks that Valgrind
728 : : * still knows about; they'll all go away automatically.
729 : : */
730 : : VALGRIND_DESTROY_MEMPOOL(set);
731 : :
732 : : /* Finally, free the context header, including the keeper block */
2824 tgl@sss.pgh.pa.us 733 : 0 : free(set);
734 : : }
735 : :
736 : : /*
737 : : * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
738 : : *
739 : : * AllocSetAlloc()'s comment explains why this is separate.
740 : : */
741 : : pg_noinline
742 : : static void *
556 drowley@postgresql.o 743 :CBC 9406069 : AllocSetAllocLarge(MemoryContext context, Size size, int flags)
744 : : {
9201 tgl@sss.pgh.pa.us 745 : 9406069 : AllocSet set = (AllocSet) context;
746 : : AllocBlock block;
747 : : MemoryChunk *chunk;
748 : : Size chunk_size;
749 : : Size blksize;
750 : :
751 : : /* validate 'size' is within the limits for the given 'flags' */
556 drowley@postgresql.o 752 : 9406069 : MemoryContextCheckSize(context, size, flags);
753 : :
754 : : #ifdef MEMORY_CONTEXT_CHECKING
755 : : /* ensure there's always space for the sentinel byte */
756 : 9406069 : chunk_size = MAXALIGN(size + 1);
757 : : #else
758 : : chunk_size = MAXALIGN(size);
759 : : #endif
760 : :
761 : 9406069 : blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
762 : 9406069 : block = (AllocBlock) malloc(blksize);
763 [ - + ]: 9406069 : if (block == NULL)
556 drowley@postgresql.o 764 :UBC 0 : return MemoryContextAllocationFailure(context, size, flags);
765 : :
766 : : /* Make a vchunk covering the new block's header */
767 : : VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
768 : :
556 drowley@postgresql.o 769 :CBC 9406069 : context->mem_allocated += blksize;
770 : :
771 : 9406069 : block->aset = set;
772 : 9406069 : block->freeptr = block->endptr = ((char *) block) + blksize;
773 : :
774 : 9406069 : chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
775 : :
776 : : /* mark the MemoryChunk as externally managed */
777 : 9406069 : MemoryChunkSetHdrMaskExternal(chunk, MCTX_ASET_ID);
778 : :
779 : : #ifdef MEMORY_CONTEXT_CHECKING
780 : 9406069 : chunk->requested_size = size;
781 : : /* set mark to catch clobber of "unused" space */
782 [ - + ]: 9406069 : Assert(size < chunk_size);
783 : 9406069 : set_sentinel(MemoryChunkGetPointer(chunk), size);
784 : : #endif
785 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
786 : : /* fill the allocated space with junk */
787 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
788 : : #endif
789 : :
790 : : /*
791 : : * Stick the new block underneath the active allocation block, if any, so
792 : : * that we don't lose the use of the space remaining therein.
793 : : */
794 [ + - ]: 9406069 : if (set->blocks != NULL)
795 : : {
796 : 9406069 : block->prev = set->blocks;
797 : 9406069 : block->next = set->blocks->next;
798 [ + + ]: 9406069 : if (block->next)
799 : 7649058 : block->next->prev = block;
800 : 9406069 : set->blocks->next = block;
801 : : }
802 : : else
803 : : {
556 drowley@postgresql.o 804 :UBC 0 : block->prev = NULL;
805 : 0 : block->next = NULL;
806 : 0 : set->blocks = block;
807 : : }
808 : :
809 : : /* Ensure any padding bytes are marked NOACCESS. */
810 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
811 : : chunk_size - size);
812 : :
813 : : /* Disallow access to the chunk header. */
814 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
815 : :
556 drowley@postgresql.o 816 :CBC 9406069 : return MemoryChunkGetPointer(chunk);
817 : : }
818 : :
819 : : /*
820 : : * Small helper for allocating a new chunk from a chunk, to avoid duplicating
821 : : * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
822 : : */
823 : : static inline void *
824 : 319762374 : AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block,
825 : : Size size, Size chunk_size, int fidx)
826 : : {
827 : : MemoryChunk *chunk;
828 : :
829 : 319762374 : chunk = (MemoryChunk *) (block->freeptr);
830 : :
831 : : /* Prepare to initialize the chunk header. */
832 : : VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
833 : :
834 : 319762374 : block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
835 [ - + ]: 319762374 : Assert(block->freeptr <= block->endptr);
836 : :
837 : : /* store the free list index in the value field */
838 : 319762374 : MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
839 : :
840 : : #ifdef MEMORY_CONTEXT_CHECKING
841 : 319762374 : chunk->requested_size = size;
842 : : /* set mark to catch clobber of "unused" space */
843 [ + + ]: 319762374 : if (size < chunk_size)
1095 844 : 218637270 : set_sentinel(MemoryChunkGetPointer(chunk), size);
845 : : #endif
846 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
847 : : /* fill the allocated space with junk */
848 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
849 : : #endif
850 : :
851 : : /* Ensure any padding bytes are marked NOACCESS. */
852 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
853 : : chunk_size - size);
854 : :
855 : : /* Disallow access to the chunk header. */
856 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
857 : :
556 858 : 319762374 : return MemoryChunkGetPointer(chunk);
859 : : }
860 : :
861 : : /*
862 : : * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
863 : : * allocated from it.
864 : : *
865 : : * AllocSetAlloc()'s comment explains why this is separate.
866 : : */
867 : : pg_noinline
868 : : static void *
869 : 5755574 : AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
870 : : int fidx)
871 : : {
872 : 5755574 : AllocSet set = (AllocSet) context;
873 : : AllocBlock block;
874 : : Size availspace;
875 : : Size blksize;
876 : : Size required_size;
877 : : Size chunk_size;
878 : :
879 : : /* due to the keeper block set->blocks should always be valid */
880 [ - + ]: 5755574 : Assert(set->blocks != NULL);
881 : 5755574 : block = set->blocks;
882 : 5755574 : availspace = block->endptr - block->freeptr;
883 : :
884 : : /*
885 : : * The existing active (top) block does not have enough room for the
886 : : * requested allocation, but it might still have a useful amount of space
887 : : * in it. Once we push it down in the block list, we'll never try to
888 : : * allocate more space from it. So, before we do that, carve up its free
889 : : * space into chunks that we can put on the set's freelists.
890 : : *
891 : : * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
892 : : * left in the block, this loop cannot iterate more than
893 : : * ALLOCSET_NUM_FREELISTS-1 times.
894 : : */
895 [ + + ]: 20139266 : while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
896 : : {
897 : : AllocFreeListLink *link;
898 : : MemoryChunk *chunk;
899 : 14383692 : Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
900 : 14383692 : int a_fidx = AllocSetFreeIndex(availchunk);
901 : :
902 : : /*
903 : : * In most cases, we'll get back the index of the next larger freelist
904 : : * than the one we need to put this chunk on. The exception is when
905 : : * availchunk is exactly a power of 2.
906 : : */
907 [ + + ]: 14383692 : if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
908 : : {
909 : 11404154 : a_fidx--;
910 [ - + ]: 11404154 : Assert(a_fidx >= 0);
911 : 11404154 : availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
912 : : }
913 : :
914 : 14383692 : chunk = (MemoryChunk *) (block->freeptr);
915 : :
916 : : /* Prepare to initialize the chunk header. */
917 : : VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
918 : 14383692 : block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
919 : 14383692 : availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
920 : :
921 : : /* store the freelist index in the value field */
922 : 14383692 : MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
923 : : #ifdef MEMORY_CONTEXT_CHECKING
924 : 14383692 : chunk->requested_size = InvalidAllocSize; /* mark it free */
925 : : #endif
926 : : /* push this chunk onto the free list */
927 : 14383692 : link = GetFreeListLink(chunk);
928 : :
929 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
930 : 14383692 : link->next = set->freelist[a_fidx];
931 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
932 : :
933 : 14383692 : set->freelist[a_fidx] = chunk;
934 : : }
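/*
 * For instance (a sketch assuming production builds' 8-byte chunk header;
 * it is larger with MEMORY_CONTEXT_CHECKING): with 700 bytes left in the
 * old block, the loop above carves a 512-byte chunk (520 with header), then
 * a 128-byte chunk, then a 32-byte chunk, leaving 4 unusable bytes behind.
 */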
935 : :
936 : : /*
937 : : * The first such block has size initBlockSize, and we double the space in
938 : : * each succeeding block, but not more than maxBlockSize.
939 : : */
940 : 5755574 : blksize = set->nextBlockSize;
941 : 5755574 : set->nextBlockSize <<= 1;
942 [ + + ]: 5755574 : if (set->nextBlockSize > set->maxBlockSize)
943 : 329048 : set->nextBlockSize = set->maxBlockSize;
944 : :
945 : : /* Choose the actual chunk size to allocate */
946 : 5755574 : chunk_size = GetChunkSizeFromFreeListIdx(fidx);
947 [ - + ]: 5755574 : Assert(chunk_size >= size);
948 : :
949 : : /*
950 : : * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
951 : : * space... but try to keep it a power of 2.
952 : : */
953 : 5755574 : required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
954 [ + + ]: 8512075 : while (blksize < required_size)
955 : 2756501 : blksize <<= 1;
956 : :
957 : : /* Try to allocate it */
958 : 5755574 : block = (AllocBlock) malloc(blksize);
959 : :
960 : : /*
961 : : * We could be asking for pretty big blocks here, so cope if malloc fails.
962 : : * But give up if there's less than 1 MB or so available...
963 : : */
964 [ - + - - ]: 5755574 : while (block == NULL && blksize > 1024 * 1024)
965 : : {
556 drowley@postgresql.o 966 :UBC 0 : blksize >>= 1;
967 [ # # ]: 0 : if (blksize < required_size)
968 : 0 : break;
969 : 0 : block = (AllocBlock) malloc(blksize);
970 : : }
971 : :
556 drowley@postgresql.o 972 [ - + ]:CBC 5755574 : if (block == NULL)
556 drowley@postgresql.o 973 :UBC 0 : return MemoryContextAllocationFailure(context, size, flags);
974 : :
975 : : /* Make a vchunk covering the new block's header */
976 : : VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
977 : :
556 drowley@postgresql.o 978 :CBC 5755574 : context->mem_allocated += blksize;
979 : :
980 : 5755574 : block->aset = set;
981 : 5755574 : block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
982 : 5755574 : block->endptr = ((char *) block) + blksize;
983 : :
984 : : /* Mark unallocated space NOACCESS. */
985 : : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
986 : : blksize - ALLOC_BLOCKHDRSZ);
987 : :
988 : 5755574 : block->prev = NULL;
989 : 5755574 : block->next = set->blocks;
990 [ + - ]: 5755574 : if (block->next)
991 : 5755574 : block->next->prev = block;
992 : 5755574 : set->blocks = block;
993 : :
994 : 5755574 : return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
995 : : }
996 : :
997 : : /*
998 : : * AllocSetAlloc
999 : : * Returns a pointer to allocated memory of given size or raises an ERROR
1000 : : * on allocation failure, or returns NULL when flags contains
1001 : : * MCXT_ALLOC_NO_OOM.
1002 : : *
1003 : : * No request may exceed:
1004 : : * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
1005 : : * All callers use a much-lower limit.
1006 : : *
1007 : : * Note: when using valgrind, it doesn't matter how the returned allocation
1008 : : * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
1009 : : * return space that is marked NOACCESS - AllocSetRealloc has to beware!
1010 : : *
1011 : : * This function should only contain the most common code paths. Everything
1012 : : * else should be in pg_noinline helper functions, thus avoiding the overhead
1013 : : * of creating a stack frame for the common cases. Allocating memory is often
1014 : : * a bottleneck in many workloads, so avoiding stack frame setup is
1015 : : * worthwhile. Helper functions should always directly return the newly
1016 : : * allocated memory so that we can just return that address directly as a tail
1017 : : * call.
1018 : : */
1019 : : void *
1020 : 515184455 : AllocSetAlloc(MemoryContext context, Size size, int flags)
1021 : : {
1022 : 515184455 : AllocSet set = (AllocSet) context;
1023 : : AllocBlock block;
1024 : : MemoryChunk *chunk;
1025 : : int fidx;
1026 : : Size chunk_size;
1027 : : Size availspace;
1028 : :
1029 [ + - - + ]: 515184455 : Assert(AllocSetIsValid(set));
1030 : :
1031 : : /* due to the keeper block set->blocks should never be NULL */
1032 [ - + ]: 515184455 : Assert(set->blocks != NULL);
1033 : :
1034 : : /*
1035 : : * If requested size exceeds maximum for chunks we hand the request off to
1036 : : * AllocSetAllocLarge().
1037 : : */
1038 [ + + ]: 515184455 : if (size > set->allocChunkLimit)
1039 : 9406069 : return AllocSetAllocLarge(context, size, flags);
1040 : :
1041 : : /*
1042 : : * Request is small enough to be treated as a chunk. Look in the
1043 : : * corresponding free list to see if there is a free chunk we could reuse.
1044 : : * If one is found, remove it from the free list, make it again a member
1045 : : * of the alloc set and return its data address.
1046 : : *
1047 : : * Note that we don't attempt to ensure there's space for the sentinel
1048 : : * byte here. We expect a large proportion of allocations to be for sizes
1049 : : * which are already a power of 2. If we were to always make space for a
1050 : : * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1051 : : * doubling the memory requirements for such allocations.
1052 : : */
6704 tgl@sss.pgh.pa.us 1053 : 505778386 : fidx = AllocSetFreeIndex(size);
1054 : 505778386 : chunk = set->freelist[fidx];
9045 1055 [ + + ]: 505778386 : if (chunk != NULL)
1056 : : {
1104 drowley@postgresql.o 1057 : 186016012 : AllocFreeListLink *link = GetFreeListLink(chunk);
1058 : :
1059 : : /* Allow access to the chunk header. */
1060 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1061 : :
1062 [ - + ]: 186016012 : Assert(fidx == MemoryChunkGetValue(chunk));
1063 : :
1064 : : /* pop this chunk off the freelist */
1065 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1066 : 186016012 : set->freelist[fidx] = link->next;
1067 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1068 : :
1069 : : #ifdef MEMORY_CONTEXT_CHECKING
9045 tgl@sss.pgh.pa.us 1070 : 186016012 : chunk->requested_size = size;
1071 : : /* set mark to catch clobber of "unused" space */
1104 drowley@postgresql.o 1072 [ + + ]: 186016012 : if (size < GetChunkSizeFromFreeListIdx(fidx))
1073 : 109626319 : set_sentinel(MemoryChunkGetPointer(chunk), size);
1074 : : #endif
1075 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1076 : : /* fill the allocated space with junk */
1077 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1078 : : #endif
1079 : :
1080 : : /* Ensure any padding bytes are marked NOACCESS. */
1081 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
1082 : : GetChunkSizeFromFreeListIdx(fidx) - size);
1083 : :
1084 : : /* Disallow access to the chunk header. */
1085 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1086 : :
1087 : 186016012 : return MemoryChunkGetPointer(chunk);
1088 : : }
1089 : :
1090 : : /*
1091 : : * Choose the actual chunk size to allocate.
1092 : : */
1093 : 319762374 : chunk_size = GetChunkSizeFromFreeListIdx(fidx);
9604 tgl@sss.pgh.pa.us 1094 [ - + ]: 319762374 : Assert(chunk_size >= size);
1095 : :
556 drowley@postgresql.o 1096 : 319762374 : block = set->blocks;
1097 : 319762374 : availspace = block->endptr - block->freeptr;
1098 : :
1099 : : /*
1100 : : * If there is enough room in the active allocation block, we will put the
1101 : : * chunk into that block. Else must start a new one.
1102 : : */
1103 [ + + ]: 319762374 : if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1104 : 5755574 : return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1105 : :
1106 : : /* There's enough space on the current block, so allocate from that */
1107 : 314006800 : return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1108 : : }
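/*
 * For example (a sketch assuming a context created with
 * ALLOCSET_DEFAULT_SIZES): a 100-byte palloc() is rounded up to a 128-byte
 * chunk taken from a freelist or the active block, while a 10kB request
 * exceeds allocChunkLimit (8K here) and goes to AllocSetAllocLarge(), which
 * gives it a dedicated malloc'd block.
 */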
1109 : :
1110 : : /*
1111 : : * AllocSetFree
1112 : : * Frees allocated memory; memory is removed from the set.
1113 : : */
1114 : : void
1104 1115 : 229418054 : AllocSetFree(void *pointer)
1116 : : {
1117 : : AllocSet set;
1118 : 229418054 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1119 : :
1120 : : /* Allow access to the chunk header. */
1121 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1122 : :
1123 [ + + ]: 229418054 : if (MemoryChunkIsExternal(chunk))
1124 : : {
1125 : : /* Release single-chunk block. */
1126 : 8737939 : AllocBlock block = ExternalChunkGetBlock(chunk);
1127 : :
1128 : : /*
1129 : : * Try to verify that we have a sane block pointer: the block header
1130 : : * should reference an aset and the freeptr should match the endptr.
1131 : : */
1062 tgl@sss.pgh.pa.us 1132 [ + - + - : 8737939 : if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
+ - - + ]
1062 tgl@sss.pgh.pa.us 1133 [ # # ]:UBC 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
1134 : :
1104 drowley@postgresql.o 1135 :CBC 8737939 : set = block->aset;
1136 : :
1137 : : #ifdef MEMORY_CONTEXT_CHECKING
1138 : : {
1139 : : /* Test for someone scribbling on unused space in chunk */
975 1140 [ - + ]: 8737939 : Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1095 1141 [ - + ]: 8737939 : if (!sentinel_ok(pointer, chunk->requested_size))
1095 drowley@postgresql.o 1142 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1143 : : set->header.name, chunk);
1144 : : }
1145 : : #endif
1146 : :
1147 : : /* OK, remove block from aset's list and free it */
3104 tgl@sss.pgh.pa.us 1148 [ + - ]:CBC 8737939 : if (block->prev)
1149 : 8737939 : block->prev->next = block->next;
1150 : : else
3104 tgl@sss.pgh.pa.us 1151 :UBC 0 : set->blocks = block->next;
3104 tgl@sss.pgh.pa.us 1152 [ + + ]:CBC 8737939 : if (block->next)
1153 : 7143222 : block->next->prev = block->prev;
1154 : :
1104 drowley@postgresql.o 1155 : 8737939 : set->header.mem_allocated -= block->endptr - ((char *) block);
1156 : :
1157 : : #ifdef CLOBBER_FREED_MEMORY
4455 noah@leadboat.com 1158 : 8737939 : wipe_mem(block, block->freeptr - ((char *) block));
1159 : : #endif
1160 : :
1161 : : /* As in AllocSetReset, free block-header vchunks explicitly */
1162 : : VALGRIND_MEMPOOL_FREE(set, block);
1163 : :
9510 tgl@sss.pgh.pa.us 1164 : 8737939 : free(block);
1165 : : }
1166 : : else
1167 : : {
1104 drowley@postgresql.o 1168 : 220680115 : AllocBlock block = MemoryChunkGetBlock(chunk);
1169 : : int fidx;
1170 : : AllocFreeListLink *link;
1171 : :
1172 : : /*
1173 : : * In this path, for speed reasons we just Assert that the referenced
1174 : : * block is good. We can also Assert that the value field is sane.
1175 : : * Future field experience may show that these Asserts had better
1176 : : * become regular runtime test-and-elog checks.
1177 : : */
1044 peter@eisentraut.org 1178 [ + - + - : 220680115 : Assert(AllocBlockIsValid(block));
- + ]
1104 drowley@postgresql.o 1179 : 220680115 : set = block->aset;
1180 : :
1062 tgl@sss.pgh.pa.us 1181 : 220680115 : fidx = MemoryChunkGetValue(chunk);
1182 [ + - - + ]: 220680115 : Assert(FreeListIdxIsValid(fidx));
1183 : 220680115 : link = GetFreeListLink(chunk);
1184 : :
1185 : : #ifdef MEMORY_CONTEXT_CHECKING
1186 : : /* Test for someone scribbling on unused space in chunk */
1104 drowley@postgresql.o 1187 [ + + ]: 220680115 : if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1188 [ - + ]: 140072521 : if (!sentinel_ok(pointer, chunk->requested_size))
1104 drowley@postgresql.o 1189 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1190 : : set->header.name, chunk);
1191 : : #endif
1192 : :
1193 : : #ifdef CLOBBER_FREED_MEMORY
1104 drowley@postgresql.o 1194 :CBC 220680115 : wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1195 : : #endif
1196 : : /* push this chunk onto the top of the free list */
1197 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1198 : 220680115 : link->next = set->freelist[fidx];
1199 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1200 : 220680115 : set->freelist[fidx] = chunk;
1201 : :
1202 : : #ifdef MEMORY_CONTEXT_CHECKING
1203 : :
1204 : : /*
1205 : : * Reset requested_size to InvalidAllocSize in chunks that are on free
1206 : : * list.
1207 : : */
1208 : 220680115 : chunk->requested_size = InvalidAllocSize;
1209 : : #endif
1210 : : }
10651 scrappy@hub.org 1211 : 229418054 : }
1212 : :
1213 : : /*
1214 : : * AllocSetRealloc
1215 : : * Returns new pointer to allocated memory of given size or NULL if
1216 : : * request could not be completed; this memory is added to the set.
1217 : : * Memory associated with given pointer is copied into the new memory,
1218 : : * and the old memory is freed.
1219 : : *
1220 : : * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1221 : : * makes our Valgrind client requests less-precise, hazarding false negatives.
1222 : : * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1223 : : * request size.)
1224 : : */
1225 : : void *
557 drowley@postgresql.o 1226 : 2585338 : AllocSetRealloc(void *pointer, Size size, int flags)
1227 : : {
1228 : : AllocBlock block;
1229 : : AllocSet set;
1104 1230 : 2585338 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1231 : : Size oldchksize;
1232 : : int fidx;
1233 : :
1234 : : /* Allow access to the chunk header. */
1235 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1236 : :
1237 [ + + ]: 2585338 : if (MemoryChunkIsExternal(chunk))
1238 : : {
1239 : : /*
1240 : : * The chunk must have been allocated as a single-chunk block. Use
1241 : : * realloc() to make the containing block bigger, or smaller, with
1242 : : * minimum space wastage.
1243 : : */
1244 : : AllocBlock newblock;
1245 : : Size chksize;
1246 : : Size blksize;
1247 : : Size oldblksize;
1248 : :
1249 : 41489 : block = ExternalChunkGetBlock(chunk);
1250 : :
1251 : : /*
1252 : : * Try to verify that we have a sane block pointer: the block header
1253 : : * should reference an aset and the freeptr should match the endptr.
1254 : : */
1062 tgl@sss.pgh.pa.us 1255 [ + - + - : 41489 : if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
+ - - + ]
1062 tgl@sss.pgh.pa.us 1256 [ # # ]:UBC 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
1257 : :
1104 drowley@postgresql.o 1258 :CBC 41489 : set = block->aset;
1259 : :
1260 : : /* only check size in paths where the limits could be hit */
557 1261 : 41489 : MemoryContextCheckSize((MemoryContext) set, size, flags);
1262 : :
928 tgl@sss.pgh.pa.us 1263 : 41489 : oldchksize = block->endptr - (char *) pointer;
1264 : :
1265 : : #ifdef MEMORY_CONTEXT_CHECKING
1266 : : /* Test for someone scribbling on unused space in chunk */
1267 [ - + ]: 41489 : Assert(chunk->requested_size < oldchksize);
1095 drowley@postgresql.o 1268 [ - + ]: 41489 : if (!sentinel_ok(pointer, chunk->requested_size))
1095 drowley@postgresql.o 1269 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1270 : : set->header.name, chunk);
1271 : : #endif
1272 : :
1273 : : #ifdef MEMORY_CONTEXT_CHECKING
1274 : : /* ensure there's always space for the sentinel byte */
1095 drowley@postgresql.o 1275 :CBC 41489 : chksize = MAXALIGN(size + 1);
1276 : : #else
1277 : : chksize = MAXALIGN(size);
1278 : : #endif
1279 : :
1280 : : /* Do the realloc */
9045 tgl@sss.pgh.pa.us 1281 : 41489 : blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1941 1282 : 41489 : oldblksize = block->endptr - ((char *) block);
1283 : :
35 tgl@sss.pgh.pa.us 1284 :GNC 41489 : newblock = (AllocBlock) realloc(block, blksize);
1285 [ - + ]: 41489 : if (newblock == NULL)
1286 : : {
1287 : : /* Disallow access to the chunk header. */
1288 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
557 drowley@postgresql.o 1289 :UBC 0 : return MemoryContextAllocationFailure(&set->header, size, flags);
1290 : : }
1291 : :
1292 : : /*
1293 : : * Move the block-header vchunk explicitly. (mcxt.c will take care of
1294 : : * moving the vchunk for the user data.)
1295 : : */
1296 : : VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
35 tgl@sss.pgh.pa.us 1297 :GNC 41489 : block = newblock;
1298 : :
1299 : : /* updated separately, not to underflow when (oldblksize > blksize) */
1104 drowley@postgresql.o 1300 :CBC 41489 : set->header.mem_allocated -= oldblksize;
1301 : 41489 : set->header.mem_allocated += blksize;
1302 : :
9510 tgl@sss.pgh.pa.us 1303 : 41489 : block->freeptr = block->endptr = ((char *) block) + blksize;
1304 : :
1305 : : /* Update pointers since block has likely been moved */
1104 drowley@postgresql.o 1306 : 41489 : chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1307 : 41489 : pointer = MemoryChunkGetPointer(chunk);
3104 tgl@sss.pgh.pa.us 1308 [ + - ]: 41489 : if (block->prev)
1309 : 41489 : block->prev->next = block;
1310 : : else
3104 tgl@sss.pgh.pa.us 1311 :UBC 0 : set->blocks = block;
3104 tgl@sss.pgh.pa.us 1312 [ + + ]:CBC 41489 : if (block->next)
1313 : 29201 : block->next->prev = block;
1314 : :
1315 : : #ifdef MEMORY_CONTEXT_CHECKING
1316 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1317 : :
1318 : : /*
1319 : : * We can only randomize the extra space if we know the prior request.
1320 : : * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1321 : : */
1322 : : if (size > chunk->requested_size)
1323 : : randomize_mem((char *) pointer + chunk->requested_size,
1324 : : size - chunk->requested_size);
1325 : : #else
1326 : :
1327 : : /*
1328 : : * If this is an increase, realloc() will have marked any
1329 : : * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1330 : : * also need to adjust trailing bytes from the old allocation (from
1331 : : * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1332 : : * Make sure not to mark too many bytes in case chunk->requested_size
1333 : : * < size < oldchksize.
1334 : : */
1335 : : #ifdef USE_VALGRIND
1336 : : if (Min(size, oldchksize) > chunk->requested_size)
1337 : : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1338 : : Min(size, oldchksize) - chunk->requested_size);
1339 : : #endif
1340 : : #endif
1341 : :
9045 1342 : 41489 : chunk->requested_size = size;
1343 : : /* set mark to catch clobber of "unused" space */
1095 drowley@postgresql.o 1344 [ - + ]: 41489 : Assert(size < chksize);
1345 : 41489 : set_sentinel(pointer, size);
1346 : : #else /* !MEMORY_CONTEXT_CHECKING */
1347 : :
1348 : : /*
1349 : : * We may need to adjust marking of bytes from the old allocation as
1350 : : * some of them may be marked NOACCESS. We don't know how much of the
1351 : : * old chunk size was the requested size; it could have been as small
1352 : : * as one byte. We have to be conservative and just mark the entire
1353 : : * old portion DEFINED. Make sure not to mark memory beyond the new
1354 : : * allocation in case it's smaller than the old one.
1355 : : */
1356 : : VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1357 : : #endif
1358 : :
1359 : : /* Ensure any padding bytes are marked NOACCESS. */
1360 : : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1361 : :
1362 : : /* Disallow access to the chunk header. */
1363 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1364 : :
3104 tgl@sss.pgh.pa.us 1365 : 41489 : return pointer;
1366 : : }
1367 : :
1104 drowley@postgresql.o 1368 : 2543849 : block = MemoryChunkGetBlock(chunk);
1369 : :
1370 : : /*
1371 : : * In this path, for speed reasons we just Assert that the referenced
1372 : : * block is good. We can also Assert that the value field is sane. Future
1373 : : * field experience may show that these Asserts had better become regular
1374 : : * runtime test-and-elog checks.
1375 : : */
1044 peter@eisentraut.org 1376 [ + - + - - + ]: 2543849 : Assert(AllocBlockIsValid(block));
1104 drowley@postgresql.o 1377 : 2543849 : set = block->aset;
1378 : :
1062 tgl@sss.pgh.pa.us 1379 : 2543849 : fidx = MemoryChunkGetValue(chunk);
1380 [ + - - + ]: 2543849 : Assert(FreeListIdxIsValid(fidx));
928 1381 : 2543849 : oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1382 : :
1383 : : #ifdef MEMORY_CONTEXT_CHECKING
1384 : : /* Test for someone scribbling on unused space in chunk */
1385 [ + + ]: 2543849 : if (chunk->requested_size < oldchksize)
1104 drowley@postgresql.o 1386 [ - + ]: 1218024 : if (!sentinel_ok(pointer, chunk->requested_size))
1104 drowley@postgresql.o 1387 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1388 : : set->header.name, chunk);
1389 : : #endif
1390 : :
1391 : : /*
1392 : : * Chunk sizes are aligned to powers of 2 in AllocSetAlloc(), so the
1393 : : * allocated area may already be >= the new size. (In particular, we
1394 : : * will fall out here if the requested size is a decrease.)
1395 : : */
928 tgl@sss.pgh.pa.us 1396 [ + + ]:CBC 2543849 : if (oldchksize >= size)
1397 : : {
1398 : : #ifdef MEMORY_CONTEXT_CHECKING
2165 1399 : 1248591 : Size oldrequest = chunk->requested_size;
1400 : :
1401 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1402 : : /* We can only fill the extra space if we know the prior request */
1403 : : if (size > oldrequest)
1404 : : randomize_mem((char *) pointer + oldrequest,
1405 : : size - oldrequest);
1406 : : #endif
1407 : :
1408 : 1248591 : chunk->requested_size = size;
1409 : :
1410 : : /*
1411 : : * If this is an increase, mark any newly-available part UNDEFINED.
1412 : : * Otherwise, mark the obsolete part NOACCESS.
1413 : : */
1414 : : if (size > oldrequest)
1415 : : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1416 : : size - oldrequest);
1417 : : else
1418 : : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1419 : : oldchksize - size);
1420 : :
1421 : : /* set mark to catch clobber of "unused" space */
928 1422 [ + + ]: 1248591 : if (size < oldchksize)
2165 1423 : 1233811 : set_sentinel(pointer, size);
1424 : : #else /* !MEMORY_CONTEXT_CHECKING */
1425 : :
1426 : : /*
1427 : : * We don't have the information to determine whether we're growing
1428 : : * the old request or shrinking it, so we conservatively mark the
1429 : : * entire new allocation DEFINED.
1430 : : */
1431 : : VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1432 : : VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1433 : : #endif
1434 : :
1435 : : /* Disallow access to the chunk header. */
1436 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1437 : :
1438 : 1248591 : return pointer;
1439 : : }
1440 : : else
1441 : : {
1442 : : /*
1443 : : * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1444 : : * allocate a new chunk and copy the data. Since we know the existing
1445 : : * data isn't huge, this won't involve any great memcpy expense, so
1446 : : * it's not worth being smarter. (At one time we tried to avoid
1447 : : * memcpy when it was possible to enlarge the chunk in-place, but that
1448 : : * turns out to misbehave unpleasantly for repeated cycles of
1449 : : * palloc/repalloc/pfree: the eventually freed chunks go into the
1450 : : * wrong freelist for the next initial palloc request, and so we leak
1451 : : * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1452 : : */
1453 : : AllocPointer newPointer;
1454 : : Size oldsize;
1455 : :
1456 : : /* allocate new chunk (this also checks size is valid) */
557 drowley@postgresql.o 1457 : 1295258 : newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1458 : :
1459 : : /* leave immediately if request was not completed */
3873 rhaas@postgresql.org 1460 [ - + ]: 1295258 : if (newPointer == NULL)
1461 : : {
1462 : : /* Disallow access to the chunk header. */
1463 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
557 drowley@postgresql.o 1464 :UBC 0 : return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1465 : : }
1466 : :
1467 : : /*
1468 : : * AllocSetAlloc() may have returned a region that is still NOACCESS.
1469 : : * Change it to UNDEFINED for the moment; memcpy() will then transfer
1470 : : * definedness from the old allocation to the new. If we know the old
1471 : : * allocation, copy just that much. Otherwise, make the entire old
1472 : : * chunk defined to avoid errors as we copy the currently-NOACCESS
1473 : : * trailing bytes.
1474 : : */
1475 : : VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1476 : : #ifdef MEMORY_CONTEXT_CHECKING
4455 noah@leadboat.com 1477 :CBC 1295258 : oldsize = chunk->requested_size;
1478 : : #else
1479 : : oldsize = oldchksize;
1480 : : VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1481 : : #endif
1482 : :
1483 : : /* transfer existing data (certain to fit) */
9510 tgl@sss.pgh.pa.us 1484 : 1295258 : memcpy(newPointer, pointer, oldsize);
1485 : :
1486 : : /* free old chunk */
1104 drowley@postgresql.o 1487 : 1295258 : AllocSetFree(pointer);
1488 : :
9510 tgl@sss.pgh.pa.us 1489 : 1295258 : return newPointer;
1490 : : }
1491 : : }
1492 : :
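/*
 * Illustrative sketch (not part of aset.c): once a request is too big for
 * the last freelist, its chunk lives alone in a dedicated block, and every
 * further repalloc() of it goes through the external-chunk realloc() path
 * above rather than the freelists.  Only the public memory-context API is
 * used; the context name and growth loop are illustrative assumptions, not
 * taken from this file.
 */
static void
grow_buffer_example(void)
{
	MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
											  "grow example",
											  ALLOCSET_DEFAULT_SIZES);
	MemoryContext oldcxt = MemoryContextSwitchTo(cxt);
	Size		len = 1024;
	char	   *buf = palloc(len);

	while (len < 1024 * 1024)
	{
		len *= 2;
		buf = repalloc(buf, len);	/* large sizes end up in realloc() */
	}

	pfree(buf);						/* returns the whole dedicated block */
	MemoryContextSwitchTo(oldcxt);
	MemoryContextDelete(cxt);
}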
1493 : : /*
1494 : : * AllocSetGetChunkContext
1495 : : * Return the MemoryContext that 'pointer' belongs to.
1496 : : */
1497 : : MemoryContext
1104 drowley@postgresql.o 1498 : 4082829 : AllocSetGetChunkContext(void *pointer)
1499 : : {
1500 : 4082829 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1501 : : AllocBlock block;
1502 : : AllocSet set;
1503 : :
1504 : : /* Allow access to the chunk header. */
1505 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1506 : :
1507 [ + + ]: 4082829 : if (MemoryChunkIsExternal(chunk))
1508 : 41489 : block = ExternalChunkGetBlock(chunk);
1509 : : else
1510 : 4041340 : block = (AllocBlock) MemoryChunkGetBlock(chunk);
1511 : :
1512 : : /* Disallow access to the chunk header. */
1513 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1514 : :
1044 peter@eisentraut.org 1515 [ + - + - - + ]: 4082829 : Assert(AllocBlockIsValid(block));
1104 drowley@postgresql.o 1516 : 4082829 : set = block->aset;
1517 : :
1518 : 4082829 : return &set->header;
1519 : : }
1520 : :
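/*
 * Illustrative sketch (not part of aset.c): recovering the owning context
 * from the chunk header is what lets pfree() and repalloc() accept a bare
 * pointer with no context argument, regardless of which context is current.
 * Only the public API is used; the function and variable names are
 * illustrative.
 */
static void
cross_context_free_example(MemoryContext some_context)
{
	MemoryContext oldcxt = MemoryContextSwitchTo(some_context);
	char	   *p = palloc(64);		/* chunk belongs to some_context */

	MemoryContextSwitchTo(oldcxt);	/* switch away again */
	pfree(p);						/* still released from some_context */
}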
1521 : : /*
1522 : : * AllocSetGetChunkSpace
1523 : : * Given a currently-allocated chunk, determine the total space
1524 : : * it occupies (including all memory-allocation overhead).
1525 : : */
1526 : : Size
1527 : 4399948 : AllocSetGetChunkSpace(void *pointer)
1528 : : {
1529 : 4399948 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1530 : : int fidx;
1531 : :
1532 : : /* Allow access to the chunk header. */
1533 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1534 : :
1535 [ + + ]: 4399948 : if (MemoryChunkIsExternal(chunk))
1536 : : {
1537 : 487890 : AllocBlock block = ExternalChunkGetBlock(chunk);
1538 : :
1539 : : /* Disallow access to the chunk header. */
1540 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1541 : :
1044 peter@eisentraut.org 1542 [ + - + - - + ]: 487890 : Assert(AllocBlockIsValid(block));
1543 : :
1104 drowley@postgresql.o 1544 : 487890 : return block->endptr - (char *) chunk;
1545 : : }
1546 : :
1062 tgl@sss.pgh.pa.us 1547 : 3912058 : fidx = MemoryChunkGetValue(chunk);
1548 [ + - - + ]: 3912058 : Assert(FreeListIdxIsValid(fidx));
1549 : :
1550 : : /* Disallow access to the chunk header. */
1551 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1552 : :
1553 : 3912058 : return GetChunkSizeFromFreeListIdx(fidx) + ALLOC_CHUNKHDRSZ;
1554 : : }
1555 : :
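/*
 * Illustrative sketch (not part of aset.c): a non-external chunk records only
 * its freelist index, so the space it occupies is the power-of-2 freelist
 * size plus ALLOC_CHUNKHDRSZ.  This assumes the conventional ALLOC_MINBITS of
 * 3 (8-byte smallest chunk); e.g. a 100-byte request occupies a 128-byte
 * chunk plus its header.
 */
static Size
example_rounded_chunk_size(Size request)
{
	Size		sz = 8;				/* 1 << ALLOC_MINBITS, assumed to be 3 */

	while (sz < request)
		sz <<= 1;					/* next power of 2 >= request */
	return sz;						/* caller adds ALLOC_CHUNKHDRSZ for gross size */
}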
1556 : : /*
1557 : : * AllocSetIsEmpty
1558 : : * Is an allocset empty of any allocated space?
1559 : : */
1560 : : bool
7660 1561 : 4428 : AllocSetIsEmpty(MemoryContext context)
1562 : : {
1044 peter@eisentraut.org 1563 [ + - - + ]: 4428 : Assert(AllocSetIsValid(context));
1564 : :
1565 : : /*
1566 : : * For now, we say "empty" only if the context is new or just reset. We
1567 : : * could examine the freelists to determine if all space has been freed,
1568 : : * but it's not really worth the trouble for present uses of this
1569 : : * functionality.
1570 : : */
5222 heikki.linnakangas@i 1571 [ + + ]: 4428 : if (context->isReset)
7660 tgl@sss.pgh.pa.us 1572 : 4416 : return true;
1573 : 12 : return false;
1574 : : }
1575 : :
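/*
 * Illustrative sketch (not part of aset.c): "empty" here means "new or just
 * reset", so a context whose allocations have all been pfree'd still reports
 * non-empty until it is reset.  Assumes the context has no child contexts;
 * only the public API is used.
 */
static void
is_empty_example(MemoryContext cxt)
{
	char	   *p = MemoryContextAlloc(cxt, 32);

	pfree(p);
	Assert(!MemoryContextIsEmpty(cxt));	/* freed space sits on a freelist */

	MemoryContextReset(cxt);
	Assert(MemoryContextIsEmpty(cxt));	/* after reset it reports empty */
}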
1576 : : /*
1577 : : * AllocSetStats
1578 : : * Compute stats about memory consumption of an allocset.
1579 : : *
1580 : : * printfunc: if not NULL, pass a human-readable stats string to this.
1581 : : * passthru: pass this pointer through to printfunc.
1582 : : * totals: if not NULL, add stats about this context into *totals.
1583 : : * print_to_stderr: print stats to stderr if true, elog otherwise.
1584 : : */
1585 : : void
2720 1586 : 2388 : AllocSetStats(MemoryContext context,
1587 : : MemoryStatsPrintFunc printfunc, void *passthru,
1588 : : MemoryContextCounters *totals, bool print_to_stderr)
1589 : : {
9201 1590 : 2388 : AllocSet set = (AllocSet) context;
4244 1591 : 2388 : Size nblocks = 0;
3665 1592 : 2388 : Size freechunks = 0;
1593 : : Size totalspace;
4244 1594 : 2388 : Size freespace = 0;
1595 : : AllocBlock block;
1596 : : int fidx;
1597 : :
1044 peter@eisentraut.org 1598 [ + - - + ]: 2388 : Assert(AllocSetIsValid(set));
1599 : :
1600 : : /* Include context header in totalspace */
2720 tgl@sss.pgh.pa.us 1601 : 2388 : totalspace = MAXALIGN(sizeof(AllocSetContext));
1602 : :
9239 1603 [ + + ]: 7376 : for (block = set->blocks; block != NULL; block = block->next)
1604 : : {
1605 : 4988 : nblocks++;
1606 : 4988 : totalspace += block->endptr - ((char *) block);
1607 : 4988 : freespace += block->endptr - block->freeptr;
1608 : : }
1609 [ + + ]: 28656 : for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1610 : : {
1062 1611 : 26268 : Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1104 drowley@postgresql.o 1612 : 26268 : MemoryChunk *chunk = set->freelist[fidx];
1613 : :
1614 [ + + ]: 35527 : while (chunk != NULL)
1615 : : {
1616 : 9259 : AllocFreeListLink *link = GetFreeListLink(chunk);
1617 : :
1618 : : /* Allow access to the chunk header. */
1619 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1062 tgl@sss.pgh.pa.us 1620 [ - + ]: 9259 : Assert(MemoryChunkGetValue(chunk) == fidx);
1621 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1622 : :
3665 1623 : 9259 : freechunks++;
1104 drowley@postgresql.o 1624 : 9259 : freespace += chksz + ALLOC_CHUNKHDRSZ;
1625 : :
1626 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1627 : 9259 : chunk = link->next;
1628 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1629 : : }
1630 : : }
1631 : :
2720 tgl@sss.pgh.pa.us 1632 [ + + ]: 2388 : if (printfunc)
1633 : : {
1634 : : char stats_string[200];
1635 : :
1636 : 810 : snprintf(stats_string, sizeof(stats_string),
1637 : : "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1638 : : totalspace, nblocks, freespace, freechunks,
1639 : : totalspace - freespace);
1614 fujii@postgresql.org 1640 : 810 : printfunc(context, passthru, stats_string, print_to_stderr);
1641 : : }
1642 : :
3665 tgl@sss.pgh.pa.us 1643 [ + - ]: 2388 : if (totals)
1644 : : {
1645 : 2388 : totals->nblocks += nblocks;
1646 : 2388 : totals->freechunks += freechunks;
1647 : 2388 : totals->totalspace += totalspace;
1648 : 2388 : totals->freespace += freespace;
1649 : : }
9239 1650 : 2388 : }
1651 : :
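/*
 * Illustrative sketch (not part of aset.c): this routine is normally reached
 * via MemoryContextStats(), which walks the context tree and passes each
 * per-context line to a printfunc; recent releases also expose the same
 * numbers through the pg_backend_memory_contexts view.
 */
static void
dump_memory_stats_example(void)
{
	/* emits one summary line per context, built by routines like the above */
	MemoryContextStats(TopMemoryContext);
}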
1652 : :
1653 : : #ifdef MEMORY_CONTEXT_CHECKING
1654 : :
1655 : : /*
1656 : : * AllocSetCheck
1657 : : * Walk through chunks and check consistency of memory.
1658 : : *
1659 : : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1660 : : * find yourself in an infinite loop when trouble occurs, because this
1661 : : * routine will be entered again when elog cleanup tries to release memory!
1662 : : */
1663 : : void
9188 bruce@momjian.us 1664 : 90732677 : AllocSetCheck(MemoryContext context)
1665 : : {
8934 1666 : 90732677 : AllocSet set = (AllocSet) context;
2824 tgl@sss.pgh.pa.us 1667 : 90732677 : const char *name = set->header.name;
1668 : : AllocBlock prevblock;
1669 : : AllocBlock block;
2164 tomas.vondra@postgre 1670 : 90732677 : Size total_allocated = 0;
1671 : :
3104 tgl@sss.pgh.pa.us 1672 : 90732677 : for (prevblock = NULL, block = set->blocks;
1673 [ + + ]: 256168050 : block != NULL;
1674 : 165435373 : prevblock = block, block = block->next)
1675 : : {
8934 bruce@momjian.us 1676 : 165435373 : char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1677 : 165435373 : long blk_used = block->freeptr - bpoz;
1678 : 165435373 : long blk_data = 0;
1679 : 165435373 : long nchunks = 0;
1104 drowley@postgresql.o 1680 : 165435373 : bool has_external_chunk = false;
1681 : :
782 1682 [ + + ]: 165435373 : if (IsKeeperBlock(set, block))
2167 tomas.vondra@postgre 1683 : 90732677 : total_allocated += block->endptr - ((char *) set);
1684 : : else
1685 : 74702696 : total_allocated += block->endptr - ((char *) block);
1686 : :
1687 : : /*
1688 : : * Empty block - empty can be keeper-block only
1689 : : */
9188 bruce@momjian.us 1690 [ + + ]: 165435373 : if (!blk_used)
1691 : : {
782 drowley@postgresql.o 1692 [ - + ]: 2825980 : if (!IsKeeperBlock(set, block))
8079 tgl@sss.pgh.pa.us 1693 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: empty block %p",
1694 : : name, block);
1695 : : }
1696 : :
1697 : : /*
1698 : : * Check block header fields
1699 : : */
3104 tgl@sss.pgh.pa.us 1700 [ + - ]:CBC 165435373 : if (block->aset != set ||
1701 [ + - ]: 165435373 : block->prev != prevblock ||
1702 [ + - ]: 165435373 : block->freeptr < bpoz ||
1703 [ - + ]: 165435373 : block->freeptr > block->endptr)
3104 tgl@sss.pgh.pa.us 1704 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1705 : : name, block);
1706 : :
1707 : : /*
1708 : : * Chunk walker
1709 : : */
9045 tgl@sss.pgh.pa.us 1710 [ + + ]:CBC 2753204959 : while (bpoz < block->freeptr)
1711 : : {
1104 drowley@postgresql.o 1712 : 2587769586 : MemoryChunk *chunk = (MemoryChunk *) bpoz;
1713 : : Size chsize,
1714 : : dsize;
1715 : :
1716 : : /* Allow access to the chunk header. */
1717 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1718 : :
1719 [ + + ]: 2587769586 : if (MemoryChunkIsExternal(chunk))
1720 : : {
1721 : 5605818 : chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1722 : 5605818 : has_external_chunk = true;
1723 : :
1724 : : /* make sure this chunk consumes the entire block */
1725 [ - + ]: 5605818 : if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
1104 drowley@postgresql.o 1726 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1727 : : name, chunk, block);
1728 : : }
1729 : : else
1730 : : {
1062 tgl@sss.pgh.pa.us 1731 :CBC 2582163768 : int fidx = MemoryChunkGetValue(chunk);
1732 : :
1733 [ + - - + ]: 2582163768 : if (!FreeListIdxIsValid(fidx))
1062 tgl@sss.pgh.pa.us 1734 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1735 : : name, chunk, block);
1736 : :
1062 tgl@sss.pgh.pa.us 1737 :CBC 2582163768 : chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1738 : :
1739 : : /*
1740 : : * Check the stored block offset correctly references this
1741 : : * block.
1742 : : */
1104 drowley@postgresql.o 1743 [ - + ]: 2582163768 : if (block != MemoryChunkGetBlock(chunk))
1104 drowley@postgresql.o 1744 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1745 : : name, chunk, block);
1746 : : }
2999 tgl@sss.pgh.pa.us 1747 :CBC 2587769586 : dsize = chunk->requested_size; /* real data */
1748 : :
1749 : : /* an allocated chunk's requested size must be <= the chsize */
1104 drowley@postgresql.o 1750 [ + + - + ]: 2587769586 : if (dsize != InvalidAllocSize && dsize > chsize)
8079 tgl@sss.pgh.pa.us 1751 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1752 : : name, chunk, block);
1753 : :
1754 : : /* chsize must not be smaller than the first freelist's size */
9188 bruce@momjian.us 1755 [ - + ]:CBC 2587769586 : if (chsize < (1 << ALLOC_MINBITS))
4244 tgl@sss.pgh.pa.us 1756 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1757 : : name, chsize, chunk, block);
1758 : :
1759 : : /*
1760 : : * Check for overwrite of padding space in an allocated chunk.
1761 : : */
1104 drowley@postgresql.o 1762 [ + + + + ]:CBC 2587769586 : if (dsize != InvalidAllocSize && dsize < chsize &&
4455 noah@leadboat.com 1763 [ - + ]: 1719910121 : !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
8079 tgl@sss.pgh.pa.us 1764 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1765 : : name, block, chunk);
1766 : :
1767 : : /* if chunk is allocated, disallow access to the chunk header */
1768 : : if (dsize != InvalidAllocSize)
1769 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1770 : :
9188 bruce@momjian.us 1771 :CBC 2587769586 : blk_data += chsize;
1772 : 2587769586 : nchunks++;
1773 : :
9045 tgl@sss.pgh.pa.us 1774 : 2587769586 : bpoz += ALLOC_CHUNKHDRSZ + chsize;
1775 : : }
1776 : :
9188 bruce@momjian.us 1777 [ - + ]: 165435373 : if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
8079 tgl@sss.pgh.pa.us 1778 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1779 : : name, block);
1780 : :
1104 drowley@postgresql.o 1781 [ + + - + ]:CBC 165435373 : if (has_external_chunk && nchunks > 1)
1104 drowley@postgresql.o 1782 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1783 : : name, block);
1784 : : }
1785 : :
1997 jdavis@postgresql.or 1786 [ - + ]:CBC 90732677 : Assert(total_allocated == context->mem_allocated);
9188 bruce@momjian.us 1787 : 90732677 : }
1788 : :
1789 : : #endif /* MEMORY_CONTEXT_CHECKING */
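/*
 * Illustrative sketch (not part of aset.c): AllocSetCheck() is compiled only
 * when MEMORY_CONTEXT_CHECKING is defined (typically in assert-enabled
 * builds) and is reached through MemoryContextCheck(), which recursively
 * verifies a context and its children.
 */
#ifdef MEMORY_CONTEXT_CHECKING
static void
check_all_contexts_example(void)
{
	MemoryContextCheck(TopMemoryContext);	/* emits WARNINGs on corruption */
}
#endif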