Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * aset.c
4 : : * Allocation set definitions.
5 : : *
6 : : * AllocSet is our standard implementation of the abstract MemoryContext
7 : : * type.
8 : : *
9 : : *
10 : : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
11 : : * Portions Copyright (c) 1994, Regents of the University of California
12 : : *
13 : : * IDENTIFICATION
14 : : * src/backend/utils/mmgr/aset.c
15 : : *
16 : : * NOTE:
17 : : * This is a new (Feb. 05, 1999) implementation of the allocation set
18 : : * routines. AllocSet...() does not use OrderedSet...() any more.
19 : : * Instead it manages allocations in a block pool by itself, combining
20 : : * many small allocations in a few bigger blocks. AllocSetFree() normally
21 : : * doesn't free() memory really. It just adds the free'd area to some
22 : : * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 : : * at once on AllocSetReset(), which happens when the memory context gets
24 : : * destroyed.
25 : : * Jan Wieck
26 : : *
27 : : * Performance improvement from Tom Lane, 8/99: for extremely large request
28 : : * sizes, we do want to be able to give the memory back to free() as soon
29 : : * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 : : * freelist entries that might never be usable. This is specially needed
31 : : * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 : : * the previous instances of the block were guaranteed to be wasted until
33 : : * AllocSetReset() under the old way.
34 : : *
35 : : * Further improvement 12/00: as the code stood, request sizes in the
36 : : * midrange between "small" and "large" were handled very inefficiently,
37 : : * because any sufficiently large free chunk would be used to satisfy a
38 : : * request, even if it was much larger than necessary. This led to more
39 : : * and more wasted space in allocated chunks over time. To fix, get rid
40 : : * of the midrange behavior: we now handle only "small" power-of-2-size
41 : : * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 : : * the number of freelists to change the small/large boundary.
43 : : *
44 : : *-------------------------------------------------------------------------
45 : : */
46 : :
47 : : #include "postgres.h"
48 : :
49 : : #include "port/pg_bitutils.h"
50 : : #include "utils/memdebug.h"
51 : : #include "utils/memutils.h"
52 : : #include "utils/memutils_internal.h"
53 : : #include "utils/memutils_memorychunk.h"
54 : :
55 : : /*--------------------
56 : : * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 : : * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58 : : *
59 : : * Note that all chunks in the freelists have power-of-2 sizes. This
60 : : * improves recyclability: we may waste some space, but the wasted space
61 : : * should stay pretty constant as requests are made and released.
62 : : *
63 : : * A request too large for the last freelist is handled by allocating a
64 : : * dedicated block from malloc(). The block still has a block header and
65 : : * chunk header, but when the chunk is freed we'll return the whole block
66 : : * to malloc(), not put it on our freelists.
67 : : *
68 : : * CAUTION: ALLOC_MINBITS must be large enough so that
69 : : * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 : : * or we may fail to align the smallest chunks adequately.
71 : : * 8-byte alignment is enough on all currently known machines. This 8-byte
72 : : * minimum also allows us to store a pointer to the next freelist item within
73 : : * the chunk of memory itself.
74 : : *
75 : : * With the current parameters, request sizes up to 8K are treated as chunks,
76 : : * larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77 : : * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78 : : * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79 : : * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80 : : *--------------------
81 : : */
82 : :
#define ALLOC_MINBITS		3	/* smallest chunk size is 8 bytes */
#define ALLOCSET_NUM_FREELISTS	11
/* With ALLOC_MINBITS = 3 and 11 freelists, this evaluates to 8K (1 << 13) */
#define ALLOC_CHUNK_LIMIT	(1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
/* Size of largest chunk that we use a fixed size for */
#define ALLOC_CHUNK_FRACTION	4
/* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */

/* ALLOC_CHUNK_LIMIT must be equal to ALLOCSET_SEPARATE_THRESHOLD */
StaticAssertDecl(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
				 "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
93 : :
94 : : /*--------------------
95 : : * The first block allocated for an allocset has size initBlockSize.
96 : : * Each time we have to allocate another block, we double the block size
97 : : * (if possible, and without exceeding maxBlockSize), so as to reduce
98 : : * the bookkeeping load on malloc().
99 : : *
100 : : * Blocks allocated to hold oversize chunks do not follow this rule, however;
101 : : * they are just however big they need to be to hold that single chunk.
102 : : *
103 : : * Also, if a minContextSize is specified, the first block has that size,
104 : : * and then initBlockSize is used for the next one.
105 : : *--------------------
106 : : */
107 : :
/* Block headers are maxaligned; ALLOC_CHUNKHDRSZ is statically asserted
 * to be maxaligned already (see AllocSetContextCreateInternal). */
#define ALLOC_BLOCKHDRSZ	MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ	sizeof(MemoryChunk)
/*
 * Overhead at the start of a context's initial malloc chunk: the context
 * header followed by the keeper block's header.
 */
#define FIRST_BLOCKHDRSZ	(MAXALIGN(sizeof(AllocSetContext)) + \
							 ALLOC_BLOCKHDRSZ)

typedef struct AllocBlockData *AllocBlock;	/* forward reference */

/*
 * AllocPointer
 *		Aligned pointer which may be a member of an allocation set.
 */
typedef void *AllocPointer;

/*
 * AllocFreeListLink
 *		When pfreeing memory, if we maintain a freelist for the given chunk's
 *		size then we use an AllocFreeListLink to point to the current item in
 *		the AllocSetContext's freelist and then set the given freelist element
 *		to point to the chunk being freed.
 */
typedef struct AllocFreeListLink
{
	MemoryChunk *next;
} AllocFreeListLink;

/*
 * Obtain an AllocFreeListLink for the given chunk.  Allocation sizes are
 * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
 * itself to store the freelist link.
 */
#define GetFreeListLink(chkptr) \
	(AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)

/* Validate a freelist index retrieved from a chunk header */
#define FreeListIdxIsValid(fidx) \
	((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)

/* Determine the size of the chunk based on the freelist index */
#define GetChunkSizeFromFreeListIdx(fidx) \
	((((Size) 1) << ALLOC_MINBITS) << (fidx))
148 : :
/*
 * AllocSetContext is our standard implementation of MemoryContext.
 *
 * Note: header.isReset means there is nothing for AllocSetReset to do.
 * This is different from the aset being physically empty (empty blocks list)
 * because we will still have a keeper block.  It's also different from the
 * set being logically empty, because we don't attempt to detect pfree'ing
 * the last active chunk.
 */
typedef struct AllocSetContext
{
	MemoryContextData header;	/* Standard memory-context fields */
	/* Info about storage allocated in this context: */
	AllocBlock	blocks;			/* head of list of blocks in this set */
	MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS];	/* free chunk lists */
	/* Allocation parameters for this context: */
	uint32		initBlockSize;	/* initial block size */
	uint32		maxBlockSize;	/* maximum block size */
	uint32		nextBlockSize;	/* next block size to allocate */
	uint32		allocChunkLimit;	/* effective chunk size limit */
	/* freelist this context could be put in, or -1 if not a candidate: */
	int			freeListIndex;	/* index in context_freelists[], or -1 */
} AllocSetContext;

typedef AllocSetContext *AllocSet;
174 : :
/*
 * AllocBlock
 *		An AllocBlock is the unit of memory that is obtained by aset.c
 *		from malloc().  It contains one or more MemoryChunks, which are
 *		the units requested by palloc() and freed by pfree().  MemoryChunks
 *		cannot be returned to malloc() individually, instead they are put
 *		on freelists by pfree() and re-used by the next palloc() that has
 *		a matching request size.
 *
 *		AllocBlockData is the header data for a block --- the usable space
 *		within the block begins at the next alignment boundary.
 */
typedef struct AllocBlockData
{
	AllocSet	aset;			/* aset that owns this block */
	AllocBlock	prev;			/* prev block in aset's blocks list, if any */
	AllocBlock	next;			/* next block in aset's blocks list, if any */
	char	   *freeptr;		/* start of free space in this block */
	char	   *endptr;			/* end of space in this block */
} AllocBlockData;
195 : :
/*
 * AllocSetIsValid
 *		True iff set is valid allocation set.
 */
#define AllocSetIsValid(set) \
	((set) && IsA(set, AllocSetContext))

/*
 * AllocBlockIsValid
 *		True iff block is valid block of allocation set.
 */
#define AllocBlockIsValid(block) \
	((block) && AllocSetIsValid((block)->aset))

/*
 * We always store external chunks on a dedicated block.  This makes fetching
 * the block from an external chunk easy since it's always the first and only
 * chunk on the block.  (The chunk sits immediately after the block header.)
 */
#define ExternalChunkGetBlock(chunk) \
	(AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
217 : :
218 : : /*
219 : : * Rather than repeatedly creating and deleting memory contexts, we keep some
220 : : * freed contexts in freelists so that we can hand them out again with little
221 : : * work. Before putting a context in a freelist, we reset it so that it has
222 : : * only its initial malloc chunk and no others. To be a candidate for a
223 : : * freelist, a context must have the same minContextSize/initBlockSize as
224 : : * other contexts in the list; but its maxBlockSize is irrelevant since that
225 : : * doesn't affect the size of the initial chunk.
226 : : *
227 : : * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
228 : : * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
229 : : * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
230 : : *
231 : : * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
232 : : * hopes of improving locality of reference. But if there get to be too
233 : : * many contexts in the list, we'd prefer to drop the most-recently-created
234 : : * contexts in hopes of keeping the process memory map compact.
235 : : * We approximate that by simply deleting all existing entries when the list
236 : : * overflows, on the assumption that queries that allocate a lot of contexts
237 : : * will probably free them in more or less reverse order of allocation.
238 : : *
239 : : * Contexts in a freelist are chained via their nextchild pointers.
240 : : */
#define MAX_FREE_CONTEXTS 100	/* arbitrary limit on freelist length */

/* Obtain the keeper block for an allocation set */
#define KeeperBlock(set) \
	((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))

/* Check if the block is the keeper block of the given allocation set */
#define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))

/* A list of reset-and-freed contexts available for recycling; entries are
 * chained through their header.nextchild pointers (see comments above). */
typedef struct AllocSetFreeList
{
	int			num_free;		/* current list length */
	AllocSetContext *first_free;	/* list header */
} AllocSetFreeList;

/* context_freelists[0] is for default params, [1] for small params */
static AllocSetFreeList context_freelists[2] =
{
	{
		0, NULL
	},
	{
		0, NULL
	}
};
266 : :
267 : :
268 : : /* ----------
269 : : * AllocSetFreeIndex -
270 : : *
271 : : * Depending on the size of an allocation compute which freechunk
272 : : * list of the alloc set it belongs to. Caller must have verified
273 : : * that size <= ALLOC_CHUNK_LIMIT.
274 : : * ----------
275 : : */
static inline int
AllocSetFreeIndex(Size size)
{
	int			idx;

	/* Requests of ALLOC_MINBITS size or less all map to freelist 0. */
	if (size > (1 << ALLOC_MINBITS))
	{
		/*----------
		 * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
		 * This is the same as
		 *		pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
		 * or equivalently
		 *		pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
		 *
		 * However, for platforms without intrinsic support, we duplicate the
		 * logic here, allowing an additional optimization.  It's reasonable
		 * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
		 * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
		 * the last two bytes.
		 *
		 * Yes, this function is enough of a hot-spot to make it worth this
		 * much trouble.
		 *----------
		 */
#ifdef HAVE_BITSCAN_REVERSE
		idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
#else
		uint32		t,
					tsize;

		/* Statically assert that we only have a 16-bit input value. */
		StaticAssertDecl(ALLOC_CHUNK_LIMIT < (1 << 16),
						 "ALLOC_CHUNK_LIMIT must be less than 64kB");

		tsize = size - 1;
		t = tsize >> 8;
		idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
		idx -= ALLOC_MINBITS - 1;
#endif

		/* Caller guaranteed size <= ALLOC_CHUNK_LIMIT, so idx is in range */
		Assert(idx < ALLOCSET_NUM_FREELISTS);
	}
	else
		idx = 0;

	return idx;
}
323 : :
324 : :
325 : : /*
326 : : * Public routines
327 : : */
328 : :
329 : :
330 : : /*
331 : : * AllocSetContextCreateInternal
332 : : * Create a new AllocSet context.
333 : : *
334 : : * parent: parent context, or NULL if top-level context
335 : : * name: name of context (must be statically allocated)
336 : : * minContextSize: minimum context size
337 : : * initBlockSize: initial allocation block size
338 : : * maxBlockSize: maximum allocation block size
339 : : *
340 : : * Most callers should abstract the context size parameters using a macro
341 : : * such as ALLOCSET_DEFAULT_SIZES.
342 : : *
343 : : * Note: don't call this directly; go through the wrapper macro
344 : : * AllocSetContextCreate.
345 : : */
MemoryContext
AllocSetContextCreateInternal(MemoryContext parent,
							  const char *name,
							  Size minContextSize,
							  Size initBlockSize,
							  Size maxBlockSize)
{
	int			freeListIndex;
	Size		firstBlockSize;
	AllocSet	set;
	AllocBlock	block;

	/* ensure MemoryChunk's size is properly maxaligned */
	StaticAssertDecl(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
					 "sizeof(MemoryChunk) is not maxaligned");
	/* check we have enough space to store the freelist link */
	StaticAssertDecl(sizeof(AllocFreeListLink) <= (1 << ALLOC_MINBITS),
					 "sizeof(AllocFreeListLink) larger than minimum allocation size");

	/*
	 * First, validate allocation parameters.  Once these were regular runtime
	 * tests and elog's, but in practice Asserts seem sufficient because
	 * nobody varies their parameters at runtime.  We somewhat arbitrarily
	 * enforce a minimum 1K block size.  We restrict the maximum block size to
	 * MEMORYCHUNK_MAX_BLOCKOFFSET as MemoryChunks are limited to this in
	 * regards to addressing the offset between the chunk and the block that
	 * the chunk is stored on.  We would be unable to store the offset between
	 * the chunk and block for any chunks that were beyond
	 * MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the block if the block was to be
	 * larger than this.
	 */
	Assert(initBlockSize == MAXALIGN(initBlockSize) &&
		   initBlockSize >= 1024);
	Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
		   maxBlockSize >= initBlockSize &&
		   AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
	Assert(minContextSize == 0 ||
		   (minContextSize == MAXALIGN(minContextSize) &&
			minContextSize >= 1024 &&
			minContextSize <= maxBlockSize));
	Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);

	/*
	 * Check whether the parameters match either available freelist.  We do
	 * not need to demand a match of maxBlockSize.
	 */
	if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
		initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
		freeListIndex = 0;
	else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
			 initBlockSize == ALLOCSET_SMALL_INITSIZE)
		freeListIndex = 1;
	else
		freeListIndex = -1;

	/*
	 * If a suitable freelist entry exists, just recycle that context.
	 */
	if (freeListIndex >= 0)
	{
		AllocSetFreeList *freelist = &context_freelists[freeListIndex];

		if (freelist->first_free != NULL)
		{
			/* Remove entry from freelist */
			set = freelist->first_free;
			freelist->first_free = (AllocSet) set->header.nextchild;
			freelist->num_free--;

			/* Update its maxBlockSize; everything else should be OK */
			set->maxBlockSize = maxBlockSize;

			/* Reinitialize its header, installing correct name and parent */
			MemoryContextCreate((MemoryContext) set,
								T_AllocSetContext,
								MCTX_ASET_ID,
								parent,
								name);

			/* Recycled context retains only its keeper block's worth */
			((MemoryContext) set)->mem_allocated =
				KeeperBlock(set)->endptr - ((char *) set);

			return (MemoryContext) set;
		}
	}

	/* Determine size of initial block */
	firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
		ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
	if (minContextSize != 0)
		firstBlockSize = Max(firstBlockSize, minContextSize);
	else
		firstBlockSize = Max(firstBlockSize, initBlockSize);

	/*
	 * Allocate the initial block.  Unlike other aset.c blocks, it starts with
	 * the context header and its block header follows that.
	 */
	set = (AllocSet) malloc(firstBlockSize);
	if (set == NULL)
	{
		if (TopMemoryContext)
			MemoryContextStats(TopMemoryContext);
		ereport(ERROR,
				(errcode(ERRCODE_OUT_OF_MEMORY),
				 errmsg("out of memory"),
				 errdetail("Failed while creating memory context \"%s\".",
						   name)));
	}

	/*
	 * Avoid writing code that can fail between here and MemoryContextCreate;
	 * we'd leak the header/initial block if we ereport in this stretch.
	 */

	/* Create a vpool associated with the context */
	VALGRIND_CREATE_MEMPOOL(set, 0, false);

	/*
	 * Create a vchunk covering both the AllocSetContext struct and the keeper
	 * block's header.  (Perhaps it would be more sensible for these to be two
	 * separate vchunks, but doing that seems to tickle bugs in some versions
	 * of Valgrind.)  We must have these vchunks, and also a vchunk for each
	 * subsequently-added block header, so that Valgrind considers the
	 * pointers within them while checking for leaked memory.  Note that
	 * Valgrind doesn't distinguish between these vchunks and those created by
	 * mcxt.c for the user-accessible-data chunks we allocate.
	 */
	VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);

	/* Fill in the initial block's block header */
	block = KeeperBlock(set);
	block->aset = set;
	block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
	block->endptr = ((char *) set) + firstBlockSize;
	block->prev = NULL;
	block->next = NULL;

	/* Mark unallocated space NOACCESS; leave the block header alone. */
	VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);

	/* Remember block as part of block list */
	set->blocks = block;

	/* Finish filling in aset-specific parts of the context header */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	set->initBlockSize = (uint32) initBlockSize;
	set->maxBlockSize = (uint32) maxBlockSize;
	set->nextBlockSize = (uint32) initBlockSize;
	set->freeListIndex = freeListIndex;

	/*
	 * Compute the allocation chunk size limit for this context.  It can't be
	 * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
	 * If maxBlockSize is small then requests exceeding the maxBlockSize, or
	 * even a significant fraction of it, should be treated as large chunks
	 * too.  For the typical case of maxBlockSize a power of 2, the chunk size
	 * limit will be at most 1/8th maxBlockSize, so that given a stream of
	 * requests that are all the maximum chunk size we will waste at most
	 * 1/8th of the allocated space.
	 *
	 * Determine the maximum size that a chunk can be before we allocate an
	 * entire AllocBlock dedicated for that chunk.  We set the absolute limit
	 * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
	 * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
	 * sized block.  (We opt to keep allocChunkLimit a power-of-2 value
	 * primarily for legacy reasons rather than calculating it so that exactly
	 * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
	 */
	set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
	while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
		   (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
		set->allocChunkLimit >>= 1;

	/* Finally, do the type-independent part of context creation */
	MemoryContextCreate((MemoryContext) set,
						T_AllocSetContext,
						MCTX_ASET_ID,
						parent,
						name);

	((MemoryContext) set)->mem_allocated = firstBlockSize;

	return (MemoryContext) set;
}
532 : :
533 : : /*
534 : : * AllocSetReset
535 : : * Frees all memory which is allocated in the given set.
536 : : *
537 : : * Actually, this routine has some discretion about what to do.
538 : : * It should mark all allocated chunks freed, but it need not necessarily
539 : : * give back all the resources the set owns. Our actual implementation is
540 : : * that we give back all but the "keeper" block (which we must keep, since
541 : : * it shares a malloc chunk with the context header). In this way, we don't
542 : : * thrash malloc() when a context is repeatedly reset after small allocations,
543 : : * which is typical behavior for per-tuple contexts.
544 : : */
void
AllocSetReset(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	Size		keepersize PG_USED_FOR_ASSERTS_ONLY;

	Assert(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Remember keeper block size for Assert below */
	keepersize = KeeperBlock(set)->endptr - ((char *) set);

	/* Clear chunk freelists */
	MemSetAligned(set->freelist, 0, sizeof(set->freelist));

	block = set->blocks;

	/* New blocks list will be just the keeper block */
	set->blocks = KeeperBlock(set);

	/* Walk the old block list, keeping the keeper and freeing the rest */
	while (block != NULL)
	{
		AllocBlock	next = block->next;

		if (IsKeeperBlock(set, block))
		{
			/* Reset the block, but don't return it to malloc */
			char	   *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(datastart, block->freeptr - datastart);
#else
			/* wipe_mem() would have done this */
			VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
#endif
			block->freeptr = datastart;
			block->prev = NULL;
			block->next = NULL;
		}
		else
		{
			/* Normal case, release the block */
			context->mem_allocated -= block->endptr - ((char *) block);

#ifdef CLOBBER_FREED_MEMORY
			wipe_mem(block, block->freeptr - ((char *) block));
#endif

			/*
			 * We need to free the block header's vchunk explicitly, although
			 * the user-data vchunks within will go away in the TRIM below.
			 * Otherwise Valgrind complains about leaked allocations.
			 */
			VALGRIND_MEMPOOL_FREE(set, block);

			free(block);
		}
		block = next;
	}

	/* Only the keeper block's worth of space should remain accounted for */
	Assert(context->mem_allocated == keepersize);

	/*
	 * Instruct Valgrind to throw away all the vchunks associated with this
	 * context, except for the one covering the AllocSetContext and
	 * keeper-block header.  This gets rid of the vchunks for whatever user
	 * data is getting discarded by the context reset.
	 */
	VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);

	/* Reset block size allocation sequence, too */
	set->nextBlockSize = set->initBlockSize;
}
623 : :
624 : : /*
625 : : * AllocSetDelete
626 : : * Frees all memory which is allocated in the given set,
627 : : * in preparation for deletion of the set.
628 : : *
629 : : * Unlike AllocSetReset, this *must* free all resources of the set.
630 : : */
void
AllocSetDelete(MemoryContext context)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block = set->blocks;
	Size		keepersize PG_USED_FOR_ASSERTS_ONLY;

	Assert(AllocSetIsValid(set));

#ifdef MEMORY_CONTEXT_CHECKING
	/* Check for corruption and leaks before freeing */
	AllocSetCheck(context);
#endif

	/* Remember keeper block size for Assert below */
	keepersize = KeeperBlock(set)->endptr - ((char *) set);

	/*
	 * If the context is a candidate for a freelist, put it into that freelist
	 * instead of destroying it.
	 */
	if (set->freeListIndex >= 0)
	{
		AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];

		/*
		 * Reset the context, if it needs it, so that we aren't hanging on to
		 * more than the initial malloc chunk.
		 */
		if (!context->isReset)
			MemoryContextResetOnly(context);

		/*
		 * If the freelist is full, just discard what's already in it.  See
		 * comments with context_freelists[].
		 */
		if (freelist->num_free >= MAX_FREE_CONTEXTS)
		{
			while (freelist->first_free != NULL)
			{
				AllocSetContext *oldset = freelist->first_free;

				freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
				freelist->num_free--;

				/* Destroy the context's vpool --- see notes below */
				VALGRIND_DESTROY_MEMPOOL(oldset);

				/* All that remains is to free the header/initial block */
				free(oldset);
			}
			Assert(freelist->num_free == 0);
		}

		/* Now add the just-deleted context to the freelist. */
		set->header.nextchild = (MemoryContext) freelist->first_free;
		freelist->first_free = set;
		freelist->num_free++;

		return;
	}

	/* Free all blocks, except the keeper which is part of context header */
	while (block != NULL)
	{
		AllocBlock	next = block->next;

		if (!IsKeeperBlock(set, block))
			context->mem_allocated -= block->endptr - ((char *) block);

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif

		if (!IsKeeperBlock(set, block))
		{
			/* As in AllocSetReset, free block-header vchunks explicitly */
			VALGRIND_MEMPOOL_FREE(set, block);
			free(block);
		}

		block = next;
	}

	/* Only the keeper block's worth of space should remain accounted for */
	Assert(context->mem_allocated == keepersize);

	/*
	 * Destroy the vpool.  We don't seem to need to explicitly free the
	 * initial block's header vchunk, nor any user-data vchunks that Valgrind
	 * still knows about; they'll all go away automatically.
	 */
	VALGRIND_DESTROY_MEMPOOL(set);

	/* Finally, free the context header, including the keeper block */
	free(set);
}
727 : :
/*
 * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
 *
 * This path is taken for requests exceeding the set's allocChunkLimit: the
 * chunk gets a dedicated single-chunk block, marked "external" so that
 * AllocSetFree() can give the memory straight back to free() rather than
 * keeping it on a freelist.
 *
 * Returns a pointer to the chunk's data area, or lets
 * MemoryContextAllocationFailure() handle OOM per 'flags'.
 *
 * AllocSetAlloc()'s comment explains why this is separate.
 */
pg_noinline
static void *
AllocSetAllocLarge(MemoryContext context, Size size, int flags)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	MemoryChunk *chunk;
	Size		chunk_size;
	Size		blksize;

	/* validate 'size' is within the limits for the given 'flags' */
	MemoryContextCheckSize(context, size, flags);

#ifdef MEMORY_CONTEXT_CHECKING
	/* ensure there's always space for the sentinel byte */
	chunk_size = MAXALIGN(size + 1);
#else
	chunk_size = MAXALIGN(size);
#endif

	/* the block holds exactly one chunk, plus both headers */
	blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
	block = (AllocBlock) malloc(blksize);
	if (block == NULL)
		return MemoryContextAllocationFailure(context, size, flags);

	/* Make a vchunk covering the new block's header */
	VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);

	context->mem_allocated += blksize;

	block->aset = set;
	/* freeptr == endptr marks the block as completely full */
	block->freeptr = block->endptr = ((char *) block) + blksize;

	chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);

	/* mark the MemoryChunk as externally managed */
	MemoryChunkSetHdrMaskExternal(chunk, MCTX_ASET_ID);

#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	Assert(size < chunk_size);
	set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

	/*
	 * Stick the new block underneath the active allocation block, if any, so
	 * that we don't lose the use of the space remaining therein.
	 */
	if (set->blocks != NULL)
	{
		block->prev = set->blocks;
		block->next = set->blocks->next;
		if (block->next)
			block->next->prev = block;
		set->blocks->next = block;
	}
	else
	{
		block->prev = NULL;
		block->next = NULL;
		set->blocks = block;
	}

	/* Ensure any padding bytes are marked NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
							   chunk_size - size);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

	return MemoryChunkGetPointer(chunk);
}
810 : :
/*
 * Small helper for allocating a new chunk from a chunk, to avoid duplicating
 * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
 *
 * 'block' must have at least chunk_size + ALLOC_CHUNKHDRSZ bytes available
 * (callers check this; the Assert below re-verifies).  'chunk_size' is the
 * rounded-up size for freelist index 'fidx'; 'size' is the caller's original
 * request.  Returns a pointer to the chunk's data area.
 */
static inline void *
AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block,
							Size size, Size chunk_size, int fidx)
{
	MemoryChunk *chunk;

	/* carve the chunk off the front of the block's free space */
	chunk = (MemoryChunk *) (block->freeptr);

	/* Prepare to initialize the chunk header. */
	VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);

	block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
	Assert(block->freeptr <= block->endptr);

	/* store the free list index in the value field */
	MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);

#ifdef MEMORY_CONTEXT_CHECKING
	chunk->requested_size = size;
	/* set mark to catch clobber of "unused" space */
	if (size < chunk_size)
		set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
	/* fill the allocated space with junk */
	randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

	/* Ensure any padding bytes are marked NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
							   chunk_size - size);

	/* Disallow access to the chunk header. */
	VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

	return MemoryChunkGetPointer(chunk);
}
852 : :
/*
 * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
 * allocated from it.
 *
 * 'fidx' is the freelist index for the requested 'size', as already computed
 * by AllocSetAlloc() via AllocSetFreeIndex().  Before allocating the new
 * block, the leftover tail of the current active block is carved into
 * freelist chunks so the space isn't wasted.
 *
 * AllocSetAlloc()'s comment explains why this is separate.
 */
pg_noinline
static void *
AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
						  int fidx)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	Size		availspace;
	Size		blksize;
	Size		required_size;
	Size		chunk_size;

	/* due to the keeper block set->blocks should always be valid */
	Assert(set->blocks != NULL);
	block = set->blocks;
	availspace = block->endptr - block->freeptr;

	/*
	 * The existing active (top) block does not have enough room for the
	 * requested allocation, but it might still have a useful amount of space
	 * in it.  Once we push it down in the block list, we'll never try to
	 * allocate more space from it. So, before we do that, carve up its free
	 * space into chunks that we can put on the set's freelists.
	 *
	 * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
	 * left in the block, this loop cannot iterate more than
	 * ALLOCSET_NUM_FREELISTS-1 times.
	 */
	while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
	{
		AllocFreeListLink *link;
		MemoryChunk *chunk;
		Size		availchunk = availspace - ALLOC_CHUNKHDRSZ;
		int			a_fidx = AllocSetFreeIndex(availchunk);

		/*
		 * In most cases, we'll get back the index of the next larger freelist
		 * than the one we need to put this chunk on.  The exception is when
		 * availchunk is exactly a power of 2.
		 */
		if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
		{
			/* round down to the next smaller (power-of-2) chunk size */
			a_fidx--;
			Assert(a_fidx >= 0);
			availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
		}

		chunk = (MemoryChunk *) (block->freeptr);

		/* Prepare to initialize the chunk header. */
		VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
		block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
		availspace -= (availchunk + ALLOC_CHUNKHDRSZ);

		/* store the freelist index in the value field */
		MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
#ifdef MEMORY_CONTEXT_CHECKING
		chunk->requested_size = InvalidAllocSize;	/* mark it free */
#endif
		/* push this chunk onto the free list */
		link = GetFreeListLink(chunk);

		VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
		link->next = set->freelist[a_fidx];
		VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));

		set->freelist[a_fidx] = chunk;
	}

	/*
	 * The first such block has size initBlockSize, and we double the space in
	 * each succeeding block, but not more than maxBlockSize.
	 */
	blksize = set->nextBlockSize;
	set->nextBlockSize <<= 1;
	if (set->nextBlockSize > set->maxBlockSize)
		set->nextBlockSize = set->maxBlockSize;

	/* Choose the actual chunk size to allocate */
	chunk_size = GetChunkSizeFromFreeListIdx(fidx);
	Assert(chunk_size >= size);

	/*
	 * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
	 * space... but try to keep it a power of 2.
	 */
	required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
	while (blksize < required_size)
		blksize <<= 1;

	/* Try to allocate it */
	block = (AllocBlock) malloc(blksize);

	/*
	 * We could be asking for pretty big blocks here, so cope if malloc fails.
	 * But give up if there's less than 1 MB or so available...
	 */
	while (block == NULL && blksize > 1024 * 1024)
	{
		blksize >>= 1;
		if (blksize < required_size)
			break;
		block = (AllocBlock) malloc(blksize);
	}

	if (block == NULL)
		return MemoryContextAllocationFailure(context, size, flags);

	/* Make a vchunk covering the new block's header */
	VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);

	context->mem_allocated += blksize;

	block->aset = set;
	block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
	block->endptr = ((char *) block) + blksize;

	/* Mark unallocated space NOACCESS. */
	VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
							   blksize - ALLOC_BLOCKHDRSZ);

	/* make the new block the active (head) block */
	block->prev = NULL;
	block->next = set->blocks;
	if (block->next)
		block->next->prev = block;
	set->blocks = block;

	return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
}
988 : :
/*
 * AllocSetAlloc
 *		Returns a pointer to allocated memory of given size or raises an ERROR
 *		on allocation failure, or returns NULL when flags contains
 *		MCXT_ALLOC_NO_OOM.
 *
 * No request may exceed:
 *		MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
 * All callers use a much-lower limit.
 *
 * Note: when using valgrind, it doesn't matter how the returned allocation
 * is marked, as mcxt.c will set it to UNDEFINED.  In some paths we will
 * return space that is marked NOACCESS - AllocSetRealloc has to beware!
 *
 * This function should only contain the most common code paths.  Everything
 * else should be in pg_noinline helper functions, thus avoiding the overhead
 * of creating a stack frame for the common cases.  Allocating memory is often
 * a bottleneck in many workloads, so avoiding stack frame setup is
 * worthwhile.  Helper functions should always directly return the newly
 * allocated memory so that we can just return that address directly as a tail
 * call.
 */
void *
AllocSetAlloc(MemoryContext context, Size size, int flags)
{
	AllocSet	set = (AllocSet) context;
	AllocBlock	block;
	MemoryChunk *chunk;
	int			fidx;
	Size		chunk_size;
	Size		availspace;

	Assert(AllocSetIsValid(set));

	/* due to the keeper block set->blocks should never be NULL */
	Assert(set->blocks != NULL);

	/*
	 * If requested size exceeds maximum for chunks we hand the request off to
	 * AllocSetAllocLarge().
	 */
	if (size > set->allocChunkLimit)
		return AllocSetAllocLarge(context, size, flags);

	/*
	 * Request is small enough to be treated as a chunk.  Look in the
	 * corresponding free list to see if there is a free chunk we could reuse.
	 * If one is found, remove it from the free list, make it again a member
	 * of the alloc set and return its data address.
	 *
	 * Note that we don't attempt to ensure there's space for the sentinel
	 * byte here.  We expect a large proportion of allocations to be for sizes
	 * which are already a power of 2.  If we were to always make space for a
	 * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
	 * doubling the memory requirements for such allocations.
	 */
	fidx = AllocSetFreeIndex(size);
	chunk = set->freelist[fidx];
	if (chunk != NULL)
	{
		/* Fast path: recycle a previously-freed chunk of the right size */
		AllocFreeListLink *link = GetFreeListLink(chunk);

		/* Allow access to the chunk header. */
		VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);

		Assert(fidx == MemoryChunkGetValue(chunk));

		/* pop this chunk off the freelist */
		VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
		set->freelist[fidx] = link->next;
		VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));

#ifdef MEMORY_CONTEXT_CHECKING
		chunk->requested_size = size;
		/* set mark to catch clobber of "unused" space */
		if (size < GetChunkSizeFromFreeListIdx(fidx))
			set_sentinel(MemoryChunkGetPointer(chunk), size);
#endif
#ifdef RANDOMIZE_ALLOCATED_MEMORY
		/* fill the allocated space with junk */
		randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
#endif

		/* Ensure any padding bytes are marked NOACCESS. */
		VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
								   GetChunkSizeFromFreeListIdx(fidx) - size);

		/* Disallow access to the chunk header. */
		VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);

		return MemoryChunkGetPointer(chunk);
	}

	/*
	 * Choose the actual chunk size to allocate.
	 */
	chunk_size = GetChunkSizeFromFreeListIdx(fidx);
	Assert(chunk_size >= size);

	block = set->blocks;
	availspace = block->endptr - block->freeptr;

	/*
	 * If there is enough room in the active allocation block, we will put the
	 * chunk into that block.  Else must start a new one.
	 */
	if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
		return AllocSetAllocFromNewBlock(context, size, flags, fidx);

	/* There's enough space on the current block, so allocate from that */
	return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
}
1101 : :
/*
 * AllocSetFree
 *		Frees allocated memory; memory is removed from the set.
 *
 * Two cases: an "external" chunk occupies a dedicated single-chunk block,
 * which is unlinked and given back to free() immediately; an ordinary chunk
 * is pushed onto the freelist for its size class for later reuse by
 * AllocSetAlloc().
 */
void
AllocSetFree(void *pointer)
{
	AllocSet	set;
	MemoryChunk *chunk = PointerGetMemoryChunk(pointer);

	/* Allow access to the chunk header. */
	VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);

	if (MemoryChunkIsExternal(chunk))
	{
		/* Release single-chunk block. */
		AllocBlock	block = ExternalChunkGetBlock(chunk);

		/*
		 * Try to verify that we have a sane block pointer: the block header
		 * should reference an aset and the freeptr should match the endptr.
		 */
		if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
			elog(ERROR, "could not find block containing chunk %p", chunk);

		set = block->aset;

#ifdef MEMORY_CONTEXT_CHECKING
		{
			/* Test for someone scribbling on unused space in chunk */
			Assert(chunk->requested_size < (block->endptr - (char *) pointer));
			if (!sentinel_ok(pointer, chunk->requested_size))
				elog(WARNING, "detected write past chunk end in %s %p",
					 set->header.name, chunk);
		}
#endif

		/* OK, remove block from aset's list and free it */
		if (block->prev)
			block->prev->next = block->next;
		else
			set->blocks = block->next;
		if (block->next)
			block->next->prev = block->prev;

		set->header.mem_allocated -= block->endptr - ((char *) block);

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(block, block->freeptr - ((char *) block));
#endif

		/* As in AllocSetReset, free block-header vchunks explicitly */
		VALGRIND_MEMPOOL_FREE(set, block);

		free(block);
	}
	else
	{
		AllocBlock	block = MemoryChunkGetBlock(chunk);
		int			fidx;
		AllocFreeListLink *link;

		/*
		 * In this path, for speed reasons we just Assert that the referenced
		 * block is good.  We can also Assert that the value field is sane.
		 * Future field experience may show that these Asserts had better
		 * become regular runtime test-and-elog checks.
		 */
		Assert(AllocBlockIsValid(block));
		set = block->aset;

		fidx = MemoryChunkGetValue(chunk);
		Assert(FreeListIdxIsValid(fidx));
		link = GetFreeListLink(chunk);

		/*
		 * It might seem odd that we use elevel ERROR for double-pfree but
		 * only WARNING for write-past-chunk-end.  But the two conditions are
		 * not very comparable.  In the double-pfree case we can prevent
		 * corruption before it happens; while if we let it go through, the
		 * result would be a corrupted freelist that allows this chunk to get
		 * re-allocated twice.  Thus the original bug could cascade into
		 * hard-to-understand misbehavior that might manifest far away from
		 * the actual source of the problem.  On the other hand, a write past
		 * chunk end can be relatively benign if just a few bytes too many
		 * were written: often, only padding or unused space gets affected.
		 * Moreover, whatever damage was done is already done, and we're just
		 * reporting after the fact with no ability to clean it up.  So just
		 * warn, like AllocSetCheck would do if the chunk didn't get freed.
		 */
#ifdef MEMORY_CONTEXT_CHECKING
		/* Test for previously-freed chunk */
		if (unlikely(chunk->requested_size == InvalidAllocSize))
			elog(ERROR, "detected double pfree in %s %p",
				 set->header.name, chunk);
		/* Test for someone scribbling on unused space in chunk */
		if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
			if (!sentinel_ok(pointer, chunk->requested_size))
				elog(WARNING, "detected write past chunk end in %s %p",
					 set->header.name, chunk);
#endif

#ifdef CLOBBER_FREED_MEMORY
		wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
#endif
		/* push this chunk onto the top of the free list */
		VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
		link->next = set->freelist[fidx];
		VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
		set->freelist[fidx] = chunk;

#ifdef MEMORY_CONTEXT_CHECKING

		/*
		 * Reset requested_size to InvalidAllocSize in chunks that are on free
		 * list.
		 */
		chunk->requested_size = InvalidAllocSize;
#endif
	}
}
1223 : :
1224 : : /*
1225 : : * AllocSetRealloc
1226 : : * Returns new pointer to allocated memory of given size or NULL if
1227 : : * request could not be completed; this memory is added to the set.
1228 : : * Memory associated with given pointer is copied into the new memory,
1229 : : * and the old memory is freed.
1230 : : *
1231 : : * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1232 : : * makes our Valgrind client requests less-precise, hazarding false negatives.
1233 : : * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1234 : : * request size.)
1235 : : */
1236 : : void *
798 drowley@postgresql.o 1237 : 3221885 : AllocSetRealloc(void *pointer, Size size, int flags)
1238 : : {
1239 : : AllocBlock block;
1240 : : AllocSet set;
1345 1241 : 3221885 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1242 : : Size oldchksize;
1243 : : int fidx;
1244 : :
1245 : : /* Allow access to the chunk header. */
1246 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1247 : :
1248 [ + + ]: 3221885 : if (MemoryChunkIsExternal(chunk))
1249 : : {
1250 : : /*
1251 : : * The chunk must have been allocated as a single-chunk block. Use
1252 : : * realloc() to make the containing block bigger, or smaller, with
1253 : : * minimum space wastage.
1254 : : */
1255 : : AllocBlock newblock;
1256 : : Size chksize;
1257 : : Size blksize;
1258 : : Size oldblksize;
1259 : :
1260 : 62888 : block = ExternalChunkGetBlock(chunk);
1261 : :
1262 : : /*
1263 : : * Try to verify that we have a sane block pointer: the block header
1264 : : * should reference an aset and the freeptr should match the endptr.
1265 : : */
1303 tgl@sss.pgh.pa.us 1266 [ + - + - : 62888 : if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
+ - - + ]
1303 tgl@sss.pgh.pa.us 1267 [ # # ]:UBC 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
1268 : :
1345 drowley@postgresql.o 1269 :CBC 62888 : set = block->aset;
1270 : :
1271 : : /* only check size in paths where the limits could be hit */
798 1272 : 62888 : MemoryContextCheckSize((MemoryContext) set, size, flags);
1273 : :
1169 tgl@sss.pgh.pa.us 1274 : 62888 : oldchksize = block->endptr - (char *) pointer;
1275 : :
1276 : : #ifdef MEMORY_CONTEXT_CHECKING
1277 : : /* Test for someone scribbling on unused space in chunk */
1278 [ - + ]: 62888 : Assert(chunk->requested_size < oldchksize);
1336 drowley@postgresql.o 1279 [ - + ]: 62888 : if (!sentinel_ok(pointer, chunk->requested_size))
1336 drowley@postgresql.o 1280 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1281 : : set->header.name, chunk);
1282 : : #endif
1283 : :
1284 : : #ifdef MEMORY_CONTEXT_CHECKING
1285 : : /* ensure there's always space for the sentinel byte */
1336 drowley@postgresql.o 1286 :CBC 62888 : chksize = MAXALIGN(size + 1);
1287 : : #else
1288 : : chksize = MAXALIGN(size);
1289 : : #endif
1290 : :
1291 : : /* Do the realloc */
9286 tgl@sss.pgh.pa.us 1292 : 62888 : blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
2182 1293 : 62888 : oldblksize = block->endptr - ((char *) block);
1294 : :
276 tgl@sss.pgh.pa.us 1295 :GNC 62888 : newblock = (AllocBlock) realloc(block, blksize);
1296 [ - + ]: 62888 : if (newblock == NULL)
1297 : : {
1298 : : /* Disallow access to the chunk header. */
1299 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
798 drowley@postgresql.o 1300 :UBC 0 : return MemoryContextAllocationFailure(&set->header, size, flags);
1301 : : }
1302 : :
1303 : : /*
1304 : : * Move the block-header vchunk explicitly. (mcxt.c will take care of
1305 : : * moving the vchunk for the user data.)
1306 : : */
1307 : : VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
276 tgl@sss.pgh.pa.us 1308 :GNC 62888 : block = newblock;
1309 : :
1310 : : /* updated separately, not to underflow when (oldblksize > blksize) */
1345 drowley@postgresql.o 1311 :CBC 62888 : set->header.mem_allocated -= oldblksize;
1312 : 62888 : set->header.mem_allocated += blksize;
1313 : :
9751 tgl@sss.pgh.pa.us 1314 : 62888 : block->freeptr = block->endptr = ((char *) block) + blksize;
1315 : :
1316 : : /* Update pointers since block has likely been moved */
1345 drowley@postgresql.o 1317 : 62888 : chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1318 : 62888 : pointer = MemoryChunkGetPointer(chunk);
3345 tgl@sss.pgh.pa.us 1319 [ + - ]: 62888 : if (block->prev)
1320 : 62888 : block->prev->next = block;
1321 : : else
3345 tgl@sss.pgh.pa.us 1322 :UBC 0 : set->blocks = block;
3345 tgl@sss.pgh.pa.us 1323 [ + + ]:CBC 62888 : if (block->next)
1324 : 46788 : block->next->prev = block;
1325 : :
1326 : : #ifdef MEMORY_CONTEXT_CHECKING
1327 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1328 : :
1329 : : /*
1330 : : * We can only randomize the extra space if we know the prior request.
1331 : : * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1332 : : */
1333 : : if (size > chunk->requested_size)
1334 : : randomize_mem((char *) pointer + chunk->requested_size,
1335 : : size - chunk->requested_size);
1336 : : #else
1337 : :
1338 : : /*
1339 : : * If this is an increase, realloc() will have marked any
1340 : : * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1341 : : * also need to adjust trailing bytes from the old allocation (from
1342 : : * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1343 : : * Make sure not to mark too many bytes in case chunk->requested_size
1344 : : * < size < oldchksize.
1345 : : */
1346 : : #ifdef USE_VALGRIND
1347 : : if (Min(size, oldchksize) > chunk->requested_size)
1348 : : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1349 : : Min(size, oldchksize) - chunk->requested_size);
1350 : : #endif
1351 : : #endif
1352 : :
9286 1353 : 62888 : chunk->requested_size = size;
1354 : : /* set mark to catch clobber of "unused" space */
1336 drowley@postgresql.o 1355 [ - + ]: 62888 : Assert(size < chksize);
1356 : 62888 : set_sentinel(pointer, size);
1357 : : #else /* !MEMORY_CONTEXT_CHECKING */
1358 : :
1359 : : /*
1360 : : * We may need to adjust marking of bytes from the old allocation as
1361 : : * some of them may be marked NOACCESS. We don't know how much of the
1362 : : * old chunk size was the requested size; it could have been as small
1363 : : * as one byte. We have to be conservative and just mark the entire
1364 : : * old portion DEFINED. Make sure not to mark memory beyond the new
1365 : : * allocation in case it's smaller than the old one.
1366 : : */
1367 : : VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1368 : : #endif
1369 : :
1370 : : /* Ensure any padding bytes are marked NOACCESS. */
1371 : : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1372 : :
1373 : : /* Disallow access to the chunk header. */
1374 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1375 : :
3345 tgl@sss.pgh.pa.us 1376 : 62888 : return pointer;
1377 : : }
1378 : :
1345 drowley@postgresql.o 1379 : 3158997 : block = MemoryChunkGetBlock(chunk);
1380 : :
1381 : : /*
1382 : : * In this path, for speed reasons we just Assert that the referenced
1383 : : * block is good. We can also Assert that the value field is sane. Future
1384 : : * field experience may show that these Asserts had better become regular
1385 : : * runtime test-and-elog checks.
1386 : : */
1285 peter@eisentraut.org 1387 [ + - + - : 3158997 : Assert(AllocBlockIsValid(block));
- + ]
1345 drowley@postgresql.o 1388 : 3158997 : set = block->aset;
1389 : :
1303 tgl@sss.pgh.pa.us 1390 : 3158997 : fidx = MemoryChunkGetValue(chunk);
1391 [ + - - + ]: 3158997 : Assert(FreeListIdxIsValid(fidx));
1169 1392 : 3158997 : oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1393 : :
1394 : : #ifdef MEMORY_CONTEXT_CHECKING
1395 : : /* See comments in AllocSetFree about uses of ERROR and WARNING here */
1396 : : /* Test for previously-freed chunk */
36 1397 [ - + ]: 3158997 : if (unlikely(chunk->requested_size == InvalidAllocSize))
36 tgl@sss.pgh.pa.us 1398 [ # # ]:UBC 0 : elog(ERROR, "detected realloc of freed chunk in %s %p",
1399 : : set->header.name, chunk);
1400 : : /* Test for someone scribbling on unused space in chunk */
1169 tgl@sss.pgh.pa.us 1401 [ + + ]:CBC 3158997 : if (chunk->requested_size < oldchksize)
1345 drowley@postgresql.o 1402 [ - + ]: 1328519 : if (!sentinel_ok(pointer, chunk->requested_size))
1345 drowley@postgresql.o 1403 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1404 : : set->header.name, chunk);
1405 : : #endif
1406 : :
1407 : : /*
1408 : : * Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
1409 : : * allocated area already is >= the new size. (In particular, we will
1410 : : * fall out here if the requested size is a decrease.)
1411 : : */
1169 tgl@sss.pgh.pa.us 1412 [ + + ]:CBC 3158997 : if (oldchksize >= size)
1413 : : {
1414 : : #ifdef MEMORY_CONTEXT_CHECKING
2406 1415 : 1326109 : Size oldrequest = chunk->requested_size;
1416 : :
1417 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1418 : : /* We can only fill the extra space if we know the prior request */
1419 : : if (size > oldrequest)
1420 : : randomize_mem((char *) pointer + oldrequest,
1421 : : size - oldrequest);
1422 : : #endif
1423 : :
1424 : 1326109 : chunk->requested_size = size;
1425 : :
1426 : : /*
1427 : : * If this is an increase, mark any newly-available part UNDEFINED.
1428 : : * Otherwise, mark the obsolete part NOACCESS.
1429 : : */
1430 : : if (size > oldrequest)
1431 : : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1432 : : size - oldrequest);
1433 : : else
1434 : : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1435 : : oldchksize - size);
1436 : :
1437 : : /* set mark to catch clobber of "unused" space */
1169 1438 [ + + ]: 1326109 : if (size < oldchksize)
2406 1439 : 1298358 : set_sentinel(pointer, size);
1440 : : #else /* !MEMORY_CONTEXT_CHECKING */
1441 : :
1442 : : /*
1443 : : * We don't have the information to determine whether we're growing
1444 : : * the old request or shrinking it, so we conservatively mark the
1445 : : * entire new allocation DEFINED.
1446 : : */
1447 : : VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1448 : : VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1449 : : #endif
1450 : :
1451 : : /* Disallow access to the chunk header. */
1452 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1453 : :
1454 : 1326109 : return pointer;
1455 : : }
1456 : : else
1457 : : {
1458 : : /*
1459 : : * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1460 : : * allocate a new chunk and copy the data. Since we know the existing
1461 : : * data isn't huge, this won't involve any great memcpy expense, so
1462 : : * it's not worth being smarter. (At one time we tried to avoid
1463 : : * memcpy when it was possible to enlarge the chunk in-place, but that
1464 : : * turns out to misbehave unpleasantly for repeated cycles of
1465 : : * palloc/repalloc/pfree: the eventually freed chunks go into the
1466 : : * wrong freelist for the next initial palloc request, and so we leak
1467 : : * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1468 : : */
1469 : : AllocPointer newPointer;
1470 : : Size oldsize;
1471 : :
1472 : : /* allocate new chunk (this also checks size is valid) */
798 drowley@postgresql.o 1473 : 1832888 : newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1474 : :
1475 : : /* leave immediately if request was not completed */
4114 rhaas@postgresql.org 1476 [ - + ]: 1832888 : if (newPointer == NULL)
1477 : : {
1478 : : /* Disallow access to the chunk header. */
1479 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
798 drowley@postgresql.o 1480 :UBC 0 : return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1481 : : }
1482 : :
1483 : : /*
1484 : : * AllocSetAlloc() may have returned a region that is still NOACCESS.
1485 : : * Change it to UNDEFINED for the moment; memcpy() will then transfer
1486 : : * definedness from the old allocation to the new. If we know the old
1487 : : * allocation, copy just that much. Otherwise, make the entire old
1488 : : * chunk defined to avoid errors as we copy the currently-NOACCESS
1489 : : * trailing bytes.
1490 : : */
1491 : : VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1492 : : #ifdef MEMORY_CONTEXT_CHECKING
4696 noah@leadboat.com 1493 :CBC 1832888 : oldsize = chunk->requested_size;
1494 : : #else
1495 : : oldsize = oldchksize;
1496 : : VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1497 : : #endif
1498 : :
1499 : : /* transfer existing data (certain to fit) */
9751 tgl@sss.pgh.pa.us 1500 : 1832888 : memcpy(newPointer, pointer, oldsize);
1501 : :
1502 : : /* free old chunk */
1345 drowley@postgresql.o 1503 : 1832888 : AllocSetFree(pointer);
1504 : :
9751 tgl@sss.pgh.pa.us 1505 : 1832888 : return newPointer;
1506 : : }
1507 : : }
1508 : :
1509 : : /*
1510 : : * AllocSetGetChunkContext
1511 : : * Return the MemoryContext that 'pointer' belongs to.
1512 : : */
1513 : : MemoryContext
1345 drowley@postgresql.o 1514 : 5523910 : AllocSetGetChunkContext(void *pointer)
1515 : : {
1516 : 5523910 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1517 : : AllocBlock block;
1518 : : AllocSet set;
1519 : :
1520 : : /* Allow access to the chunk header. */
1521 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1522 : :
: : /*
: : * External chunks locate their block via ExternalChunkGetBlock();
: : * ordinary chunks record the block location in the chunk header.
: : */
1523 [ + + ]: 5523910 : if (MemoryChunkIsExternal(chunk))
1524 : 62888 : block = ExternalChunkGetBlock(chunk);
1525 : : else
1526 : 5461022 : block = (AllocBlock) MemoryChunkGetBlock(chunk);
1527 : :
1528 : : /* Disallow access to the chunk header. */
1529 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1530 : :
1285 peter@eisentraut.org 1531 [ + - + - : 5523910 : Assert(AllocBlockIsValid(block));
- + ]
: : /* every block points back at its owning set; its header is the context */
1345 drowley@postgresql.o 1532 : 5523910 : set = block->aset;
1533 : :
1534 : 5523910 : return &set->header;
1535 : : }
1536 : :
1537 : : /*
1538 : : * AllocSetGetChunkSpace
1539 : : * Given a currently-allocated chunk, determine the total space
1540 : : * it occupies (including all memory-allocation overhead).
1541 : : */
1542 : : Size
1543 : 5075198 : AllocSetGetChunkSpace(void *pointer)
1544 : : {
1545 : 5075198 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1546 : : int fidx;
1547 : :
1548 : : /* Allow access to the chunk header. */
1549 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1550 : :
: : /*
: : * An external chunk occupies its block from the chunk header to the
: : * block's endptr, so its space is measured against the block directly.
: : */
1551 [ + + ]: 5075198 : if (MemoryChunkIsExternal(chunk))
1552 : : {
1553 : 566816 : AllocBlock block = ExternalChunkGetBlock(chunk);
1554 : :
1555 : : /* Disallow access to the chunk header. */
1556 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1557 : :
1285 peter@eisentraut.org 1558 [ + - + - : 566816 : Assert(AllocBlockIsValid(block));
- + ]
1559 : :
1345 drowley@postgresql.o 1560 : 566816 : return block->endptr - (char *) chunk;
1561 : : }
1562 : :
: : /*
: : * Ordinary chunks store their freelist index; the aligned chunk size
: : * follows from that index, plus the header overhead.
: : */
1303 tgl@sss.pgh.pa.us 1563 : 4508382 : fidx = MemoryChunkGetValue(chunk);
1564 [ + - - + ]: 4508382 : Assert(FreeListIdxIsValid(fidx));
1565 : :
1566 : : /* Disallow access to the chunk header. */
1567 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1568 : :
1569 : 4508382 : return GetChunkSizeFromFreeListIdx(fidx) + ALLOC_CHUNKHDRSZ;
1570 : : }
1571 : :
1572 : : /*
1573 : : * AllocSetIsEmpty
1574 : : * Is an allocset empty of any allocated space?
1575 : : */
1576 : : bool
7901 1577 : 7278 : AllocSetIsEmpty(MemoryContext context)
1578 : : {
1285 peter@eisentraut.org 1579 [ + - - + ]: 7278 : Assert(AllocSetIsValid(context));
1580 : :
1581 : : /*
1582 : : * For now, we say "empty" only if the context is new or just reset. We
1583 : : * could examine the freelists to determine if all space has been freed,
1584 : : * but it's not really worth the trouble for present uses of this
1585 : : * functionality.
1586 : : */
: : /*
: : * NOTE(review): isReset is presumably cleared on the first allocation
: : * after create/reset -- maintained outside this file; confirm in mcxt.c.
: : */
5463 heikki.linnakangas@i 1587 [ + + ]: 7278 : if (context->isReset)
7901 tgl@sss.pgh.pa.us 1588 : 7263 : return true;
1589 : 15 : return false;
1590 : : }
1591 : :
1592 : : /*
1593 : : * AllocSetStats
1594 : : * Compute stats about memory consumption of an allocset.
1595 : : *
1596 : : * printfunc: if not NULL, pass a human-readable stats string to this.
1597 : : * passthru: pass this pointer through to printfunc.
1598 : : * totals: if not NULL, add stats about this context into *totals.
1599 : : * print_to_stderr: print stats to stderr if true, elog otherwise.
1600 : : */
1601 : : void
2961 1602 : 3434 : AllocSetStats(MemoryContext context,
1603 : : MemoryStatsPrintFunc printfunc, void *passthru,
1604 : : MemoryContextCounters *totals, bool print_to_stderr)
1605 : : {
9442 1606 : 3434 : AllocSet set = (AllocSet) context;
4485 1607 : 3434 : Size nblocks = 0;
3906 1608 : 3434 : Size freechunks = 0;
1609 : : Size totalspace;
4485 1610 : 3434 : Size freespace = 0;
1611 : : AllocBlock block;
1612 : : int fidx;
1613 : :
1285 peter@eisentraut.org 1614 [ + - - + ]: 3434 : Assert(AllocSetIsValid(set));
1615 : :
1616 : : /* Include context header in totalspace */
2961 tgl@sss.pgh.pa.us 1617 : 3434 : totalspace = MAXALIGN(sizeof(AllocSetContext));
1618 : :
: : /*
: : * Pass 1: account for whole blocks; the freeptr..endptr tail of each
: : * block is space not yet carved into chunks, hence free.
: : */
9480 1619 [ + + ]: 9597 : for (block = set->blocks; block != NULL; block = block->next)
1620 : : {
1621 : 6163 : nblocks++;
1622 : 6163 : totalspace += block->endptr - ((char *) block);
1623 : 6163 : freespace += block->endptr - block->freeptr;
1624 : : }
: : /*
: : * Pass 2: chunks sitting on the freelists also count as free space
: : * (including their header overhead).
: : */
1625 [ + + ]: 41208 : for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1626 : : {
1303 1627 : 37774 : Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1345 drowley@postgresql.o 1628 : 37774 : MemoryChunk *chunk = set->freelist[fidx];
1629 : :
1630 [ + + ]: 48852 : while (chunk != NULL)
1631 : : {
1632 : 11078 : AllocFreeListLink *link = GetFreeListLink(chunk);
1633 : :
1634 : : /* Allow access to the chunk header. */
1635 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1303 tgl@sss.pgh.pa.us 1636 [ - + ]: 11078 : Assert(MemoryChunkGetValue(chunk) == fidx);
1637 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1638 : :
3906 1639 : 11078 : freechunks++;
1345 drowley@postgresql.o 1640 : 11078 : freespace += chksz + ALLOC_CHUNKHDRSZ;
1641 : :
: : /* the freelist link lives inside freed space; toggle access to read it */
1642 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1643 : 11078 : chunk = link->next;
1644 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1645 : : }
1646 : : }
1647 : :
2961 tgl@sss.pgh.pa.us 1648 [ + + ]: 3434 : if (printfunc)
1649 : : {
1650 : : char stats_string[200];
1651 : :
1652 : 1202 : snprintf(stats_string, sizeof(stats_string),
1653 : : "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1654 : : totalspace, nblocks, freespace, freechunks,
1655 : : totalspace - freespace);
1855 fujii@postgresql.org 1656 : 1202 : printfunc(context, passthru, stats_string, print_to_stderr);
1657 : : }
1658 : :
3906 tgl@sss.pgh.pa.us 1659 [ + - ]: 3434 : if (totals)
1660 : : {
1661 : 3434 : totals->nblocks += nblocks;
1662 : 3434 : totals->freechunks += freechunks;
1663 : 3434 : totals->totalspace += totalspace;
1664 : 3434 : totals->freespace += freespace;
1665 : : }
9480 1666 : 3434 : }
1667 : :
1668 : :
1669 : : #ifdef MEMORY_CONTEXT_CHECKING
1670 : :
1671 : : /*
1672 : : * AllocSetCheck
1673 : : * Walk through chunks and check consistency of memory.
1674 : : *
1675 : : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1676 : : * find yourself in an infinite loop when trouble occurs, because this
1677 : : * routine will be entered again when elog cleanup tries to release memory!
1678 : : */
1679 : : void
9429 bruce@momjian.us 1680 : 121696537 : AllocSetCheck(MemoryContext context)
1681 : : {
9175 1682 : 121696537 : AllocSet set = (AllocSet) context;
3065 tgl@sss.pgh.pa.us 1683 : 121696537 : const char *name = set->header.name;
1684 : : AllocBlock prevblock;
1685 : : AllocBlock block;
2405 tomas.vondra@postgre 1686 : 121696537 : Size total_allocated = 0;
1687 : :
3345 tgl@sss.pgh.pa.us 1688 : 121696537 : for (prevblock = NULL, block = set->blocks;
1689 [ + + ]: 331628846 : block != NULL;
1690 : 209932309 : prevblock = block, block = block->next)
1691 : : {
9175 bruce@momjian.us 1692 : 209932309 : char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
187 drowley@postgresql.o 1693 : 209932309 : Size blk_used = block->freeptr - bpoz;
1694 : 209932309 : Size blk_data = 0;
1695 : 209932309 : Size nchunks = 0;
1345 1696 : 209932309 : bool has_external_chunk = false;
1697 : :
: : /*
: : * The keeper block is carved out of the same allocation as the
: : * context header itself, so its accounting is measured from the
: : * set, not from the block.
: : */
1023 1698 [ + + ]: 209932309 : if (IsKeeperBlock(set, block))
2408 tomas.vondra@postgre 1699 : 121696537 : total_allocated += block->endptr - ((char *) set);
1700 : : else
1701 : 88235772 : total_allocated += block->endptr - ((char *) block);
1702 : :
1703 : : /*
1704 : : * Empty block - empty can be keeper-block only
1705 : : */
9429 bruce@momjian.us 1706 [ + + ]: 209932309 : if (!blk_used)
1707 : : {
1023 drowley@postgresql.o 1708 [ - + ]: 4096266 : if (!IsKeeperBlock(set, block))
8320 tgl@sss.pgh.pa.us 1709 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: empty block %p",
1710 : : name, block);
1711 : : }
1712 : :
1713 : : /*
1714 : : * Check block header fields
1715 : : */
3345 tgl@sss.pgh.pa.us 1716 [ + - ]:CBC 209932309 : if (block->aset != set ||
1717 [ + - ]: 209932309 : block->prev != prevblock ||
1718 [ + - ]: 209932309 : block->freeptr < bpoz ||
1719 [ - + ]: 209932309 : block->freeptr > block->endptr)
3345 tgl@sss.pgh.pa.us 1720 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1721 : : name, block);
1722 : :
1723 : : /*
1724 : : * Chunk walker
1725 : : */
9286 tgl@sss.pgh.pa.us 1726 [ + + ]:CBC 3817355206 : while (bpoz < block->freeptr)
1727 : : {
1345 drowley@postgresql.o 1728 : 3607422897 : MemoryChunk *chunk = (MemoryChunk *) bpoz;
1729 : : Size chsize,
1730 : : dsize;
1731 : :
1732 : : /* Allow access to the chunk header. */
1733 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1734 : :
1735 [ + + ]: 3607422897 : if (MemoryChunkIsExternal(chunk))
1736 : : {
1737 : 7304937 : chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1738 : 7304937 : has_external_chunk = true;
1739 : :
1740 : : /* make sure this chunk consumes the entire block */
1741 [ - + ]: 7304937 : if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
1345 drowley@postgresql.o 1742 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1743 : : name, chunk, block);
1744 : : }
1745 : : else
1746 : : {
1303 tgl@sss.pgh.pa.us 1747 :CBC 3600117960 : int fidx = MemoryChunkGetValue(chunk);
1748 : :
1749 [ + - - + ]: 3600117960 : if (!FreeListIdxIsValid(fidx))
1303 tgl@sss.pgh.pa.us 1750 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1751 : : name, chunk, block);
1752 : :
1303 tgl@sss.pgh.pa.us 1753 :CBC 3600117960 : chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1754 : :
1755 : : /*
1756 : : * Check the stored block offset correctly references this
1757 : : * block.
1758 : : */
1345 drowley@postgresql.o 1759 [ - + ]: 3600117960 : if (block != MemoryChunkGetBlock(chunk))
1345 drowley@postgresql.o 1760 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1761 : : name, chunk, block);
1762 : : }
: : /* requested_size == InvalidAllocSize marks a free (unallocated) chunk */
3240 tgl@sss.pgh.pa.us 1763 :CBC 3607422897 : dsize = chunk->requested_size; /* real data */
1764 : :
1765 : : /* an allocated chunk's requested size must be <= the chsize */
1345 drowley@postgresql.o 1766 [ + + - + ]: 3607422897 : if (dsize != InvalidAllocSize && dsize > chsize)
8320 tgl@sss.pgh.pa.us 1767 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1768 : : name, chunk, block);
1769 : :
1770 : : /* chsize must not be smaller than the first freelist's size */
9429 bruce@momjian.us 1771 [ - + ]:CBC 3607422897 : if (chsize < (1 << ALLOC_MINBITS))
4485 tgl@sss.pgh.pa.us 1772 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1773 : : name, chsize, chunk, block);
1774 : :
1775 : : /*
1776 : : * Check for overwrite of padding space in an allocated chunk.
1777 : : */
1345 drowley@postgresql.o 1778 [ + + + + ]:CBC 3607422897 : if (dsize != InvalidAllocSize && dsize < chsize &&
4696 noah@leadboat.com 1779 [ - + ]: 2366339720 : !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
8320 tgl@sss.pgh.pa.us 1780 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1781 : : name, block, chunk);
1782 : :
1783 : : /* if chunk is allocated, disallow access to the chunk header */
1784 : : if (dsize != InvalidAllocSize)
1785 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1786 : :
9429 bruce@momjian.us 1787 :CBC 3607422897 : blk_data += chsize;
1788 : 3607422897 : nchunks++;
1789 : :
9286 tgl@sss.pgh.pa.us 1790 : 3607422897 : bpoz += ALLOC_CHUNKHDRSZ + chsize;
1791 : : }
1792 : :
: : /* chunk payloads plus headers must exactly tile the used part of the block */
9429 bruce@momjian.us 1793 [ - + ]: 209932309 : if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
8320 tgl@sss.pgh.pa.us 1794 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1795 : : name, block);
1796 : :
1345 drowley@postgresql.o 1797 [ + + - + ]:CBC 209932309 : if (has_external_chunk && nchunks > 1)
1345 drowley@postgresql.o 1798 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1799 : : name, block);
1800 : : }
1801 : :
2238 jdavis@postgresql.or 1802 [ - + ]:CBC 121696537 : Assert(total_allocated == context->mem_allocated);
9429 bruce@momjian.us 1803 : 121696537 : }
1804 : :
1805 : : #endif /* MEMORY_CONTEXT_CHECKING */
|