Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * aset.c
4 : : * Allocation set definitions.
5 : : *
6 : : * AllocSet is our standard implementation of the abstract MemoryContext
7 : : * type.
8 : : *
9 : : *
10 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
11 : : * Portions Copyright (c) 1994, Regents of the University of California
12 : : *
13 : : * IDENTIFICATION
14 : : * src/backend/utils/mmgr/aset.c
15 : : *
16 : : * NOTE:
17 : : * This is a new (Feb. 05, 1999) implementation of the allocation set
18 : : * routines. AllocSet...() does not use OrderedSet...() any more.
19 : : * Instead it manages allocations in a block pool by itself, combining
20 : : * many small allocations in a few bigger blocks. AllocSetFree() normally
21 : : * doesn't really free() memory. It just adds the freed area to a
22 : : * list for later reuse by AllocSetAlloc(). All memory blocks are free()'d
23 : : * at once on AllocSetReset(), which happens when the memory context gets
24 : : * destroyed.
25 : : * Jan Wieck
26 : : *
27 : : * Performance improvement from Tom Lane, 8/99: for extremely large request
28 : : * sizes, we do want to be able to give the memory back to free() as soon
29 : : * as it is pfree()'d. Otherwise we risk tying up a lot of memory in
30 : : * freelist entries that might never be usable. This is especially needed
31 : : * when the caller is repeatedly repalloc()'ing a block bigger and bigger;
32 : : * the previous instances of the block were guaranteed to be wasted until
33 : : * AllocSetReset() under the old way.
34 : : *
35 : : * Further improvement 12/00: as the code stood, request sizes in the
36 : : * midrange between "small" and "large" were handled very inefficiently,
37 : : * because any sufficiently large free chunk would be used to satisfy a
38 : : * request, even if it was much larger than necessary. This led to more
39 : : * and more wasted space in allocated chunks over time. To fix, get rid
40 : : * of the midrange behavior: we now handle only "small" power-of-2-size
41 : : * chunks as chunks. Anything "large" is passed off to malloc(). Change
42 : : * the number of freelists to change the small/large boundary.
43 : : *
44 : : *-------------------------------------------------------------------------
45 : : */
46 : :
47 : : #include "postgres.h"
48 : :
49 : : #include "port/pg_bitutils.h"
50 : : #include "utils/memdebug.h"
51 : : #include "utils/memutils.h"
52 : : #include "utils/memutils_internal.h"
53 : : #include "utils/memutils_memorychunk.h"
54 : :
55 : : /*--------------------
56 : : * Chunk freelist k holds chunks of size 1 << (k + ALLOC_MINBITS),
57 : : * for k = 0 .. ALLOCSET_NUM_FREELISTS-1.
58 : : *
59 : : * Note that all chunks in the freelists have power-of-2 sizes. This
60 : : * improves recyclability: we may waste some space, but the wasted space
61 : : * should stay pretty constant as requests are made and released.
62 : : *
63 : : * A request too large for the last freelist is handled by allocating a
64 : : * dedicated block from malloc(). The block still has a block header and
65 : : * chunk header, but when the chunk is freed we'll return the whole block
66 : : * to malloc(), not put it on our freelists.
67 : : *
68 : : * CAUTION: ALLOC_MINBITS must be large enough so that
69 : : * 1<<ALLOC_MINBITS is at least MAXALIGN,
70 : : * or we may fail to align the smallest chunks adequately.
71 : : * 8-byte alignment is enough on all currently known machines. This 8-byte
72 : : * minimum also allows us to store a pointer to the next freelist item within
73 : : * the chunk of memory itself.
74 : : *
75 : : * With the current parameters, request sizes up to 8K are treated as chunks,
76 : : * while larger requests go into dedicated blocks. Change ALLOCSET_NUM_FREELISTS
77 : : * to adjust the boundary point; and adjust ALLOCSET_SEPARATE_THRESHOLD in
78 : : * memutils.h to agree. (Note: in contexts with small maxBlockSize, we may
79 : : * set the allocChunkLimit to less than 8K, so as to avoid space wastage.)
80 : : *--------------------
81 : : */
82 : :
83 : : #define ALLOC_MINBITS 3 /* smallest chunk size is 8 bytes */
84 : : #define ALLOCSET_NUM_FREELISTS 11
85 : : #define ALLOC_CHUNK_LIMIT (1 << (ALLOCSET_NUM_FREELISTS-1+ALLOC_MINBITS))
86 : : /* Size of largest chunk that we use a fixed size for */
87 : : #define ALLOC_CHUNK_FRACTION 4
88 : : /* We allow chunks to be at most 1/4 of maxBlockSize (less overhead) */
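/*
 * Illustrative check of the arithmetic above (a sketch; nothing in the
 * implementation depends on it): with ALLOC_MINBITS = 3 and
 * ALLOCSET_NUM_FREELISTS = 11, freelist k serves chunks of (8 << k) bytes,
 * so freelist 0 holds 8-byte chunks and freelist 10 holds 8192-byte chunks.
 * That is where the 8K chunk/dedicated-block boundary mentioned above
 * comes from.
 */
StaticAssertDecl(ALLOC_CHUNK_LIMIT == 8 * 1024,
				 "largest chunk freelist is expected to hold 8K chunks");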
89 : :
90 : : /*--------------------
91 : : * The first block allocated for an allocset has size initBlockSize.
92 : : * Each time we have to allocate another block, we double the block size
93 : : * (if possible, and without exceeding maxBlockSize), so as to reduce
94 : : * the bookkeeping load on malloc().
95 : : *
96 : : * Blocks allocated to hold oversize chunks do not follow this rule, however;
97 : : * they are just however big they need to be to hold that single chunk.
98 : : *
99 : : * Also, if a minContextSize is specified, the first block has that size,
100 : : * and then initBlockSize is used for the next one.
101 : : *--------------------
102 : : */
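/*
 * Worked example of the growth rule above, assuming the standard
 * ALLOCSET_DEFAULT_SIZES parameters from memutils.h (no minContextSize,
 * 8kB initBlockSize, 8MB maxBlockSize): the first (keeper) block is 8kB,
 * and each additional block request asks for 8kB, 16kB, 32kB, and so on,
 * doubling until the 8MB ceiling is reached, after which every further
 * block is 8MB. A dedicated block for an oversize chunk is simply as big
 * as that chunk needs, regardless of where the doubling sequence stands.
 */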
103 : :
104 : : #define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
105 : : #define ALLOC_CHUNKHDRSZ sizeof(MemoryChunk)
106 : : #define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(AllocSetContext)) + \
107 : : ALLOC_BLOCKHDRSZ)
108 : :
109 : : typedef struct AllocBlockData *AllocBlock; /* forward reference */
110 : :
111 : : /*
112 : : * AllocPointer
113 : : * Aligned pointer which may be a member of an allocation set.
114 : : */
115 : : typedef void *AllocPointer;
116 : :
117 : : /*
118 : : * AllocFreeListLink
119 : : * When pfreeing memory, if we maintain a freelist for the given chunk's
120 : : * size then we use an AllocFreeListLink to point to the current item in
121 : : * the AllocSetContext's freelist and then set the given freelist element
122 : : * to point to the chunk being freed.
123 : : */
124 : : typedef struct AllocFreeListLink
125 : : {
126 : : MemoryChunk *next;
127 : : } AllocFreeListLink;
128 : :
129 : : /*
130 : : * Obtain an AllocFreeListLink for the given chunk. Allocation sizes are
131 : : * always at least sizeof(AllocFreeListLink), so we reuse the pointer's memory
132 : : * itself to store the freelist link.
133 : : */
134 : : #define GetFreeListLink(chkptr) \
135 : : (AllocFreeListLink *) ((char *) (chkptr) + ALLOC_CHUNKHDRSZ)
136 : :
137 : : /* Validate a freelist index retrieved from a chunk header */
138 : : #define FreeListIdxIsValid(fidx) \
139 : : ((fidx) >= 0 && (fidx) < ALLOCSET_NUM_FREELISTS)
140 : :
141 : : /* Determine the size of the chunk based on the freelist index */
142 : : #define GetChunkSizeFromFreeListIdx(fidx) \
143 : : ((((Size) 1) << ALLOC_MINBITS) << (fidx))
144 : :
145 : : /*
146 : : * AllocSetContext is our standard implementation of MemoryContext.
147 : : *
148 : : * Note: header.isReset means there is nothing for AllocSetReset to do.
149 : : * This is different from the aset being physically empty (empty blocks list)
150 : : * because we will still have a keeper block. It's also different from the set
151 : : * being logically empty, because we don't attempt to detect pfree'ing the
152 : : * last active chunk.
153 : : */
154 : : typedef struct AllocSetContext
155 : : {
156 : : MemoryContextData header; /* Standard memory-context fields */
157 : : /* Info about storage allocated in this context: */
158 : : AllocBlock blocks; /* head of list of blocks in this set */
159 : : MemoryChunk *freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
160 : : /* Allocation parameters for this context: */
161 : : uint32 initBlockSize; /* initial block size */
162 : : uint32 maxBlockSize; /* maximum block size */
163 : : uint32 nextBlockSize; /* next block size to allocate */
164 : : uint32 allocChunkLimit; /* effective chunk size limit */
165 : : /* freelist this context could be put in, or -1 if not a candidate: */
166 : : int freeListIndex; /* index in context_freelists[], or -1 */
167 : : } AllocSetContext;
168 : :
169 : : typedef AllocSetContext *AllocSet;
170 : :
171 : : /*
172 : : * AllocBlock
173 : : * An AllocBlock is the unit of memory that is obtained by aset.c
174 : : * from malloc(). It contains one or more MemoryChunks, which are
175 : : * the units requested by palloc() and freed by pfree(). MemoryChunks
176 : : * cannot be returned to malloc() individually, instead they are put
177 : : * on freelists by pfree() and re-used by the next palloc() that has
178 : : * a matching request size.
179 : : *
180 : : * AllocBlockData is the header data for a block --- the usable space
181 : : * within the block begins at the next alignment boundary.
182 : : */
183 : : typedef struct AllocBlockData
184 : : {
185 : : AllocSet aset; /* aset that owns this block */
186 : : AllocBlock prev; /* prev block in aset's blocks list, if any */
187 : : AllocBlock next; /* next block in aset's blocks list, if any */
188 : : char *freeptr; /* start of free space in this block */
189 : : char *endptr; /* end of space in this block */
190 : : } AllocBlockData;
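/*
 * Sketch of an ordinary (multi-chunk) block's layout, as implied by the
 * comments above: the AllocBlockData header comes first, followed by a
 * sequence of (chunk header, chunk data) pairs, followed by the still
 * unused space between freeptr and endptr. Allocating from the block is
 * just a matter of advancing freeptr by ALLOC_CHUNKHDRSZ plus the chunk
 * size, as AllocSetAllocChunkFromBlock() below does.
 */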
191 : :
192 : : /*
193 : : * AllocSetIsValid
194 : : * True iff set is a valid allocation set.
195 : : */
196 : : #define AllocSetIsValid(set) \
197 : : ((set) && IsA(set, AllocSetContext))
198 : :
199 : : /*
200 : : * AllocBlockIsValid
201 : : * True iff block is a valid block of an allocation set.
202 : : */
203 : : #define AllocBlockIsValid(block) \
204 : : ((block) && AllocSetIsValid((block)->aset))
205 : :
206 : : /*
207 : : * We always store external chunks on a dedicated block. This makes fetching
208 : : * the block from an external chunk easy since it's always the first and only
209 : : * chunk on the block.
210 : : */
211 : : #define ExternalChunkGetBlock(chunk) \
212 : : (AllocBlock) ((char *) chunk - ALLOC_BLOCKHDRSZ)
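/*
 * In a dedicated block the AllocBlockData header is immediately followed
 * by the single chunk's header and data, so stepping back ALLOC_BLOCKHDRSZ
 * bytes from the chunk recovers the block, which is all this macro does.
 */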
213 : :
214 : : /*
215 : : * Rather than repeatedly creating and deleting memory contexts, we keep some
216 : : * freed contexts in freelists so that we can hand them out again with little
217 : : * work. Before putting a context in a freelist, we reset it so that it has
218 : : * only its initial malloc chunk and no others. To be a candidate for a
219 : : * freelist, a context must have the same minContextSize/initBlockSize as
220 : : * other contexts in the list; but its maxBlockSize is irrelevant since that
221 : : * doesn't affect the size of the initial chunk.
222 : : *
223 : : * We currently provide one freelist for ALLOCSET_DEFAULT_SIZES contexts
224 : : * and one for ALLOCSET_SMALL_SIZES contexts; the latter works for
225 : : * ALLOCSET_START_SMALL_SIZES too, since only the maxBlockSize differs.
226 : : *
227 : : * Ordinarily, we re-use freelist contexts in last-in-first-out order, in
228 : : * hopes of improving locality of reference. But if there get to be too
229 : : * many contexts in the list, we'd prefer to drop the most-recently-created
230 : : * contexts in hopes of keeping the process memory map compact.
231 : : * We approximate that by simply deleting all existing entries when the list
232 : : * overflows, on the assumption that queries that allocate a lot of contexts
233 : : * will probably free them in more or less reverse order of allocation.
234 : : *
235 : : * Contexts in a freelist are chained via their nextchild pointers.
236 : : */
237 : : #define MAX_FREE_CONTEXTS 100 /* arbitrary limit on freelist length */
238 : :
239 : : /* Obtain the keeper block for an allocation set */
240 : : #define KeeperBlock(set) \
241 : : ((AllocBlock) (((char *) set) + MAXALIGN(sizeof(AllocSetContext))))
242 : :
243 : : /* Check if the block is the keeper block of the given allocation set */
244 : : #define IsKeeperBlock(set, block) ((block) == (KeeperBlock(set)))
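/*
 * Illustration of the arrangement these macros rely on: the context header
 * and the keeper block share a single malloc() chunk, with the
 * AllocSetContext struct first and the keeper block's AllocBlockData
 * header starting at the next MAXALIGN boundary (hence FIRST_BLOCKHDRSZ
 * above, and the initial-block setup in AllocSetContextCreateInternal).
 */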
245 : :
246 : : typedef struct AllocSetFreeList
247 : : {
248 : : int num_free; /* current list length */
249 : : AllocSetContext *first_free; /* list header */
250 : : } AllocSetFreeList;
251 : :
252 : : /* context_freelists[0] is for default params, [1] for small params */
253 : : static AllocSetFreeList context_freelists[2] =
254 : : {
255 : : {
256 : : 0, NULL
257 : : },
258 : : {
259 : : 0, NULL
260 : : }
261 : : };
262 : :
263 : :
264 : : /* ----------
265 : : * AllocSetFreeIndex -
266 : : *
267 : : * Depending on the size of an allocation, compute which freechunk
268 : : * list of the alloc set it belongs to. Caller must have verified
269 : : * that size <= ALLOC_CHUNK_LIMIT.
270 : : * ----------
271 : : */
272 : : static inline int
9761 JanWieck@Yahoo.com 273 :CBC 537891571 : AllocSetFreeIndex(Size size)
274 : : {
275 : : int idx;
276 : :
5943 tgl@sss.pgh.pa.us 277 [ + + ]: 537891571 : if (size > (1 << ALLOC_MINBITS))
278 : : {
279 : : /*----------
280 : : * At this point we must compute ceil(log2(size >> ALLOC_MINBITS)).
281 : : * This is the same as
282 : : * pg_leftmost_one_pos32((size - 1) >> ALLOC_MINBITS) + 1
283 : : * or equivalently
284 : : * pg_leftmost_one_pos32(size - 1) - ALLOC_MINBITS + 1
285 : : *
286 : : * However, for platforms without intrinsic support, we duplicate the
287 : : * logic here, allowing an additional optimization. It's reasonable
288 : : * to assume that ALLOC_CHUNK_LIMIT fits in 16 bits, so we can unroll
289 : : * the byte-at-a-time loop in pg_leftmost_one_pos32 and just handle
290 : : * the last two bytes.
291 : : *
292 : : * Yes, this function is enough of a hot-spot to make it worth this
293 : : * much trouble.
294 : : *----------
295 : : */
296 : : #ifdef HAVE_BITSCAN_REVERSE
993 john.naylor@postgres 297 : 463675281 : idx = pg_leftmost_one_pos32((uint32) size - 1) - ALLOC_MINBITS + 1;
298 : : #else
299 : : uint32 t,
300 : : tsize;
301 : :
302 : : /* Statically assert that we only have a 16-bit input value. */
303 : : StaticAssertDecl(ALLOC_CHUNK_LIMIT < (1 << 16),
304 : : "ALLOC_CHUNK_LIMIT must be less than 64kB");
305 : :
306 : : tsize = size - 1;
307 : : t = tsize >> 8;
308 : : idx = t ? pg_leftmost_one_pos[t] + 8 : pg_leftmost_one_pos[tsize];
309 : : idx -= ALLOC_MINBITS - 1;
310 : : #endif
311 : :
9097 tgl@sss.pgh.pa.us 312 [ - + ]: 463675281 : Assert(idx < ALLOCSET_NUM_FREELISTS);
313 : : }
314 : : else
5943 315 : 74216290 : idx = 0;
316 : :
9761 JanWieck@Yahoo.com 317 : 537891571 : return idx;
318 : : }
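/*
 * Worked examples of the mapping computed above, assuming ALLOC_MINBITS = 3:
 *
 *   size 1..8   -> index 0  (8-byte chunk)
 *   size 9..16  -> index 1  (16-byte chunk)
 *   size 24     -> pg_leftmost_one_pos32(23) - 3 + 1 = 4 - 3 + 1 = 2
 *                  (32-byte chunk)
 *   size 8192   -> index 10, the last freelist (ALLOC_CHUNK_LIMIT)
 */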
319 : :
320 : :
321 : : /*
322 : : * Public routines
323 : : */
324 : :
325 : :
326 : : /*
327 : : * AllocSetContextCreateInternal
328 : : * Create a new AllocSet context.
329 : : *
330 : : * parent: parent context, or NULL if top-level context
331 : : * name: name of context (must be statically allocated)
332 : : * minContextSize: minimum context size
333 : : * initBlockSize: initial allocation block size
334 : : * maxBlockSize: maximum allocation block size
335 : : *
336 : : * Most callers should abstract the context size parameters using a macro
337 : : * such as ALLOCSET_DEFAULT_SIZES.
338 : : *
339 : : * Note: don't call this directly; go through the wrapper macro
340 : : * AllocSetContextCreate.
341 : : */
342 : : MemoryContext
2573 tgl@sss.pgh.pa.us 343 : 6445477 : AllocSetContextCreateInternal(MemoryContext parent,
344 : : const char *name,
345 : : Size minContextSize,
346 : : Size initBlockSize,
347 : : Size maxBlockSize)
348 : : {
349 : : int freeListIndex;
350 : : Size firstBlockSize;
351 : : AllocSet set;
352 : : AllocBlock block;
353 : :
354 : : /* ensure MemoryChunk's size is properly maxaligned */
355 : : StaticAssertDecl(ALLOC_CHUNKHDRSZ == MAXALIGN(ALLOC_CHUNKHDRSZ),
356 : : "sizeof(MemoryChunk) is not maxaligned");
357 : : /* check we have enough space to store the freelist link */
358 : : StaticAssertDecl(sizeof(AllocFreeListLink) <= (1 << ALLOC_MINBITS),
359 : : "sizeof(AllocFreeListLink) larger than minimum allocation size");
360 : :
361 : : /*
362 : : * First, validate allocation parameters. Once these were regular runtime
363 : : * tests and elog's, but in practice Asserts seem sufficient because
364 : : * nobody varies their parameters at runtime. We somewhat arbitrarily
365 : : * enforce a minimum 1K block size. We restrict the maximum block size to
366 : : * MEMORYCHUNK_MAX_BLOCKOFFSET, because a MemoryChunk header can only
367 : : * record an offset of up to that many bytes between the chunk and the
368 : : * block that the chunk is stored on. If a block were allowed to be
369 : : * larger than that, we would be unable to record the block offset for
370 : : * any chunk lying more than MEMORYCHUNK_MAX_BLOCKOFFSET bytes into the
371 : : * block.
372 : : */
2876 373 [ + - - + ]: 6445477 : Assert(initBlockSize == MAXALIGN(initBlockSize) &&
374 : : initBlockSize >= 1024);
375 [ + - + - : 6445477 : Assert(maxBlockSize == MAXALIGN(maxBlockSize) &&
- + ]
376 : : maxBlockSize >= initBlockSize &&
377 : : AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
378 [ + + + - : 6445477 : Assert(minContextSize == 0 ||
+ - - + ]
379 : : (minContextSize == MAXALIGN(minContextSize) &&
380 : : minContextSize >= 1024 &&
381 : : minContextSize <= maxBlockSize));
1156 drowley@postgresql.o 382 [ - + ]: 6445477 : Assert(maxBlockSize <= MEMORYCHUNK_MAX_BLOCKOFFSET);
383 : :
384 : : /*
385 : : * Check whether the parameters match either available freelist. We do
386 : : * not need to demand a match of maxBlockSize.
387 : : */
2772 tgl@sss.pgh.pa.us 388 [ + + + + ]: 6445477 : if (minContextSize == ALLOCSET_DEFAULT_MINSIZE &&
389 : : initBlockSize == ALLOCSET_DEFAULT_INITSIZE)
2876 390 : 4398132 : freeListIndex = 0;
2772 391 [ + + + - ]: 2047345 : else if (minContextSize == ALLOCSET_SMALL_MINSIZE &&
392 : : initBlockSize == ALLOCSET_SMALL_INITSIZE)
2876 393 : 2030371 : freeListIndex = 1;
394 : : else
395 : 16974 : freeListIndex = -1;
396 : :
397 : : /*
398 : : * If a suitable freelist entry exists, just recycle that context.
399 : : */
400 [ + + ]: 6445477 : if (freeListIndex >= 0)
401 : : {
402 : 6428503 : AllocSetFreeList *freelist = &context_freelists[freeListIndex];
403 : :
404 [ + + ]: 6428503 : if (freelist->first_free != NULL)
405 : : {
406 : : /* Remove entry from freelist */
407 : 4681672 : set = freelist->first_free;
408 : 4681672 : freelist->first_free = (AllocSet) set->header.nextchild;
409 : 4681672 : freelist->num_free--;
410 : :
411 : : /* Update its maxBlockSize; everything else should be OK */
412 : 4681672 : set->maxBlockSize = maxBlockSize;
413 : :
414 : : /* Reinitialize its header, installing correct name and parent */
415 : 4681672 : MemoryContextCreate((MemoryContext) set,
416 : : T_AllocSetContext,
417 : : MCTX_ASET_ID,
418 : : parent,
419 : : name);
420 : :
2049 jdavis@postgresql.or 421 : 4681672 : ((MemoryContext) set)->mem_allocated =
834 drowley@postgresql.o 422 : 4681672 : KeeperBlock(set)->endptr - ((char *) set);
423 : :
2876 tgl@sss.pgh.pa.us 424 : 4681672 : return (MemoryContext) set;
425 : : }
426 : : }
427 : :
428 : : /* Determine size of initial block */
2772 429 : 1763805 : firstBlockSize = MAXALIGN(sizeof(AllocSetContext)) +
430 : : ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
2876 431 [ + + ]: 1763805 : if (minContextSize != 0)
432 : 16974 : firstBlockSize = Max(firstBlockSize, minContextSize);
433 : : else
434 : 1746831 : firstBlockSize = Max(firstBlockSize, initBlockSize);
435 : :
436 : : /*
437 : : * Allocate the initial block. Unlike other aset.c blocks, it starts with
438 : : * the context header and its block header follows that.
439 : : */
440 : 1763805 : set = (AllocSet) malloc(firstBlockSize);
441 [ - + ]: 1763805 : if (set == NULL)
442 : : {
2876 tgl@sss.pgh.pa.us 443 [ # # ]:UBC 0 : if (TopMemoryContext)
444 : 0 : MemoryContextStats(TopMemoryContext);
445 [ # # ]: 0 : ereport(ERROR,
446 : : (errcode(ERRCODE_OUT_OF_MEMORY),
447 : : errmsg("out of memory"),
448 : : errdetail("Failed while creating memory context \"%s\".",
449 : : name)));
450 : : }
451 : :
452 : : /*
453 : : * Avoid writing code that can fail between here and MemoryContextCreate;
454 : : * we'd leak the header/initial block if we ereport in this stretch.
455 : : */
456 : :
457 : : /* Create a vpool associated with the context */
458 : : VALGRIND_CREATE_MEMPOOL(set, 0, false);
459 : :
460 : : /*
461 : : * Create a vchunk covering both the AllocSetContext struct and the keeper
462 : : * block's header. (Perhaps it would be more sensible for these to be two
463 : : * separate vchunks, but doing that seems to tickle bugs in some versions
464 : : * of Valgrind.) We must have these vchunks, and also a vchunk for each
465 : : * subsequently-added block header, so that Valgrind considers the
466 : : * pointers within them while checking for leaked memory. Note that
467 : : * Valgrind doesn't distinguish between these vchunks and those created by
468 : : * mcxt.c for the user-accessible-data chunks we allocate.
469 : : */
470 : : VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
471 : :
472 : : /* Fill in the initial block's block header */
834 drowley@postgresql.o 473 :CBC 1763805 : block = KeeperBlock(set);
2876 tgl@sss.pgh.pa.us 474 : 1763805 : block->aset = set;
475 : 1763805 : block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
476 : 1763805 : block->endptr = ((char *) set) + firstBlockSize;
477 : 1763805 : block->prev = NULL;
478 : 1763805 : block->next = NULL;
479 : :
480 : : /* Mark unallocated space NOACCESS; leave the block header alone. */
481 : : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr, block->endptr - block->freeptr);
482 : :
483 : : /* Remember block as part of block list */
484 : 1763805 : set->blocks = block;
485 : :
486 : : /* Finish filling in aset-specific parts of the context header */
487 [ + - + - : 21165660 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
+ - + + ]
488 : :
834 drowley@postgresql.o 489 : 1763805 : set->initBlockSize = (uint32) initBlockSize;
490 : 1763805 : set->maxBlockSize = (uint32) maxBlockSize;
491 : 1763805 : set->nextBlockSize = (uint32) initBlockSize;
2876 tgl@sss.pgh.pa.us 492 : 1763805 : set->freeListIndex = freeListIndex;
493 : :
494 : : /*
495 : : * Compute the allocation chunk size limit for this context. It can't be
496 : : * more than ALLOC_CHUNK_LIMIT because of the fixed number of freelists.
497 : : * If maxBlockSize is small then requests exceeding the maxBlockSize, or
498 : : * even a significant fraction of it, should be treated as large chunks
499 : : * too. For the typical case of maxBlockSize a power of 2, the chunk size
500 : : * limit will be at most 1/8th maxBlockSize, so that given a stream of
501 : : * requests that are all the maximum chunk size we will waste at most
502 : : * 1/8th of the allocated space.
503 : : *
504 : : * Also, allocChunkLimit must not exceed ALLOCSET_SEPARATE_THRESHOLD.
505 : : */
506 : : StaticAssertStmt(ALLOC_CHUNK_LIMIT == ALLOCSET_SEPARATE_THRESHOLD,
507 : : "ALLOC_CHUNK_LIMIT != ALLOCSET_SEPARATE_THRESHOLD");
508 : :
509 : : /*
510 : : * Determine the maximum size that a chunk can be before we allocate an
511 : : * entire AllocBlock dedicated for that chunk. We set the absolute limit
512 : : * of that size as ALLOC_CHUNK_LIMIT but we reduce it further so that we
513 : : * can fit about ALLOC_CHUNK_FRACTION chunks this size on a maximally
514 : : * sized block. (We opt to keep allocChunkLimit a power-of-2 value
515 : : * primarily for legacy reasons rather than calculating it so that exactly
516 : : * ALLOC_CHUNK_FRACTION chunks fit on a maximally sized block.)
517 : : */
3902 jdavis@postgresql.or 518 : 1763805 : set->allocChunkLimit = ALLOC_CHUNK_LIMIT;
519 : 1763805 : while ((Size) (set->allocChunkLimit + ALLOC_CHUNKHDRSZ) >
5293 tgl@sss.pgh.pa.us 520 [ + + ]: 5785126 : (Size) ((maxBlockSize - ALLOC_BLOCKHDRSZ) / ALLOC_CHUNK_FRACTION))
3902 jdavis@postgresql.or 521 : 4021321 : set->allocChunkLimit >>= 1;
522 : :
523 : : /* Finally, do the type-independent part of context creation */
2876 tgl@sss.pgh.pa.us 524 : 1763805 : MemoryContextCreate((MemoryContext) set,
525 : : T_AllocSetContext,
526 : : MCTX_ASET_ID,
527 : : parent,
528 : : name);
529 : :
2049 jdavis@postgresql.or 530 : 1763805 : ((MemoryContext) set)->mem_allocated = firstBlockSize;
531 : :
3902 532 : 1763805 : return (MemoryContext) set;
533 : : }
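/*
 * Caller-side usage sketch (hypothetical caller code, shown only to
 * illustrate the wrapper macro named in the function's header comment):
 *
 *     MemoryContext mycxt;
 *
 *     mycxt = AllocSetContextCreate(CurrentMemoryContext,
 *                                   "MyWorkContext",
 *                                   ALLOCSET_DEFAULT_SIZES);
 *     ... allocate in it, e.g. after MemoryContextSwitchTo(mycxt) ...
 *     MemoryContextDelete(mycxt);
 *
 * ALLOCSET_DEFAULT_SIZES expands to the minContextSize, initBlockSize and
 * maxBlockSize arguments, so the macro call supplies all three sizes.
 */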
534 : :
535 : : /*
536 : : * AllocSetReset
537 : : * Frees all memory which is allocated in the given set.
538 : : *
539 : : * Actually, this routine has some discretion about what to do.
540 : : * It should mark all allocated chunks freed, but it need not necessarily
541 : : * give back all the resources the set owns. Our actual implementation is
542 : : * that we give back all but the "keeper" block (which we must keep, since
543 : : * it shares a malloc chunk with the context header). In this way, we don't
544 : : * thrash malloc() when a context is repeatedly reset after small allocations,
545 : : * which is typical behavior for per-tuple contexts.
546 : : */
547 : : void
9253 tgl@sss.pgh.pa.us 548 : 26218380 : AllocSetReset(MemoryContext context)
549 : : {
550 : 26218380 : AllocSet set = (AllocSet) context;
551 : : AllocBlock block;
552 : : Size keepersize PG_USED_FOR_ASSERTS_ONLY;
553 : :
1096 peter@eisentraut.org 554 [ + - - + ]: 26218380 : Assert(AllocSetIsValid(set));
555 : :
556 : : #ifdef MEMORY_CONTEXT_CHECKING
557 : : /* Check for corruption and leaks before freeing */
9097 tgl@sss.pgh.pa.us 558 : 26218380 : AllocSetCheck(context);
559 : : #endif
560 : :
561 : : /* Remember keeper block size for Assert below */
834 drowley@postgresql.o 562 : 26218380 : keepersize = KeeperBlock(set)->endptr - ((char *) set);
563 : :
564 : : /* Clear chunk freelists */
7472 tgl@sss.pgh.pa.us 565 [ + - + - : 314620560 : MemSetAligned(set->freelist, 0, sizeof(set->freelist));
+ - + + ]
566 : :
7362 567 : 26218380 : block = set->blocks;
568 : :
569 : : /* New blocks list will be just the keeper block */
834 drowley@postgresql.o 570 : 26218380 : set->blocks = KeeperBlock(set);
571 : :
7362 tgl@sss.pgh.pa.us 572 [ + + ]: 57403786 : while (block != NULL)
573 : : {
9253 574 : 31185406 : AllocBlock next = block->next;
575 : :
834 drowley@postgresql.o 576 [ + + ]: 31185406 : if (IsKeeperBlock(set, block))
577 : : {
578 : : /* Reset the block, but don't return it to malloc */
8986 bruce@momjian.us 579 : 26218380 : char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
580 : :
581 : : #ifdef CLOBBER_FREED_MEMORY
4507 noah@leadboat.com 582 : 26218380 : wipe_mem(datastart, block->freeptr - datastart);
583 : : #else
584 : : /* wipe_mem() would have done this */
585 : : VALGRIND_MAKE_MEM_NOACCESS(datastart, block->freeptr - datastart);
586 : : #endif
9239 tgl@sss.pgh.pa.us 587 : 26218380 : block->freeptr = datastart;
3156 588 : 26218380 : block->prev = NULL;
9239 589 : 26218380 : block->next = NULL;
590 : : }
591 : : else
592 : : {
593 : : /* Normal case, release the block */
1993 594 : 4967026 : context->mem_allocated -= block->endptr - ((char *) block);
595 : :
596 : : #ifdef CLOBBER_FREED_MEMORY
4507 noah@leadboat.com 597 : 4967026 : wipe_mem(block, block->freeptr - ((char *) block));
598 : : #endif
599 : :
600 : : /*
601 : : * We need to free the block header's vchunk explicitly, although
602 : : * the user-data vchunks within will go away in the TRIM below.
603 : : * Otherwise Valgrind complains about leaked allocations.
604 : : */
605 : : VALGRIND_MEMPOOL_FREE(set, block);
606 : :
9253 tgl@sss.pgh.pa.us 607 : 4967026 : free(block);
608 : : }
9761 JanWieck@Yahoo.com 609 : 31185406 : block = next;
610 : : }
611 : :
2049 jdavis@postgresql.or 612 [ - + ]: 26218380 : Assert(context->mem_allocated == keepersize);
613 : :
614 : : /*
615 : : * Instruct Valgrind to throw away all the vchunks associated with this
616 : : * context, except for the one covering the AllocSetContext and
617 : : * keeper-block header. This gets rid of the vchunks for whatever user
618 : : * data is getting discarded by the context reset.
619 : : */
620 : : VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
621 : :
622 : : /* Reset block size allocation sequence, too */
6929 tgl@sss.pgh.pa.us 623 : 26218380 : set->nextBlockSize = set->initBlockSize;
10703 scrappy@hub.org 624 : 26218380 : }
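/*
 * Caller-side sketch of the "repeatedly reset" pattern mentioned in the
 * header comment above (hypothetical caller code): a per-tuple context is
 * cleared between iterations with
 *
 *     MemoryContextReset(per_tuple_context);
 *
 * which, for an AllocSet, ends up here and retains only the keeper block.
 */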
625 : :
626 : : /*
627 : : * AllocSetDelete
628 : : * Frees all memory which is allocated in the given set,
629 : : * in preparation for deletion of the set.
630 : : *
631 : : * Unlike AllocSetReset, this *must* free all resources of the set.
632 : : */
633 : : void
9253 tgl@sss.pgh.pa.us 634 : 4930420 : AllocSetDelete(MemoryContext context)
635 : : {
636 : 4930420 : AllocSet set = (AllocSet) context;
637 : 4930420 : AllocBlock block = set->blocks;
638 : : Size keepersize PG_USED_FOR_ASSERTS_ONLY;
639 : :
1096 peter@eisentraut.org 640 [ + - - + ]: 4930420 : Assert(AllocSetIsValid(set));
641 : :
642 : : #ifdef MEMORY_CONTEXT_CHECKING
643 : : /* Check for corruption and leaks before freeing */
9097 tgl@sss.pgh.pa.us 644 : 4930420 : AllocSetCheck(context);
645 : : #endif
646 : :
647 : : /* Remember keeper block size for Assert below */
834 drowley@postgresql.o 648 : 4930420 : keepersize = KeeperBlock(set)->endptr - ((char *) set);
649 : :
650 : : /*
651 : : * If the context is a candidate for a freelist, put it into that freelist
652 : : * instead of destroying it.
653 : : */
2876 tgl@sss.pgh.pa.us 654 [ + - ]: 4930420 : if (set->freeListIndex >= 0)
655 : : {
656 : 4930420 : AllocSetFreeList *freelist = &context_freelists[set->freeListIndex];
657 : :
658 : : /*
659 : : * Reset the context, if it needs it, so that we aren't hanging on to
660 : : * more than the initial malloc chunk.
661 : : */
662 [ + + ]: 4930420 : if (!context->isReset)
663 : 3069706 : MemoryContextResetOnly(context);
664 : :
665 : : /*
666 : : * If the freelist is full, just discard what's already in it. See
667 : : * comments with context_freelists[].
668 : : */
669 [ + + ]: 4930420 : if (freelist->num_free >= MAX_FREE_CONTEXTS)
670 : : {
671 [ + + ]: 31916 : while (freelist->first_free != NULL)
672 : : {
673 : 31600 : AllocSetContext *oldset = freelist->first_free;
674 : :
675 : 31600 : freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
676 : 31600 : freelist->num_free--;
677 : :
678 : : /* Destroy the context's vpool --- see notes below */
679 : : VALGRIND_DESTROY_MEMPOOL(oldset);
680 : :
681 : : /* All that remains is to free the header/initial block */
682 : 31600 : free(oldset);
683 : : }
684 [ - + ]: 316 : Assert(freelist->num_free == 0);
685 : : }
686 : :
687 : : /* Now add the just-deleted context to the freelist. */
688 : 4930420 : set->header.nextchild = (MemoryContext) freelist->first_free;
689 : 4930420 : freelist->first_free = set;
690 : 4930420 : freelist->num_free++;
691 : :
692 : 4930420 : return;
693 : : }
694 : :
695 : : /* Free all blocks, except the keeper which is part of context header */
9253 tgl@sss.pgh.pa.us 696 [ # # ]:UBC 0 : while (block != NULL)
697 : : {
698 : 0 : AllocBlock next = block->next;
699 : :
834 drowley@postgresql.o 700 [ # # ]: 0 : if (!IsKeeperBlock(set, block))
2049 jdavis@postgresql.or 701 : 0 : context->mem_allocated -= block->endptr - ((char *) block);
702 : :
703 : : #ifdef CLOBBER_FREED_MEMORY
4507 noah@leadboat.com 704 : 0 : wipe_mem(block, block->freeptr - ((char *) block));
705 : : #endif
706 : :
834 drowley@postgresql.o 707 [ # # ]: 0 : if (!IsKeeperBlock(set, block))
708 : : {
709 : : /* As in AllocSetReset, free block-header vchunks explicitly */
710 : : VALGRIND_MEMPOOL_FREE(set, block);
2876 tgl@sss.pgh.pa.us 711 : 0 : free(block);
712 : : }
713 : :
9253 714 : 0 : block = next;
715 : : }
716 : :
2049 jdavis@postgresql.or 717 [ # # ]: 0 : Assert(context->mem_allocated == keepersize);
718 : :
719 : : /*
720 : : * Destroy the vpool. We don't seem to need to explicitly free the
721 : : * initial block's header vchunk, nor any user-data vchunks that Valgrind
722 : : * still knows about; they'll all go away automatically.
723 : : */
724 : : VALGRIND_DESTROY_MEMPOOL(set);
725 : :
726 : : /* Finally, free the context header, including the keeper block */
2876 tgl@sss.pgh.pa.us 727 : 0 : free(set);
728 : : }
729 : :
730 : : /*
731 : : * Helper for AllocSetAlloc() that allocates an entire block for the chunk.
732 : : *
733 : : * AllocSetAlloc()'s comment explains why this is separate.
734 : : */
735 : : pg_noinline
736 : : static void *
608 drowley@postgresql.o 737 :CBC 9542145 : AllocSetAllocLarge(MemoryContext context, Size size, int flags)
738 : : {
9253 tgl@sss.pgh.pa.us 739 : 9542145 : AllocSet set = (AllocSet) context;
740 : : AllocBlock block;
741 : : MemoryChunk *chunk;
742 : : Size chunk_size;
743 : : Size blksize;
744 : :
745 : : /* validate 'size' is within the limits for the given 'flags' */
608 drowley@postgresql.o 746 : 9542145 : MemoryContextCheckSize(context, size, flags);
747 : :
748 : : #ifdef MEMORY_CONTEXT_CHECKING
749 : : /* ensure there's always space for the sentinel byte */
750 : 9542145 : chunk_size = MAXALIGN(size + 1);
751 : : #else
752 : : chunk_size = MAXALIGN(size);
753 : : #endif
754 : :
755 : 9542145 : blksize = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
756 : 9542145 : block = (AllocBlock) malloc(blksize);
757 [ - + ]: 9542145 : if (block == NULL)
608 drowley@postgresql.o 758 :UBC 0 : return MemoryContextAllocationFailure(context, size, flags);
759 : :
760 : : /* Make a vchunk covering the new block's header */
761 : : VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
762 : :
608 drowley@postgresql.o 763 :CBC 9542145 : context->mem_allocated += blksize;
764 : :
765 : 9542145 : block->aset = set;
766 : 9542145 : block->freeptr = block->endptr = ((char *) block) + blksize;
767 : :
768 : 9542145 : chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
769 : :
770 : : /* mark the MemoryChunk as externally managed */
771 : 9542145 : MemoryChunkSetHdrMaskExternal(chunk, MCTX_ASET_ID);
772 : :
773 : : #ifdef MEMORY_CONTEXT_CHECKING
774 : 9542145 : chunk->requested_size = size;
775 : : /* set mark to catch clobber of "unused" space */
776 [ - + ]: 9542145 : Assert(size < chunk_size);
777 : 9542145 : set_sentinel(MemoryChunkGetPointer(chunk), size);
778 : : #endif
779 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
780 : : /* fill the allocated space with junk */
781 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
782 : : #endif
783 : :
784 : : /*
785 : : * Stick the new block underneath the active allocation block, if any, so
786 : : * that we don't lose the use of the space remaining therein.
787 : : */
788 [ + - ]: 9542145 : if (set->blocks != NULL)
789 : : {
790 : 9542145 : block->prev = set->blocks;
791 : 9542145 : block->next = set->blocks->next;
792 [ + + ]: 9542145 : if (block->next)
793 : 7786189 : block->next->prev = block;
794 : 9542145 : set->blocks->next = block;
795 : : }
796 : : else
797 : : {
608 drowley@postgresql.o 798 :UBC 0 : block->prev = NULL;
799 : 0 : block->next = NULL;
800 : 0 : set->blocks = block;
801 : : }
802 : :
803 : : /* Ensure any padding bytes are marked NOACCESS. */
804 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
805 : : chunk_size - size);
806 : :
807 : : /* Disallow access to the chunk header. */
808 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
809 : :
608 drowley@postgresql.o 810 :CBC 9542145 : return MemoryChunkGetPointer(chunk);
811 : : }
812 : :
813 : : /*
814 : : * Small helper for allocating a new chunk from a block, to avoid duplicating
815 : : * the code between AllocSetAlloc() and AllocSetAllocFromNewBlock().
816 : : */
817 : : static inline void *
818 : 329244437 : AllocSetAllocChunkFromBlock(MemoryContext context, AllocBlock block,
819 : : Size size, Size chunk_size, int fidx)
820 : : {
821 : : MemoryChunk *chunk;
822 : :
823 : 329244437 : chunk = (MemoryChunk *) (block->freeptr);
824 : :
825 : : /* Prepare to initialize the chunk header. */
826 : : VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
827 : :
828 : 329244437 : block->freeptr += (chunk_size + ALLOC_CHUNKHDRSZ);
829 [ - + ]: 329244437 : Assert(block->freeptr <= block->endptr);
830 : :
831 : : /* store the free list index in the value field */
832 : 329244437 : MemoryChunkSetHdrMask(chunk, block, fidx, MCTX_ASET_ID);
833 : :
834 : : #ifdef MEMORY_CONTEXT_CHECKING
835 : 329244437 : chunk->requested_size = size;
836 : : /* set mark to catch clobber of "unused" space */
837 [ + + ]: 329244437 : if (size < chunk_size)
1147 838 : 223611661 : set_sentinel(MemoryChunkGetPointer(chunk), size);
839 : : #endif
840 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
841 : : /* fill the allocated space with junk */
842 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
843 : : #endif
844 : :
845 : : /* Ensure any padding bytes are marked NOACCESS. */
846 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
847 : : chunk_size - size);
848 : :
849 : : /* Disallow access to the chunk header. */
850 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
851 : :
608 852 : 329244437 : return MemoryChunkGetPointer(chunk);
853 : : }
854 : :
855 : : /*
856 : : * Helper for AllocSetAlloc() that allocates a new block and returns a chunk
857 : : * allocated from it.
858 : : *
859 : : * AllocSetAlloc()'s comment explains why this is separate.
860 : : */
861 : : pg_noinline
862 : : static void *
863 : 5806226 : AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
864 : : int fidx)
865 : : {
866 : 5806226 : AllocSet set = (AllocSet) context;
867 : : AllocBlock block;
868 : : Size availspace;
869 : : Size blksize;
870 : : Size required_size;
871 : : Size chunk_size;
872 : :
873 : : /* due to the keeper block set->blocks should always be valid */
874 [ - + ]: 5806226 : Assert(set->blocks != NULL);
875 : 5806226 : block = set->blocks;
876 : 5806226 : availspace = block->endptr - block->freeptr;
877 : :
878 : : /*
879 : : * The existing active (top) block does not have enough room for the
880 : : * requested allocation, but it might still have a useful amount of space
881 : : * in it. Once we push it down in the block list, we'll never try to
882 : : * allocate more space from it. So, before we do that, carve up its free
883 : : * space into chunks that we can put on the set's freelists.
884 : : *
885 : : * Because we can only get here when there's less than ALLOC_CHUNK_LIMIT
886 : : * left in the block, this loop cannot iterate more than
887 : : * ALLOCSET_NUM_FREELISTS-1 times.
888 : : */
889 [ + + ]: 20288114 : while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
890 : : {
891 : : AllocFreeListLink *link;
892 : : MemoryChunk *chunk;
893 : 14481888 : Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
894 : 14481888 : int a_fidx = AllocSetFreeIndex(availchunk);
895 : :
896 : : /*
897 : : * In most cases, we'll get back the index of the next larger freelist
898 : : * than the one we need to put this chunk on. The exception is when
899 : : * availchunk is exactly a power of 2.
900 : : */
901 [ + + ]: 14481888 : if (availchunk != GetChunkSizeFromFreeListIdx(a_fidx))
902 : : {
903 : 11484824 : a_fidx--;
904 [ - + ]: 11484824 : Assert(a_fidx >= 0);
905 : 11484824 : availchunk = GetChunkSizeFromFreeListIdx(a_fidx);
906 : : }
907 : :
908 : 14481888 : chunk = (MemoryChunk *) (block->freeptr);
909 : :
910 : : /* Prepare to initialize the chunk header. */
911 : : VALGRIND_MAKE_MEM_UNDEFINED(chunk, ALLOC_CHUNKHDRSZ);
912 : 14481888 : block->freeptr += (availchunk + ALLOC_CHUNKHDRSZ);
913 : 14481888 : availspace -= (availchunk + ALLOC_CHUNKHDRSZ);
914 : :
915 : : /* store the freelist index in the value field */
916 : 14481888 : MemoryChunkSetHdrMask(chunk, block, a_fidx, MCTX_ASET_ID);
917 : : #ifdef MEMORY_CONTEXT_CHECKING
918 : 14481888 : chunk->requested_size = InvalidAllocSize; /* mark it free */
919 : : #endif
920 : : /* push this chunk onto the free list */
921 : 14481888 : link = GetFreeListLink(chunk);
922 : :
923 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
924 : 14481888 : link->next = set->freelist[a_fidx];
925 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
926 : :
927 : 14481888 : set->freelist[a_fidx] = chunk;
928 : : }
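	/*
	 * Worked example of the carving loop above, assuming an 8-byte chunk
	 * header: if 300 bytes were left in the old block, the loop would push
	 * a 256-byte chunk onto freelist 5 (consuming 264 bytes) and then a
	 * 16-byte chunk onto freelist 1 (consuming 24 bytes), abandoning the
	 * last 12 bytes. With a larger chunk header (as in
	 * MEMORY_CONTEXT_CHECKING builds) the exact numbers shift, but the
	 * shape is the same: each pass rounds the usable remainder down to a
	 * power of 2 and files it on the matching freelist.
	 */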
929 : :
930 : : /*
931 : : * The first such block has size initBlockSize, and we double the space in
932 : : * each succeeding block, but not more than maxBlockSize.
933 : : */
934 : 5806226 : blksize = set->nextBlockSize;
935 : 5806226 : set->nextBlockSize <<= 1;
936 [ + + ]: 5806226 : if (set->nextBlockSize > set->maxBlockSize)
937 : 338861 : set->nextBlockSize = set->maxBlockSize;
938 : :
939 : : /* Choose the actual chunk size to allocate */
940 : 5806226 : chunk_size = GetChunkSizeFromFreeListIdx(fidx);
941 [ - + ]: 5806226 : Assert(chunk_size >= size);
942 : :
943 : : /*
944 : : * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
945 : : * space... but try to keep it a power of 2.
946 : : */
947 : 5806226 : required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
948 [ + + ]: 8576255 : while (blksize < required_size)
949 : 2770029 : blksize <<= 1;
950 : :
951 : : /* Try to allocate it */
952 : 5806226 : block = (AllocBlock) malloc(blksize);
953 : :
954 : : /*
955 : : * We could be asking for pretty big blocks here, so cope if malloc fails.
956 : : * But give up if there's less than 1 MB or so available...
957 : : */
958 [ - + - - ]: 5806226 : while (block == NULL && blksize > 1024 * 1024)
959 : : {
608 drowley@postgresql.o 960 :UBC 0 : blksize >>= 1;
961 [ # # ]: 0 : if (blksize < required_size)
962 : 0 : break;
963 : 0 : block = (AllocBlock) malloc(blksize);
964 : : }
965 : :
608 drowley@postgresql.o 966 [ - + ]:CBC 5806226 : if (block == NULL)
608 drowley@postgresql.o 967 :UBC 0 : return MemoryContextAllocationFailure(context, size, flags);
968 : :
969 : : /* Make a vchunk covering the new block's header */
970 : : VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
971 : :
608 drowley@postgresql.o 972 :CBC 5806226 : context->mem_allocated += blksize;
973 : :
974 : 5806226 : block->aset = set;
975 : 5806226 : block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
976 : 5806226 : block->endptr = ((char *) block) + blksize;
977 : :
978 : : /* Mark unallocated space NOACCESS. */
979 : : VALGRIND_MAKE_MEM_NOACCESS(block->freeptr,
980 : : blksize - ALLOC_BLOCKHDRSZ);
981 : :
982 : 5806226 : block->prev = NULL;
983 : 5806226 : block->next = set->blocks;
984 [ + - ]: 5806226 : if (block->next)
985 : 5806226 : block->next->prev = block;
986 : 5806226 : set->blocks = block;
987 : :
988 : 5806226 : return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
989 : : }
990 : :
991 : : /*
992 : : * AllocSetAlloc
993 : : * Returns a pointer to allocated memory of given size or raises an ERROR
994 : : * on allocation failure, or returns NULL when flags contains
995 : : * MCXT_ALLOC_NO_OOM.
996 : : *
997 : : * No request may exceed:
998 : : * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
999 : : * All callers use a much lower limit.
1000 : : *
1001 : : * Note: when using valgrind, it doesn't matter how the returned allocation
1002 : : * is marked, as mcxt.c will set it to UNDEFINED. In some paths we will
1003 : : * return space that is marked NOACCESS - AllocSetRealloc has to beware!
1004 : : *
1005 : : * This function should only contain the most common code paths. Everything
1006 : : * else should be in pg_noinline helper functions, thus avoiding the overhead
1007 : : * of creating a stack frame for the common cases. Allocating memory is often
1008 : : * a bottleneck in many workloads, so avoiding stack frame setup is
1009 : : * worthwhile. Helper functions should always directly return the newly
1010 : : * allocated memory so that we can just return that address directly as a tail
1011 : : * call.
1012 : : */
1013 : : void *
1014 : 532951828 : AllocSetAlloc(MemoryContext context, Size size, int flags)
1015 : : {
1016 : 532951828 : AllocSet set = (AllocSet) context;
1017 : : AllocBlock block;
1018 : : MemoryChunk *chunk;
1019 : : int fidx;
1020 : : Size chunk_size;
1021 : : Size availspace;
1022 : :
1023 [ + - - + ]: 532951828 : Assert(AllocSetIsValid(set));
1024 : :
1025 : : /* due to the keeper block set->blocks should never be NULL */
1026 [ - + ]: 532951828 : Assert(set->blocks != NULL);
1027 : :
1028 : : /*
1029 : : * If requested size exceeds maximum for chunks we hand the request off to
1030 : : * AllocSetAllocLarge().
1031 : : */
1032 [ + + ]: 532951828 : if (size > set->allocChunkLimit)
1033 : 9542145 : return AllocSetAllocLarge(context, size, flags);
1034 : :
1035 : : /*
1036 : : * Request is small enough to be treated as a chunk. Look in the
1037 : : * corresponding free list to see if there is a free chunk we could reuse.
1038 : : * If one is found, remove it from the free list, make it again a member
1039 : : * of the alloc set and return its data address.
1040 : : *
1041 : : * Note that we don't attempt to ensure there's space for the sentinel
1042 : : * byte here. We expect a large proportion of allocations to be for sizes
1043 : : * which are already a power of 2. If we were to always make space for a
1044 : : * sentinel byte in MEMORY_CONTEXT_CHECKING builds, then we'd end up
1045 : : * doubling the memory requirements for such allocations.
1046 : : */
6756 tgl@sss.pgh.pa.us 1047 : 523409683 : fidx = AllocSetFreeIndex(size);
1048 : 523409683 : chunk = set->freelist[fidx];
9097 1049 [ + + ]: 523409683 : if (chunk != NULL)
1050 : : {
1156 drowley@postgresql.o 1051 : 194165246 : AllocFreeListLink *link = GetFreeListLink(chunk);
1052 : :
1053 : : /* Allow access to the chunk header. */
1054 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1055 : :
1056 [ - + ]: 194165246 : Assert(fidx == MemoryChunkGetValue(chunk));
1057 : :
1058 : : /* pop this chunk off the freelist */
1059 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1060 : 194165246 : set->freelist[fidx] = link->next;
1061 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1062 : :
1063 : : #ifdef MEMORY_CONTEXT_CHECKING
9097 tgl@sss.pgh.pa.us 1064 : 194165246 : chunk->requested_size = size;
1065 : : /* set mark to catch clobber of "unused" space */
1156 drowley@postgresql.o 1066 [ + + ]: 194165246 : if (size < GetChunkSizeFromFreeListIdx(fidx))
1067 : 111245942 : set_sentinel(MemoryChunkGetPointer(chunk), size);
1068 : : #endif
1069 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1070 : : /* fill the allocated space with junk */
1071 : : randomize_mem((char *) MemoryChunkGetPointer(chunk), size);
1072 : : #endif
1073 : :
1074 : : /* Ensure any padding bytes are marked NOACCESS. */
1075 : : VALGRIND_MAKE_MEM_NOACCESS((char *) MemoryChunkGetPointer(chunk) + size,
1076 : : GetChunkSizeFromFreeListIdx(fidx) - size);
1077 : :
1078 : : /* Disallow access to the chunk header. */
1079 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1080 : :
1081 : 194165246 : return MemoryChunkGetPointer(chunk);
1082 : : }
1083 : :
1084 : : /*
1085 : : * Choose the actual chunk size to allocate.
1086 : : */
1087 : 329244437 : chunk_size = GetChunkSizeFromFreeListIdx(fidx);
9656 tgl@sss.pgh.pa.us 1088 [ - + ]: 329244437 : Assert(chunk_size >= size);
1089 : :
608 drowley@postgresql.o 1090 : 329244437 : block = set->blocks;
1091 : 329244437 : availspace = block->endptr - block->freeptr;
1092 : :
1093 : : /*
1094 : : * If there is enough room in the active allocation block, we will put the
1095 : : * chunk into that block. Else must start a new one.
1096 : : */
1097 [ + + ]: 329244437 : if (unlikely(availspace < (chunk_size + ALLOC_CHUNKHDRSZ)))
1098 : 5806226 : return AllocSetAllocFromNewBlock(context, size, flags, fidx);
1099 : :
1100 : : /* There's enough space on the current block, so allocate from that */
1101 : 323438211 : return AllocSetAllocChunkFromBlock(context, block, size, chunk_size, fidx);
1102 : : }
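/*
 * Caller-side sketch (hypothetical caller code): backend code normally does
 * not call AllocSetAlloc() directly; palloc() and MemoryContextAlloc() in
 * mcxt.c dispatch here through the context's method table. For a context
 * created with ALLOCSET_DEFAULT_SIZES (8K chunk limit):
 *
 *     char *small = palloc(64);            reuses or carves a 64-byte chunk
 *                                          via the freelist/block path above
 *     char *large = palloc(32 * 1024);     exceeds allocChunkLimit, so it is
 *                                          handed off to AllocSetAllocLarge()
 */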
1103 : :
1104 : : /*
1105 : : * AllocSetFree
1106 : : * Frees allocated memory; memory is removed from the set.
1107 : : */
1108 : : void
1156 1109 : 238192248 : AllocSetFree(void *pointer)
1110 : : {
1111 : : AllocSet set;
1112 : 238192248 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1113 : :
1114 : : /* Allow access to the chunk header. */
1115 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1116 : :
1117 [ + + ]: 238192248 : if (MemoryChunkIsExternal(chunk))
1118 : : {
1119 : : /* Release single-chunk block. */
1120 : 8869951 : AllocBlock block = ExternalChunkGetBlock(chunk);
1121 : :
1122 : : /*
1123 : : * Try to verify that we have a sane block pointer: the block header
1124 : : * should reference an aset and the freeptr should match the endptr.
1125 : : */
1114 tgl@sss.pgh.pa.us 1126 [ + - + - : 8869951 : if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
+ - - + ]
1114 tgl@sss.pgh.pa.us 1127 [ # # ]:UBC 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
1128 : :
1156 drowley@postgresql.o 1129 :CBC 8869951 : set = block->aset;
1130 : :
1131 : : #ifdef MEMORY_CONTEXT_CHECKING
1132 : : {
1133 : : /* Test for someone scribbling on unused space in chunk */
1027 1134 [ - + ]: 8869951 : Assert(chunk->requested_size < (block->endptr - (char *) pointer));
1147 1135 [ - + ]: 8869951 : if (!sentinel_ok(pointer, chunk->requested_size))
1147 drowley@postgresql.o 1136 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1137 : : set->header.name, chunk);
1138 : : }
1139 : : #endif
1140 : :
1141 : : /* OK, remove block from aset's list and free it */
3156 tgl@sss.pgh.pa.us 1142 [ + - ]:CBC 8869951 : if (block->prev)
1143 : 8869951 : block->prev->next = block->next;
1144 : : else
3156 tgl@sss.pgh.pa.us 1145 :UBC 0 : set->blocks = block->next;
3156 tgl@sss.pgh.pa.us 1146 [ + + ]:CBC 8869951 : if (block->next)
1147 : 7279144 : block->next->prev = block->prev;
1148 : :
1156 drowley@postgresql.o 1149 : 8869951 : set->header.mem_allocated -= block->endptr - ((char *) block);
1150 : :
1151 : : #ifdef CLOBBER_FREED_MEMORY
4507 noah@leadboat.com 1152 : 8869951 : wipe_mem(block, block->freeptr - ((char *) block));
1153 : : #endif
1154 : :
1155 : : /* As in AllocSetReset, free block-header vchunks explicitly */
1156 : : VALGRIND_MEMPOOL_FREE(set, block);
1157 : :
9562 tgl@sss.pgh.pa.us 1158 : 8869951 : free(block);
1159 : : }
1160 : : else
1161 : : {
1156 drowley@postgresql.o 1162 : 229322297 : AllocBlock block = MemoryChunkGetBlock(chunk);
1163 : : int fidx;
1164 : : AllocFreeListLink *link;
1165 : :
1166 : : /*
1167 : : * In this path, for speed reasons we just Assert that the referenced
1168 : : * block is good. We can also Assert that the value field is sane.
1169 : : * Future field experience may show that these Asserts had better
1170 : : * become regular runtime test-and-elog checks.
1171 : : */
1096 peter@eisentraut.org 1172 [ + - + - : 229322297 : Assert(AllocBlockIsValid(block));
- + ]
1156 drowley@postgresql.o 1173 : 229322297 : set = block->aset;
1174 : :
1114 tgl@sss.pgh.pa.us 1175 : 229322297 : fidx = MemoryChunkGetValue(chunk);
1176 [ + - - + ]: 229322297 : Assert(FreeListIdxIsValid(fidx));
1177 : 229322297 : link = GetFreeListLink(chunk);
1178 : :
1179 : : #ifdef MEMORY_CONTEXT_CHECKING
1180 : : /* Test for someone scribbling on unused space in chunk */
1156 drowley@postgresql.o 1181 [ + + ]: 229322297 : if (chunk->requested_size < GetChunkSizeFromFreeListIdx(fidx))
1182 [ - + ]: 141918020 : if (!sentinel_ok(pointer, chunk->requested_size))
1156 drowley@postgresql.o 1183 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1184 : : set->header.name, chunk);
1185 : : #endif
1186 : :
1187 : : #ifdef CLOBBER_FREED_MEMORY
1156 drowley@postgresql.o 1188 :CBC 229322297 : wipe_mem(pointer, GetChunkSizeFromFreeListIdx(fidx));
1189 : : #endif
1190 : : /* push this chunk onto the top of the free list */
1191 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1192 : 229322297 : link->next = set->freelist[fidx];
1193 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1194 : 229322297 : set->freelist[fidx] = chunk;
1195 : :
1196 : : #ifdef MEMORY_CONTEXT_CHECKING
1197 : :
1198 : : /*
1199 : : * Reset requested_size to InvalidAllocSize in chunks that are on free
1200 : : * list.
1201 : : */
1202 : 229322297 : chunk->requested_size = InvalidAllocSize;
1203 : : #endif
1204 : : }
10703 scrappy@hub.org 1205 : 238192248 : }
1206 : :
1207 : : /*
1208 : : * AllocSetRealloc
1209 : : * Returns new pointer to allocated memory of given size or NULL if
1210 : : * request could not be completed; this memory is added to the set.
1211 : : * Memory associated with given pointer is copied into the new memory,
1212 : : * and the old memory is freed.
1213 : : *
1214 : : * Without MEMORY_CONTEXT_CHECKING, we don't know the old request size. This
1215 : : * makes our Valgrind client requests less precise, hazarding false negatives.
1216 : : * (In principle, we could use VALGRIND_GET_VBITS() to rediscover the old
1217 : : * request size.)
1218 : : */
1219 : : void *
609 drowley@postgresql.o 1220 : 2661071 : AllocSetRealloc(void *pointer, Size size, int flags)
1221 : : {
1222 : : AllocBlock block;
1223 : : AllocSet set;
1156 1224 : 2661071 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1225 : : Size oldchksize;
1226 : : int fidx;
1227 : :
1228 : : /* Allow access to the chunk header. */
1229 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1230 : :
1231 [ + + ]: 2661071 : if (MemoryChunkIsExternal(chunk))
1232 : : {
1233 : : /*
1234 : : * The chunk must have been allocated as a single-chunk block. Use
1235 : : * realloc() to make the containing block bigger, or smaller, with
1236 : : * minimum space wastage.
1237 : : */
1238 : : AllocBlock newblock;
1239 : : Size chksize;
1240 : : Size blksize;
1241 : : Size oldblksize;
1242 : :
1243 : 42200 : block = ExternalChunkGetBlock(chunk);
1244 : :
1245 : : /*
1246 : : * Try to verify that we have a sane block pointer: the block header
1247 : : * should reference an aset and the freeptr should match the endptr.
1248 : : */
1114 tgl@sss.pgh.pa.us 1249 [ + - + - : 42200 : if (!AllocBlockIsValid(block) || block->freeptr != block->endptr)
+ - - + ]
1114 tgl@sss.pgh.pa.us 1250 [ # # ]:UBC 0 : elog(ERROR, "could not find block containing chunk %p", chunk);
1251 : :
1156 drowley@postgresql.o 1252 :CBC 42200 : set = block->aset;
1253 : :
1254 : : /* only check size in paths where the limits could be hit */
609 1255 : 42200 : MemoryContextCheckSize((MemoryContext) set, size, flags);
1256 : :
980 tgl@sss.pgh.pa.us 1257 : 42200 : oldchksize = block->endptr - (char *) pointer;
1258 : :
1259 : : #ifdef MEMORY_CONTEXT_CHECKING
1260 : : /* Test for someone scribbling on unused space in chunk */
1261 [ - + ]: 42200 : Assert(chunk->requested_size < oldchksize);
1147 drowley@postgresql.o 1262 [ - + ]: 42200 : if (!sentinel_ok(pointer, chunk->requested_size))
1147 drowley@postgresql.o 1263 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1264 : : set->header.name, chunk);
1265 : : #endif
1266 : :
1267 : : #ifdef MEMORY_CONTEXT_CHECKING
1268 : : /* ensure there's always space for the sentinel byte */
1147 drowley@postgresql.o 1269 :CBC 42200 : chksize = MAXALIGN(size + 1);
1270 : : #else
1271 : : chksize = MAXALIGN(size);
1272 : : #endif
1273 : :
1274 : : /* Do the realloc */
9097 tgl@sss.pgh.pa.us 1275 : 42200 : blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
1993 1276 : 42200 : oldblksize = block->endptr - ((char *) block);
1277 : :
87 tgl@sss.pgh.pa.us 1278 :GNC 42200 : newblock = (AllocBlock) realloc(block, blksize);
1279 [ - + ]: 42200 : if (newblock == NULL)
1280 : : {
1281 : : /* Disallow access to the chunk header. */
1282 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
609 drowley@postgresql.o 1283 :UBC 0 : return MemoryContextAllocationFailure(&set->header, size, flags);
1284 : : }
1285 : :
1286 : : /*
1287 : : * Move the block-header vchunk explicitly. (mcxt.c will take care of
1288 : : * moving the vchunk for the user data.)
1289 : : */
1290 : : VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
87 tgl@sss.pgh.pa.us 1291 :GNC 42200 : block = newblock;
1292 : :
1293 : : /* updated separately, so as not to underflow when (oldblksize > blksize) */
1156 drowley@postgresql.o 1294 :CBC 42200 : set->header.mem_allocated -= oldblksize;
1295 : 42200 : set->header.mem_allocated += blksize;
1296 : :
9562 tgl@sss.pgh.pa.us 1297 : 42200 : block->freeptr = block->endptr = ((char *) block) + blksize;
1298 : :
1299 : : /* Update pointers since block has likely been moved */
1156 drowley@postgresql.o 1300 : 42200 : chunk = (MemoryChunk *) (((char *) block) + ALLOC_BLOCKHDRSZ);
1301 : 42200 : pointer = MemoryChunkGetPointer(chunk);
3156 tgl@sss.pgh.pa.us 1302 [ + - ]: 42200 : if (block->prev)
1303 : 42200 : block->prev->next = block;
1304 : : else
3156 tgl@sss.pgh.pa.us 1305 :UBC 0 : set->blocks = block;
3156 tgl@sss.pgh.pa.us 1306 [ + + ]:CBC 42200 : if (block->next)
1307 : 29641 : block->next->prev = block;
1308 : :
1309 : : #ifdef MEMORY_CONTEXT_CHECKING
1310 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1311 : :
1312 : : /*
1313 : : * We can only randomize the extra space if we know the prior request.
1314 : : * When using Valgrind, randomize_mem() also marks memory UNDEFINED.
1315 : : */
1316 : : if (size > chunk->requested_size)
1317 : : randomize_mem((char *) pointer + chunk->requested_size,
1318 : : size - chunk->requested_size);
1319 : : #else
1320 : :
1321 : : /*
1322 : : * If this is an increase, realloc() will have marked any
1323 : : * newly-allocated part (from oldchksize to chksize) UNDEFINED, but we
1324 : : * also need to adjust trailing bytes from the old allocation (from
1325 : : * chunk->requested_size to oldchksize) as they are marked NOACCESS.
1326 : : * Make sure not to mark too many bytes in case chunk->requested_size
1327 : : * < size < oldchksize.
1328 : : */
1329 : : #ifdef USE_VALGRIND
1330 : : if (Min(size, oldchksize) > chunk->requested_size)
1331 : : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + chunk->requested_size,
1332 : : Min(size, oldchksize) - chunk->requested_size);
1333 : : #endif
1334 : : #endif
1335 : :
9097 1336 : 42200 : chunk->requested_size = size;
1337 : : /* set mark to catch clobber of "unused" space */
1147 drowley@postgresql.o 1338 [ - + ]: 42200 : Assert(size < chksize);
1339 : 42200 : set_sentinel(pointer, size);
1340 : : #else /* !MEMORY_CONTEXT_CHECKING */
1341 : :
1342 : : /*
1343 : : * We may need to adjust marking of bytes from the old allocation as
1344 : : * some of them may be marked NOACCESS. We don't know how much of the
1345 : : * old chunk size was the requested size; it could have been as small
1346 : : * as one byte. We have to be conservative and just mark the entire
1347 : : * old portion DEFINED. Make sure not to mark memory beyond the new
1348 : : * allocation in case it's smaller than the old one.
1349 : : */
1350 : : VALGRIND_MAKE_MEM_DEFINED(pointer, Min(size, oldchksize));
1351 : : #endif
1352 : :
1353 : : /* Ensure any padding bytes are marked NOACCESS. */
1354 : : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
1355 : :
1356 : : /* Disallow access to the chunk header. */
1357 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1358 : :
3156 tgl@sss.pgh.pa.us 1359 : 42200 : return pointer;
1360 : : }
1361 : :
1156 drowley@postgresql.o 1362 : 2618871 : block = MemoryChunkGetBlock(chunk);
1363 : :
1364 : : /*
1365 : : * In this path, for speed reasons we just Assert that the referenced
1366 : : * block is good. We can also Assert that the value field is sane. Future
1367 : : * field experience may show that these Asserts had better become regular
1368 : : * runtime test-and-elog checks.
1369 : : */
1096 peter@eisentraut.org 1370 [ + - + - : 2618871 : Assert(AllocBlockIsValid(block));
- + ]
1156 drowley@postgresql.o 1371 : 2618871 : set = block->aset;
1372 : :
1114 tgl@sss.pgh.pa.us 1373 : 2618871 : fidx = MemoryChunkGetValue(chunk);
1374 [ + - - + ]: 2618871 : Assert(FreeListIdxIsValid(fidx));
980 1375 : 2618871 : oldchksize = GetChunkSizeFromFreeListIdx(fidx);
1376 : :
1377 : : #ifdef MEMORY_CONTEXT_CHECKING
1378 : : /* Test for someone scribbling on unused space in chunk */
1379 [ + + ]: 2618871 : if (chunk->requested_size < oldchksize)
1156 drowley@postgresql.o 1380 [ - + ]: 1257061 : if (!sentinel_ok(pointer, chunk->requested_size))
1156 drowley@postgresql.o 1381 [ # # ]:UBC 0 : elog(WARNING, "detected write past chunk end in %s %p",
1382 : : set->header.name, chunk);
1383 : : #endif
1384 : :
1385 : : /*
1386 : :     * Chunk sizes are rounded up to a power of 2 in AllocSetAlloc(), so the
1387 : :     * allocated area may already be >= the new size.  (In particular, we will
1388 : : * fall out here if the requested size is a decrease.)
1389 : : */
980 tgl@sss.pgh.pa.us 1390 [ + + ]:CBC 2618871 : if (oldchksize >= size)
1391 : : {
1392 : : #ifdef MEMORY_CONTEXT_CHECKING
2217 1393 : 1288399 : Size oldrequest = chunk->requested_size;
1394 : :
1395 : : #ifdef RANDOMIZE_ALLOCATED_MEMORY
1396 : : /* We can only fill the extra space if we know the prior request */
1397 : : if (size > oldrequest)
1398 : : randomize_mem((char *) pointer + oldrequest,
1399 : : size - oldrequest);
1400 : : #endif
1401 : :
1402 : 1288399 : chunk->requested_size = size;
1403 : :
1404 : : /*
1405 : : * If this is an increase, mark any newly-available part UNDEFINED.
1406 : : * Otherwise, mark the obsolete part NOACCESS.
1407 : : */
1408 : : if (size > oldrequest)
1409 : : VALGRIND_MAKE_MEM_UNDEFINED((char *) pointer + oldrequest,
1410 : : size - oldrequest);
1411 : : else
1412 : : VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size,
1413 : : oldchksize - size);
1414 : :
1415 : : /* set mark to catch clobber of "unused" space */
980 1416 [ + + ]: 1288399 : if (size < oldchksize)
2217 1417 : 1268961 : set_sentinel(pointer, size);
1418 : : #else /* !MEMORY_CONTEXT_CHECKING */
1419 : :
1420 : : /*
1421 : : * We don't have the information to determine whether we're growing
1422 : : * the old request or shrinking it, so we conservatively mark the
1423 : : * entire new allocation DEFINED.
1424 : : */
1425 : : VALGRIND_MAKE_MEM_NOACCESS(pointer, oldchksize);
1426 : : VALGRIND_MAKE_MEM_DEFINED(pointer, size);
1427 : : #endif
1428 : :
1429 : : /* Disallow access to the chunk header. */
1430 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1431 : :
1432 : 1288399 : return pointer;
1433 : : }
1434 : : else
1435 : : {
1436 : : /*
1437 : : * Enlarge-a-small-chunk case. We just do this by brute force, ie,
1438 : : * allocate a new chunk and copy the data. Since we know the existing
1439 : : * data isn't huge, this won't involve any great memcpy expense, so
1440 : : * it's not worth being smarter. (At one time we tried to avoid
1441 : : * memcpy when it was possible to enlarge the chunk in-place, but that
1442 : : * turns out to misbehave unpleasantly for repeated cycles of
1443 : : * palloc/repalloc/pfree: the eventually freed chunks go into the
1444 : : * wrong freelist for the next initial palloc request, and so we leak
1445 : : * memory indefinitely. See pgsql-hackers archives for 2007-08-11.)
1446 : : */
1447 : : AllocPointer newPointer;
1448 : : Size oldsize;
1449 : :
1450 : : /* allocate new chunk (this also checks size is valid) */
609 drowley@postgresql.o 1451 : 1330472 : newPointer = AllocSetAlloc((MemoryContext) set, size, flags);
1452 : :
1453 : : /* leave immediately if request was not completed */
3925 rhaas@postgresql.org 1454 [ - + ]: 1330472 : if (newPointer == NULL)
1455 : : {
1456 : : /* Disallow access to the chunk header. */
1457 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
609 drowley@postgresql.o 1458 :UBC 0 : return MemoryContextAllocationFailure((MemoryContext) set, size, flags);
1459 : : }
1460 : :
1461 : : /*
1462 : : * AllocSetAlloc() may have returned a region that is still NOACCESS.
1463 : : * Change it to UNDEFINED for the moment; memcpy() will then transfer
1464 : : * definedness from the old allocation to the new. If we know the old
1465 : : * allocation, copy just that much. Otherwise, make the entire old
1466 : : * chunk defined to avoid errors as we copy the currently-NOACCESS
1467 : : * trailing bytes.
1468 : : */
1469 : : VALGRIND_MAKE_MEM_UNDEFINED(newPointer, size);
1470 : : #ifdef MEMORY_CONTEXT_CHECKING
4507 noah@leadboat.com 1471 :CBC 1330472 : oldsize = chunk->requested_size;
1472 : : #else
1473 : : oldsize = oldchksize;
1474 : : VALGRIND_MAKE_MEM_DEFINED(pointer, oldsize);
1475 : : #endif
1476 : :
1477 : : /* transfer existing data (certain to fit) */
9562 tgl@sss.pgh.pa.us 1478 : 1330472 : memcpy(newPointer, pointer, oldsize);
1479 : :
1480 : : /* free old chunk */
1156 drowley@postgresql.o 1481 : 1330472 : AllocSetFree(pointer);
1482 : :
9562 tgl@sss.pgh.pa.us 1483 : 1330472 : return newPointer;
1484 : : }
1485 : : }
1486 : :
1487 : : /*
1488 : : * AllocSetGetChunkContext
1489 : : * Return the MemoryContext that 'pointer' belongs to.
1490 : : */
1491 : : MemoryContext
1156 drowley@postgresql.o 1492 : 4224716 : AllocSetGetChunkContext(void *pointer)
1493 : : {
1494 : 4224716 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1495 : : AllocBlock block;
1496 : : AllocSet set;
1497 : :
1498 : : /* Allow access to the chunk header. */
1499 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1500 : :
1501 [ + + ]: 4224716 : if (MemoryChunkIsExternal(chunk))
1502 : 42200 : block = ExternalChunkGetBlock(chunk);
1503 : : else
1504 : 4182516 : block = (AllocBlock) MemoryChunkGetBlock(chunk);
1505 : :
1506 : : /* Disallow access to the chunk header. */
1507 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1508 : :
1096 peter@eisentraut.org 1509 [ + - + - : 4224716 : Assert(AllocBlockIsValid(block));
- + ]
1156 drowley@postgresql.o 1510 : 4224716 : set = block->aset;
1511 : :
1512 : 4224716 : return &set->header;
1513 : : }
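
/*
 * Illustrative sketch (not part of aset.c): the point of
 * AllocSetGetChunkContext() is that a bare pointer is enough to recover its
 * owning context, which is what lets pfree() and repalloc() work without
 * being told the context.  GetMemoryChunkContext() is assumed here to be the
 * mcxt.c wrapper that dispatches to this routine for aset chunks.
 */
static void
chunk_context_sketch(void)
{
    MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
                                              "chunk-context sketch",
                                              ALLOCSET_DEFAULT_SIZES);
    char       *p = MemoryContextAlloc(cxt, 32);

    /* Recover the owning context from the pointer alone. */
    Assert(GetMemoryChunkContext(p) == cxt);

    pfree(p);                   /* routed to AllocSetFree via the same lookup */
    MemoryContextDelete(cxt);
}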
1514 : :
1515 : : /*
1516 : : * AllocSetGetChunkSpace
1517 : : * Given a currently-allocated chunk, determine the total space
1518 : : * it occupies (including all memory-allocation overhead).
1519 : : */
1520 : : Size
1521 : 4402505 : AllocSetGetChunkSpace(void *pointer)
1522 : : {
1523 : 4402505 : MemoryChunk *chunk = PointerGetMemoryChunk(pointer);
1524 : : int fidx;
1525 : :
1526 : : /* Allow access to the chunk header. */
1527 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1528 : :
1529 [ + + ]: 4402505 : if (MemoryChunkIsExternal(chunk))
1530 : : {
1531 : 488257 : AllocBlock block = ExternalChunkGetBlock(chunk);
1532 : :
1533 : : /* Disallow access to the chunk header. */
1534 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1535 : :
1096 peter@eisentraut.org 1536 [ + - + - : 488257 : Assert(AllocBlockIsValid(block));
- + ]
1537 : :
1156 drowley@postgresql.o 1538 : 488257 : return block->endptr - (char *) chunk;
1539 : : }
1540 : :
1114 tgl@sss.pgh.pa.us 1541 : 3914248 : fidx = MemoryChunkGetValue(chunk);
1542 [ + - - + ]: 3914248 : Assert(FreeListIdxIsValid(fidx));
1543 : :
1544 : : /* Disallow access to the chunk header. */
1545 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1546 : :
1547 : 3914248 : return GetChunkSizeFromFreeListIdx(fidx) + ALLOC_CHUNKHDRSZ;
1548 : : }
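
/*
 * Illustrative sketch (not part of aset.c): GetMemoryChunkSpace() is the
 * public wrapper that ends up here for aset chunks.  Callers that do their
 * own memory accounting (tuplesort, for example) use it to charge the true
 * cost of an allocation, including the chunk header and power-of-2 rounding.
 */
static Size
charged_alloc_sketch(Size request)
{
    void       *p = palloc(request);
    Size        space = GetMemoryChunkSpace(p); /* >= request: header + rounding */

    pfree(p);
    return space;
}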
1549 : :
1550 : : /*
1551 : : * AllocSetIsEmpty
1552 : : * Is an allocset empty of any allocated space?
1553 : : */
1554 : : bool
7712 1555 : 4433 : AllocSetIsEmpty(MemoryContext context)
1556 : : {
1096 peter@eisentraut.org 1557 [ + - - + ]: 4433 : Assert(AllocSetIsValid(context));
1558 : :
1559 : : /*
1560 : : * For now, we say "empty" only if the context is new or just reset. We
1561 : : * could examine the freelists to determine if all space has been freed,
1562 : : * but it's not really worth the trouble for present uses of this
1563 : : * functionality.
1564 : : */
5274 heikki.linnakangas@i 1565 [ + + ]: 4433 : if (context->isReset)
7712 tgl@sss.pgh.pa.us 1566 : 4421 : return true;
1567 : 12 : return false;
1568 : : }
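
/*
 * Illustrative sketch (not part of aset.c): because "empty" is tracked via
 * isReset, a context whose allocations have all been pfree'd still reports
 * non-empty until it is reset.  MemoryContextIsEmpty() is assumed to be the
 * public wrapper dispatching here.
 */
static void
is_empty_sketch(void)
{
    MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
                                              "is-empty sketch",
                                              ALLOCSET_DEFAULT_SIZES);

    Assert(MemoryContextIsEmpty(cxt));  /* new context */

    pfree(MemoryContextAlloc(cxt, 16));
    Assert(!MemoryContextIsEmpty(cxt)); /* freed, but not yet reset */

    MemoryContextReset(cxt);
    Assert(MemoryContextIsEmpty(cxt));

    MemoryContextDelete(cxt);
}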
1569 : :
1570 : : /*
1571 : : * AllocSetStats
1572 : : * Compute stats about memory consumption of an allocset.
1573 : : *
1574 : : * printfunc: if not NULL, pass a human-readable stats string to this.
1575 : : * passthru: pass this pointer through to printfunc.
1576 : : * totals: if not NULL, add stats about this context into *totals.
1577 : : * print_to_stderr: print stats to stderr if true, elog otherwise.
1578 : : */
1579 : : void
2772 1580 : 2385 : AllocSetStats(MemoryContext context,
1581 : : MemoryStatsPrintFunc printfunc, void *passthru,
1582 : : MemoryContextCounters *totals, bool print_to_stderr)
1583 : : {
9253 1584 : 2385 : AllocSet set = (AllocSet) context;
4296 1585 : 2385 : Size nblocks = 0;
3717 1586 : 2385 : Size freechunks = 0;
1587 : : Size totalspace;
4296 1588 : 2385 : Size freespace = 0;
1589 : : AllocBlock block;
1590 : : int fidx;
1591 : :
1096 peter@eisentraut.org 1592 [ + - - + ]: 2385 : Assert(AllocSetIsValid(set));
1593 : :
1594 : : /* Include context header in totalspace */
2772 tgl@sss.pgh.pa.us 1595 : 2385 : totalspace = MAXALIGN(sizeof(AllocSetContext));
1596 : :
9291 1597 [ + + ]: 7356 : for (block = set->blocks; block != NULL; block = block->next)
1598 : : {
1599 : 4971 : nblocks++;
1600 : 4971 : totalspace += block->endptr - ((char *) block);
1601 : 4971 : freespace += block->endptr - block->freeptr;
1602 : : }
1603 [ + + ]: 28620 : for (fidx = 0; fidx < ALLOCSET_NUM_FREELISTS; fidx++)
1604 : : {
1114 1605 : 26235 : Size chksz = GetChunkSizeFromFreeListIdx(fidx);
1156 drowley@postgresql.o 1606 : 26235 : MemoryChunk *chunk = set->freelist[fidx];
1607 : :
1608 [ + + ]: 35378 : while (chunk != NULL)
1609 : : {
1610 : 9143 : AllocFreeListLink *link = GetFreeListLink(chunk);
1611 : :
1612 : : /* Allow access to the chunk header. */
1613 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1114 tgl@sss.pgh.pa.us 1614 [ - + ]: 9143 : Assert(MemoryChunkGetValue(chunk) == fidx);
1615 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1616 : :
3717 1617 : 9143 : freechunks++;
1156 drowley@postgresql.o 1618 : 9143 : freespace += chksz + ALLOC_CHUNKHDRSZ;
1619 : :
1620 : : VALGRIND_MAKE_MEM_DEFINED(link, sizeof(AllocFreeListLink));
1621 : 9143 : chunk = link->next;
1622 : : VALGRIND_MAKE_MEM_NOACCESS(link, sizeof(AllocFreeListLink));
1623 : : }
1624 : : }
1625 : :
2772 tgl@sss.pgh.pa.us 1626 [ + + ]: 2385 : if (printfunc)
1627 : : {
1628 : : char stats_string[200];
1629 : :
1630 : 813 : snprintf(stats_string, sizeof(stats_string),
1631 : : "%zu total in %zu blocks; %zu free (%zu chunks); %zu used",
1632 : : totalspace, nblocks, freespace, freechunks,
1633 : : totalspace - freespace);
1666 fujii@postgresql.org 1634 : 813 : printfunc(context, passthru, stats_string, print_to_stderr);
1635 : : }
1636 : :
3717 tgl@sss.pgh.pa.us 1637 [ + - ]: 2385 : if (totals)
1638 : : {
1639 : 2385 : totals->nblocks += nblocks;
1640 : 2385 : totals->freechunks += freechunks;
1641 : 2385 : totals->totalspace += totalspace;
1642 : 2385 : totals->freespace += freespace;
1643 : : }
9291 1644 : 2385 : }
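
/*
 * Illustrative sketch (not part of aset.c): AllocSetStats() is normally
 * reached via MemoryContextStats(), often called by hand from a debugger,
 * which walks the context tree and emits one line per context in the format
 * built above, along the lines of
 *
 *     8192 total in 1 blocks; 7936 free (0 chunks); 256 used
 *
 * (the numbers shown are hypothetical).
 */
static void
dump_stats_sketch(void)
{
    MemoryContextStats(TopMemoryContext);   /* prints to stderr */
}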
1645 : :
1646 : :
1647 : : #ifdef MEMORY_CONTEXT_CHECKING
1648 : :
1649 : : /*
1650 : : * AllocSetCheck
1651 : : * Walk through chunks and check consistency of memory.
1652 : : *
1653 : : * NOTE: report errors as WARNING, *not* ERROR or FATAL. Otherwise you'll
1654 : : * find yourself in an infinite loop when trouble occurs, because this
1655 : : * routine will be entered again when elog cleanup tries to release memory!
1656 : : */
1657 : : void
9240 bruce@momjian.us 1658 : 91982177 : AllocSetCheck(MemoryContext context)
1659 : : {
8986 1660 : 91982177 : AllocSet set = (AllocSet) context;
2876 tgl@sss.pgh.pa.us 1661 : 91982177 : const char *name = set->header.name;
1662 : : AllocBlock prevblock;
1663 : : AllocBlock block;
2216 tomas.vondra@postgre 1664 : 91982177 : Size total_allocated = 0;
1665 : :
3156 tgl@sss.pgh.pa.us 1666 : 91982177 : for (prevblock = NULL, block = set->blocks;
1667 [ + + ]: 259751459 : block != NULL;
1668 : 167769282 : prevblock = block, block = block->next)
1669 : : {
8986 bruce@momjian.us 1670 : 167769282 : char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
1671 : 167769282 : long blk_used = block->freeptr - bpoz;
1672 : 167769282 : long blk_data = 0;
1673 : 167769282 : long nchunks = 0;
1156 drowley@postgresql.o 1674 : 167769282 : bool has_external_chunk = false;
1675 : :
834 1676 [ + + ]: 167769282 : if (IsKeeperBlock(set, block))
2219 tomas.vondra@postgre 1677 : 91982177 : total_allocated += block->endptr - ((char *) set);
1678 : : else
1679 : 75787105 : total_allocated += block->endptr - ((char *) block);
1680 : :
1681 : : /*
1682 : :         * Empty block - only the keeper block is allowed to be empty
1683 : : */
9240 bruce@momjian.us 1684 [ + + ]: 167769282 : if (!blk_used)
1685 : : {
834 drowley@postgresql.o 1686 [ - + ]: 3186649 : if (!IsKeeperBlock(set, block))
8131 tgl@sss.pgh.pa.us 1687 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: empty block %p",
1688 : : name, block);
1689 : : }
1690 : :
1691 : : /*
1692 : : * Check block header fields
1693 : : */
3156 tgl@sss.pgh.pa.us 1694 [ + - ]:CBC 167769282 : if (block->aset != set ||
1695 [ + - ]: 167769282 : block->prev != prevblock ||
1696 [ + - ]: 167769282 : block->freeptr < bpoz ||
1697 [ - + ]: 167769282 : block->freeptr > block->endptr)
3156 tgl@sss.pgh.pa.us 1698 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: corrupt header in block %p",
1699 : : name, block);
1700 : :
1701 : : /*
1702 : : * Chunk walker
1703 : : */
9097 tgl@sss.pgh.pa.us 1704 [ + + ]:CBC 2879155115 : while (bpoz < block->freeptr)
1705 : : {
1156 drowley@postgresql.o 1706 : 2711385833 : MemoryChunk *chunk = (MemoryChunk *) bpoz;
1707 : : Size chsize,
1708 : : dsize;
1709 : :
1710 : : /* Allow access to the chunk header. */
1711 : : VALGRIND_MAKE_MEM_DEFINED(chunk, ALLOC_CHUNKHDRSZ);
1712 : :
1713 [ + + ]: 2711385833 : if (MemoryChunkIsExternal(chunk))
1714 : : {
1715 : 5640829 : chsize = block->endptr - (char *) MemoryChunkGetPointer(chunk); /* aligned chunk size */
1716 : 5640829 : has_external_chunk = true;
1717 : :
1718 : : /* make sure this chunk consumes the entire block */
1719 [ - + ]: 5640829 : if (chsize + ALLOC_CHUNKHDRSZ != blk_used)
1156 drowley@postgresql.o 1720 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad single-chunk %p in block %p",
1721 : : name, chunk, block);
1722 : : }
1723 : : else
1724 : : {
1114 tgl@sss.pgh.pa.us 1725 :CBC 2705745004 : int fidx = MemoryChunkGetValue(chunk);
1726 : :
1727 [ + - - + ]: 2705745004 : if (!FreeListIdxIsValid(fidx))
1114 tgl@sss.pgh.pa.us 1728 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad chunk size for chunk %p in block %p",
1729 : : name, chunk, block);
1730 : :
1114 tgl@sss.pgh.pa.us 1731 :CBC 2705745004 : chsize = GetChunkSizeFromFreeListIdx(fidx); /* aligned chunk size */
1732 : :
1733 : : /*
1734 : : * Check the stored block offset correctly references this
1735 : : * block.
1736 : : */
1156 drowley@postgresql.o 1737 [ - + ]: 2705745004 : if (block != MemoryChunkGetBlock(chunk))
1156 drowley@postgresql.o 1738 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad block offset for chunk %p in block %p",
1739 : : name, chunk, block);
1740 : : }
3051 tgl@sss.pgh.pa.us 1741 :CBC 2711385833 : dsize = chunk->requested_size; /* real data */
1742 : :
1743 : :             /* an allocated chunk's requested size must be <= its allocated size (chsize) */
1156 drowley@postgresql.o 1744 [ + + - + ]: 2711385833 : if (dsize != InvalidAllocSize && dsize > chsize)
8131 tgl@sss.pgh.pa.us 1745 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: req size > alloc size for chunk %p in block %p",
1746 : : name, chunk, block);
1747 : :
1748 : : /* chsize must not be smaller than the first freelist's size */
9240 bruce@momjian.us 1749 [ - + ]:CBC 2711385833 : if (chsize < (1 << ALLOC_MINBITS))
4296 tgl@sss.pgh.pa.us 1750 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: bad size %zu for chunk %p in block %p",
1751 : : name, chsize, chunk, block);
1752 : :
1753 : : /*
1754 : : * Check for overwrite of padding space in an allocated chunk.
1755 : : */
1156 drowley@postgresql.o 1756 [ + + + + ]:CBC 2711385833 : if (dsize != InvalidAllocSize && dsize < chsize &&
4507 noah@leadboat.com 1757 [ - + ]: 1786935662 : !sentinel_ok(chunk, ALLOC_CHUNKHDRSZ + dsize))
8131 tgl@sss.pgh.pa.us 1758 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: detected write past chunk end in block %p, chunk %p",
1759 : : name, block, chunk);
1760 : :
1761 : : /* if chunk is allocated, disallow access to the chunk header */
1762 : : if (dsize != InvalidAllocSize)
1763 : : VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
1764 : :
9240 bruce@momjian.us 1765 :CBC 2711385833 : blk_data += chsize;
1766 : 2711385833 : nchunks++;
1767 : :
9097 tgl@sss.pgh.pa.us 1768 : 2711385833 : bpoz += ALLOC_CHUNKHDRSZ + chsize;
1769 : : }
1770 : :
9240 bruce@momjian.us 1771 [ - + ]: 167769282 : if ((blk_data + (nchunks * ALLOC_CHUNKHDRSZ)) != blk_used)
8131 tgl@sss.pgh.pa.us 1772 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: found inconsistent memory block %p",
1773 : : name, block);
1774 : :
1156 drowley@postgresql.o 1775 [ + + - + ]:CBC 167769282 : if (has_external_chunk && nchunks > 1)
1156 drowley@postgresql.o 1776 [ # # ]:UBC 0 : elog(WARNING, "problem in alloc set %s: external chunk on non-dedicated block %p",
1777 : : name, block);
1778 : : }
1779 : :
2049 jdavis@postgresql.or 1780 [ - + ]:CBC 91982177 : Assert(total_allocated == context->mem_allocated);
9240 bruce@momjian.us 1781 : 91982177 : }
1782 : :
1783 : : #endif /* MEMORY_CONTEXT_CHECKING */
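
/*
 * Illustrative sketch (not part of aset.c): AllocSetCheck() is reached
 * through MemoryContextCheck() in builds with MEMORY_CONTEXT_CHECKING
 * defined (normally via --enable-cassert).  A developer chasing memory
 * corruption might invoke it explicitly:
 */
#ifdef MEMORY_CONTEXT_CHECKING
static void
check_contexts_sketch(void)
{
    /* Walks every block and chunk under the context, WARNING on corruption. */
    MemoryContextCheck(TopMemoryContext);
}
#endif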