/*-------------------------------------------------------------------------
 *
 * buf_internals.h
 *	  Internal definitions for buffer manager and the buffer replacement
 *	  strategy.
 *
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/buf_internals.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef BUFMGR_INTERNALS_H
#define BUFMGR_INTERNALS_H

#include "pgstat.h"
#include "port/atomics.h"
#include "storage/aio_types.h"
#include "storage/buf.h"
#include "storage/bufmgr.h"
#include "storage/condition_variable.h"
#include "storage/lwlock.h"
#include "storage/procnumber.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "utils/relcache.h"
#include "utils/resowner.h"

/*
 * Buffer state is a single 32-bit variable that combines the following data:
 *
 * - 18 bits refcount
 * - 4 bits usage count
 * - 10 bits of flags
 *
 * Combining these values allows some operations to be performed without
 * locking the buffer header, by modifying them together in a CAS loop.
 *
 * The buffer state components are defined below.
 */
#define BUF_REFCOUNT_BITS 18
#define BUF_USAGECOUNT_BITS 4
#define BUF_FLAG_BITS 10

StaticAssertDecl(BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS + BUF_FLAG_BITS == 32,
				 "parts of buffer state space need to equal 32");

#define BUF_REFCOUNT_ONE 1
#define BUF_REFCOUNT_MASK ((1U << BUF_REFCOUNT_BITS) - 1)
#define BUF_USAGECOUNT_MASK (((1U << BUF_USAGECOUNT_BITS) - 1) << (BUF_REFCOUNT_BITS))
#define BUF_USAGECOUNT_ONE (1U << BUF_REFCOUNT_BITS)
#define BUF_USAGECOUNT_SHIFT BUF_REFCOUNT_BITS
#define BUF_FLAG_MASK (((1U << BUF_FLAG_BITS) - 1) << (BUF_REFCOUNT_BITS + BUF_USAGECOUNT_BITS))

/* Get refcount and usagecount from buffer state */
#define BUF_STATE_GET_REFCOUNT(state) ((state) & BUF_REFCOUNT_MASK)
#define BUF_STATE_GET_USAGECOUNT(state) (((state) & BUF_USAGECOUNT_MASK) >> BUF_USAGECOUNT_SHIFT)
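
/*
 * Illustrative sketch (not part of the original header): how a state word
 * read with pg_atomic_read_u32() decomposes using the accessors above.  The
 * function name is hypothetical.
 */
static inline void
BufStateUnpackSketch(uint32 buf_state,
					 uint32 *refcount, uint32 *usagecount, uint32 *flags)
{
	*refcount = BUF_STATE_GET_REFCOUNT(buf_state);	/* low 18 bits */
	*usagecount = BUF_STATE_GET_USAGECOUNT(buf_state);	/* next 4 bits */
	*flags = buf_state & BUF_FLAG_MASK; /* high 10 bits */
}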

/*
 * Flags for buffer descriptors
 *
 * Note: BM_TAG_VALID essentially means that there is a buffer hashtable
 * entry associated with the buffer's tag.
 */
#define BM_LOCKED				(1U << 22)	/* buffer header is locked */
#define BM_DIRTY				(1U << 23)	/* data needs writing */
#define BM_VALID				(1U << 24)	/* data is valid */
#define BM_TAG_VALID			(1U << 25)	/* tag is assigned */
#define BM_IO_IN_PROGRESS		(1U << 26)	/* read or write in progress */
#define BM_IO_ERROR				(1U << 27)	/* previous I/O failed */
#define BM_JUST_DIRTIED			(1U << 28)	/* dirtied since write started */
#define BM_PIN_COUNT_WAITER		(1U << 29)	/* have waiter for sole pin */
#define BM_CHECKPOINT_NEEDED	(1U << 30)	/* must write for checkpoint */
#define BM_PERMANENT			(1U << 31)	/* permanent buffer (not unlogged,
											 * or init fork) */

/*
 * The maximum allowed value of usage_count represents a tradeoff between
 * accuracy and speed of the clock-sweep buffer management algorithm.  A
 * large value (comparable to NBuffers) would approximate LRU semantics.
 * But it can take as many as BM_MAX_USAGE_COUNT+1 complete cycles of the
 * clock-sweep hand to find a free buffer, so in practice we don't want the
 * value to be very large.
 */
#define BM_MAX_USAGE_COUNT	5

StaticAssertDecl(BM_MAX_USAGE_COUNT < (1 << BUF_USAGECOUNT_BITS),
				 "BM_MAX_USAGE_COUNT doesn't fit in BUF_USAGECOUNT_BITS bits");
StaticAssertDecl(MAX_BACKENDS_BITS <= BUF_REFCOUNT_BITS,
				 "MAX_BACKENDS_BITS needs to be <= BUF_REFCOUNT_BITS");
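
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * clock-sweep decision described above, ignoring locking and the actual
 * decrement of the usage count.  A buffer becomes a victim candidate only
 * once it is unpinned and its usage count has decayed to zero.  The function
 * name is hypothetical.
 */
static inline bool
ClockSweepWouldEvictSketch(uint32 buf_state)
{
	return BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
		BUF_STATE_GET_USAGECOUNT(buf_state) == 0;
}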

/*
 * Buffer tag identifies which disk block the buffer contains.
 *
 * Note: the BufferTag data must be sufficient to determine where to write the
 * block, without reference to pg_class or pg_tablespace entries.  It's
 * possible that the backend flushing the buffer doesn't even believe the
 * relation is visible yet (its xact may have started before the xact that
 * created the rel).  The storage manager must be able to cope anyway.
 *
 * Note: if there are any pad bytes in the struct, InitBufferTag will have
 * to be fixed to zero them, since this struct is used as a hash key.
 */
typedef struct buftag
{
	Oid			spcOid;			/* tablespace oid */
	Oid			dbOid;			/* database oid */
	RelFileNumber relNumber;	/* relation file number */
	ForkNumber	forkNum;		/* fork number */
	BlockNumber blockNum;		/* blknum relative to begin of reln */
} BufferTag;

static inline RelFileNumber
BufTagGetRelNumber(const BufferTag *tag)
{
	return tag->relNumber;
}

static inline ForkNumber
BufTagGetForkNum(const BufferTag *tag)
{
	return tag->forkNum;
}

static inline void
BufTagSetRelForkDetails(BufferTag *tag, RelFileNumber relnumber,
						ForkNumber forknum)
{
	tag->relNumber = relnumber;
	tag->forkNum = forknum;
}

static inline RelFileLocator
BufTagGetRelFileLocator(const BufferTag *tag)
{
	RelFileLocator rlocator;

	rlocator.spcOid = tag->spcOid;
	rlocator.dbOid = tag->dbOid;
	rlocator.relNumber = BufTagGetRelNumber(tag);

	return rlocator;
}

static inline void
ClearBufferTag(BufferTag *tag)
{
	tag->spcOid = InvalidOid;
	tag->dbOid = InvalidOid;
	BufTagSetRelForkDetails(tag, InvalidRelFileNumber, InvalidForkNumber);
	tag->blockNum = InvalidBlockNumber;
}

static inline void
InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator,
			  ForkNumber forkNum, BlockNumber blockNum)
{
	tag->spcOid = rlocator->spcOid;
	tag->dbOid = rlocator->dbOid;
	BufTagSetRelForkDetails(tag, rlocator->relNumber, forkNum);
	tag->blockNum = blockNum;
}

static inline bool
BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
{
	return (tag1->spcOid == tag2->spcOid) &&
		(tag1->dbOid == tag2->dbOid) &&
		(tag1->relNumber == tag2->relNumber) &&
		(tag1->blockNum == tag2->blockNum) &&
		(tag1->forkNum == tag2->forkNum);
}

static inline bool
BufTagMatchesRelFileLocator(const BufferTag *tag,
							const RelFileLocator *rlocator)
{
	return (tag->spcOid == rlocator->spcOid) &&
		(tag->dbOid == rlocator->dbOid) &&
		(BufTagGetRelNumber(tag) == rlocator->relNumber);
}

/*
 * The shared buffer mapping table is partitioned to reduce contention.
 * To determine which partition lock a given tag requires, compute the tag's
 * hash code with BufTableHashCode(), then apply BufMappingPartitionLock().
 * NB: NUM_BUFFER_PARTITIONS must be a power of 2!
 */
static inline uint32
BufTableHashPartition(uint32 hashcode)
{
	return hashcode % NUM_BUFFER_PARTITIONS;
}

static inline LWLock *
BufMappingPartitionLock(uint32 hashcode)
{
	return &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET +
							BufTableHashPartition(hashcode)].lock;
}

static inline LWLock *
BufMappingPartitionLockByIndex(uint32 index)
{
	return &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + index].lock;
}

/*
 * BufferDesc -- shared descriptor/state data for a single shared buffer.
 *
 * The state of the buffer is controlled by the, drumroll, state variable.
 * It may only be modified using atomic operations.  The state variable
 * combines various flags with the buffer's refcount and usage count.  See
 * the comment above BUF_REFCOUNT_BITS for details about the division.  This
 * layout allows us to do some operations in a single atomic operation,
 * without actually acquiring and releasing the spinlock; for instance,
 * increasing or decreasing the refcount.
 *
 * One of the aforementioned flags is BM_LOCKED, used to implement the buffer
 * header lock.  See the following paragraphs, as well as the documentation
 * for individual fields, for more details.
 *
 * The identity of the buffer (BufferDesc.tag) can only be changed by the
 * backend holding the buffer header lock.
 *
 * While the lock is held by another backend, no additional buffer pins can
 * be established (we would like to relax this eventually), and no flags can
 * be set or cleared.  These operations either need to acquire the buffer
 * header spinlock, or need to use a CAS loop, waiting for the lock to be
 * released if it is held.  However, existing buffer pins may be released
 * while the buffer header spinlock is held, using an atomic subtraction.
 *
 * The LWLock can take care of itself.  The buffer header lock is *not* used
 * to control access to the data in the buffer!
 *
 * If we have the buffer pinned, its tag can't change underneath us, so we can
 * examine the tag without locking the buffer header.  Also, in places we do
 * one-time reads of the flags without bothering to lock the buffer header;
 * this is generally for situations where we don't expect the flag bit being
 * tested to be changing.
 *
 * We can't physically remove items from a disk page if another backend has
 * the buffer pinned.  Hence, a backend may need to wait for all other pins
 * to go away.  This is signaled by storing its own pgprocno into
 * wait_backend_pgprocno and setting flag bit BM_PIN_COUNT_WAITER.  At
 * present, there can be only one such waiter per buffer.
 *
 * We use this same struct for local buffer headers, but the locks are not
 * used and not all of the flag bits are useful either.  To avoid unnecessary
 * overhead, manipulations of the state field should be done without actual
 * atomic operations (i.e. only pg_atomic_read_u32() and
 * pg_atomic_unlocked_write_u32()).
 *
 * Be careful to avoid increasing the size of the struct when adding or
 * reordering members.  Keeping it below 64 bytes (the most common CPU
 * cache line size) is fairly important for performance.
 *
 * Per-buffer I/O condition variables are currently kept outside this struct
 * in a separate array.  They could be moved in here and still fit within
 * that limit on common systems, but for now that is not done.
 */
typedef struct BufferDesc
{
	/*
	 * ID of page contained in buffer.  The buffer header spinlock needs to
	 * be held to modify this field.
	 */
	BufferTag	tag;

	/*
	 * Buffer's index number (from 0).  The field never changes after
	 * initialization, so does not need locking.
	 */
	int			buf_id;

	/*
	 * State of the buffer, containing flags, refcount and usagecount.  See
	 * BUF_* and BM_* defines at the top of this file.
	 */
	pg_atomic_uint32 state;

	/*
	 * Backend of pin-count waiter.  The buffer header spinlock needs to be
	 * held to modify this field.
	 */
	int			wait_backend_pgprocno;

	PgAioWaitRef io_wref;		/* set iff AIO is in progress */
	LWLock		content_lock;	/* to lock access to buffer contents */
} BufferDesc;
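
/*
 * Illustrative sketch (assumption, not part of the original header): with a
 * pin held, a one-time read of the flags can be done without taking the
 * buffer header lock, as described in the comment above.  The function name
 * is hypothetical.
 */
static inline bool
PinnedBufferIsValidSketch(BufferDesc *bdesc)
{
	uint32		buf_state = pg_atomic_read_u32(&bdesc->state);

	return (buf_state & BM_VALID) != 0;
}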

/*
 * Concurrent access to buffer headers has proven to be more efficient if
 * they're cache line aligned.  So we force the start of the BufferDescriptors
 * array to be on a cache line boundary and force the elements to be cache
 * line sized.
 *
 * XXX: As this primarily matters in highly concurrent workloads, which these
 * days probably all run on 64-bit systems, and the space wastage would be a
 * bit more noticeable on 32-bit systems, we don't force the stride to be
 * cache line sized on those.  If somebody does actual performance testing,
 * we can reevaluate.
 *
 * Note that local buffer descriptors aren't forced to be aligned - as there's
 * no concurrent access to those, it's unlikely to be beneficial.
 *
 * We use a 64-byte cache line size here, because that's the most common
 * size.  Making it bigger would be a waste of memory.  Even if running on a
 * platform with either 32- or 128-byte line sizes, it's good to align to
 * boundaries and avoid false sharing.
 */
#define BUFFERDESC_PAD_TO_SIZE	(SIZEOF_VOID_P == 8 ? 64 : 1)

typedef union BufferDescPadded
{
	BufferDesc	bufferdesc;
	char		pad[BUFFERDESC_PAD_TO_SIZE];
} BufferDescPadded;
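
/*
 * Illustrative sketch (assumption, not in the original header): on 64-bit
 * builds the padded union is expected to occupy exactly one 64-byte cache
 * line; a compile-time check along these lines would catch accidental growth
 * of BufferDesc past that limit.
 */
#if SIZEOF_VOID_P == 8
StaticAssertDecl(sizeof(BufferDescPadded) == BUFFERDESC_PAD_TO_SIZE,
				 "BufferDescPadded is expected to be cache line sized");
#endif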

/*
 * The PendingWriteback & WritebackContext structures are used to keep
 * information about pending flush requests to be issued to the OS.
 */
typedef struct PendingWriteback
{
	/* could store different types of pending flushes here */
	BufferTag	tag;
} PendingWriteback;

/* struct forward declared in bufmgr.h */
typedef struct WritebackContext
{
	/* pointer to the max number of writeback requests to coalesce */
	int		   *max_pending;

	/* current number of pending writeback requests */
	int			nr_pending;

	/* pending requests */
	PendingWriteback pending_writebacks[WRITEBACK_MAX_PENDING_FLUSHES];
} WritebackContext;

/* in buf_init.c */
extern PGDLLIMPORT BufferDescPadded *BufferDescriptors;
extern PGDLLIMPORT ConditionVariableMinimallyPadded *BufferIOCVArray;
extern PGDLLIMPORT WritebackContext BackendWritebackContext;

/* in localbuf.c */
extern PGDLLIMPORT BufferDesc *LocalBufferDescriptors;

static inline BufferDesc *
GetBufferDescriptor(uint32 id)
{
	return &(BufferDescriptors[id]).bufferdesc;
}

static inline BufferDesc *
GetLocalBufferDescriptor(uint32 id)
{
	return &LocalBufferDescriptors[id];
}

static inline Buffer
BufferDescriptorGetBuffer(const BufferDesc *bdesc)
{
	return (Buffer) (bdesc->buf_id + 1);
}

static inline ConditionVariable *
BufferDescriptorGetIOCV(const BufferDesc *bdesc)
{
	return &(BufferIOCVArray[bdesc->buf_id]).cv;
}

static inline LWLock *
BufferDescriptorGetContentLock(const BufferDesc *bdesc)
{
	return (LWLock *) (&bdesc->content_lock);
}
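
/*
 * Illustrative sketch (assumption, not part of the original header): Buffer
 * numbers handed to callers are 1-based, while descriptor indexes are
 * 0-based, so converting a shared Buffer back to its descriptor undoes the
 * "+ 1" in BufferDescriptorGetBuffer().  The function name is hypothetical.
 */
static inline BufferDesc *
GetBufferDescriptorFromBufferSketch(Buffer buffer)
{
	return GetBufferDescriptor(buffer - 1);
}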

/*
 * Functions for acquiring/releasing a shared buffer header's spinlock.  Do
 * not apply these to local buffers!
 */
extern uint32 LockBufHdr(BufferDesc *desc);

/*
 * Unlock the buffer header.
 *
 * This can only be used if the caller did not modify BufferDesc.state.  To
 * set/unset flag bits or change the refcount use UnlockBufHdrExt().
 */
static inline void
UnlockBufHdr(BufferDesc *desc)
{
	Assert(pg_atomic_read_u32(&desc->state) & BM_LOCKED);

	pg_atomic_fetch_sub_u32(&desc->state, BM_LOCKED);
}
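
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * usual pattern for reading several header fields consistently -- take the
 * header spinlock, copy what is needed, then release it unchanged with
 * UnlockBufHdr().  The function name is hypothetical.
 */
static inline uint32
BufHdrReadStateSketch(BufferDesc *desc, BufferTag *tag_out)
{
	uint32		buf_state;

	buf_state = LockBufHdr(desc);
	*tag_out = desc->tag;		/* the tag can only change under this lock */
	UnlockBufHdr(desc);

	return buf_state;
}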

/*
 * Unlock the buffer header, while atomically adding the flags in set_bits,
 * unsetting the ones in unset_bits and changing the refcount by
 * refcount_change.
 *
 * Note that this approach would not work for usagecount, since we need to cap
 * the usagecount at BM_MAX_USAGE_COUNT.
 */
static inline uint32
UnlockBufHdrExt(BufferDesc *desc, uint32 old_buf_state,
				uint32 set_bits, uint32 unset_bits,
				int refcount_change)
{
	for (;;)
	{
		uint32		buf_state = old_buf_state;

		Assert(buf_state & BM_LOCKED);

		buf_state |= set_bits;
		buf_state &= ~unset_bits;
		buf_state &= ~BM_LOCKED;

		if (refcount_change != 0)
			buf_state += BUF_REFCOUNT_ONE * refcount_change;

		if (pg_atomic_compare_exchange_u32(&desc->state, &old_buf_state,
										   buf_state))
		{
			return old_buf_state;
		}
	}
}
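
/*
 * Illustrative sketch (assumption, not part of the original header): marking
 * a buffer dirty while releasing the header lock in a single atomic update,
 * which is the kind of combined operation UnlockBufHdrExt() enables.  The
 * function name is hypothetical.
 */
static inline void
MarkDirtyAndUnlockSketch(BufferDesc *desc)
{
	uint32		buf_state;

	buf_state = LockBufHdr(desc);
	(void) UnlockBufHdrExt(desc, buf_state,
						   BM_DIRTY | BM_JUST_DIRTIED, /* set_bits */
						   0,	/* unset_bits */
						   0);	/* refcount_change */
}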

extern uint32 WaitBufHdrUnlocked(BufferDesc *buf);

/* in bufmgr.c */

/*
 * Structure to sort buffers per file on checkpoints.
 *
 * This structure is allocated per buffer in shared memory, so it should be
 * kept as small as possible.
 */
typedef struct CkptSortItem
{
	Oid			tsId;
	RelFileNumber relNumber;
	ForkNumber	forkNum;
	BlockNumber blockNum;
	int			buf_id;
} CkptSortItem;
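
/*
 * Illustrative sketch (assumption, not part of the original header): a
 * comparator ordering CkptSortItems by tablespace, relation, fork and block,
 * which is the per-file ordering the comment above alludes to.  The function
 * name is hypothetical; the actual checkpoint sort lives in bufmgr.c.
 */
static inline int
CkptSortItemCompareSketch(const CkptSortItem *a, const CkptSortItem *b)
{
	if (a->tsId != b->tsId)
		return (a->tsId < b->tsId) ? -1 : 1;
	if (a->relNumber != b->relNumber)
		return (a->relNumber < b->relNumber) ? -1 : 1;
	if (a->forkNum != b->forkNum)
		return (a->forkNum < b->forkNum) ? -1 : 1;
	if (a->blockNum != b->blockNum)
		return (a->blockNum < b->blockNum) ? -1 : 1;
	return 0;
}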

extern PGDLLIMPORT CkptSortItem *CkptBufferIds;

/* ResourceOwner callbacks to hold buffer I/Os and pins */
extern PGDLLIMPORT const ResourceOwnerDesc buffer_io_resowner_desc;
extern PGDLLIMPORT const ResourceOwnerDesc buffer_pin_resowner_desc;

/* Convenience wrappers over ResourceOwnerRemember/Forget */
static inline void
ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
{
	ResourceOwnerRemember(owner, Int32GetDatum(buffer), &buffer_pin_resowner_desc);
}
static inline void
ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
{
	ResourceOwnerForget(owner, Int32GetDatum(buffer), &buffer_pin_resowner_desc);
}
static inline void
ResourceOwnerRememberBufferIO(ResourceOwner owner, Buffer buffer)
{
	ResourceOwnerRemember(owner, Int32GetDatum(buffer), &buffer_io_resowner_desc);
}
static inline void
ResourceOwnerForgetBufferIO(ResourceOwner owner, Buffer buffer)
{
	ResourceOwnerForget(owner, Int32GetDatum(buffer), &buffer_io_resowner_desc);
}

/*
 * Internal buffer management routines
 */
/* bufmgr.c */
extern void WritebackContextInit(WritebackContext *context, int *max_pending);
extern void IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context);
extern void ScheduleBufferTagForWriteback(WritebackContext *wb_context,
										  IOContext io_context, BufferTag *tag);
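
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * typical writeback batching flow -- initialize a context bound to a pending
 * limit, queue tags as buffers are written out, and issue the coalesced
 * flush requests.  The function name and the use of IOCONTEXT_NORMAL are
 * illustrative choices only.
 */
static inline void
WritebackUsageSketch(BufferTag *tag)
{
	WritebackContext wb_context;
	int			max_pending = WRITEBACK_MAX_PENDING_FLUSHES;

	WritebackContextInit(&wb_context, &max_pending);
	ScheduleBufferTagForWriteback(&wb_context, IOCONTEXT_NORMAL, tag);
	IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
}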

extern void TrackNewBufferPin(Buffer buf);

/* solely to make it easier to write tests */
extern bool StartBufferIO(BufferDesc *buf, bool forInput, bool nowait);
extern void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
							  bool forget_owner, bool release_aio);


/* freelist.c */
extern IOContext IOContextForStrategy(BufferAccessStrategy strategy);
extern BufferDesc *StrategyGetBuffer(BufferAccessStrategy strategy,
									 uint32 *buf_state, bool *from_ring);
extern bool StrategyRejectBuffer(BufferAccessStrategy strategy,
								 BufferDesc *buf, bool from_ring);

extern int	StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc);
extern void StrategyNotifyBgWriter(int bgwprocno);

extern Size StrategyShmemSize(void);
extern void StrategyInitialize(bool init);

/* buf_table.c */
extern Size BufTableShmemSize(int size);
extern void InitBufTable(int size);
extern uint32 BufTableHashCode(BufferTag *tagPtr);
extern int	BufTableLookup(BufferTag *tagPtr, uint32 hashcode);
extern int	BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id);
extern void BufTableDelete(BufferTag *tagPtr, uint32 hashcode);
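
/*
 * Illustrative sketch (assumption, not part of the original header): the
 * buffer mapping lookup protocol documented near BufMappingPartitionLock()
 * above -- build a tag, hash it, take the partition lock, and probe the
 * mapping table.  The function name is hypothetical.
 */
static inline int
BufMappingLookupSketch(const RelFileLocator *rlocator, ForkNumber forkNum,
					   BlockNumber blockNum)
{
	BufferTag	tag;
	uint32		hashcode;
	LWLock	   *partitionLock;
	int			buf_id;

	InitBufferTag(&tag, rlocator, forkNum, blockNum);
	hashcode = BufTableHashCode(&tag);
	partitionLock = BufMappingPartitionLock(hashcode);

	LWLockAcquire(partitionLock, LW_SHARED);
	buf_id = BufTableLookup(&tag, hashcode);	/* -1 if not found */
	LWLockRelease(partitionLock);

	return buf_id;
}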

/* localbuf.c */
extern bool PinLocalBuffer(BufferDesc *buf_hdr, bool adjust_usagecount);
extern void UnpinLocalBuffer(Buffer buffer);
extern void UnpinLocalBufferNoOwner(Buffer buffer);
extern PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr,
												ForkNumber forkNum,
												BlockNumber blockNum);
extern BufferDesc *LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum,
									BlockNumber blockNum, bool *foundPtr);
extern BlockNumber ExtendBufferedRelLocal(BufferManagerRelation bmr,
										  ForkNumber fork,
										  uint32 flags,
										  uint32 extend_by,
										  BlockNumber extend_upto,
										  Buffer *buffers,
										  uint32 *extended_by);
extern void MarkLocalBufferDirty(Buffer buffer);
extern void TerminateLocalBufferIO(BufferDesc *bufHdr, bool clear_dirty,
								   uint32 set_flag_bits, bool release_aio);
extern bool StartLocalBufferIO(BufferDesc *bufHdr, bool forInput, bool nowait);
extern void FlushLocalBuffer(BufferDesc *bufHdr, SMgrRelation reln);
extern void InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced);
extern void DropRelationLocalBuffers(RelFileLocator rlocator,
									 ForkNumber *forkNum, int nforks,
									 BlockNumber *firstDelBlock);
extern void DropRelationAllLocalBuffers(RelFileLocator rlocator);
extern void AtEOXact_LocalBuffers(bool isCommit);

#endif							/* BUFMGR_INTERNALS_H */