Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * bufmgr.c
4 : : * buffer manager interface routines
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/storage/buffer/bufmgr.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : : /*
16 : : * Principal entry points:
17 : : *
18 : : * ReadBuffer() -- find or create a buffer holding the requested page,
19 : : * and pin it so that no one can destroy it while this process
20 : : * is using it.
21 : : *
22 : : * StartReadBuffer() -- as above, with separate wait step
23 : : * StartReadBuffers() -- multiple block version
24 : : * WaitReadBuffers() -- second step of above
25 : : *
26 : : * ReleaseBuffer() -- unpin a buffer
27 : : *
28 : : * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
29 : : * The disk write is delayed until buffer replacement or checkpoint.
30 : : *
31 : : * See also these files:
32 : : * freelist.c -- chooses victim for buffer replacement
33 : : * buf_table.c -- manages the buffer lookup table
34 : : */
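/*
 * Illustrative usage sketch of the principal entry points above (hypothetical
 * caller; "rel" and "blkno" are assumed, WAL logging and error handling are
 * omitted):
 *
 *		Buffer		buf;
 *		Page		page;
 *
 *		buf = ReadBuffer(rel, blkno);
 *		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 *		page = BufferGetPage(buf);
 *		... modify page contents, normally inside a critical section ...
 *		MarkBufferDirty(buf);
 *		UnlockReleaseBuffer(buf);
 */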
35 : : #include "postgres.h"
36 : :
37 : : #include <sys/file.h>
38 : : #include <unistd.h>
39 : :
40 : : #include "access/tableam.h"
41 : : #include "access/xloginsert.h"
42 : : #include "access/xlogutils.h"
43 : : #ifdef USE_ASSERT_CHECKING
44 : : #include "catalog/pg_tablespace_d.h"
45 : : #endif
46 : : #include "catalog/storage.h"
47 : : #include "catalog/storage_xlog.h"
48 : : #include "executor/instrument.h"
49 : : #include "lib/binaryheap.h"
50 : : #include "miscadmin.h"
51 : : #include "pg_trace.h"
52 : : #include "pgstat.h"
53 : : #include "postmaster/bgwriter.h"
54 : : #include "storage/aio.h"
55 : : #include "storage/buf_internals.h"
56 : : #include "storage/bufmgr.h"
57 : : #include "storage/fd.h"
58 : : #include "storage/ipc.h"
59 : : #include "storage/lmgr.h"
60 : : #include "storage/proc.h"
61 : : #include "storage/read_stream.h"
62 : : #include "storage/smgr.h"
63 : : #include "storage/standby.h"
64 : : #include "utils/memdebug.h"
65 : : #include "utils/ps_status.h"
66 : : #include "utils/rel.h"
67 : : #include "utils/resowner.h"
68 : : #include "utils/timestamp.h"
69 : :
70 : :
71 : : /* Note: these two macros only work on shared buffers, not local ones! */
72 : : #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
73 : : #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
74 : :
75 : : /* Note: this macro only works on local buffers, not shared ones! */
76 : : #define LocalBufHdrGetBlock(bufHdr) \
77 : : LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
78 : :
79 : : /* Bits in SyncOneBuffer's return value */
80 : : #define BUF_WRITTEN 0x01
81 : : #define BUF_REUSABLE 0x02
82 : :
83 : : #define RELS_BSEARCH_THRESHOLD 20
84 : :
85 : : /*
86 : : * This is the size (in the number of blocks) above which we scan the
87 : : * entire buffer pool to remove the buffers for all the pages of the relation
88 : : * being dropped. For relations with size below this threshold, we find
89 : : * the buffers by doing lookups in the BufMapping table.
90 : : */
91 : : #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
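/*
 * Worked example (illustrative, assuming the default 8kB block size): with
 * shared_buffers = 128MB, NBuffers is 16384, so the threshold is
 * 16384 / 32 = 512 blocks, i.e. 4MB of relation data.
 */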
92 : :
93 : : typedef struct PrivateRefCountEntry
94 : : {
95 : : Buffer buffer;
96 : : int32 refcount;
97 : : } PrivateRefCountEntry;
98 : :
99 : : /* 64 bytes, about the size of a cache line on common systems */
100 : : #define REFCOUNT_ARRAY_ENTRIES 8
101 : :
102 : : /*
103 : : * Status of buffers to checkpoint for a particular tablespace, used
104 : : * internally in BufferSync.
105 : : */
106 : : typedef struct CkptTsStatus
107 : : {
108 : : /* oid of the tablespace */
109 : : Oid tsId;
110 : :
111 : : /*
112 : : * Checkpoint progress for this tablespace. To make progress comparable
113 : : * between tablespaces the progress is, for each tablespace, measured as a
114 : : * number between 0 and the total number of to-be-checkpointed pages. Each
115 : : * page checkpointed in this tablespace increments this space's progress
116 : : * by progress_slice.
117 : : */
118 : : float8 progress;
119 : : float8 progress_slice;
120 : :
121 : : /* number of to-be checkpointed pages in this tablespace */
122 : : int num_to_scan;
123 : : /* already processed pages in this tablespace */
124 : : int num_scanned;
125 : :
126 : : /* current offset in CkptBufferIds for this tablespace */
127 : : int index;
128 : : } CkptTsStatus;
129 : :
130 : : /*
131 : : * Type for array used to sort SMgrRelations
132 : : *
133 : : * FlushRelationsAllBuffers shares the same comparator function with
134 : : * DropRelationsAllBuffers. Pointer to this struct and RelFileLocator must be
135 : : * compatible.
136 : : */
137 : : typedef struct SMgrSortArray
138 : : {
139 : : RelFileLocator rlocator; /* This must be the first member */
140 : : SMgrRelation srel;
141 : : } SMgrSortArray;
142 : :
143 : : /* GUC variables */
144 : : bool zero_damaged_pages = false;
145 : : int bgwriter_lru_maxpages = 100;
146 : : double bgwriter_lru_multiplier = 2.0;
147 : : bool track_io_timing = false;
148 : :
149 : : /*
150 : : * How many buffers PrefetchBuffer callers should try to stay ahead of their
151 : : * ReadBuffer calls by. Zero means "never prefetch". This value is only used
152 : : * for buffers not belonging to tablespaces that have their
153 : : * effective_io_concurrency parameter set.
154 : : */
155 : : int effective_io_concurrency = DEFAULT_EFFECTIVE_IO_CONCURRENCY;
156 : :
157 : : /*
158 : : * Like effective_io_concurrency, but used by maintenance code paths that might
159 : : * benefit from a higher setting because they work on behalf of many sessions.
160 : : * Overridden by the tablespace setting of the same name.
161 : : */
162 : : int maintenance_io_concurrency = DEFAULT_MAINTENANCE_IO_CONCURRENCY;
163 : :
164 : : /*
165 : : * Limit on how many blocks should be handled in single I/O operations.
166 : : * StartReadBuffers() callers should respect it, as should other operations
167 : : * that call smgr APIs directly. It is computed as the minimum of underlying
168 : : * GUCs io_combine_limit_guc and io_max_combine_limit.
169 : : */
170 : : int io_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
171 : : int io_combine_limit_guc = DEFAULT_IO_COMBINE_LIMIT;
172 : : int io_max_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
173 : :
174 : : /*
175 : : * GUC variables about triggering kernel writeback for buffers written; OS
176 : : * dependent defaults are set via the GUC mechanism.
177 : : */
178 : : int checkpoint_flush_after = DEFAULT_CHECKPOINT_FLUSH_AFTER;
179 : : int bgwriter_flush_after = DEFAULT_BGWRITER_FLUSH_AFTER;
180 : : int backend_flush_after = DEFAULT_BACKEND_FLUSH_AFTER;
181 : :
182 : : /* local state for LockBufferForCleanup */
183 : : static BufferDesc *PinCountWaitBuf = NULL;
184 : :
185 : : /*
186 : : * Backend-Private refcount management:
187 : : *
188 : : * Each buffer also has a private refcount that keeps track of the number of
189 : : * times the buffer is pinned in the current process. This is so that the
190 : : * shared refcount needs to be modified only once if a buffer is pinned more
191 : : * than once by an individual backend. It's also used to check that no buffers
192 : : * are still pinned at the end of transactions and when exiting.
193 : : *
194 : : *
195 : : * To avoid - as we used to - requiring an array with NBuffers entries to keep
196 : : * track of local buffers, we use a small sequentially searched array
197 : : * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
198 : : * keep track of backend local pins.
199 : : *
200 : : * Until no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
201 : : * refcounts are kept track of in the array; after that, new array entries
202 : : * displace old ones into the hash table. That way a frequently used entry
203 : : * can't get "stuck" in the hashtable while infrequent ones clog the array.
204 : : *
205 : : * Note that in most scenarios the number of pinned buffers will not exceed
206 : : * REFCOUNT_ARRAY_ENTRIES.
207 : : *
208 : : *
209 : : * To enter a buffer into the refcount tracking mechanism first reserve a free
210 : : * entry using ReservePrivateRefCountEntry() and then later, if necessary,
211 : : * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
212 : : * memory allocations in NewPrivateRefCountEntry() which can be important
213 : : * because in some scenarios it's called with a spinlock held...
214 : : */
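/*
 * A minimal sketch of that reserve-then-fill protocol (hypothetical caller;
 * the real callers are PinBuffer() and friends below).  The reservation is
 * made before the buffer header spinlock is taken, so no allocation has to
 * happen while the spinlock is held:
 *
 *		ReservePrivateRefCountEntry();
 *		buf_state = LockBufHdr(buf);
 *		... inspect and update the header, decide to pin ...
 *		UnlockBufHdr(buf, buf_state);
 *		ref = NewPrivateRefCountEntry(BufferDescriptorGetBuffer(buf));
 *		ref->refcount++;
 */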
215 : : static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
216 : : static HTAB *PrivateRefCountHash = NULL;
217 : : static int32 PrivateRefCountOverflowed = 0;
218 : : static uint32 PrivateRefCountClock = 0;
219 : : static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
220 : :
221 : : static uint32 MaxProportionalPins;
222 : :
223 : : static void ReservePrivateRefCountEntry(void);
224 : : static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
225 : : static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
226 : : static inline int32 GetPrivateRefCount(Buffer buffer);
227 : : static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
228 : :
229 : : /* ResourceOwner callbacks to hold in-progress I/Os and buffer pins */
230 : : static void ResOwnerReleaseBufferIO(Datum res);
231 : : static char *ResOwnerPrintBufferIO(Datum res);
232 : : static void ResOwnerReleaseBufferPin(Datum res);
233 : : static char *ResOwnerPrintBufferPin(Datum res);
234 : :
235 : : const ResourceOwnerDesc buffer_io_resowner_desc =
236 : : {
237 : : .name = "buffer io",
238 : : .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
239 : : .release_priority = RELEASE_PRIO_BUFFER_IOS,
240 : : .ReleaseResource = ResOwnerReleaseBufferIO,
241 : : .DebugPrint = ResOwnerPrintBufferIO
242 : : };
243 : :
244 : : const ResourceOwnerDesc buffer_pin_resowner_desc =
245 : : {
246 : : .name = "buffer pin",
247 : : .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
248 : : .release_priority = RELEASE_PRIO_BUFFER_PINS,
249 : : .ReleaseResource = ResOwnerReleaseBufferPin,
250 : : .DebugPrint = ResOwnerPrintBufferPin
251 : : };
252 : :
253 : : /*
254 : : * Ensure that the PrivateRefCountArray has sufficient space to store one more
255 : : * entry. This has to be called before using NewPrivateRefCountEntry() to fill
256 : : * a new entry - but it's perfectly fine to not use a reserved entry.
257 : : */
258 : : static void
3883 andres@anarazel.de 259 :CBC 59412771 : ReservePrivateRefCountEntry(void)
260 : : {
261 : : /* Already reserved (or freed), nothing to do */
262 [ + + ]: 59412771 : if (ReservedRefCountEntry != NULL)
263 : 55654613 : return;
264 : :
265 : : /*
266 : : * First search for a free entry in the array, that'll be sufficient in the
267 : : * majority of cases.
268 : : */
269 : : {
270 : : int i;
271 : :
272 [ + + ]: 9841003 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
273 : : {
274 : : PrivateRefCountEntry *res;
275 : :
276 : 9671941 : res = &PrivateRefCountArray[i];
277 : :
278 [ + + ]: 9671941 : if (res->buffer == InvalidBuffer)
279 : : {
280 : 3589096 : ReservedRefCountEntry = res;
281 : 3589096 : return;
282 : : }
283 : : }
284 : : }
285 : :
286 : : /*
287 : : * No luck. All array entries are full. Move one array entry into the hash
288 : : * table.
289 : : */
290 : : {
291 : : /*
292 : : * Move entry from the current clock position in the array into the
293 : : * hashtable. Use that slot.
294 : : */
295 : : PrivateRefCountEntry *hashent;
296 : : bool found;
297 : :
298 : : /* select victim slot */
3759 bruce@momjian.us 299 : 169062 : ReservedRefCountEntry =
3883 andres@anarazel.de 300 : 169062 : &PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
301 : :
302 : : /* Better be used, otherwise we shouldn't get here. */
303 [ - + ]: 169062 : Assert(ReservedRefCountEntry->buffer != InvalidBuffer);
304 : :
305 : : /* enter victim array entry into hashtable */
306 : 169062 : hashent = hash_search(PrivateRefCountHash,
943 peter@eisentraut.org 307 : 169062 : &(ReservedRefCountEntry->buffer),
308 : : HASH_ENTER,
309 : : &found);
3883 andres@anarazel.de 310 [ - + ]: 169062 : Assert(!found);
311 : 169062 : hashent->refcount = ReservedRefCountEntry->refcount;
312 : :
313 : : /* clear the now free array slot */
314 : 169062 : ReservedRefCountEntry->buffer = InvalidBuffer;
315 : 169062 : ReservedRefCountEntry->refcount = 0;
316 : :
317 : 169062 : PrivateRefCountOverflowed++;
318 : : }
319 : : }
320 : :
321 : : /*
322 : : * Fill a previously reserved refcount entry.
323 : : */
324 : : static PrivateRefCountEntry *
325 : 53807490 : NewPrivateRefCountEntry(Buffer buffer)
326 : : {
327 : : PrivateRefCountEntry *res;
328 : :
329 : : /* only allowed to be called when a reservation has been made */
330 [ - + ]: 53807490 : Assert(ReservedRefCountEntry != NULL);
331 : :
332 : : /* use up the reserved entry */
333 : 53807490 : res = ReservedRefCountEntry;
334 : 53807490 : ReservedRefCountEntry = NULL;
335 : :
336 : : /* and fill it */
337 : 53807490 : res->buffer = buffer;
338 : 53807490 : res->refcount = 0;
339 : :
340 : 53807490 : return res;
341 : : }
342 : :
343 : : /*
344 : : * Return the PrivateRefCount entry for the passed buffer.
345 : : *
346 : : * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
347 : : * do_move is true, and the entry resides in the hashtable the entry is
348 : : * optimized for frequent access by moving it to the array.
349 : : */
350 : : static PrivateRefCountEntry *
351 : 537755269 : GetPrivateRefCountEntry(Buffer buffer, bool do_move)
352 : : {
353 : : PrivateRefCountEntry *res;
354 : : int i;
355 : :
4025 356 [ - + ]: 537755269 : Assert(BufferIsValid(buffer));
357 [ - + ]: 537755269 : Assert(!BufferIsLocal(buffer));
358 : :
359 : : /*
360 : : * First search for references in the array, that'll be sufficient in the
361 : : * majority of cases.
362 : : */
363 [ + + ]: 1473628665 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
364 : : {
365 : 1418614745 : res = &PrivateRefCountArray[i];
366 : :
367 [ + + ]: 1418614745 : if (res->buffer == buffer)
368 : 482741349 : return res;
369 : : }
370 : :
371 : : /*
372 : : * By here we know that the buffer, if already pinned, isn't residing in
373 : : * the array.
374 : : *
375 : : * Only look up the buffer in the hashtable if we've previously overflowed
376 : : * into it.
377 : : */
3883 378 [ + + ]: 55013920 : if (PrivateRefCountOverflowed == 0)
379 : 53410905 : return NULL;
380 : :
943 peter@eisentraut.org 381 : 1603015 : res = hash_search(PrivateRefCountHash, &buffer, HASH_FIND, NULL);
382 : :
3883 andres@anarazel.de 383 [ + + ]: 1603015 : if (res == NULL)
384 : 400618 : return NULL;
385 [ + + ]: 1202397 : else if (!do_move)
386 : : {
387 : : /* caller doesn't want us to move the hash entry into the array */
388 : 1182643 : return res;
389 : : }
390 : : else
391 : : {
392 : : /* move buffer from hashtable into the free array slot */
393 : : bool found;
394 : : PrivateRefCountEntry *free;
395 : :
396 : : /* Ensure there's a free array slot */
397 : 19754 : ReservePrivateRefCountEntry();
398 : :
399 : : /* Use up the reserved slot */
400 [ - + ]: 19754 : Assert(ReservedRefCountEntry != NULL);
401 : 19754 : free = ReservedRefCountEntry;
402 : 19754 : ReservedRefCountEntry = NULL;
403 [ - + ]: 19754 : Assert(free->buffer == InvalidBuffer);
404 : :
405 : : /* and fill it */
406 : 19754 : free->buffer = buffer;
407 : 19754 : free->refcount = res->refcount;
408 : :
409 : : /* delete from hashtable */
943 peter@eisentraut.org 410 : 19754 : hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
3883 andres@anarazel.de 411 [ - + ]: 19754 : Assert(found);
412 [ - + ]: 19754 : Assert(PrivateRefCountOverflowed > 0);
413 : 19754 : PrivateRefCountOverflowed--;
414 : :
415 : 19754 : return free;
416 : : }
417 : : }
418 : :
419 : : /*
420 : : * Returns how many times the passed buffer is pinned by this backend.
421 : : *
422 : : * Only works for shared memory buffers!
423 : : */
424 : : static inline int32
4025 425 : 405762317 : GetPrivateRefCount(Buffer buffer)
426 : : {
427 : : PrivateRefCountEntry *ref;
428 : :
429 [ - + ]: 405762317 : Assert(BufferIsValid(buffer));
430 [ - + ]: 405762317 : Assert(!BufferIsLocal(buffer));
431 : :
432 : : /*
433 : : * Not moving the entry - that's ok for the current users, but we might
434 : : * want to change this one day.
435 : : */
3883 436 : 405762317 : ref = GetPrivateRefCountEntry(buffer, false);
437 : :
4025 438 [ + + ]: 405762317 : if (ref == NULL)
439 : 4033 : return 0;
440 : 405758284 : return ref->refcount;
441 : : }
442 : :
443 : : /*
444 : : * Release resources used to track the reference count of a buffer which we no
445 : : * longer have pinned and don't want to pin again immediately.
446 : : */
447 : : static void
448 : 53807490 : ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
449 : : {
450 [ - + ]: 53807490 : Assert(ref->refcount == 0);
451 : :
452 [ + - + + ]: 53807490 : if (ref >= &PrivateRefCountArray[0] &&
453 : : ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
454 : : {
455 : 53658182 : ref->buffer = InvalidBuffer;
456 : :
457 : : /*
458 : : * Mark the just used entry as reserved - in many scenarios that
459 : : * allows us to avoid ever having to search the array/hash for free
460 : : * entries.
461 : : */
3883 462 : 53658182 : ReservedRefCountEntry = ref;
463 : : }
464 : : else
465 : : {
466 : : bool found;
3759 bruce@momjian.us 467 : 149308 : Buffer buffer = ref->buffer;
468 : :
943 peter@eisentraut.org 469 : 149308 : hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
4025 andres@anarazel.de 470 [ - + ]: 149308 : Assert(found);
471 [ - + ]: 149308 : Assert(PrivateRefCountOverflowed > 0);
472 : 149308 : PrivateRefCountOverflowed--;
473 : : }
474 : 53807490 : }
475 : :
476 : : /*
477 : : * BufferIsPinned
478 : : * True iff the buffer is pinned (also checks for valid buffer number).
479 : : *
480 : : * NOTE: what we check here is that *this* backend holds a pin on
481 : : * the buffer. We do not care whether some other backend does.
482 : : */
483 : : #define BufferIsPinned(bufnum) \
484 : : ( \
485 : : !BufferIsValid(bufnum) ? \
486 : : false \
487 : : : \
488 : : BufferIsLocal(bufnum) ? \
489 : : (LocalRefCount[-(bufnum) - 1] > 0) \
490 : : : \
491 : : (GetPrivateRefCount(bufnum) > 0) \
492 : : )
493 : :
494 : :
495 : : static Buffer ReadBuffer_common(Relation rel,
496 : : SMgrRelation smgr, char smgr_persistence,
497 : : ForkNumber forkNum, BlockNumber blockNum,
498 : : ReadBufferMode mode, BufferAccessStrategy strategy);
499 : : static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr,
500 : : ForkNumber fork,
501 : : BufferAccessStrategy strategy,
502 : : uint32 flags,
503 : : uint32 extend_by,
504 : : BlockNumber extend_upto,
505 : : Buffer *buffers,
506 : : uint32 *extended_by);
507 : : static BlockNumber ExtendBufferedRelShared(BufferManagerRelation bmr,
508 : : ForkNumber fork,
509 : : BufferAccessStrategy strategy,
510 : : uint32 flags,
511 : : uint32 extend_by,
512 : : BlockNumber extend_upto,
513 : : Buffer *buffers,
514 : : uint32 *extended_by);
515 : : static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
516 : : static void PinBuffer_Locked(BufferDesc *buf);
517 : : static void UnpinBuffer(BufferDesc *buf);
518 : : static void UnpinBufferNoOwner(BufferDesc *buf);
519 : : static void BufferSync(int flags);
520 : : static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
521 : : static int SyncOneBuffer(int buf_id, bool skip_recently_used,
522 : : WritebackContext *wb_context);
523 : : static void WaitIO(BufferDesc *buf);
524 : : static void AbortBufferIO(Buffer buffer);
525 : : static void shared_buffer_write_error_callback(void *arg);
526 : : static void local_buffer_write_error_callback(void *arg);
527 : : static inline BufferDesc *BufferAlloc(SMgrRelation smgr,
528 : : char relpersistence,
529 : : ForkNumber forkNum,
530 : : BlockNumber blockNum,
531 : : BufferAccessStrategy strategy,
532 : : bool *foundPtr, IOContext io_context);
533 : : static bool AsyncReadBuffers(ReadBuffersOperation *operation, int *nblocks_progress);
534 : : static void CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete);
535 : : static Buffer GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context);
536 : : static void FlushBuffer(BufferDesc *buf, SMgrRelation reln,
537 : : IOObject io_object, IOContext io_context);
538 : : static void FindAndDropRelationBuffers(RelFileLocator rlocator,
539 : : ForkNumber forkNum,
540 : : BlockNumber nForkBlock,
541 : : BlockNumber firstDelBlock);
542 : : static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
543 : : RelFileLocator dstlocator,
544 : : ForkNumber forkNum, bool permanent);
545 : : static void AtProcExit_Buffers(int code, Datum arg);
546 : : static void CheckForBufferLeaks(void);
547 : : #ifdef USE_ASSERT_CHECKING
548 : : static void AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
549 : : void *unused_context);
550 : : #endif
551 : : static int rlocator_comparator(const void *p1, const void *p2);
552 : : static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
553 : : static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
554 : : static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
555 : :
556 : :
557 : : /*
558 : : * Implementation of PrefetchBuffer() for shared buffers.
559 : : */
560 : : PrefetchBufferResult
1977 tmunro@postgresql.or 561 : 30752 : PrefetchSharedBuffer(SMgrRelation smgr_reln,
562 : : ForkNumber forkNum,
563 : : BlockNumber blockNum)
564 : : {
565 : 30752 : PrefetchBufferResult result = {InvalidBuffer, false};
566 : : BufferTag newTag; /* identity of requested block */
567 : : uint32 newHash; /* hash value for newTag */
568 : : LWLock *newPartitionLock; /* buffer partition lock for it */
569 : : int buf_id;
570 : :
571 [ - + ]: 30752 : Assert(BlockNumberIsValid(blockNum));
572 : :
573 : : /* create a tag so we can lookup the buffer */
1137 rhaas@postgresql.org 574 : 30752 : InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
575 : : forkNum, blockNum);
576 : :
577 : : /* determine its hash code and partition lock ID */
1977 tmunro@postgresql.or 578 : 30752 : newHash = BufTableHashCode(&newTag);
579 : 30752 : newPartitionLock = BufMappingPartitionLock(newHash);
580 : :
581 : : /* see if the block is in the buffer pool already */
582 : 30752 : LWLockAcquire(newPartitionLock, LW_SHARED);
583 : 30752 : buf_id = BufTableLookup(&newTag, newHash);
584 : 30752 : LWLockRelease(newPartitionLock);
585 : :
586 : : /* If not in buffers, initiate prefetch */
587 [ + + ]: 30752 : if (buf_id < 0)
588 : : {
589 : : #ifdef USE_PREFETCH
590 : : /*
591 : : * Try to initiate an asynchronous read. This returns false in
592 : : * recovery if the relation file doesn't exist.
593 : : */
882 594 [ + + + - ]: 16232 : if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
630 595 : 8004 : smgrprefetch(smgr_reln, forkNum, blockNum, 1))
596 : : {
1977 597 : 8004 : result.initiated_io = true;
598 : : }
599 : : #endif /* USE_PREFETCH */
600 : : }
601 : : else
602 : : {
603 : : /*
604 : : * Report the buffer it was in at that time. The caller may be able
605 : : * to avoid a buffer table lookup, but it's not pinned and it must be
606 : : * rechecked!
607 : : */
608 : 22524 : result.recent_buffer = buf_id + 1;
609 : : }
610 : :
611 : : /*
612 : : * If the block *is* in buffers, we do nothing. This is not really ideal:
613 : : * the block might be just about to be evicted, which would be stupid
614 : : * since we know we are going to need it soon. But the only easy answer
615 : : * is to bump the usage_count, which does not seem like a great solution:
616 : : * when the caller does ultimately touch the block, usage_count would get
617 : : * bumped again, resulting in too much favoritism for blocks that are
618 : : * involved in a prefetch sequence. A real fix would involve some
619 : : * additional per-buffer state, and it's not clear that there's enough of
620 : : * a problem to justify that.
621 : : */
622 : :
623 : 30752 : return result;
624 : : }
625 : :
626 : : /*
627 : : * PrefetchBuffer -- initiate asynchronous read of a block of a relation
628 : : *
629 : : * This is named by analogy to ReadBuffer but doesn't actually allocate a
630 : : * buffer. Instead it tries to ensure that a future ReadBuffer for the given
631 : : * block will not be delayed by the I/O. Prefetching is optional.
632 : : *
633 : : * There are three possible outcomes:
634 : : *
635 : : * 1. If the block is already cached, the result includes a valid buffer that
636 : : * could be used by the caller to avoid the need for a later buffer lookup, but
637 : : * it's not pinned, so the caller must recheck it.
638 : : *
639 : : * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
640 : : * true. Currently there is no way to know if the data was already cached by
641 : : * the kernel and therefore didn't really initiate I/O, and no way to know when
642 : : * the I/O completes other than using synchronous ReadBuffer().
643 : : *
644 : : * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and
645 : : * USE_PREFETCH is not defined (this build doesn't support prefetching due to
646 : : * lack of a kernel facility), direct I/O is enabled, or the underlying
647 : : * relation file wasn't found and we are in recovery. (If the relation file
648 : : * wasn't found and we are not in recovery, an error is raised).
649 : : */
650 : : PrefetchBufferResult
6081 tgl@sss.pgh.pa.us 651 : 21083 : PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
652 : : {
653 [ - + ]: 21083 : Assert(RelationIsValid(reln));
654 [ - + ]: 21083 : Assert(BlockNumberIsValid(blockNum));
655 : :
5381 rhaas@postgresql.org 656 [ + + ]: 21083 : if (RelationUsesLocalBuffers(reln))
657 : : {
658 : : /* see comments in ReadBufferExtended */
6003 tgl@sss.pgh.pa.us 659 [ + - - + ]: 799 : if (RELATION_IS_OTHER_TEMP(reln))
6003 tgl@sss.pgh.pa.us 660 [ # # ]:UBC 0 : ereport(ERROR,
661 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
662 : : errmsg("cannot access temporary tables of other sessions")));
663 : :
664 : : /* pass it off to localbuf.c */
1517 tgl@sss.pgh.pa.us 665 :CBC 799 : return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
666 : : }
667 : : else
668 : : {
669 : : /* pass it to the shared buffer version */
670 : 20284 : return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
671 : : }
672 : : }
673 : :
674 : : /*
675 : : * ReadRecentBuffer -- try to pin a block in a recently observed buffer
676 : : *
677 : : * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
678 : : * successful. Return true if the buffer is valid and still has the expected
679 : : * tag. In that case, the buffer is pinned and the usage count is bumped.
680 : : */
681 : : bool
1158 rhaas@postgresql.org 682 : 4044 : ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
683 : : Buffer recent_buffer)
684 : : {
685 : : BufferDesc *bufHdr;
686 : : BufferTag tag;
687 : : uint32 buf_state;
688 : : bool have_private_ref;
689 : :
1612 tmunro@postgresql.or 690 [ - + ]: 4044 : Assert(BufferIsValid(recent_buffer));
691 : :
668 heikki.linnakangas@i 692 : 4044 : ResourceOwnerEnlarge(CurrentResourceOwner);
1612 tmunro@postgresql.or 693 : 4044 : ReservePrivateRefCountEntry();
1137 rhaas@postgresql.org 694 : 4044 : InitBufferTag(&tag, &rlocator, forkNum, blockNum);
695 : :
1612 tmunro@postgresql.or 696 [ + + ]: 4044 : if (BufferIsLocal(recent_buffer))
697 : : {
1139 heikki.linnakangas@i 698 : 48 : int b = -recent_buffer - 1;
699 : :
700 : 48 : bufHdr = GetLocalBufferDescriptor(b);
1612 tmunro@postgresql.or 701 : 48 : buf_state = pg_atomic_read_u32(&bufHdr->state);
702 : :
703 : : /* Is it still valid and holding the right tag? */
1137 rhaas@postgresql.org 704 [ + - + - ]: 48 : if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
705 : : {
885 andres@anarazel.de 706 : 48 : PinLocalBuffer(bufHdr, true);
707 : :
1248 tmunro@postgresql.or 708 : 48 : pgBufferUsage.local_blks_hit++;
709 : :
1612 710 : 48 : return true;
711 : : }
712 : : }
713 : : else
714 : : {
715 : 3996 : bufHdr = GetBufferDescriptor(recent_buffer - 1);
716 : 3996 : have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
717 : :
718 : : /*
719 : : * Do we already have this buffer pinned with a private reference? If
720 : : * so, it must be valid and it is safe to check the tag without
721 : : * locking. If not, we have to lock the header first and then check.
722 : : */
723 [ - + ]: 3996 : if (have_private_ref)
1612 tmunro@postgresql.or 724 :UBC 0 : buf_state = pg_atomic_read_u32(&bufHdr->state);
725 : : else
1612 tmunro@postgresql.or 726 :CBC 3996 : buf_state = LockBufHdr(bufHdr);
727 : :
1137 rhaas@postgresql.org 728 [ + + + + ]: 3996 : if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
729 : : {
730 : : /*
731 : : * It's now safe to pin the buffer. We can't pin first and ask
732 : : * questions later, because it might confuse code paths like
733 : : * InvalidateBuffer() if we pinned a random non-matching buffer.
734 : : */
1612 tmunro@postgresql.or 735 [ - + ]: 3934 : if (have_private_ref)
1612 tmunro@postgresql.or 736 :UBC 0 : PinBuffer(bufHdr, NULL); /* bump pin count */
737 : : else
1612 tmunro@postgresql.or 738 :CBC 3934 : PinBuffer_Locked(bufHdr); /* pin for first time */
739 : :
1248 740 : 3934 : pgBufferUsage.shared_blks_hit++;
741 : :
1612 742 : 3934 : return true;
743 : : }
744 : :
745 : : /* If we locked the header above, now unlock. */
746 [ + - ]: 62 : if (!have_private_ref)
747 : 62 : UnlockBufHdr(bufHdr, buf_state);
748 : : }
749 : :
750 : 62 : return false;
751 : : }
752 : :
753 : : /*
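/*
 * A hedged sketch combining the two functions above (hypothetical caller):
 * issue a prefetch, remember the returned buffer, and later try to re-pin it
 * cheaply, falling back to a regular read if the tag no longer matches:
 *
 *		PrefetchBufferResult pf = PrefetchBuffer(rel, MAIN_FORKNUM, blkno);
 *		...
 *		if (BufferIsValid(pf.recent_buffer) &&
 *			ReadRecentBuffer(rel->rd_locator, MAIN_FORKNUM, blkno,
 *							 pf.recent_buffer))
 *			buf = pf.recent_buffer;
 *		else
 *			buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
 *									 RBM_NORMAL, NULL);
 */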
754 : : * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
755 : : * fork with RBM_NORMAL mode and default strategy.
756 : : */
757 : : Buffer
6154 heikki.linnakangas@i 758 : 39559642 : ReadBuffer(Relation reln, BlockNumber blockNum)
759 : : {
760 : 39559642 : return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
761 : : }
762 : :
763 : : /*
764 : : * ReadBufferExtended -- returns a buffer containing the requested
765 : : * block of the requested relation. If the blknum
766 : : * requested is P_NEW, extend the relation file and
767 : : * allocate a new block. (Caller is responsible for
768 : : * ensuring that only one backend tries to extend a
769 : : * relation at the same time!)
770 : : *
771 : : * Returns: the buffer number for the buffer containing
772 : : * the block read. The returned buffer has been pinned.
773 : : * Does not return on error --- elog's instead.
774 : : *
775 : : * Assume when this function is called, that reln has been opened already.
776 : : *
777 : : * In RBM_NORMAL mode, the page is read from disk, and the page header is
778 : : * validated. An error is thrown if the page header is not valid. (But
779 : : * note that an all-zero page is considered "valid"; see
780 : : * PageIsVerified().)
781 : : *
782 : : * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
783 : : * valid, the page is zeroed instead of throwing an error. This is intended
784 : : * for non-critical data, where the caller is prepared to repair errors.
785 : : *
786 : : * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
787 : : * filled with zeros instead of reading it from disk. Useful when the caller
788 : : * is going to fill the page from scratch, since this saves I/O and avoids
789 : : * unnecessary failure if the page-on-disk has corrupt page headers.
790 : : * The page is returned locked to ensure that the caller has a chance to
791 : : * initialize the page before it's made visible to others.
792 : : * Caution: do not use this mode to read a page that is beyond the relation's
793 : : * current physical EOF; that is likely to cause problems in md.c when
794 : : * the page is modified and written out. P_NEW is OK, though.
795 : : *
796 : : * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
797 : : * a cleanup-strength lock on the page.
798 : : *
799 : : * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
800 : : *
801 : : * If strategy is not NULL, a nondefault buffer access strategy is used.
802 : : * See buffer/README for details.
803 : : */
804 : : inline Buffer
805 : 47823470 : ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
806 : : ReadBufferMode mode, BufferAccessStrategy strategy)
807 : : {
808 : : Buffer buf;
809 : :
810 : : /*
811 : : * Reject attempts to read non-local temporary relations; we would be
812 : : * likely to get wrong data since we have no visibility into the owning
813 : : * session's local buffers.
814 : : */
6003 tgl@sss.pgh.pa.us 815 [ + + - + ]: 47823470 : if (RELATION_IS_OTHER_TEMP(reln))
6003 tgl@sss.pgh.pa.us 816 [ # # ]:UBC 0 : ereport(ERROR,
817 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
818 : : errmsg("cannot access temporary tables of other sessions")));
819 : :
820 : : /*
821 : : * Read the buffer, and update pgstat counters to reflect a cache hit or
822 : : * miss.
823 : : */
521 tmunro@postgresql.or 824 :CBC 47823470 : buf = ReadBuffer_common(reln, RelationGetSmgr(reln), 0,
825 : : forkNum, blockNum, mode, strategy);
826 : :
6295 heikki.linnakangas@i 827 : 47823446 : return buf;
828 : : }
829 : :
830 : :
831 : : /*
832 : : * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
833 : : * a relcache entry for the relation.
834 : : *
835 : : * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
836 : : * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
837 : : * cannot be used for temporary relations (and making that work might be
838 : : * difficult, unless we only want to read temporary relations for our own
839 : : * ProcNumber).
840 : : */
841 : : Buffer
1158 rhaas@postgresql.org 842 : 5679219 : ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
843 : : BlockNumber blockNum, ReadBufferMode mode,
844 : : BufferAccessStrategy strategy, bool permanent)
845 : : {
552 heikki.linnakangas@i 846 : 5679219 : SMgrRelation smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
847 : :
521 tmunro@postgresql.or 848 [ + - ]: 5679219 : return ReadBuffer_common(NULL, smgr,
849 : : permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
850 : : forkNum, blockNum,
851 : : mode, strategy);
852 : : }
853 : :
854 : : /*
855 : : * Convenience wrapper around ExtendBufferedRelBy() extending by one block.
856 : : */
857 : : Buffer
745 858 : 44435 : ExtendBufferedRel(BufferManagerRelation bmr,
859 : : ForkNumber forkNum,
860 : : BufferAccessStrategy strategy,
861 : : uint32 flags)
862 : : {
863 : : Buffer buf;
885 andres@anarazel.de 864 : 44435 : uint32 extend_by = 1;
865 : :
745 tmunro@postgresql.or 866 : 44435 : ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
867 : : &buf, &extend_by);
868 : :
885 andres@anarazel.de 869 : 44435 : return buf;
870 : : }
871 : :
872 : : /*
873 : : * Extend relation by multiple blocks.
874 : : *
875 : : * Tries to extend the relation by extend_by blocks. Depending on the
876 : : * availability of resources the relation may end up being extended by a
877 : : * smaller number of pages (unless an error is thrown, always by at least one
878 : : * page). *extended_by is updated to the number of pages the relation has been
879 : : * extended to.
880 : : *
881 : : * buffers needs to be an array that is at least extend_by long. Upon
882 : : * completion, the first extend_by array elements will point to a pinned
883 : : * buffer.
884 : : *
885 : : * If EB_LOCK_FIRST is part of flags, the first returned buffer is
886 : : * locked. This is useful for callers that want a buffer that is guaranteed to
887 : : * be empty.
888 : : */
889 : : BlockNumber
745 tmunro@postgresql.or 890 : 156318 : ExtendBufferedRelBy(BufferManagerRelation bmr,
891 : : ForkNumber fork,
892 : : BufferAccessStrategy strategy,
893 : : uint32 flags,
894 : : uint32 extend_by,
895 : : Buffer *buffers,
896 : : uint32 *extended_by)
897 : : {
898 [ - + ]: 156318 : Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
899 [ - + - - ]: 156318 : Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
885 andres@anarazel.de 900 [ - + ]: 156318 : Assert(extend_by > 0);
901 : :
745 tmunro@postgresql.or 902 [ + - ]: 156318 : if (bmr.smgr == NULL)
903 : : {
904 : 156318 : bmr.smgr = RelationGetSmgr(bmr.rel);
905 : 156318 : bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
906 : : }
907 : :
908 : 156318 : return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
909 : : extend_by, InvalidBlockNumber,
910 : : buffers, extended_by);
911 : : }
912 : :
913 : : /*
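/*
 * A hedged sketch of bulk extension using the function above (hypothetical
 * caller).  The caller asks for several new blocks but must cope with
 * getting fewer; with EB_LOCK_FIRST the first buffer comes back locked:
 *
 *		Buffer		newbufs[8];
 *		uint32		extended_by = 0;
 *		BlockNumber	first_block;
 *
 *		first_block = ExtendBufferedRelBy(BMR_REL(rel), MAIN_FORKNUM,
 *										  bstrategy, EB_LOCK_FIRST,
 *										  lengthof(newbufs),
 *										  newbufs, &extended_by);
 *		... use newbufs[0], then UnlockReleaseBuffer() it ...
 *		for (uint32 i = 1; i < extended_by; i++)
 *			ReleaseBuffer(newbufs[i]);
 */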
914 : : * Extend the relation so it is at least extend_to blocks large, return buffer
915 : : * (extend_to - 1).
916 : : *
917 : : * This is useful for callers that want to write a specific page, regardless
918 : : * of the current size of the relation (e.g. useful for visibilitymap and for
919 : : * crash recovery).
920 : : */
921 : : Buffer
922 : 49396 : ExtendBufferedRelTo(BufferManagerRelation bmr,
923 : : ForkNumber fork,
924 : : BufferAccessStrategy strategy,
925 : : uint32 flags,
926 : : BlockNumber extend_to,
927 : : ReadBufferMode mode)
928 : : {
929 : : BlockNumber current_size;
885 andres@anarazel.de 930 : 49396 : uint32 extended_by = 0;
931 : 49396 : Buffer buffer = InvalidBuffer;
932 : : Buffer buffers[64];
933 : :
745 tmunro@postgresql.or 934 [ - + ]: 49396 : Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
935 [ + + - + ]: 49396 : Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
885 andres@anarazel.de 936 [ + - - + ]: 49396 : Assert(extend_to != InvalidBlockNumber && extend_to > 0);
937 : :
745 tmunro@postgresql.or 938 [ + + ]: 49396 : if (bmr.smgr == NULL)
939 : : {
940 : 7053 : bmr.smgr = RelationGetSmgr(bmr.rel);
941 : 7053 : bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
942 : : }
943 : :
944 : : /*
945 : : * If desired, create the file if it doesn't exist. If
946 : : * smgr_cached_nblocks[fork] is positive then it must exist, no need for
947 : : * an smgrexists call.
948 : : */
885 andres@anarazel.de 949 [ + + ]: 49396 : if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
745 tmunro@postgresql.or 950 [ + + ]: 7053 : (bmr.smgr->smgr_cached_nblocks[fork] == 0 ||
951 [ - + ]: 20 : bmr.smgr->smgr_cached_nblocks[fork] == InvalidBlockNumber) &&
952 [ + + ]: 7033 : !smgrexists(bmr.smgr, fork))
953 : : {
954 : 7017 : LockRelationForExtension(bmr.rel, ExclusiveLock);
955 : :
956 : : /* recheck, fork might have been created concurrently */
957 [ + + ]: 7017 : if (!smgrexists(bmr.smgr, fork))
958 : 7015 : smgrcreate(bmr.smgr, fork, flags & EB_PERFORMING_RECOVERY);
959 : :
960 : 7017 : UnlockRelationForExtension(bmr.rel, ExclusiveLock);
961 : : }
962 : :
963 : : /*
964 : : * If requested, invalidate size cache, so that smgrnblocks asks the
965 : : * kernel.
966 : : */
885 andres@anarazel.de 967 [ + + ]: 49396 : if (flags & EB_CLEAR_SIZE_CACHE)
745 tmunro@postgresql.or 968 : 7053 : bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
969 : :
970 : : /*
971 : : * Estimate how many pages we'll need to extend by. This avoids acquiring
972 : : * unnecessarily many victim buffers.
973 : : */
974 : 49396 : current_size = smgrnblocks(bmr.smgr, fork);
975 : :
976 : : /*
977 : : * Since no-one else can be looking at the page contents yet, there is no
978 : : * difference between an exclusive lock and a cleanup-strength lock. Note
979 : : * that we pass the original mode to ReadBuffer_common() below, when
980 : : * falling back to reading the buffer due to a concurrent relation extension.
981 : : */
876 andres@anarazel.de 982 [ + + - + ]: 49396 : if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
885 983 : 41975 : flags |= EB_LOCK_TARGET;
984 : :
985 [ + + ]: 100898 : while (current_size < extend_to)
986 : : {
987 : 51502 : uint32 num_pages = lengthof(buffers);
988 : : BlockNumber first_block;
989 : :
990 [ + + ]: 51502 : if ((uint64) current_size + num_pages > extend_to)
991 : 51436 : num_pages = extend_to - current_size;
992 : :
745 tmunro@postgresql.or 993 : 51502 : first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
994 : : num_pages, extend_to,
995 : : buffers, &extended_by);
996 : :
885 andres@anarazel.de 997 : 51502 : current_size = first_block + extended_by;
998 [ - + - - ]: 51502 : Assert(num_pages != 0 || current_size >= extend_to);
999 : :
718 peter@eisentraut.org 1000 [ + + ]: 110232 : for (uint32 i = 0; i < extended_by; i++)
1001 : : {
885 andres@anarazel.de 1002 [ + + ]: 58730 : if (first_block + i != extend_to - 1)
1003 : 9339 : ReleaseBuffer(buffers[i]);
1004 : : else
1005 : 49391 : buffer = buffers[i];
1006 : : }
1007 : : }
1008 : :
1009 : : /*
1010 : : * It's possible that another backend concurrently extended the relation.
1011 : : * In that case read the buffer.
1012 : : *
1013 : : * XXX: Should we control this via a flag?
1014 : : */
1015 [ + + ]: 49396 : if (buffer == InvalidBuffer)
1016 : : {
1017 [ - + ]: 5 : Assert(extended_by == 0);
413 noah@leadboat.com 1018 : 5 : buffer = ReadBuffer_common(bmr.rel, bmr.smgr, bmr.relpersistence,
1019 : : fork, extend_to - 1, mode, strategy);
1020 : : }
1021 : :
885 andres@anarazel.de 1022 : 49396 : return buffer;
1023 : : }
1024 : :
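/*
 * A hedged sketch of the function above (hypothetical caller, modelled on
 * how the visibility map extends its fork).  The flags make the call safe
 * even when the fork doesn't exist yet or the cached size is stale:
 *
 *		buf = ExtendBufferedRelTo(BMR_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
 *								  EB_CREATE_FORK_IF_NEEDED |
 *								  EB_CLEAR_SIZE_CACHE,
 *								  vm_block + 1,
 *								  RBM_ZERO_ON_ERROR);
 */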
1025 : : /*
1026 : : * Lock and optionally zero a buffer, as part of the implementation of
1027 : : * RBM_ZERO_AND_LOCK or RBM_ZERO_AND_CLEANUP_LOCK. The buffer must be already
1028 : : * pinned. If the buffer is not already valid, it is zeroed and made valid.
1029 : : */
1030 : : static void
453 tmunro@postgresql.or 1031 : 311971 : ZeroAndLockBuffer(Buffer buffer, ReadBufferMode mode, bool already_valid)
1032 : : {
1033 : : BufferDesc *bufHdr;
1034 : : bool need_to_zero;
1035 : 311971 : bool isLocalBuf = BufferIsLocal(buffer);
1036 : :
521 1037 [ + + - + ]: 311971 : Assert(mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
1038 : :
453 1039 [ + + ]: 311971 : if (already_valid)
1040 : : {
1041 : : /*
1042 : : * If the caller already knew the buffer was valid, we can skip some
1043 : : * header interaction. The caller just wants to lock the buffer.
1044 : : */
1045 : 37204 : need_to_zero = false;
1046 : : }
1047 [ + + ]: 274767 : else if (isLocalBuf)
1048 : : {
1049 : : /* Simple case for non-shared buffers. */
521 1050 : 36 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
160 andres@anarazel.de 1051 : 36 : need_to_zero = StartLocalBufferIO(bufHdr, true, false);
1052 : : }
1053 : : else
1054 : : {
1055 : : /*
1056 : : * Take BM_IO_IN_PROGRESS, or discover that BM_VALID has been set
1057 : : * concurrently. Even though we aren't doing I/O, that ensures that
1058 : : * we don't zero a page that someone else has pinned. An exclusive
1059 : : * content lock wouldn't be enough, because readers are allowed to
1060 : : * drop the content lock after determining that a tuple is visible
1061 : : * (see buffer access rules in README).
1062 : : */
521 tmunro@postgresql.or 1063 : 274731 : bufHdr = GetBufferDescriptor(buffer - 1);
453 1064 : 274731 : need_to_zero = StartBufferIO(bufHdr, true, false);
1065 : : }
1066 : :
1067 [ + + ]: 311971 : if (need_to_zero)
1068 : : {
1069 : 274767 : memset(BufferGetPage(buffer), 0, BLCKSZ);
1070 : :
1071 : : /*
1072 : : * Grab the buffer content lock before marking the page as valid, to
1073 : : * make sure that no other backend sees the zeroed page before the
1074 : : * caller has had a chance to initialize it.
1075 : : *
1076 : : * Since no-one else can be looking at the page contents yet, there is
1077 : : * no difference between an exclusive lock and a cleanup-strength
1078 : : * lock. (Note that we cannot use LockBuffer() or
1079 : : * LockBufferForCleanup() here, because they assert that the buffer is
1080 : : * already valid.)
1081 : : */
1082 [ + + ]: 274767 : if (!isLocalBuf)
1083 : 274731 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
1084 : :
1085 : : /* Set BM_VALID, terminate IO, and wake up any waiters */
1086 [ + + ]: 274767 : if (isLocalBuf)
160 andres@anarazel.de 1087 : 36 : TerminateLocalBufferIO(bufHdr, false, BM_VALID, false);
1088 : : else
1089 : 274731 : TerminateBufferIO(bufHdr, false, BM_VALID, true, false);
1090 : : }
453 tmunro@postgresql.or 1091 [ + + ]: 37204 : else if (!isLocalBuf)
1092 : : {
1093 : : /*
1094 : : * The buffer is valid, so we can't zero it. The caller still expects
1095 : : * the page to be locked on return.
1096 : : */
1097 [ + + ]: 37174 : if (mode == RBM_ZERO_AND_LOCK)
1098 : 37126 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
1099 : : else
1100 : 48 : LockBufferForCleanup(buffer);
1101 : : }
521 1102 : 311971 : }
1103 : :
1104 : : /*
1105 : : * Pin a buffer for a given block. *foundPtr is set to true if the block was
1106 : : * already present, or false if more work is required to either read it in or
1107 : : * zero it.
1108 : : */
1109 : : static pg_attribute_always_inline Buffer
1110 : 56867235 : PinBufferForBlock(Relation rel,
1111 : : SMgrRelation smgr,
1112 : : char persistence,
1113 : : ForkNumber forkNum,
1114 : : BlockNumber blockNum,
1115 : : BufferAccessStrategy strategy,
1116 : : bool *foundPtr)
1117 : : {
1118 : : BufferDesc *bufHdr;
1119 : : IOContext io_context;
1120 : : IOObject io_object;
1121 : :
1122 [ - + ]: 56867235 : Assert(blockNum != P_NEW);
1123 : :
1124 : : /* Persistence should be set before */
413 noah@leadboat.com 1125 [ + + + + : 56867235 : Assert((persistence == RELPERSISTENCE_TEMP ||
- + ]
1126 : : persistence == RELPERSISTENCE_PERMANENT ||
1127 : : persistence == RELPERSISTENCE_UNLOGGED));
1128 : :
521 tmunro@postgresql.or 1129 [ + + ]: 56867235 : if (persistence == RELPERSISTENCE_TEMP)
1130 : : {
1131 : 1535259 : io_context = IOCONTEXT_NORMAL;
1132 : 1535259 : io_object = IOOBJECT_TEMP_RELATION;
1133 : : }
1134 : : else
1135 : : {
1136 : 55331976 : io_context = IOContextForStrategy(strategy);
1137 : 55331976 : io_object = IOOBJECT_RELATION;
1138 : : }
1139 : :
1140 : : TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
1141 : : smgr->smgr_rlocator.locator.spcOid,
1142 : : smgr->smgr_rlocator.locator.dbOid,
1143 : : smgr->smgr_rlocator.locator.relNumber,
1144 : : smgr->smgr_rlocator.backend);
1145 : :
1146 [ + + ]: 56867235 : if (persistence == RELPERSISTENCE_TEMP)
1147 : : {
1148 : 1535259 : bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, foundPtr);
1149 [ + + ]: 1535253 : if (*foundPtr)
5744 rhaas@postgresql.org 1150 : 1526830 : pgBufferUsage.local_blks_hit++;
1151 : : }
1152 : : else
1153 : : {
521 tmunro@postgresql.or 1154 : 55331976 : bufHdr = BufferAlloc(smgr, persistence, forkNum, blockNum,
1155 : : strategy, foundPtr, io_context);
1156 [ + + ]: 55331976 : if (*foundPtr)
1157 : 53658295 : pgBufferUsage.shared_blks_hit++;
1158 : : }
1159 [ + + ]: 56867229 : if (rel)
1160 : : {
1161 : : /*
1162 : : * While pgBufferUsage's "read" counter isn't bumped unless we reach
1163 : : * WaitReadBuffers() (so, not for hits, and not for buffers that are
1164 : : * zeroed instead), the per-relation stats always count them.
1165 : : */
1166 [ + + + + : 50958197 : pgstat_count_buffer_read(rel);
+ + ]
1167 [ + + ]: 50958197 : if (*foundPtr)
1168 [ + + - + : 49726409 : pgstat_count_buffer_hit(rel);
+ + ]
1169 : : }
1170 [ + + ]: 56867229 : if (*foundPtr)
1171 : : {
235 michael@paquier.xyz 1172 : 55185125 : pgstat_count_io_op(io_object, io_context, IOOP_HIT, 1, 0);
885 andres@anarazel.de 1173 [ + + ]: 55185125 : if (VacuumCostActive)
1174 : 101181 : VacuumCostBalance += VacuumCostPageHit;
1175 : :
1176 : : TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1177 : : smgr->smgr_rlocator.locator.spcOid,
1178 : : smgr->smgr_rlocator.locator.dbOid,
1179 : : smgr->smgr_rlocator.locator.relNumber,
1180 : : smgr->smgr_rlocator.backend,
1181 : : true);
1182 : : }
1183 : :
521 tmunro@postgresql.or 1184 : 56867229 : return BufferDescriptorGetBuffer(bufHdr);
1185 : : }
1186 : :
1187 : : /*
1188 : : * ReadBuffer_common -- common logic for all ReadBuffer variants
1189 : : *
1190 : : * smgr is required, rel is optional unless using P_NEW.
1191 : : */
1192 : : static pg_attribute_always_inline Buffer
1193 : 53502694 : ReadBuffer_common(Relation rel, SMgrRelation smgr, char smgr_persistence,
1194 : : ForkNumber forkNum,
1195 : : BlockNumber blockNum, ReadBufferMode mode,
1196 : : BufferAccessStrategy strategy)
1197 : : {
1198 : : ReadBuffersOperation operation;
1199 : : Buffer buffer;
1200 : : int flags;
1201 : : char persistence;
1202 : :
1203 : : /*
1204 : : * Backward compatibility path, most code should use ExtendBufferedRel()
1205 : : * instead, as acquiring the extension lock inside ExtendBufferedRel()
1206 : : * scales a lot better.
1207 : : */
1208 [ + + ]: 53502694 : if (unlikely(blockNum == P_NEW))
1209 : : {
1210 : 260 : uint32 flags = EB_SKIP_EXTENSION_LOCK;
1211 : :
1212 : : /*
1213 : : * Since no-one else can be looking at the page contents yet, there is
1214 : : * no difference between an exclusive lock and a cleanup-strength
1215 : : * lock.
1216 : : */
1217 [ + - - + ]: 260 : if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
521 tmunro@postgresql.or 1218 :UBC 0 : flags |= EB_LOCK_FIRST;
1219 : :
521 tmunro@postgresql.or 1220 :CBC 260 : return ExtendBufferedRel(BMR_REL(rel), forkNum, strategy, flags);
1221 : : }
1222 : :
413 noah@leadboat.com 1223 [ + + ]: 53502434 : if (rel)
1224 : 47823215 : persistence = rel->rd_rel->relpersistence;
1225 : : else
1226 : 5679219 : persistence = smgr_persistence;
1227 : :
521 tmunro@postgresql.or 1228 [ + + + + : 53502434 : if (unlikely(mode == RBM_ZERO_AND_CLEANUP_LOCK ||
+ + ]
1229 : : mode == RBM_ZERO_AND_LOCK))
1230 : : {
1231 : : bool found;
1232 : :
413 noah@leadboat.com 1233 : 311971 : buffer = PinBufferForBlock(rel, smgr, persistence,
1234 : : forkNum, blockNum, strategy, &found);
453 tmunro@postgresql.or 1235 : 311971 : ZeroAndLockBuffer(buffer, mode, found);
521 1236 : 311971 : return buffer;
1237 : : }
1238 : :
1239 : : /*
1240 : : * Signal that we are going to immediately wait. If we're immediately
1241 : : * waiting, there is no benefit in actually executing the IO
1242 : : * asynchronously, it would just add dispatch overhead.
1243 : : */
160 andres@anarazel.de 1244 : 53190463 : flags = READ_BUFFERS_SYNCHRONOUSLY;
521 tmunro@postgresql.or 1245 [ + + ]: 53190463 : if (mode == RBM_ZERO_ON_ERROR)
160 andres@anarazel.de 1246 : 769289 : flags |= READ_BUFFERS_ZERO_ON_ERROR;
521 tmunro@postgresql.or 1247 : 53190463 : operation.smgr = smgr;
1248 : 53190463 : operation.rel = rel;
413 noah@leadboat.com 1249 : 53190463 : operation.persistence = persistence;
521 tmunro@postgresql.or 1250 : 53190463 : operation.forknum = forkNum;
1251 : 53190463 : operation.strategy = strategy;
1252 [ + + ]: 53190463 : if (StartReadBuffer(&operation,
1253 : : &buffer,
1254 : : blockNum,
1255 : : flags))
1256 : 688354 : WaitReadBuffers(&operation);
1257 : :
1258 : 53190439 : return buffer;
1259 : : }
1260 : :
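/*
 * A hedged sketch of the multi-block variant of the two-step interface used
 * above (hypothetical caller; in practice read_stream.c drives it).  The
 * caller fills in the operation's identity fields, passes InvalidBuffer for
 * slots that aren't forwarding pins from a split call, and checks how many
 * blocks were accepted:
 *
 *		ReadBuffersOperation op;
 *		Buffer		bufs[4] = {InvalidBuffer, InvalidBuffer,
 *							   InvalidBuffer, InvalidBuffer};
 *		int			nblocks = lengthof(bufs);
 *
 *		op.rel = rel;
 *		op.smgr = RelationGetSmgr(rel);
 *		op.persistence = rel->rd_rel->relpersistence;
 *		op.forknum = MAIN_FORKNUM;
 *		op.strategy = NULL;
 *		if (StartReadBuffers(&op, bufs, blkno, &nblocks, 0))
 *			WaitReadBuffers(&op);
 *		... the first nblocks entries of bufs are now pinned and valid ...
 */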
1261 : : static pg_attribute_always_inline bool
1262 : 56395405 : StartReadBuffersImpl(ReadBuffersOperation *operation,
1263 : : Buffer *buffers,
1264 : : BlockNumber blockNum,
1265 : : int *nblocks,
1266 : : int flags,
1267 : : bool allow_forwarding)
1268 : : {
1269 : 56395405 : int actual_nblocks = *nblocks;
333 andres@anarazel.de 1270 : 56395405 : int maxcombine = 0;
1271 : : bool did_start_io;
1272 : :
169 tmunro@postgresql.or 1273 [ + + - + ]: 56395405 : Assert(*nblocks == 1 || allow_forwarding);
521 1274 [ - + ]: 56395405 : Assert(*nblocks > 0);
1275 [ - + ]: 56395405 : Assert(*nblocks <= MAX_IO_COMBINE_LIMIT);
1276 : :
1277 [ + + ]: 57802742 : for (int i = 0; i < actual_nblocks; ++i)
1278 : : {
1279 : : bool found;
1280 : :
169 1281 [ + + + + ]: 56556604 : if (allow_forwarding && buffers[i] != InvalidBuffer)
1282 : 1340 : {
1283 : : BufferDesc *bufHdr;
1284 : :
1285 : : /*
1286 : : * This is a buffer that was pinned by an earlier call to
1287 : : * StartReadBuffers(), but couldn't be handled in one operation at
1288 : : * that time. The operation was split, and the caller has passed
1289 : : * an already pinned buffer back to us to handle the rest of the
1290 : : * operation. It must continue at the expected block number.
1291 : : */
1292 [ - + ]: 1340 : Assert(BufferGetBlockNumber(buffers[i]) == blockNum + i);
1293 : :
1294 : : /*
1295 : : * It might be an already valid buffer (a hit) that followed the
1296 : : * final contiguous block of an earlier I/O (a miss) marking the
1297 : : * end of it, or a buffer that some other backend has since made
1298 : : * valid by performing the I/O for us, in which case we can handle
1299 : : * it as a hit now. It is safe to check for a BM_VALID flag with
1300 : : * a relaxed load, because we got a fresh view of it while pinning
1301 : : * it in the previous call.
1302 : : *
1303 : : * On the other hand if we don't see BM_VALID yet, it must be an
1304 : : * I/O that was split by the previous call and we need to try to
1305 : : * start a new I/O from this block. We're also racing against any
1306 : : * other backend that might start the I/O or even manage to mark
1307 : : * it BM_VALID after this check, but StartBufferIO() will handle
1308 : : * those cases.
1309 : : */
1310 [ + + ]: 1340 : if (BufferIsLocal(buffers[i]))
1311 : 3 : bufHdr = GetLocalBufferDescriptor(-buffers[i] - 1);
1312 : : else
1313 : 1337 : bufHdr = GetBufferDescriptor(buffers[i] - 1);
1314 [ - + ]: 1340 : Assert(pg_atomic_read_u32(&bufHdr->state) & BM_TAG_VALID);
1315 : 1340 : found = pg_atomic_read_u32(&bufHdr->state) & BM_VALID;
1316 : : }
1317 : : else
1318 : : {
1319 : 56555258 : buffers[i] = PinBufferForBlock(operation->rel,
1320 : 56555264 : operation->smgr,
1321 : 56555264 : operation->persistence,
1322 : : operation->forknum,
1323 : : blockNum + i,
1324 : : operation->strategy,
1325 : : &found);
1326 : : }
1327 : :
521 1328 [ + + ]: 56556598 : if (found)
1329 : : {
1330 : : /*
1331 : : * We have a hit. If it's the first block in the requested range,
1332 : : * we can return it immediately and report that WaitReadBuffers()
1333 : : * does not need to be called. If the initial value of *nblocks
1334 : : * was larger, the caller will have to call again for the rest.
1335 : : */
169 1336 [ + + ]: 55149261 : if (i == 0)
1337 : : {
1338 : 55147918 : *nblocks = 1;
1339 : :
1340 : : #ifdef USE_ASSERT_CHECKING
1341 : :
1342 : : /*
1343 : : * Initialize enough of ReadBuffersOperation to make
1344 : : * CheckReadBuffersOperation() work. Outside of assertions
1345 : : * that's not necessary when no IO is issued.
1346 : : */
160 andres@anarazel.de 1347 : 55147918 : operation->buffers = buffers;
1348 : 55147918 : operation->blocknum = blockNum;
1349 : 55147918 : operation->nblocks = 1;
1350 : 55147918 : operation->nblocks_done = 1;
1351 : 55147918 : CheckReadBuffersOperation(operation, true);
1352 : : #endif
169 tmunro@postgresql.or 1353 : 55147918 : return false;
1354 : : }
1355 : :
1356 : : /*
1357 : : * Otherwise we already have an I/O to perform, but this block
1358 : : * can't be included as it is already valid. Split the I/O here.
1359 : : * There may or may not be more blocks requiring I/O after this
1360 : : * one, we haven't checked, but they can't be contiguous with this
1361 : : * one in the way. We'll leave this buffer pinned, forwarding it
1362 : : * to the next call, avoiding the need to unpin it here and re-pin
1363 : : * it in the next call.
1364 : : */
1365 : 1343 : actual_nblocks = i;
521 1366 : 1343 : break;
1367 : : }
1368 : : else
1369 : : {
1370 : : /*
1371 : : * Check how many blocks we can cover with the same IO. The smgr
1372 : : * implementation might e.g. be limited due to a segment boundary.
1373 : : */
333 andres@anarazel.de 1374 [ + + + + ]: 1407337 : if (i == 0 && actual_nblocks > 1)
1375 : : {
1376 : 31935 : maxcombine = smgrmaxcombine(operation->smgr,
1377 : : operation->forknum,
1378 : : blockNum);
1379 [ - + ]: 31935 : if (unlikely(maxcombine < actual_nblocks))
1380 : : {
333 andres@anarazel.de 1381 [ # # ]:UBC 0 : elog(DEBUG2, "limiting nblocks at %u from %u to %u",
1382 : : blockNum, actual_nblocks, maxcombine);
1383 : 0 : actual_nblocks = maxcombine;
1384 : : }
1385 : : }
1386 : : }
1387 : : }
521 tmunro@postgresql.or 1388 :CBC 1247481 : *nblocks = actual_nblocks;
1389 : :
1390 : : /* Populate information needed for I/O. */
1391 : 1247481 : operation->buffers = buffers;
1392 : 1247481 : operation->blocknum = blockNum;
1393 : 1247481 : operation->flags = flags;
1394 : 1247481 : operation->nblocks = actual_nblocks;
160 andres@anarazel.de 1395 : 1247481 : operation->nblocks_done = 0;
1396 : 1247481 : pgaio_wref_clear(&operation->io_wref);
1397 : :
1398 : : /*
1399 : : * When using AIO, start the IO in the background. If not, issue prefetch
1400 : : * requests if desired by the caller.
1401 : : *
1402 : : * The reason we have a dedicated path for IOMETHOD_SYNC here is to
1403 : : * de-risk the introduction of AIO somewhat. It's a large architectural
1404 : : * change, with lots of chances for unanticipated performance effects.
1405 : : *
1406 : : * Use of IOMETHOD_SYNC already leads to not actually performing IO
1407 : : * asynchronously, but without the check here we'd execute IO earlier than
1408 : : * we used to. Eventually this IOMETHOD_SYNC specific path should go away.
1409 : : */
1410 [ + + ]: 1247481 : if (io_method != IOMETHOD_SYNC)
1411 : : {
1412 : : /*
1413 : : * Try to start IO asynchronously. It's possible that no IO needs to
1414 : : * be started, if another backend already performed the IO.
1415 : : *
1416 : : * Note that if an IO is started, it might not cover the entire
1417 : : * requested range, e.g. because an intermediary block has been read
1418 : : * in by another backend. In that case any "trailing" buffers we
1419 : : * already pinned above will be "forwarded" by read_stream.c to the
1420 : : * next call to StartReadBuffers().
1421 : : *
1422 : : * This is signalled to the caller by decrementing *nblocks *and*
 1423 : : * reducing operation->nblocks. The latter is done here, but not in
 1424 : : * WaitReadBuffers() below, as in WaitReadBuffers() we can't "shorten" the
 1425 : : * overall read size anymore; we need to retry until done in its
 1426 : : * entirety or until failed.
1427 : : */
1428 : 1246420 : did_start_io = AsyncReadBuffers(operation, nblocks);
1429 : :
1430 : 1246405 : operation->nblocks = *nblocks;
1431 : : }
1432 : : else
1433 : : {
1434 : 1061 : operation->flags |= READ_BUFFERS_SYNCHRONOUSLY;
1435 : :
1436 [ + + ]: 1061 : if (flags & READ_BUFFERS_ISSUE_ADVICE)
1437 : : {
1438 : : /*
1439 : : * In theory we should only do this if PinBufferForBlock() had to
1440 : : * allocate new buffers above. That way, if two calls to
1441 : : * StartReadBuffers() were made for the same blocks before
1442 : : * WaitReadBuffers(), only the first would issue the advice.
1443 : : * That'd be a better simulation of true asynchronous I/O, which
1444 : : * would only start the I/O once, but isn't done here for
1445 : : * simplicity.
1446 : : */
1447 : 2 : smgrprefetch(operation->smgr,
1448 : : operation->forknum,
1449 : : blockNum,
1450 : : actual_nblocks);
1451 : : }
1452 : :
1453 : : /*
1454 : : * Indicate that WaitReadBuffers() should be called. WaitReadBuffers()
1455 : : * will initiate the necessary IO.
1456 : : */
1457 : 1061 : did_start_io = true;
1458 : : }
1459 : :
1460 : 1247466 : CheckReadBuffersOperation(operation, !did_start_io);
1461 : :
1462 : 1247466 : return did_start_io;
1463 : : }
1464 : :
1465 : : /*
1466 : : * Begin reading a range of blocks beginning at blockNum and extending for
1467 : : * *nblocks. *nblocks and the buffers array are in/out parameters. On entry,
1468 : : * the buffers elements covered by *nblocks must hold either InvalidBuffer or
1469 : : * buffers forwarded by an earlier call to StartReadBuffers() that was split
1470 : : * and is now being continued. On return, *nblocks holds the number of blocks
1471 : : * accepted by this operation. If it is less than the original number then
1472 : : * this operation has been split, but buffer elements up to the original
1473 : : * requested size may hold forwarded buffers to be used for a continuing
1474 : : * operation. The caller must either start a new I/O beginning at the block
1475 : : * immediately following the blocks accepted by this call and pass those
1476 : : * buffers back in, or release them if it chooses not to. It shouldn't make
1477 : : * any other use of or assumptions about forwarded buffers.
1478 : : *
1479 : : * If false is returned, no I/O is necessary and the buffers covered by
1480 : : * *nblocks on exit are valid and ready to be accessed. If true is returned,
1481 : : * an I/O has been started, and WaitReadBuffers() must be called with the same
1482 : : * operation object before the buffers covered by *nblocks on exit can be
1483 : : * accessed. Along with the operation object, the caller-supplied array of
1484 : : * buffers must remain valid until WaitReadBuffers() is called, and any
1485 : : * forwarded buffers must also be preserved for a continuing call unless
1486 : : * they are explicitly released.
1487 : : */
1488 : : bool
521 tmunro@postgresql.or 1489 : 1440193 : StartReadBuffers(ReadBuffersOperation *operation,
1490 : : Buffer *buffers,
1491 : : BlockNumber blockNum,
1492 : : int *nblocks,
1493 : : int flags)
1494 : : {
169 1495 : 1440193 : return StartReadBuffersImpl(operation, buffers, blockNum, nblocks, flags,
1496 : : true /* expect forwarded buffers */ );
1497 : : }
1498 : :
1499 : : /*
 1500 : : * Single-block version of StartReadBuffers(). This might save a few
1501 : : * instructions when called from another translation unit, because it is
1502 : : * specialized for nblocks == 1.
1503 : : *
1504 : : * This version does not support "forwarded" buffers: they cannot be created
 1505 : : * by reading only one block, and *buffer is ignored on entry.
1506 : : */
1507 : : bool
521 1508 : 54955212 : StartReadBuffer(ReadBuffersOperation *operation,
1509 : : Buffer *buffer,
1510 : : BlockNumber blocknum,
1511 : : int flags)
1512 : : {
1513 : 54955212 : int nblocks = 1;
1514 : : bool result;
1515 : :
169 1516 : 54955212 : result = StartReadBuffersImpl(operation, buffer, blocknum, &nblocks, flags,
1517 : : false /* single block, no forwarding */ );
521 1518 [ - + ]: 54955197 : Assert(nblocks == 1); /* single block can't be short */
1519 : :
1520 : 54955197 : return result;
1521 : : }
1522 : :
1523 : : /*
1524 : : * Perform sanity checks on the ReadBuffersOperation.
1525 : : */
1526 : : static void
160 andres@anarazel.de 1527 : 58886863 : CheckReadBuffersOperation(ReadBuffersOperation *operation, bool is_complete)
1528 : : {
1529 : : #ifdef USE_ASSERT_CHECKING
1530 [ - + ]: 58886863 : Assert(operation->nblocks_done <= operation->nblocks);
1531 [ + + - + ]: 58886863 : Assert(!is_complete || operation->nblocks == operation->nblocks_done);
1532 : :
1533 [ + + ]: 118254117 : for (int i = 0; i < operation->nblocks; i++)
1534 : : {
1535 : 59367254 : Buffer buffer = operation->buffers[i];
1536 : 59367254 : BufferDesc *buf_hdr = BufferIsLocal(buffer) ?
1537 [ + + ]: 59367254 : GetLocalBufferDescriptor(-buffer - 1) :
1538 : 57815879 : GetBufferDescriptor(buffer - 1);
1539 : :
1540 [ - + ]: 59367254 : Assert(BufferGetBlockNumber(buffer) == operation->blocknum + i);
1541 [ - + ]: 59367254 : Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_TAG_VALID);
1542 : :
1543 [ + + ]: 59367254 : if (i < operation->nblocks_done)
1544 [ - + ]: 56554901 : Assert(pg_atomic_read_u32(&buf_hdr->state) & BM_VALID);
1545 : : }
1546 : : #endif
1547 : 58886863 : }
1548 : :
1549 : : /* helper for ReadBuffersCanStartIO(), to avoid repetition */
1550 : : static inline bool
1551 : 1407337 : ReadBuffersCanStartIOOnce(Buffer buffer, bool nowait)
1552 : : {
521 tmunro@postgresql.or 1553 [ + + ]: 1407337 : if (BufferIsLocal(buffer))
160 andres@anarazel.de 1554 : 8387 : return StartLocalBufferIO(GetLocalBufferDescriptor(-buffer - 1),
1555 : : true, nowait);
1556 : : else
521 tmunro@postgresql.or 1557 : 1398950 : return StartBufferIO(GetBufferDescriptor(buffer - 1), true, nowait);
1558 : : }
1559 : :
1560 : : /*
1561 : : * Helper for AsyncReadBuffers that tries to get the buffer ready for IO.
1562 : : */
1563 : : static inline bool
160 andres@anarazel.de 1564 : 1407337 : ReadBuffersCanStartIO(Buffer buffer, bool nowait)
1565 : : {
1566 : : /*
1567 : : * If this backend currently has staged IO, we need to submit the pending
1568 : : * IO before waiting for the right to issue IO, to avoid the potential for
1569 : : * deadlocks (and, more commonly, unnecessary delays for other backends).
1570 : : */
1571 [ + + + + ]: 1407337 : if (!nowait && pgaio_have_staged())
1572 : : {
1573 [ + - ]: 548 : if (ReadBuffersCanStartIOOnce(buffer, true))
1574 : 548 : return true;
1575 : :
1576 : : /*
1577 : : * Unfortunately StartBufferIO() returning false doesn't allow to
1578 : : * distinguish between the buffer already being valid and IO already
1579 : : * being in progress. Since IO already being in progress is quite
1580 : : * rare, this approach seems fine.
1581 : : */
160 andres@anarazel.de 1582 :UBC 0 : pgaio_submit_staged();
1583 : : }
1584 : :
160 andres@anarazel.de 1585 :CBC 1406789 : return ReadBuffersCanStartIOOnce(buffer, nowait);
1586 : : }
1587 : :
1588 : : /*
1589 : : * Helper for WaitReadBuffers() that processes the results of a readv
1590 : : * operation, raising an error if necessary.
1591 : : */
1592 : : static void
1593 : 1245222 : ProcessReadBuffersResult(ReadBuffersOperation *operation)
1594 : : {
1595 : 1245222 : PgAioReturn *aio_ret = &operation->io_return;
1596 : 1245222 : PgAioResultStatus rs = aio_ret->result.status;
1597 : 1245222 : int newly_read_blocks = 0;
1598 : :
1599 [ - + ]: 1245222 : Assert(pgaio_wref_valid(&operation->io_wref));
1600 [ - + ]: 1245222 : Assert(aio_ret->result.status != PGAIO_RS_UNKNOWN);
1601 : :
1602 : : /*
1603 : : * SMGR reports the number of blocks successfully read as the result of
1604 : : * the IO operation. Thus we can simply add that to ->nblocks_done.
1605 : : */
1606 : :
1607 [ + + ]: 1245222 : if (likely(rs != PGAIO_RS_ERROR))
1608 : 1245195 : newly_read_blocks = aio_ret->result.result;
1609 : :
1610 [ + + + + ]: 1245222 : if (rs == PGAIO_RS_ERROR || rs == PGAIO_RS_WARNING)
1611 [ + + ]: 51 : pgaio_result_report(aio_ret->result, &aio_ret->target_data,
1612 : : rs == PGAIO_RS_ERROR ? ERROR : WARNING);
1613 [ - + ]: 1245171 : else if (aio_ret->result.status == PGAIO_RS_PARTIAL)
1614 : : {
1615 : : /*
1616 : : * We'll retry, so we just emit a debug message to the server log (or
1617 : : * not even that in prod scenarios).
1618 : : */
160 andres@anarazel.de 1619 :UBC 0 : pgaio_result_report(aio_ret->result, &aio_ret->target_data, DEBUG1);
1620 [ # # ]: 0 : elog(DEBUG3, "partial read, will retry");
1621 : : }
1622 : :
160 andres@anarazel.de 1623 [ - + ]:CBC 1245195 : Assert(newly_read_blocks > 0);
1624 [ - + ]: 1245195 : Assert(newly_read_blocks <= MAX_IO_COMBINE_LIMIT);
1625 : :
1626 : 1245195 : operation->nblocks_done += newly_read_blocks;
1627 : :
1628 [ - + ]: 1245195 : Assert(operation->nblocks_done <= operation->nblocks);
1629 : 1245195 : }
1630 : :
1631 : : void
521 tmunro@postgresql.or 1632 : 1245223 : WaitReadBuffers(ReadBuffersOperation *operation)
1633 : : {
160 andres@anarazel.de 1634 : 1245223 : PgAioReturn *aio_ret = &operation->io_return;
1635 : : IOContext io_context;
1636 : : IOObject io_object;
1637 : :
1638 [ + + ]: 1245223 : if (operation->persistence == RELPERSISTENCE_TEMP)
1639 : : {
521 tmunro@postgresql.or 1640 : 1499 : io_context = IOCONTEXT_NORMAL;
1641 : 1499 : io_object = IOOBJECT_TEMP_RELATION;
1642 : : }
1643 : : else
1644 : : {
1645 : 1243724 : io_context = IOContextForStrategy(operation->strategy);
1646 : 1243724 : io_object = IOOBJECT_RELATION;
1647 : : }
1648 : :
1649 : : /*
1650 : : * If we get here without an IO operation having been issued, the
1651 : : * io_method == IOMETHOD_SYNC path must have been used. Otherwise the
1652 : : * caller should not have called WaitReadBuffers().
1653 : : *
 1654 : : * In the case of IOMETHOD_SYNC, we start the IO in WaitReadBuffers(), as
 1655 : : * we used to before the introduction of AIO. This is done as part of the
 1656 : : * retry logic below, so no extra code is required.
1657 : : *
1658 : : * This path is expected to eventually go away.
1659 : : */
160 andres@anarazel.de 1660 [ + + + - ]: 1245223 : if (!pgaio_wref_valid(&operation->io_wref) && io_method != IOMETHOD_SYNC)
160 andres@anarazel.de 1661 [ # # ]:UBC 0 : elog(ERROR, "waiting for read operation that didn't read");
1662 : :
1663 : : /*
1664 : : * To handle partial reads, and IOMETHOD_SYNC, we re-issue IO until we're
1665 : : * done. We may need multiple retries, not just because we could get
1666 : : * multiple partial reads, but also because some of the remaining
1667 : : * to-be-read buffers may have been read in by other backends, limiting
1668 : : * the IO size.
1669 : : */
1670 : : while (true)
521 tmunro@postgresql.or 1671 :CBC 1061 : {
1672 : : int ignored_nblocks_progress;
1673 : :
160 andres@anarazel.de 1674 : 1246284 : CheckReadBuffersOperation(operation, false);
1675 : :
1676 : : /*
1677 : : * If there is an IO associated with the operation, we may need to
1678 : : * wait for it.
1679 : : */
1680 [ + + ]: 1246284 : if (pgaio_wref_valid(&operation->io_wref))
1681 : : {
1682 : : /*
1683 : : * Track the time spent waiting for the IO to complete. As
1684 : : * tracking a wait even if we don't actually need to wait
1685 : : *
1686 : : * a) is not cheap, due to the timestamping overhead
1687 : : *
1688 : : * b) reports some time as waiting, even if we never waited
1689 : : *
1690 : : * we first check if we already know the IO is complete.
1691 : : */
1692 [ + + ]: 1245223 : if (aio_ret->result.status == PGAIO_RS_UNKNOWN &&
1693 [ + + ]: 548789 : !pgaio_wref_check_done(&operation->io_wref))
1694 : 274945 : {
1695 : 274946 : instr_time io_start = pgstat_prepare_io_time(track_io_timing);
1696 : :
1697 : 274946 : pgaio_wref_wait(&operation->io_wref);
1698 : :
1699 : : /*
1700 : : * The IO operation itself was already counted earlier, in
 1701 : : * AsyncReadBuffers(); this just accounts for the wait time.
1702 : : */
1703 : 274945 : pgstat_count_io_op_time(io_object, io_context, IOOP_READ,
1704 : : io_start, 0, 0);
1705 : : }
1706 : : else
1707 : : {
1708 [ - + ]: 970277 : Assert(pgaio_wref_check_done(&operation->io_wref));
1709 : : }
1710 : :
1711 : : /*
1712 : : * We now are sure the IO completed. Check the results. This
1713 : : * includes reporting on errors if there were any.
1714 : : */
1715 : 1245222 : ProcessReadBuffersResult(operation);
1716 : : }
1717 : :
1718 : : /*
 1719 : : * Most of the time, the one IO we already started will read in
1720 : : * everything. But we need to deal with partial reads and buffers not
1721 : : * needing IO anymore.
1722 : : */
1723 [ + + ]: 1246256 : if (operation->nblocks_done == operation->nblocks)
1724 : 1245195 : break;
1725 : :
1726 [ - + ]: 1061 : CHECK_FOR_INTERRUPTS();
1727 : :
1728 : : /*
1729 : : * This may only complete the IO partially, either because some
1730 : : * buffers were already valid, or because of a partial read.
1731 : : *
1732 : : * NB: In contrast to after the AsyncReadBuffers() call in
1733 : : * StartReadBuffers(), we do *not* reduce
1734 : : * ReadBuffersOperation->nblocks here, callers expect the full
1735 : : * operation to be completed at this point (as more operations may
1736 : : * have been queued).
1737 : : */
1738 : 1061 : AsyncReadBuffers(operation, &ignored_nblocks_progress);
1739 : : }
1740 : :
1741 : 1245195 : CheckReadBuffersOperation(operation, true);
1742 : :
1743 : : /* NB: READ_DONE tracepoint was already executed in completion callback */
1744 : 1245195 : }
1745 : :
1746 : : /*
1747 : : * Initiate IO for the ReadBuffersOperation
1748 : : *
 1749 : : * This function only starts a single IO at a time. The IO may cover fewer
 1750 : : * than the remaining to-be-read blocks if one of the buffers has
 1751 : : * concurrently been read in. If the first to-be-read buffer is already valid,
1752 : : * no IO will be issued.
1753 : : *
1754 : : * To support retries after partial reads, the first operation->nblocks_done
1755 : : * buffers are skipped.
1756 : : *
1757 : : * On return *nblocks_progress is updated to reflect the number of buffers
1758 : : * affected by the call. If the first buffer is valid, *nblocks_progress is
1759 : : * set to 1 and operation->nblocks_done is incremented.
1760 : : *
1761 : : * Returns true if IO was initiated, false if no IO was necessary.
1762 : : */
1763 : : static bool
1764 : 1247481 : AsyncReadBuffers(ReadBuffersOperation *operation, int *nblocks_progress)
1765 : : {
1766 : 1247481 : Buffer *buffers = &operation->buffers[0];
1767 : 1247481 : int flags = operation->flags;
1768 : 1247481 : BlockNumber blocknum = operation->blocknum;
1769 : 1247481 : ForkNumber forknum = operation->forknum;
1770 : 1247481 : char persistence = operation->persistence;
1771 : 1247481 : int16 nblocks_done = operation->nblocks_done;
1772 : 1247481 : Buffer *io_buffers = &operation->buffers[nblocks_done];
1773 : 1247481 : int io_buffers_len = 0;
1774 : : PgAioHandle *ioh;
1775 : 1247481 : uint32 ioh_flags = 0;
1776 : : void *io_pages[MAX_IO_COMBINE_LIMIT];
1777 : : IOContext io_context;
1778 : : IOObject io_object;
1779 : : bool did_start_io;
1780 : :
1781 : : /*
1782 : : * When this IO is executed synchronously, either because the caller will
1783 : : * immediately block waiting for the IO or because IOMETHOD_SYNC is used,
1784 : : * the AIO subsystem needs to know.
1785 : : */
1786 [ + + ]: 1247481 : if (flags & READ_BUFFERS_SYNCHRONOUSLY)
1787 : 689873 : ioh_flags |= PGAIO_HF_SYNCHRONOUS;
1788 : :
1789 [ + + ]: 1247481 : if (persistence == RELPERSISTENCE_TEMP)
1790 : : {
1791 : 1793 : io_context = IOCONTEXT_NORMAL;
1792 : 1793 : io_object = IOOBJECT_TEMP_RELATION;
1793 : 1793 : ioh_flags |= PGAIO_HF_REFERENCES_LOCAL;
1794 : : }
1795 : : else
1796 : : {
1797 : 1245688 : io_context = IOContextForStrategy(operation->strategy);
1798 : 1245688 : io_object = IOOBJECT_RELATION;
1799 : : }
1800 : :
1801 : : /*
1802 : : * If zero_damaged_pages is enabled, add the READ_BUFFERS_ZERO_ON_ERROR
1803 : : * flag. The reason for that is that, hopefully, zero_damaged_pages isn't
1804 : : * set globally, but on a per-session basis. The completion callback,
1805 : : * which may be run in other processes, e.g. in IO workers, may have a
1806 : : * different value of the zero_damaged_pages GUC.
1807 : : *
1808 : : * XXX: We probably should eventually use a different flag for
1809 : : * zero_damaged_pages, so we can report different log levels / error codes
1810 : : * for zero_damaged_pages and ZERO_ON_ERROR.
1811 : : */
1812 [ + + ]: 1247481 : if (zero_damaged_pages)
1813 : 24 : flags |= READ_BUFFERS_ZERO_ON_ERROR;
1814 : :
1815 : : /*
1816 : : * For the same reason as with zero_damaged_pages we need to use this
1817 : : * backend's ignore_checksum_failure value.
1818 : : */
1819 [ + + ]: 1247481 : if (ignore_checksum_failure)
1820 : 12 : flags |= READ_BUFFERS_IGNORE_CHECKSUM_FAILURES;
1821 : :
1822 : :
1823 : : /*
1824 : : * To be allowed to report stats in the local completion callback we need
1825 : : * to prepare to report stats now. This ensures we can safely report the
1826 : : * checksum failure even in a critical section.
1827 : : */
1828 : 1247481 : pgstat_prepare_report_checksum_failure(operation->smgr->smgr_rlocator.locator.dbOid);
1829 : :
1830 : : /*
1831 : : * Get IO handle before ReadBuffersCanStartIO(), as pgaio_io_acquire()
1832 : : * might block, which we don't want after setting IO_IN_PROGRESS.
1833 : : *
1834 : : * If we need to wait for IO before we can get a handle, submit
1835 : : * already-staged IO first, so that other backends don't need to wait.
1836 : : * There wouldn't be a deadlock risk, as pgaio_io_acquire() just needs to
1837 : : * wait for already submitted IO, which doesn't require additional locks,
1838 : : * but it could still cause undesirable waits.
1839 : : *
1840 : : * A secondary benefit is that this would allow us to measure the time in
1841 : : * pgaio_io_acquire() without causing undue timer overhead in the common,
1842 : : * non-blocking, case. However, currently the pgstats infrastructure
1843 : : * doesn't really allow that, as it a) asserts that an operation can't
 1844 : : * have time without operations and b) doesn't have an API to report
1845 : : * "accumulated" time.
1846 : : */
1847 : 1247481 : ioh = pgaio_io_acquire_nb(CurrentResourceOwner, &operation->io_return);
1848 [ + + ]: 1247481 : if (unlikely(!ioh))
1849 : : {
1850 : 3415 : pgaio_submit_staged();
1851 : :
1852 : 3415 : ioh = pgaio_io_acquire(CurrentResourceOwner, &operation->io_return);
1853 : : }
1854 : :
1855 : : /*
1856 : : * Check if we can start IO on the first to-be-read buffer.
1857 : : *
1858 : : * If an I/O is already in progress in another backend, we want to wait
1859 : : * for the outcome: either done, or something went wrong and we will
1860 : : * retry.
1861 : : */
1862 [ + + ]: 1247481 : if (!ReadBuffersCanStartIO(buffers[nblocks_done], false))
1863 : : {
1864 : : /*
1865 : : * Someone else has already completed this block, we're done.
1866 : : *
1867 : : * When IO is necessary, ->nblocks_done is updated in
1868 : : * ProcessReadBuffersResult(), but that is not called if no IO is
1869 : : * necessary. Thus update here.
1870 : : */
1871 : 1944 : operation->nblocks_done += 1;
1872 : 1944 : *nblocks_progress = 1;
1873 : :
1874 : 1944 : pgaio_io_release(ioh);
1875 : 1944 : pgaio_wref_clear(&operation->io_wref);
1876 : 1944 : did_start_io = false;
1877 : :
1878 : : /*
1879 : : * Report and track this as a 'hit' for this backend, even though it
1880 : : * must have started out as a miss in PinBufferForBlock(). The other
1881 : : * backend will track this as a 'read'.
1882 : : */
1883 : : TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, blocknum + operation->nblocks_done,
1884 : : operation->smgr->smgr_rlocator.locator.spcOid,
1885 : : operation->smgr->smgr_rlocator.locator.dbOid,
1886 : : operation->smgr->smgr_rlocator.locator.relNumber,
1887 : : operation->smgr->smgr_rlocator.backend,
1888 : : true);
1889 : :
1890 [ - + ]: 1944 : if (persistence == RELPERSISTENCE_TEMP)
160 andres@anarazel.de 1891 :UBC 0 : pgBufferUsage.local_blks_hit += 1;
1892 : : else
160 andres@anarazel.de 1893 :CBC 1944 : pgBufferUsage.shared_blks_hit += 1;
1894 : :
1895 [ + - ]: 1944 : if (operation->rel)
1896 [ - + - - : 1944 : pgstat_count_buffer_hit(operation->rel);
+ - ]
1897 : :
1898 : 1944 : pgstat_count_io_op(io_object, io_context, IOOP_HIT, 1, 0);
1899 : :
1900 [ + + ]: 1944 : if (VacuumCostActive)
1901 : 12 : VacuumCostBalance += VacuumCostPageHit;
1902 : : }
1903 : : else
1904 : : {
1905 : : instr_time io_start;
1906 : :
1907 : : /* We found a buffer that we need to read in. */
1908 [ - + ]: 1245537 : Assert(io_buffers[0] == buffers[nblocks_done]);
1909 : 1245537 : io_pages[0] = BufferGetBlock(buffers[nblocks_done]);
521 tmunro@postgresql.or 1910 : 1245537 : io_buffers_len = 1;
1911 : :
1912 : : /*
1913 : : * How many neighboring-on-disk blocks can we scatter-read into other
1914 : : * buffers at the same time? In this case we don't wait if we see an
1915 : : * I/O already in progress. We already set BM_IO_IN_PROGRESS for the
1916 : : * head block, so we should get on with that I/O as soon as possible.
1917 : : */
160 andres@anarazel.de 1918 [ + + ]: 1405393 : for (int i = nblocks_done + 1; i < operation->nblocks; i++)
1919 : : {
1920 [ - + ]: 159856 : if (!ReadBuffersCanStartIO(buffers[i], true))
160 andres@anarazel.de 1921 :UBC 0 : break;
1922 : : /* Must be consecutive block numbers. */
160 andres@anarazel.de 1923 [ - + ]:CBC 159856 : Assert(BufferGetBlockNumber(buffers[i - 1]) ==
1924 : : BufferGetBlockNumber(buffers[i]) - 1);
1925 [ - + ]: 159856 : Assert(io_buffers[io_buffers_len] == buffers[i]);
1926 : :
521 tmunro@postgresql.or 1927 : 159856 : io_pages[io_buffers_len++] = BufferGetBlock(buffers[i]);
1928 : : }
1929 : :
1930 : : /* get a reference to wait for in WaitReadBuffers() */
160 andres@anarazel.de 1931 : 1245537 : pgaio_io_get_wref(ioh, &operation->io_wref);
1932 : :
1933 : : /* provide the list of buffers to the completion callbacks */
1934 : 1245537 : pgaio_io_set_handle_data_32(ioh, (uint32 *) io_buffers, io_buffers_len);
1935 : :
1936 [ + + ]: 1245537 : pgaio_io_register_callbacks(ioh,
1937 : : persistence == RELPERSISTENCE_TEMP ?
1938 : : PGAIO_HCB_LOCAL_BUFFER_READV :
1939 : : PGAIO_HCB_SHARED_BUFFER_READV,
1940 : : flags);
1941 : :
1942 : 1245537 : pgaio_io_set_flag(ioh, ioh_flags);
1943 : :
1944 : : /* ---
1945 : : * Even though we're trying to issue IO asynchronously, track the time
1946 : : * in smgrstartreadv():
1947 : : * - if io_method == IOMETHOD_SYNC, we will always perform the IO
1948 : : * immediately
1949 : : * - the io method might not support the IO (e.g. worker IO for a temp
1950 : : * table)
1951 : : * ---
1952 : : */
1953 : 1245537 : io_start = pgstat_prepare_io_time(track_io_timing);
1954 : 1245537 : smgrstartreadv(ioh, operation->smgr, forknum,
1955 : : blocknum + nblocks_done,
1956 : : io_pages, io_buffers_len);
1957 : 1245522 : pgstat_count_io_op_time(io_object, io_context, IOOP_READ,
1958 : 1245522 : io_start, 1, io_buffers_len * BLCKSZ);
1959 : :
170 1960 [ + + ]: 1245522 : if (persistence == RELPERSISTENCE_TEMP)
1961 : 1793 : pgBufferUsage.local_blks_read += io_buffers_len;
1962 : : else
1963 : 1243729 : pgBufferUsage.shared_blks_read += io_buffers_len;
1964 : :
1965 : : /*
1966 : : * Track vacuum cost when issuing IO, not after waiting for it.
1967 : : * Otherwise we could end up issuing a lot of IO in a short timespan,
1968 : : * despite a low cost limit.
1969 : : */
521 tmunro@postgresql.or 1970 [ + + ]: 1245522 : if (VacuumCostActive)
1971 : 15653 : VacuumCostBalance += VacuumCostPageMiss * io_buffers_len;
1972 : :
160 andres@anarazel.de 1973 : 1245522 : *nblocks_progress = io_buffers_len;
1974 : 1245522 : did_start_io = true;
1975 : : }
1976 : :
1977 : 1247466 : return did_start_io;
1978 : : }
1979 : :
1980 : : /*
1981 : : * BufferAlloc -- subroutine for PinBufferForBlock. Handles lookup of a shared
1982 : : * buffer. If no buffer exists already, selects a replacement victim and
 1983 : : * evicts the old page, but does NOT read in the new page.
1984 : : *
1985 : : * "strategy" can be a buffer replacement strategy object, or NULL for
1986 : : * the default strategy. The selected buffer's usage_count is advanced when
1987 : : * using the default strategy, but otherwise possibly not (see PinBuffer).
1988 : : *
1989 : : * The returned buffer is pinned and is already marked as holding the
1990 : : * desired page. If it already did have the desired page, *foundPtr is
1991 : : * set true. Otherwise, *foundPtr is set false.
1992 : : *
1993 : : * io_context is passed as an output parameter to avoid calling
1994 : : * IOContextForStrategy() when there is a shared buffers hit and no IO
1995 : : * statistics need be captured.
1996 : : *
1997 : : * No locks are held either at entry or exit.
1998 : : */
1999 : : static pg_attribute_always_inline BufferDesc *
5365 rhaas@postgresql.org 2000 : 55331976 : BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
2001 : : BlockNumber blockNum,
2002 : : BufferAccessStrategy strategy,
2003 : : bool *foundPtr, IOContext io_context)
2004 : : {
2005 : : BufferTag newTag; /* identity of requested block */
2006 : : uint32 newHash; /* hash value for newTag */
2007 : : LWLock *newPartitionLock; /* buffer partition lock for it */
2008 : : int existing_buf_id;
2009 : : Buffer victim_buffer;
2010 : : BufferDesc *victim_buf_hdr;
2011 : : uint32 victim_buf_state;
2012 : :
2013 : : /* Make sure we will have room to remember the buffer pin */
668 heikki.linnakangas@i 2014 : 55331976 : ResourceOwnerEnlarge(CurrentResourceOwner);
2015 : 55331976 : ReservePrivateRefCountEntry();
2016 : :
2017 : : /* create a tag so we can lookup the buffer */
1137 rhaas@postgresql.org 2018 : 55331976 : InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
2019 : :
2020 : : /* determine its hash code and partition lock ID */
6985 tgl@sss.pgh.pa.us 2021 : 55331976 : newHash = BufTableHashCode(&newTag);
2022 : 55331976 : newPartitionLock = BufMappingPartitionLock(newHash);
2023 : :
2024 : : /* see if the block is in the buffer pool already */
2025 : 55331976 : LWLockAcquire(newPartitionLock, LW_SHARED);
885 andres@anarazel.de 2026 : 55331976 : existing_buf_id = BufTableLookup(&newTag, newHash);
2027 [ + + ]: 55331976 : if (existing_buf_id >= 0)
2028 : : {
2029 : : BufferDesc *buf;
2030 : : bool valid;
2031 : :
2032 : : /*
2033 : : * Found it. Now, pin the buffer so no one can steal it from the
2034 : : * buffer pool, and check to see if the correct data has been loaded
2035 : : * into the buffer.
2036 : : */
2037 : 53659854 : buf = GetBufferDescriptor(existing_buf_id);
2038 : :
6674 tgl@sss.pgh.pa.us 2039 : 53659854 : valid = PinBuffer(buf, strategy);
2040 : :
2041 : : /* Can release the mapping lock as soon as we've pinned it */
6985 2042 : 53659854 : LWLockRelease(newPartitionLock);
2043 : :
2943 peter_e@gmx.net 2044 : 53659854 : *foundPtr = true;
2045 : :
7491 tgl@sss.pgh.pa.us 2046 [ + + ]: 53659854 : if (!valid)
2047 : : {
2048 : : /*
2049 : : * We can only get here if (a) someone else is still reading in
2050 : : * the page, (b) a previous read attempt failed, or (c) someone
2051 : : * called StartReadBuffers() but not yet WaitReadBuffers().
2052 : : */
521 tmunro@postgresql.or 2053 : 1797 : *foundPtr = false;
2054 : : }
2055 : :
9867 bruce@momjian.us 2056 : 53659854 : return buf;
2057 : : }
2058 : :
2059 : : /*
2060 : : * Didn't find it in the buffer pool. We'll have to initialize a new
2061 : : * buffer. Remember to unlock the mapping lock while doing the work.
2062 : : */
6985 tgl@sss.pgh.pa.us 2063 : 1672122 : LWLockRelease(newPartitionLock);
2064 : :
2065 : : /*
 2066 : : * Acquire a victim buffer. Somebody else might try to do the same, as we
2067 : : * don't hold any conflicting locks. If so we'll have to undo our work
2068 : : * later.
2069 : : */
885 andres@anarazel.de 2070 : 1672122 : victim_buffer = GetVictimBuffer(strategy, io_context);
2071 : 1672122 : victim_buf_hdr = GetBufferDescriptor(victim_buffer - 1);
2072 : :
2073 : : /*
2074 : : * Try to make a hashtable entry for the buffer under its new tag. If
2075 : : * somebody else inserted another buffer for the tag, we'll release the
2076 : : * victim buffer we acquired and use the already inserted one.
2077 : : */
2078 : 1672122 : LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
2079 : 1672122 : existing_buf_id = BufTableInsert(&newTag, newHash, victim_buf_hdr->buf_id);
2080 [ + + ]: 1672122 : if (existing_buf_id >= 0)
2081 : : {
2082 : : BufferDesc *existing_buf_hdr;
2083 : : bool valid;
2084 : :
2085 : : /*
2086 : : * Got a collision. Someone has already done what we were about to do.
2087 : : * We'll just handle this as if it were found in the buffer pool in
2088 : : * the first place. First, give up the buffer we were planning to
2089 : : * use.
2090 : : *
2091 : : * We could do this after releasing the partition lock, but then we'd
2092 : : * have to call ResourceOwnerEnlarge() & ReservePrivateRefCountEntry()
2093 : : * before acquiring the lock, for the rare case of such a collision.
2094 : : */
2095 : 436 : UnpinBuffer(victim_buf_hdr);
2096 : :
2097 : : /* remaining code should match code at top of routine */
2098 : :
2099 : 436 : existing_buf_hdr = GetBufferDescriptor(existing_buf_id);
2100 : :
2101 : 436 : valid = PinBuffer(existing_buf_hdr, strategy);
2102 : :
2103 : : /* Can release the mapping lock as soon as we've pinned it */
2104 : 436 : LWLockRelease(newPartitionLock);
2105 : :
2106 : 436 : *foundPtr = true;
2107 : :
2108 [ + + ]: 436 : if (!valid)
2109 : : {
2110 : : /*
2111 : : * We can only get here if (a) someone else is still reading in
2112 : : * the page, (b) a previous read attempt failed, or (c) someone
2113 : : * called StartReadBuffers() but not yet WaitReadBuffers().
2114 : : */
521 tmunro@postgresql.or 2115 : 198 : *foundPtr = false;
2116 : : }
2117 : :
885 andres@anarazel.de 2118 : 436 : return existing_buf_hdr;
2119 : : }
2120 : :
2121 : : /*
2122 : : * Need to lock the buffer header too in order to change its tag.
2123 : : */
2124 : 1671686 : victim_buf_state = LockBufHdr(victim_buf_hdr);
2125 : :
2126 : : /* some sanity checks while we hold the buffer header lock */
2127 [ - + ]: 1671686 : Assert(BUF_STATE_GET_REFCOUNT(victim_buf_state) == 1);
2128 [ - + ]: 1671686 : Assert(!(victim_buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY | BM_IO_IN_PROGRESS)));
2129 : :
2130 : 1671686 : victim_buf_hdr->tag = newTag;
2131 : :
2132 : : /*
2133 : : * Make sure BM_PERMANENT is set for buffers that must be written at every
2134 : : * checkpoint. Unlogged buffers only need to be written at shutdown
2135 : : * checkpoints, except for their "init" forks, which need to be treated
2136 : : * just like permanent relations.
2137 : : */
2138 : 1671686 : victim_buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
3098 rhaas@postgresql.org 2139 [ + + - + ]: 1671686 : if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
885 andres@anarazel.de 2140 : 1671346 : victim_buf_state |= BM_PERMANENT;
2141 : :
2142 : 1671686 : UnlockBufHdr(victim_buf_hdr, victim_buf_state);
2143 : :
6985 tgl@sss.pgh.pa.us 2144 : 1671686 : LWLockRelease(newPartitionLock);
2145 : :
2146 : : /*
2147 : : * Buffer contents are currently invalid.
2148 : : */
521 tmunro@postgresql.or 2149 : 1671686 : *foundPtr = false;
2150 : :
885 andres@anarazel.de 2151 : 1671686 : return victim_buf_hdr;
2152 : : }
2153 : :
2154 : : /*
2155 : : * InvalidateBuffer -- mark a shared buffer invalid.
2156 : : *
2157 : : * The buffer header spinlock must be held at entry. We drop it before
2158 : : * returning. (This is sane because the caller must have locked the
2159 : : * buffer in order to be sure it should be dropped.)
2160 : : *
2161 : : * This is used only in contexts such as dropping a relation. We assume
2162 : : * that no other backend could possibly be interested in using the page,
2163 : : * so the only reason the buffer might be pinned is if someone else is
2164 : : * trying to write it out. We have to let them finish before we can
2165 : : * reclaim the buffer.
2166 : : *
2167 : : * The buffer could get reclaimed by someone else while we are waiting
2168 : : * to acquire the necessary locks; if so, don't mess it up.
2169 : : */
2170 : : static void
3582 rhaas@postgresql.org 2171 : 106502 : InvalidateBuffer(BufferDesc *buf)
2172 : : {
2173 : : BufferTag oldTag;
2174 : : uint32 oldHash; /* hash value for oldTag */
2175 : : LWLock *oldPartitionLock; /* buffer partition lock for it */
2176 : : uint32 oldFlags;
2177 : : uint32 buf_state;
2178 : :
2179 : : /* Save the original buffer tag before dropping the spinlock */
7491 tgl@sss.pgh.pa.us 2180 : 106502 : oldTag = buf->tag;
2181 : :
3436 andres@anarazel.de 2182 : 106502 : buf_state = pg_atomic_read_u32(&buf->state);
2183 [ - + ]: 106502 : Assert(buf_state & BM_LOCKED);
2184 : 106502 : UnlockBufHdr(buf, buf_state);
2185 : :
2186 : : /*
2187 : : * Need to compute the old tag's hashcode and partition lock ID. XXX is it
2188 : : * worth storing the hashcode in BufferDesc so we need not recompute it
2189 : : * here? Probably not.
2190 : : */
6985 tgl@sss.pgh.pa.us 2191 : 106502 : oldHash = BufTableHashCode(&oldTag);
2192 : 106502 : oldPartitionLock = BufMappingPartitionLock(oldHash);
2193 : :
7491 2194 : 106503 : retry:
2195 : :
2196 : : /*
2197 : : * Acquire exclusive mapping lock in preparation for changing the buffer's
2198 : : * association.
2199 : : */
6985 2200 : 106503 : LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
2201 : :
2202 : : /* Re-lock the buffer header */
3436 andres@anarazel.de 2203 : 106503 : buf_state = LockBufHdr(buf);
2204 : :
2205 : : /* If it's changed while we were waiting for lock, do nothing */
1137 rhaas@postgresql.org 2206 [ - + ]: 106503 : if (!BufferTagsEqual(&buf->tag, &oldTag))
2207 : : {
3436 andres@anarazel.de 2208 :LBC (1) : UnlockBufHdr(buf, buf_state);
6985 tgl@sss.pgh.pa.us 2209 : (1) : LWLockRelease(oldPartitionLock);
7491 2210 : (1) : return;
2211 : : }
2212 : :
2213 : : /*
2214 : : * We assume the reason for it to be pinned is that either we were
2215 : : * asynchronously reading the page in before erroring out or someone else
2216 : : * is flushing the page out. Wait for the IO to finish. (This could be
2217 : : * an infinite loop if the refcount is messed up... it would be nice to
 2218 : : * time out after a while, but there seems no way to be sure how many loops
2219 : : * may be needed. Note that if the other guy has pinned the buffer but
2220 : : * not yet done StartBufferIO, WaitIO will fall through and we'll
2221 : : * effectively be busy-looping here.)
2222 : : */
3436 andres@anarazel.de 2223 [ + + ]:CBC 106503 : if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
2224 : : {
2225 : 1 : UnlockBufHdr(buf, buf_state);
6985 tgl@sss.pgh.pa.us 2226 : 1 : LWLockRelease(oldPartitionLock);
2227 : : /* safety check: should definitely not be our *own* pin */
3678 andres@anarazel.de 2228 [ - + ]: 1 : if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
7477 tgl@sss.pgh.pa.us 2229 [ # # ]:UBC 0 : elog(ERROR, "buffer is pinned in InvalidateBuffer");
7491 tgl@sss.pgh.pa.us 2230 :CBC 1 : WaitIO(buf);
2231 : 1 : goto retry;
2232 : : }
2233 : :
2234 : : /*
2235 : : * Clear out the buffer's tag and flags. We must do this to ensure that
2236 : : * linear scans of the buffer array don't think the buffer is valid.
2237 : : */
3436 andres@anarazel.de 2238 : 106502 : oldFlags = buf_state & BUF_FLAG_MASK;
1137 rhaas@postgresql.org 2239 : 106502 : ClearBufferTag(&buf->tag);
3436 andres@anarazel.de 2240 : 106502 : buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
2241 : 106502 : UnlockBufHdr(buf, buf_state);
2242 : :
2243 : : /*
2244 : : * Remove the buffer from the lookup hashtable, if it was in there.
2245 : : */
7491 tgl@sss.pgh.pa.us 2246 [ + - ]: 106502 : if (oldFlags & BM_TAG_VALID)
6985 2247 : 106502 : BufTableDelete(&oldTag, oldHash);
2248 : :
2249 : : /*
2250 : : * Done with mapping lock.
2251 : : */
2252 : 106502 : LWLockRelease(oldPartitionLock);
2253 : : }
2254 : :
2255 : : /*
2256 : : * Helper routine for GetVictimBuffer()
2257 : : *
2258 : : * Needs to be called on a buffer with a valid tag, pinned, but without the
2259 : : * buffer header spinlock held.
2260 : : *
2261 : : * Returns true if the buffer can be reused, in which case the buffer is only
2262 : : * pinned by this backend and marked as invalid, false otherwise.
2263 : : */
2264 : : static bool
885 andres@anarazel.de 2265 : 1191372 : InvalidateVictimBuffer(BufferDesc *buf_hdr)
2266 : : {
2267 : : uint32 buf_state;
2268 : : uint32 hash;
2269 : : LWLock *partition_lock;
2270 : : BufferTag tag;
2271 : :
2272 [ - + ]: 1191372 : Assert(GetPrivateRefCount(BufferDescriptorGetBuffer(buf_hdr)) == 1);
2273 : :
2274 : : /* have buffer pinned, so it's safe to read tag without lock */
2275 : 1191372 : tag = buf_hdr->tag;
2276 : :
2277 : 1191372 : hash = BufTableHashCode(&tag);
2278 : 1191372 : partition_lock = BufMappingPartitionLock(hash);
2279 : :
2280 : 1191372 : LWLockAcquire(partition_lock, LW_EXCLUSIVE);
2281 : :
2282 : : /* lock the buffer header */
2283 : 1191372 : buf_state = LockBufHdr(buf_hdr);
2284 : :
2285 : : /*
 2286 : : * We have the buffer pinned, so nobody else should have been able to unset
2287 : : * this concurrently.
2288 : : */
2289 [ - + ]: 1191372 : Assert(buf_state & BM_TAG_VALID);
2290 [ - + ]: 1191372 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2291 [ - + ]: 1191372 : Assert(BufferTagsEqual(&buf_hdr->tag, &tag));
2292 : :
2293 : : /*
2294 : : * If somebody else pinned the buffer since, or even worse, dirtied it,
2295 : : * give up on this buffer: It's clearly in use.
2296 : : */
2297 [ + + + + ]: 1191372 : if (BUF_STATE_GET_REFCOUNT(buf_state) != 1 || (buf_state & BM_DIRTY))
2298 : : {
2299 [ - + ]: 449 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2300 : :
2301 : 449 : UnlockBufHdr(buf_hdr, buf_state);
2302 : 449 : LWLockRelease(partition_lock);
2303 : :
2304 : 449 : return false;
2305 : : }
2306 : :
2307 : : /*
2308 : : * Clear out the buffer's tag and flags and usagecount. This is not
2309 : : * strictly required, as BM_TAG_VALID/BM_VALID needs to be checked before
2310 : : * doing anything with the buffer. But currently it's beneficial, as the
 2311 : : * cheaper pre-check in several linear scans of shared buffers uses the
2312 : : * tag (see e.g. FlushDatabaseBuffers()).
2313 : : */
2314 : 1190923 : ClearBufferTag(&buf_hdr->tag);
2315 : 1190923 : buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
2316 : 1190923 : UnlockBufHdr(buf_hdr, buf_state);
2317 : :
2318 [ - + ]: 1190923 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2319 : :
2320 : : /* finally delete buffer from the buffer mapping table */
2321 : 1190923 : BufTableDelete(&tag, hash);
2322 : :
2323 : 1190923 : LWLockRelease(partition_lock);
2324 : :
2325 [ - + ]: 1190923 : Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
2326 [ - + ]: 1190923 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2327 [ - + ]: 1190923 : Assert(BUF_STATE_GET_REFCOUNT(pg_atomic_read_u32(&buf_hdr->state)) > 0);
2328 : :
2329 : 1190923 : return true;
2330 : : }
2331 : :
2332 : : static Buffer
2333 : 1888651 : GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
2334 : : {
2335 : : BufferDesc *buf_hdr;
2336 : : Buffer buf;
2337 : : uint32 buf_state;
2338 : : bool from_ring;
2339 : :
2340 : : /*
2341 : : * Ensure, while the spinlock's not yet held, that there's a free refcount
2342 : : * entry, and a resource owner slot for the pin.
2343 : : */
2344 : 1888651 : ReservePrivateRefCountEntry();
668 heikki.linnakangas@i 2345 : 1888651 : ResourceOwnerEnlarge(CurrentResourceOwner);
2346 : :
2347 : : /* we return here if a prospective victim buffer gets used concurrently */
885 andres@anarazel.de 2348 : 6797 : again:
2349 : :
2350 : : /*
2351 : : * Select a victim buffer. The buffer is returned with its header
2352 : : * spinlock still held!
2353 : : */
2354 : 1895448 : buf_hdr = StrategyGetBuffer(strategy, &buf_state, &from_ring);
2355 : 1895448 : buf = BufferDescriptorGetBuffer(buf_hdr);
2356 : :
2357 [ - + ]: 1895448 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
2358 : :
2359 : : /* Pin the buffer and then release the buffer spinlock */
2360 : 1895448 : PinBuffer_Locked(buf_hdr);
2361 : :
2362 : : /*
2363 : : * We shouldn't have any other pins for this buffer.
2364 : : */
2365 : 1895448 : CheckBufferIsPinnedOnce(buf);
2366 : :
2367 : : /*
2368 : : * If the buffer was dirty, try to write it out. There is a race
2369 : : * condition here, in that someone might dirty it after we released the
2370 : : * buffer header lock above, or even while we are writing it out (since
2371 : : * our share-lock won't prevent hint-bit updates). We will recheck the
2372 : : * dirty bit after re-locking the buffer header.
2373 : : */
2374 [ + + ]: 1895448 : if (buf_state & BM_DIRTY)
2375 : : {
2376 : : LWLock *content_lock;
2377 : :
2378 [ - + ]: 261623 : Assert(buf_state & BM_TAG_VALID);
2379 [ - + ]: 261623 : Assert(buf_state & BM_VALID);
2380 : :
2381 : : /*
2382 : : * We need a share-lock on the buffer contents to write it out (else
2383 : : * we might write invalid data, eg because someone else is compacting
2384 : : * the page contents while we write). We must use a conditional lock
2385 : : * acquisition here to avoid deadlock. Even though the buffer was not
2386 : : * pinned (and therefore surely not locked) when StrategyGetBuffer
2387 : : * returned it, someone else could have pinned and exclusive-locked it
2388 : : * by the time we get here. If we try to get the lock unconditionally,
2389 : : * we'd block waiting for them; if they later block waiting for us,
2390 : : * deadlock ensues. (This has been observed to happen when two
2391 : : * backends are both trying to split btree index pages, and the second
2392 : : * one just happens to be trying to split the page the first one got
2393 : : * from StrategyGetBuffer.)
2394 : : */
2395 : 261623 : content_lock = BufferDescriptorGetContentLock(buf_hdr);
2396 [ - + ]: 261623 : if (!LWLockConditionalAcquire(content_lock, LW_SHARED))
2397 : : {
2398 : : /*
2399 : : * Someone else has locked the buffer, so give it up and loop back
2400 : : * to get another one.
2401 : : */
885 andres@anarazel.de 2402 :UBC 0 : UnpinBuffer(buf_hdr);
2403 : 0 : goto again;
2404 : : }
2405 : :
2406 : : /*
2407 : : * If using a nondefault strategy, and writing the buffer would
2408 : : * require a WAL flush, let the strategy decide whether to go ahead
2409 : : * and write/reuse the buffer or to choose another victim. We need a
2410 : : * lock to inspect the page LSN, so this can't be done inside
2411 : : * StrategyGetBuffer.
2412 : : */
885 andres@anarazel.de 2413 [ + + ]:CBC 261623 : if (strategy != NULL)
2414 : : {
2415 : : XLogRecPtr lsn;
2416 : :
2417 : : /* Read the LSN while holding buffer header lock */
2418 : 75029 : buf_state = LockBufHdr(buf_hdr);
2419 : 75029 : lsn = BufferGetLSN(buf_hdr);
2420 : 75029 : UnlockBufHdr(buf_hdr, buf_state);
2421 : :
2422 [ + + ]: 75029 : if (XLogNeedsFlush(lsn)
2423 [ + + ]: 9417 : && StrategyRejectBuffer(strategy, buf_hdr, from_ring))
2424 : : {
2425 : 6348 : LWLockRelease(content_lock);
2426 : 6348 : UnpinBuffer(buf_hdr);
2427 : 6348 : goto again;
2428 : : }
2429 : : }
2430 : :
2431 : : /* OK, do the I/O */
2432 : 255275 : FlushBuffer(buf_hdr, NULL, IOOBJECT_RELATION, io_context);
2433 : 255275 : LWLockRelease(content_lock);
2434 : :
843 2435 : 255275 : ScheduleBufferTagForWriteback(&BackendWritebackContext, io_context,
2436 : : &buf_hdr->tag);
2437 : : }
2438 : :
2439 : :
885 2440 [ + + ]: 1889100 : if (buf_state & BM_VALID)
2441 : : {
2442 : : /*
2443 : : * When a BufferAccessStrategy is in use, blocks evicted from shared
2444 : : * buffers are counted as IOOP_EVICT in the corresponding context
2445 : : * (e.g. IOCONTEXT_BULKWRITE). Shared buffers are evicted by a
2446 : : * strategy in two cases: 1) while initially claiming buffers for the
2447 : : * strategy ring 2) to replace an existing strategy ring buffer
2448 : : * because it is pinned or in use and cannot be reused.
2449 : : *
2450 : : * Blocks evicted from buffers already in the strategy ring are
2451 : : * counted as IOOP_REUSE in the corresponding strategy context.
2452 : : *
2453 : : * At this point, we can accurately count evictions and reuses,
2454 : : * because we have successfully claimed the valid buffer. Previously,
2455 : : * we may have been forced to release the buffer due to concurrent
2456 : : * pinners or erroring out.
2457 : : */
2458 : 1189187 : pgstat_count_io_op(IOOBJECT_RELATION, io_context,
235 michael@paquier.xyz 2459 [ + + ]: 1189187 : from_ring ? IOOP_REUSE : IOOP_EVICT, 1, 0);
2460 : : }
2461 : :
2462 : : /*
2463 : : * If the buffer has an entry in the buffer mapping table, delete it. This
2464 : : * can fail because another backend could have pinned or dirtied the
2465 : : * buffer.
2466 : : */
885 andres@anarazel.de 2467 [ + + + + ]: 1889100 : if ((buf_state & BM_TAG_VALID) && !InvalidateVictimBuffer(buf_hdr))
2468 : : {
2469 : 449 : UnpinBuffer(buf_hdr);
2470 : 449 : goto again;
2471 : : }
2472 : :
2473 : : /* a final set of sanity checks */
2474 : : #ifdef USE_ASSERT_CHECKING
2475 : 1888651 : buf_state = pg_atomic_read_u32(&buf_hdr->state);
2476 : :
2477 [ - + ]: 1888651 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
2478 [ - + ]: 1888651 : Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
2479 : :
2480 : 1888651 : CheckBufferIsPinnedOnce(buf);
2481 : : #endif
2482 : :
2483 : 1888651 : return buf;
2484 : : }
2485 : :
2486 : : /*
2487 : : * Return the maximum number of buffers that a backend should try to pin once,
 2488 : : * Return the maximum number of buffers that a backend should try to pin at once,
2489 : : * GetAdditionalPinLimit() could ever return. Note that it may be zero on a
2490 : : * system with a very small buffer pool relative to max_connections.
2491 : : */
2492 : : uint32
176 tmunro@postgresql.or 2493 : 507881 : GetPinLimit(void)
2494 : : {
2495 : 507881 : return MaxProportionalPins;
2496 : : }
2497 : :
2498 : : /*
2499 : : * Return the maximum number of additional buffers that this backend should
2500 : : * pin if it wants to stay under the per-backend limit, considering the number
2501 : : * of buffers it has already pinned. Unlike LimitAdditionalPins(), the limit
2502 : : * return by this function can be zero.
2503 : : */
2504 : : uint32
2505 : 2868500 : GetAdditionalPinLimit(void)
2506 : : {
2507 : : uint32 estimated_pins_held;
2508 : :
2509 : : /*
2510 : : * We get the number of "overflowed" pins for free, but don't know the
2511 : : * number of pins in PrivateRefCountArray. The cost of calculating that
2512 : : * exactly doesn't seem worth it, so just assume the max.
2513 : : */
2514 : 2868500 : estimated_pins_held = PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
2515 : :
2516 : : /* Is this backend already holding more than its fair share? */
2517 [ + + ]: 2868500 : if (estimated_pins_held > MaxProportionalPins)
2518 : 1205741 : return 0;
2519 : :
2520 : 1662759 : return MaxProportionalPins - estimated_pins_held;
2521 : : }
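 : :
 : : /*
 : :  * Worked example (illustrative numbers): if MaxProportionalPins is 100,
 : :  * PrivateRefCountOverflowed is 30, and REFCOUNT_ARRAY_ENTRIES is assumed
 : :  * to be 8, then estimated_pins_held is 38 and the caller may acquire up
 : :  * to 62 additional pins. Once the estimate reaches or exceeds 100, the
 : :  * function returns 0.
 : :  */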
2522 : :
2523 : : /*
2524 : : * Limit the number of pins a batch operation may additionally acquire, to
2525 : : * avoid running out of pinnable buffers.
2526 : : *
2527 : : * One additional pin is always allowed, on the assumption that the operation
2528 : : * requires at least one to make progress.
2529 : : */
2530 : : void
885 andres@anarazel.de 2531 : 195372 : LimitAdditionalPins(uint32 *additional_pins)
2532 : : {
2533 : : uint32 limit;
2534 : :
2535 [ + + ]: 195372 : if (*additional_pins <= 1)
2536 : 185448 : return;
2537 : :
176 tmunro@postgresql.or 2538 : 9924 : limit = GetAdditionalPinLimit();
2539 : 9924 : limit = Max(limit, 1);
2540 [ + + ]: 9924 : if (limit < *additional_pins)
2541 : 5479 : *additional_pins = limit;
2542 : : }
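 : :
 : : /*
 : :  * Worked example (illustrative numbers): a batch operation requesting 64
 : :  * additional pins is clamped to 10 if GetAdditionalPinLimit() returns 10,
 : :  * and to 1 if it returns 0, since one pin is always allowed so the
 : :  * operation can make progress. Requests of 0 or 1 are returned unchanged.
 : :  */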
2543 : :
2544 : : /*
2545 : : * Logic shared between ExtendBufferedRelBy(), ExtendBufferedRelTo(). Just to
2546 : : * avoid duplicating the tracing and relpersistence related logic.
2547 : : */
2548 : : static BlockNumber
745 2549 : 207820 : ExtendBufferedRelCommon(BufferManagerRelation bmr,
2550 : : ForkNumber fork,
2551 : : BufferAccessStrategy strategy,
2552 : : uint32 flags,
2553 : : uint32 extend_by,
2554 : : BlockNumber extend_upto,
2555 : : Buffer *buffers,
2556 : : uint32 *extended_by)
2557 : : {
2558 : : BlockNumber first_block;
2559 : :
2560 : : TRACE_POSTGRESQL_BUFFER_EXTEND_START(fork,
2561 : : bmr.smgr->smgr_rlocator.locator.spcOid,
2562 : : bmr.smgr->smgr_rlocator.locator.dbOid,
2563 : : bmr.smgr->smgr_rlocator.locator.relNumber,
2564 : : bmr.smgr->smgr_rlocator.backend,
2565 : : extend_by);
2566 : :
2567 [ + + ]: 207820 : if (bmr.relpersistence == RELPERSISTENCE_TEMP)
2568 : 12448 : first_block = ExtendBufferedRelLocal(bmr, fork, flags,
2569 : : extend_by, extend_upto,
2570 : : buffers, &extend_by);
2571 : : else
2572 : 195372 : first_block = ExtendBufferedRelShared(bmr, fork, strategy, flags,
2573 : : extend_by, extend_upto,
2574 : : buffers, &extend_by);
885 andres@anarazel.de 2575 : 207820 : *extended_by = extend_by;
2576 : :
2577 : : TRACE_POSTGRESQL_BUFFER_EXTEND_DONE(fork,
2578 : : bmr.smgr->smgr_rlocator.locator.spcOid,
2579 : : bmr.smgr->smgr_rlocator.locator.dbOid,
2580 : : bmr.smgr->smgr_rlocator.locator.relNumber,
2581 : : bmr.smgr->smgr_rlocator.backend,
2582 : : *extended_by,
2583 : : first_block);
2584 : :
2585 : 207820 : return first_block;
2586 : : }
2587 : :
2588 : : /*
2589 : : * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
2590 : : * shared buffers.
2591 : : */
2592 : : static BlockNumber
745 tmunro@postgresql.or 2593 : 195372 : ExtendBufferedRelShared(BufferManagerRelation bmr,
2594 : : ForkNumber fork,
2595 : : BufferAccessStrategy strategy,
2596 : : uint32 flags,
2597 : : uint32 extend_by,
2598 : : BlockNumber extend_upto,
2599 : : Buffer *buffers,
2600 : : uint32 *extended_by)
2601 : : {
2602 : : BlockNumber first_block;
885 andres@anarazel.de 2603 : 195372 : IOContext io_context = IOContextForStrategy(strategy);
2604 : : instr_time io_start;
2605 : :
2606 : 195372 : LimitAdditionalPins(&extend_by);
2607 : :
2608 : : /*
2609 : : * Acquire victim buffers for extension without holding extension lock.
 2610 : : * Acquire victim buffers for extension without holding the extension lock.
2611 : : * relation, particularly when doing so requires WAL flushes. Zeroing out
2612 : : * the buffers is also quite expensive, so do that before holding the
2613 : : * extension lock as well.
2614 : : *
2615 : : * These pages are pinned by us and not valid. While we hold the pin they
2616 : : * can't be acquired as victim buffers by another backend.
2617 : : */
2618 [ + + ]: 411901 : for (uint32 i = 0; i < extend_by; i++)
2619 : : {
2620 : : Block buf_block;
2621 : :
2622 : 216529 : buffers[i] = GetVictimBuffer(strategy, io_context);
2623 : 216529 : buf_block = BufHdrGetBlock(GetBufferDescriptor(buffers[i] - 1));
2624 : :
2625 : : /* new buffers are zero-filled */
206 peter@eisentraut.org 2626 [ + - + - : 216529 : MemSet(buf_block, 0, BLCKSZ);
+ - - + -
- ]
2627 : : }
2628 : :
2629 : : /*
2630 : : * Lock relation against concurrent extensions, unless requested not to.
2631 : : *
2632 : : * We use the same extension lock for all forks. That's unnecessarily
2633 : : * restrictive, but currently extensions for forks don't happen often
2634 : : * enough to make it worth locking more granularly.
2635 : : *
2636 : : * Note that another backend might have extended the relation by the time
2637 : : * we get the lock.
2638 : : */
885 andres@anarazel.de 2639 [ + + ]: 195372 : if (!(flags & EB_SKIP_EXTENSION_LOCK))
745 tmunro@postgresql.or 2640 : 145869 : LockRelationForExtension(bmr.rel, ExclusiveLock);
2641 : :
2642 : : /*
2643 : : * If requested, invalidate size cache, so that smgrnblocks asks the
2644 : : * kernel.
2645 : : */
885 andres@anarazel.de 2646 [ + + ]: 195372 : if (flags & EB_CLEAR_SIZE_CACHE)
745 tmunro@postgresql.or 2647 : 7713 : bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
2648 : :
2649 : 195372 : first_block = smgrnblocks(bmr.smgr, fork);
2650 : :
2651 : : /*
2652 : : * Now that we have the accurate relation size, check if the caller wants
 2653 : : * us to extend only up to a specific size. If there were concurrent
2654 : : * extensions, we might have acquired too many buffers and need to release
2655 : : * them.
2656 : : */
885 andres@anarazel.de 2657 [ + + ]: 195372 : if (extend_upto != InvalidBlockNumber)
2658 : : {
2659 : 51329 : uint32 orig_extend_by = extend_by;
2660 : :
2661 [ - + ]: 51329 : if (first_block > extend_upto)
885 andres@anarazel.de 2662 :UBC 0 : extend_by = 0;
885 andres@anarazel.de 2663 [ + + ]:CBC 51329 : else if ((uint64) first_block + extend_by > extend_upto)
2664 : 4 : extend_by = extend_upto - first_block;
2665 : :
2666 [ + + ]: 51335 : for (uint32 i = extend_by; i < orig_extend_by; i++)
2667 : : {
2668 : 6 : BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1);
2669 : :
2670 : 6 : UnpinBuffer(buf_hdr);
2671 : : }
2672 : :
2673 [ + + ]: 51329 : if (extend_by == 0)
2674 : : {
2675 [ + - ]: 4 : if (!(flags & EB_SKIP_EXTENSION_LOCK))
745 tmunro@postgresql.or 2676 : 4 : UnlockRelationForExtension(bmr.rel, ExclusiveLock);
885 andres@anarazel.de 2677 : 4 : *extended_by = extend_by;
2678 : 4 : return first_block;
2679 : : }
2680 : : }
2681 : :
2682 : : /* Fail if relation is already at maximum possible length */
2683 [ - + ]: 195368 : if ((uint64) first_block + extend_by >= MaxBlockNumber)
885 andres@anarazel.de 2684 [ # # ]:UBC 0 : ereport(ERROR,
2685 : : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2686 : : errmsg("cannot extend relation %s beyond %u blocks",
2687 : : relpath(bmr.smgr->smgr_rlocator, fork).str,
2688 : : MaxBlockNumber)));
2689 : :
2690 : : /*
 2691 : : * Insert buffers into the buffer table and mark them as IO_IN_PROGRESS.
2692 : : *
2693 : : * This needs to happen before we extend the relation, because as soon as
2694 : : * we do, other backends can start to read in those pages.
2695 : : */
718 peter@eisentraut.org 2696 [ + + ]:CBC 411891 : for (uint32 i = 0; i < extend_by; i++)
2697 : : {
885 andres@anarazel.de 2698 : 216523 : Buffer victim_buf = buffers[i];
2699 : 216523 : BufferDesc *victim_buf_hdr = GetBufferDescriptor(victim_buf - 1);
2700 : : BufferTag tag;
2701 : : uint32 hash;
2702 : : LWLock *partition_lock;
2703 : : int existing_id;
2704 : :
2705 : : /* in case we need to pin an existing buffer below */
668 heikki.linnakangas@i 2706 : 216523 : ResourceOwnerEnlarge(CurrentResourceOwner);
2707 : 216523 : ReservePrivateRefCountEntry();
2708 : :
745 tmunro@postgresql.or 2709 : 216523 : InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);
885 andres@anarazel.de 2710 : 216523 : hash = BufTableHashCode(&tag);
2711 : 216523 : partition_lock = BufMappingPartitionLock(hash);
2712 : :
2713 : 216523 : LWLockAcquire(partition_lock, LW_EXCLUSIVE);
2714 : :
2715 : 216523 : existing_id = BufTableInsert(&tag, hash, victim_buf_hdr->buf_id);
2716 : :
2717 : : /*
2718 : : * We get here only in the corner case where we are trying to extend
2719 : : * the relation but we found a pre-existing buffer. This can happen
2720 : : * because a prior attempt at extending the relation failed, and
2721 : : * because mdread doesn't complain about reads beyond EOF (when
2722 : : * zero_damaged_pages is ON) and so a previous attempt to read a block
2723 : : * beyond EOF could have left a "valid" zero-filled buffer.
2724 : : *
2725 : : * This has also been observed when a relation was overwritten by an
2726 : : * external process. Since the legitimate cases should always have
2727 : : * left a zero-filled buffer, complain if not PageIsNew.
2728 : : */
2729 [ - + ]: 216523 : if (existing_id >= 0)
2730 : : {
885 andres@anarazel.de 2731 :UBC 0 : BufferDesc *existing_hdr = GetBufferDescriptor(existing_id);
2732 : : Block buf_block;
2733 : : bool valid;
2734 : :
2735 : : /*
2736 : : * Pin the existing buffer before releasing the partition lock,
2737 : : * preventing it from being evicted.
2738 : : */
2739 : 0 : valid = PinBuffer(existing_hdr, strategy);
2740 : :
2741 : 0 : LWLockRelease(partition_lock);
2742 : 0 : UnpinBuffer(victim_buf_hdr);
2743 : :
2744 : 0 : buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
2745 : 0 : buf_block = BufHdrGetBlock(existing_hdr);
2746 : :
2747 [ # # # # ]: 0 : if (valid && !PageIsNew((Page) buf_block))
2748 [ # # ]: 0 : ereport(ERROR,
2749 : : (errmsg("unexpected data beyond EOF in block %u of relation \"%s\"",
2750 : : existing_hdr->tag.blockNum,
2751 : : relpath(bmr.smgr->smgr_rlocator, fork).str)));
2752 : :
2753 : : /*
2754 : : * We *must* do smgr[zero]extend before succeeding, else the page
2755 : : * will not be reserved by the kernel, and the next P_NEW call
2756 : : * will decide to return the same page. Clear the BM_VALID bit,
2757 : : * do StartBufferIO() and proceed.
2758 : : *
2759 : : * Loop to handle the very small possibility that someone re-sets
2760 : : * BM_VALID between our clearing it and StartBufferIO inspecting
2761 : : * it.
2762 : : */
2763 : : do
2764 : : {
2765 : 0 : uint32 buf_state = LockBufHdr(existing_hdr);
2766 : :
2767 : 0 : buf_state &= ~BM_VALID;
2768 : 0 : UnlockBufHdr(existing_hdr, buf_state);
521 tmunro@postgresql.or 2769 [ # # ]: 0 : } while (!StartBufferIO(existing_hdr, true, false));
2770 : : }
2771 : : else
2772 : : {
2773 : : uint32 buf_state;
2774 : :
885 andres@anarazel.de 2775 :CBC 216523 : buf_state = LockBufHdr(victim_buf_hdr);
2776 : :
2777 : : /* some sanity checks while we hold the buffer header lock */
2778 [ - + ]: 216523 : Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
2779 [ - + ]: 216523 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
2780 : :
2781 : 216523 : victim_buf_hdr->tag = tag;
2782 : :
2783 : 216523 : buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
745 tmunro@postgresql.or 2784 [ + + + + ]: 216523 : if (bmr.relpersistence == RELPERSISTENCE_PERMANENT || fork == INIT_FORKNUM)
885 andres@anarazel.de 2785 : 210651 : buf_state |= BM_PERMANENT;
2786 : :
2787 : 216523 : UnlockBufHdr(victim_buf_hdr, buf_state);
2788 : :
2789 : 216523 : LWLockRelease(partition_lock);
2790 : :
2791 : : /* XXX: could combine the locked operations in StartBufferIO() with the above */
521 tmunro@postgresql.or 2792 : 216523 : StartBufferIO(victim_buf_hdr, true, false);
2793 : : }
2794 : : }
2795 : :
192 michael@paquier.xyz 2796 : 195368 : io_start = pgstat_prepare_io_time(track_io_timing);
2797 : :
2798 : : /*
2799 : : * Note: if smgrzeroextend fails, we will end up with buffers that are
2800 : : * allocated but not marked BM_VALID. The next relation extension will
2801 : : * still select the same block number (because the relation didn't get any
2802 : : * longer on disk) and so future attempts to extend the relation will find
2803 : : * the same buffers (if they have not been recycled) but come right back
2804 : : * here to try smgrzeroextend again.
2805 : : *
2806 : : * We don't need to set checksum for all-zero pages.
2807 : : */
745 tmunro@postgresql.or 2808 : 195368 : smgrzeroextend(bmr.smgr, fork, first_block, extend_by, false);
2809 : :
2810 : : /*
2811 : : * Release the file-extension lock; it's now OK for someone else to extend
2812 : : * the relation some more.
2813 : : *
2814 : : * We remove IO_IN_PROGRESS after this, as waking up waiting backends can
2815 : : * take noticeable time.
2816 : : */
885 andres@anarazel.de 2817 [ + + ]: 195368 : if (!(flags & EB_SKIP_EXTENSION_LOCK))
745 tmunro@postgresql.or 2818 : 145865 : UnlockRelationForExtension(bmr.rel, ExclusiveLock);
2819 : :
883 andres@anarazel.de 2820 : 195368 : pgstat_count_io_op_time(IOOBJECT_RELATION, io_context, IOOP_EXTEND,
235 michael@paquier.xyz 2821 : 195368 : io_start, 1, extend_by * BLCKSZ);
2822 : :
2823 : : /* Set BM_VALID, terminate IO, and wake up any waiters */
718 peter@eisentraut.org 2824 [ + + ]: 411891 : for (uint32 i = 0; i < extend_by; i++)
2825 : : {
885 andres@anarazel.de 2826 : 216523 : Buffer buf = buffers[i];
2827 : 216523 : BufferDesc *buf_hdr = GetBufferDescriptor(buf - 1);
2828 : 216523 : bool lock = false;
2829 : :
2830 [ + + + + ]: 216523 : if (flags & EB_LOCK_FIRST && i == 0)
2831 : 143774 : lock = true;
2832 [ + + ]: 72749 : else if (flags & EB_LOCK_TARGET)
2833 : : {
2834 [ - + ]: 42548 : Assert(extend_upto != InvalidBlockNumber);
2835 [ + + ]: 42548 : if (first_block + i + 1 == extend_upto)
2836 : 41975 : lock = true;
2837 : : }
2838 : :
2839 [ + + ]: 216523 : if (lock)
2840 : 185749 : LWLockAcquire(BufferDescriptorGetContentLock(buf_hdr), LW_EXCLUSIVE);
2841 : :
160 2842 : 216523 : TerminateBufferIO(buf_hdr, false, BM_VALID, true, false);
2843 : : }
2844 : :
885 2845 : 195368 : pgBufferUsage.shared_blks_written += extend_by;
2846 : :
2847 : 195368 : *extended_by = extend_by;
2848 : :
2849 : 195368 : return first_block;
2850 : : }
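/*
 * A worked example of the extend_upto clamp above (numbers invented for
 * illustration): suppose a caller asked to extend by 8 blocks but only up
 * to block 104, and a concurrent extension had already grown the relation
 * to 100 blocks by the time we obtained the extension lock.  Then
 *
 *     first_block = smgrnblocks(...)          = 100
 *     extend_by   = extend_upto - first_block = 104 - 100 = 4
 *
 * and the victim buffers acquired for the four surplus blocks
 * (buffers[4..7]) are simply unpinned again, before any buffer-table
 * entries are created for them.
 */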
2851 : :
2852 : : /*
2853 : : * BufferIsExclusiveLocked
2854 : : *
2855 : : * Checks if buffer is exclusive-locked.
2856 : : *
2857 : : * Buffer must be pinned.
2858 : : */
2859 : : bool
684 jdavis@postgresql.or 2860 : 14589262 : BufferIsExclusiveLocked(Buffer buffer)
2861 : : {
2862 : : BufferDesc *bufHdr;
2863 : :
220 tgl@sss.pgh.pa.us 2864 [ - + + + : 14589262 : Assert(BufferIsPinned(buffer));
- + ]
2865 : :
684 jdavis@postgresql.or 2866 [ + + ]: 14589262 : if (BufferIsLocal(buffer))
2867 : : {
2868 : : /* Content locks are not maintained for local buffers. */
220 tgl@sss.pgh.pa.us 2869 :GBC 764 : return true;
2870 : : }
2871 : : else
2872 : : {
684 jdavis@postgresql.or 2873 :CBC 14588498 : bufHdr = GetBufferDescriptor(buffer - 1);
220 tgl@sss.pgh.pa.us 2874 : 14588498 : return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2875 : : LW_EXCLUSIVE);
2876 : : }
2877 : : }
2878 : :
2879 : : /*
2880 : : * BufferIsDirty
2881 : : *
2882 : : * Checks if buffer is already dirty.
2883 : : *
2884 : : * Buffer must be pinned and exclusive-locked. (Without an exclusive lock,
2885 : : * the result may be stale before it's returned.)
2886 : : */
2887 : : bool
684 jdavis@postgresql.or 2888 : 14535315 : BufferIsDirty(Buffer buffer)
2889 : : {
2890 : : BufferDesc *bufHdr;
2891 : :
220 tgl@sss.pgh.pa.us 2892 [ - + - + : 14535315 : Assert(BufferIsPinned(buffer));
- + ]
2893 : :
684 jdavis@postgresql.or 2894 [ - + ]: 14535315 : if (BufferIsLocal(buffer))
2895 : : {
684 jdavis@postgresql.or 2896 :UBC 0 : int bufid = -buffer - 1;
2897 : :
2898 : 0 : bufHdr = GetLocalBufferDescriptor(bufid);
2899 : : /* Content locks are not maintained for local buffers. */
2900 : : }
2901 : : else
2902 : : {
684 jdavis@postgresql.or 2903 :CBC 14535315 : bufHdr = GetBufferDescriptor(buffer - 1);
220 tgl@sss.pgh.pa.us 2904 [ - + ]: 14535315 : Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2905 : : LW_EXCLUSIVE));
2906 : : }
2907 : :
684 jdavis@postgresql.or 2908 : 14535315 : return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
2909 : : }
2910 : :
2911 : : /*
2912 : : * MarkBufferDirty
2913 : : *
2914 : : * Marks buffer contents as dirty (actual write happens later).
2915 : : *
2916 : : * Buffer must be pinned and exclusive-locked. (If caller does not hold
2917 : : * exclusive lock, then somebody could be in process of writing the buffer,
2918 : : * leading to risk of bad data written to disk.)
2919 : : */
2920 : : void
7099 tgl@sss.pgh.pa.us 2921 : 21291067 : MarkBufferDirty(Buffer buffer)
2922 : : {
2923 : : BufferDesc *bufHdr;
2924 : : uint32 buf_state;
2925 : : uint32 old_buf_state;
2926 : :
7631 2927 [ - + ]: 21291067 : if (!BufferIsValid(buffer))
5193 peter_e@gmx.net 2928 [ # # ]:UBC 0 : elog(ERROR, "bad buffer ID: %d", buffer);
2929 : :
9290 tgl@sss.pgh.pa.us 2930 [ + + ]:CBC 21291067 : if (BufferIsLocal(buffer))
2931 : : {
7099 2932 : 1379756 : MarkLocalBufferDirty(buffer);
8484 bruce@momjian.us 2933 : 1379756 : return;
2934 : : }
2935 : :
3873 andres@anarazel.de 2936 : 19911311 : bufHdr = GetBufferDescriptor(buffer - 1);
2937 : :
4025 2938 [ - + - + : 19911311 : Assert(BufferIsPinned(buffer));
- + ]
3288 simon@2ndQuadrant.co 2939 [ - + ]: 19911311 : Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2940 : : LW_EXCLUSIVE));
2941 : :
3436 andres@anarazel.de 2942 : 19911311 : old_buf_state = pg_atomic_read_u32(&bufHdr->state);
2943 : : for (;;)
2944 : : {
2945 [ + + ]: 19911376 : if (old_buf_state & BM_LOCKED)
2946 : 19 : old_buf_state = WaitBufHdrUnlocked(bufHdr);
2947 : :
2948 : 19911376 : buf_state = old_buf_state;
2949 : :
2950 [ - + ]: 19911376 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2951 : 19911376 : buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
2952 : :
2953 [ + + ]: 19911376 : if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
2954 : : buf_state))
2955 : 19911311 : break;
2956 : : }
2957 : :
2958 : : /*
2959 : : * If the buffer was not dirty already, do vacuum accounting.
2960 : : */
2961 [ + + ]: 19911311 : if (!(old_buf_state & BM_DIRTY))
2962 : : {
4945 rhaas@postgresql.org 2963 : 642505 : pgBufferUsage.shared_blks_dirtied++;
5034 alvherre@alvh.no-ip. 2964 [ + + ]: 642505 : if (VacuumCostActive)
2965 : 8697 : VacuumCostBalance += VacuumCostPageDirty;
2966 : : }
2967 : : }
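/*
 * The retry loop above is one instance of the lock-free idiom this file
 * uses to update a buffer's state word without taking the buffer header
 * spinlock.  A minimal sketch of the general pattern follows;
 * BufStateSetFlags() is a hypothetical helper used only for illustration,
 * not part of the real buffer-manager API.
 */
#ifdef NOT_USED
static void
BufStateSetFlags(BufferDesc *buf, uint32 set_bits)
{
	uint32		old_buf_state = pg_atomic_read_u32(&buf->state);

	for (;;)
	{
		uint32		buf_state;

		/* if the header spinlock is held, wait until it is released */
		if (old_buf_state & BM_LOCKED)
			old_buf_state = WaitBufHdrUnlocked(buf);

		buf_state = old_buf_state | set_bits;

		/* on failure, old_buf_state has been refreshed; just retry */
		if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
										   buf_state))
			break;
	}
}
#endif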
2968 : :
2969 : : /*
2970 : : * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
2971 : : *
2972 : : * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
2973 : : * compared to calling the two routines separately. Now it's mainly just
2974 : : * a convenience function. However, if the passed buffer is valid and
2975 : : * already contains the desired block, we just return it as-is; and that
2976 : : * does save considerable work compared to a full release and reacquire.
2977 : : *
2978 : : * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
2979 : : * buffer actually needs to be released. This case is the same as ReadBuffer,
2980 : : * but can save some tests in the caller.
2981 : : */
2982 : : Buffer
10651 scrappy@hub.org 2983 : 26356004 : ReleaseAndReadBuffer(Buffer buffer,
2984 : : Relation relation,
2985 : : BlockNumber blockNum)
2986 : : {
5931 bruce@momjian.us 2987 : 26356004 : ForkNumber forkNum = MAIN_FORKNUM;
2988 : : BufferDesc *bufHdr;
2989 : :
8883 tgl@sss.pgh.pa.us 2990 [ + + ]: 26356004 : if (BufferIsValid(buffer))
2991 : : {
4025 andres@anarazel.de 2992 [ - + + + : 15893018 : Assert(BufferIsPinned(buffer));
- + ]
8883 tgl@sss.pgh.pa.us 2993 [ + + ]: 15893018 : if (BufferIsLocal(buffer))
2994 : : {
3873 andres@anarazel.de 2995 : 136457 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
8855 tgl@sss.pgh.pa.us 2996 [ + + + - ]: 139979 : if (bufHdr->tag.blockNum == blockNum &&
1109 rhaas@postgresql.org 2997 [ + - ]: 7044 : BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2998 : 3522 : BufTagGetForkNum(&bufHdr->tag) == forkNum)
8855 tgl@sss.pgh.pa.us 2999 : 3522 : return buffer;
885 andres@anarazel.de 3000 : 132935 : UnpinLocalBuffer(buffer);
3001 : : }
3002 : : else
3003 : : {
3873 3004 : 15756561 : bufHdr = GetBufferDescriptor(buffer - 1);
3005 : : /* we have pin, so it's ok to examine tag without spinlock */
8855 tgl@sss.pgh.pa.us 3006 [ + + + - ]: 21149997 : if (bufHdr->tag.blockNum == blockNum &&
1109 rhaas@postgresql.org 3007 [ + - ]: 10786872 : BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
3008 : 5393436 : BufTagGetForkNum(&bufHdr->tag) == forkNum)
8855 tgl@sss.pgh.pa.us 3009 : 5393436 : return buffer;
1072 michael@paquier.xyz 3010 : 10363125 : UnpinBuffer(bufHdr);
3011 : : }
3012 : : }
3013 : :
7491 tgl@sss.pgh.pa.us 3014 : 20959046 : return ReadBuffer(relation, blockNum);
3015 : : }
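/*
 * A hypothetical caller-side sketch of the usage pattern this function is
 * meant for: walking a relation block by block, letting each call drop the
 * previous pin (or keep it, if the same block is requested again).  The
 * scan_all_blocks() helper and its arguments are assumptions for
 * illustration only.
 */
#ifdef NOT_USED
static void
scan_all_blocks(Relation rel, BlockNumber nblocks)
{
	Buffer		buf = InvalidBuffer;

	for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
	{
		/* unpins the previously returned buffer unless it already holds blkno */
		buf = ReleaseAndReadBuffer(buf, rel, blkno);
		/* ... examine the page under a suitable content lock ... */
	}
	if (BufferIsValid(buf))
		ReleaseBuffer(buf);
}
#endif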
3016 : :
3017 : : /*
3018 : : * PinBuffer -- make buffer unavailable for replacement.
3019 : : *
3020 : : * For the default access strategy, the buffer's usage_count is incremented
3021 : : * when we first pin it; for other strategies we just make sure the usage_count
3022 : : * isn't zero. (The idea of the latter is that we don't want synchronized
3023 : : * heap scans to inflate the count, but we need it to not be zero to discourage
3024 : : * other backends from stealing buffers from our ring. As long as we cycle
3025 : : * through the ring faster than the global clock-sweep cycles, buffers in
3026 : : * our ring won't be chosen as victims for replacement by other backends.)
3027 : : *
3028 : : * This should be applied only to shared buffers, never local ones.
3029 : : *
3030 : : * Since buffers are pinned/unpinned very frequently, pin buffers without
3031 : : * taking the buffer header lock; instead update the state variable in a loop of
3032 : : * CAS operations. Hopefully it's just a single CAS.
3033 : : *
3034 : : * Note that ResourceOwnerEnlarge() and ReservePrivateRefCountEntry()
3035 : : * must have been done already.
3036 : : *
3037 : : * Returns true if buffer is BM_VALID, else false. This provision allows
3038 : : * some callers to avoid an extra spinlock cycle.
3039 : : */
3040 : : static bool
3582 rhaas@postgresql.org 3041 : 53660290 : PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
3042 : : {
3678 andres@anarazel.de 3043 : 53660290 : Buffer b = BufferDescriptorGetBuffer(buf);
3044 : : bool result;
3045 : : PrivateRefCountEntry *ref;
3046 : :
885 3047 [ - + ]: 53660290 : Assert(!BufferIsLocal(b));
668 heikki.linnakangas@i 3048 [ - + ]: 53660290 : Assert(ReservedRefCountEntry != NULL);
3049 : :
3678 andres@anarazel.de 3050 : 53660290 : ref = GetPrivateRefCountEntry(b, true);
3051 : :
3883 3052 [ + + ]: 53660290 : if (ref == NULL)
3053 : : {
3054 : : uint32 buf_state;
3055 : : uint32 old_buf_state;
3056 : :
3678 3057 : 51609550 : ref = NewPrivateRefCountEntry(b);
3058 : :
3436 3059 : 51609550 : old_buf_state = pg_atomic_read_u32(&buf->state);
3060 : : for (;;)
3061 : : {
3062 [ + + ]: 51617165 : if (old_buf_state & BM_LOCKED)
3063 : 230 : old_buf_state = WaitBufHdrUnlocked(buf);
3064 : :
3065 : 51617165 : buf_state = old_buf_state;
3066 : :
3067 : : /* increase refcount */
3068 : 51617165 : buf_state += BUF_REFCOUNT_ONE;
3069 : :
3092 teodor@sigaev.ru 3070 [ + + ]: 51617165 : if (strategy == NULL)
3071 : : {
3072 : : /* Default case: increase usagecount unless already max. */
3073 [ + + ]: 51159301 : if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
3074 : 3215098 : buf_state += BUF_USAGECOUNT_ONE;
3075 : : }
3076 : : else
3077 : : {
3078 : : /*
3079 : : * Ring buffers shouldn't evict others from pool. Thus we
3080 : : * don't make usagecount more than 1.
3081 : : */
3082 [ + + ]: 457864 : if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
3083 : 32103 : buf_state += BUF_USAGECOUNT_ONE;
3084 : : }
3085 : :
3436 andres@anarazel.de 3086 [ + + ]: 51617165 : if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
3087 : : buf_state))
3088 : : {
3089 : 51609550 : result = (buf_state & BM_VALID) != 0;
3090 : :
3091 : : /*
3092 : : * Assume that we acquired a buffer pin for the purposes of
3093 : : * Valgrind buffer client checks (even in !result case) to
3094 : : * keep things simple. Buffers that are unsafe to access are
3095 : : * not generally guaranteed to be marked undefined or
3096 : : * non-accessible in any case.
3097 : : */
3098 : : VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
3099 : 51609550 : break;
3100 : : }
3101 : : }
3102 : : }
3103 : : else
3104 : : {
3105 : : /*
3106 : : * If we previously pinned the buffer, it is likely to be valid, but
3107 : : * it may not be if StartReadBuffers() was called and
3108 : : * WaitReadBuffers() hasn't been called yet. We'll check by loading
3109 : : * the flags without locking. This is racy, but it's OK to return
3110 : : * false spuriously: when WaitReadBuffers() calls StartBufferIO(),
3111 : : * it'll see that it's now valid.
3112 : : *
3113 : : * Note: We deliberately avoid a Valgrind client request here.
3114 : : * Individual access methods can optionally superimpose buffer page
3115 : : * client requests on top of our client requests to enforce that
3116 : : * buffers are only accessed while locked (and pinned). It's possible
3117 : : * that the buffer page is legitimately non-accessible here. We
3118 : : * cannot meddle with that.
3119 : : */
521 tmunro@postgresql.or 3120 : 2050740 : result = (pg_atomic_read_u32(&buf->state) & BM_VALID) != 0;
3121 : : }
3122 : :
4025 andres@anarazel.de 3123 : 53660290 : ref->refcount++;
3124 [ - + ]: 53660290 : Assert(ref->refcount > 0);
3678 3125 : 53660290 : ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
7491 tgl@sss.pgh.pa.us 3126 : 53660290 : return result;
3127 : : }
3128 : :
3129 : : /*
3130 : : * PinBuffer_Locked -- as above, but caller already locked the buffer header.
3131 : : * The spinlock is released before return.
3132 : : *
3133 : : * As this function is called with the spinlock held, the caller has to
3134 : : * previously call ReservePrivateRefCountEntry() and
3135 : : * ResourceOwnerEnlarge(CurrentResourceOwner);
3136 : : *
3137 : : * Currently, no callers of this function want to modify the buffer's
3138 : : * usage_count at all, so there's no need for a strategy parameter.
3139 : : * Also we don't bother with a BM_VALID test (the caller could check that for
3140 : : * itself).
3141 : : *
3142 : : * Also all callers only ever use this function when it's known that the
3143 : : * buffer can't have a preexisting pin by this backend. That allows us to skip
3144 : : * searching the private refcount array & hash, which is a boon, because the
3145 : : * spinlock is still held.
3146 : : *
3147 : : * Note: use of this routine is frequently mandatory, not just an optimization
3148 : : * to save a spin lock/unlock cycle, because we need to pin a buffer before
3149 : : * its state can change under us.
3150 : : */
3151 : : static void
3582 rhaas@postgresql.org 3152 : 2197940 : PinBuffer_Locked(BufferDesc *buf)
3153 : : {
3154 : : Buffer b;
3155 : : PrivateRefCountEntry *ref;
3156 : : uint32 buf_state;
3157 : :
3158 : : /*
3159 : : * As explained, we don't expect any preexisting pins. That allows us to
3160 : : * manipulate the PrivateRefCount after releasing the spinlock.
3161 : : */
3678 andres@anarazel.de 3162 [ - + ]: 2197940 : Assert(GetPrivateRefCountEntry(BufferDescriptorGetBuffer(buf), false) == NULL);
3163 : :
3164 : : /*
3165 : : * Buffer can't have a preexisting pin, so mark its page as defined to
3166 : : * Valgrind (this is similar to the PinBuffer() case where the backend
3167 : : * doesn't already have a buffer pin)
3168 : : */
3169 : : VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
3170 : :
3171 : : /*
3172 : : * Since we hold the buffer spinlock, we can update the buffer state and
3173 : : * release the lock in one operation.
3174 : : */
3436 3175 : 2197940 : buf_state = pg_atomic_read_u32(&buf->state);
3176 [ - + ]: 2197940 : Assert(buf_state & BM_LOCKED);
3177 : 2197940 : buf_state += BUF_REFCOUNT_ONE;
3178 : 2197940 : UnlockBufHdr(buf, buf_state);
3179 : :
3678 3180 : 2197940 : b = BufferDescriptorGetBuffer(buf);
3181 : :
3182 : 2197940 : ref = NewPrivateRefCountEntry(b);
4025 3183 : 2197940 : ref->refcount++;
3184 : :
3678 3185 : 2197940 : ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
7810 tgl@sss.pgh.pa.us 3186 : 2197940 : }
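/*
 * A minimal caller-side sketch of the protocol described above, with all
 * error handling omitted: reserve the refcount entry and the resource-owner
 * slot first, then take the buffer header spinlock, then hand the
 * still-locked header to PinBuffer_Locked(), which releases the spinlock.
 * SyncOneBuffer() below follows this sequence; the example function name
 * and its buf_id argument are illustration-only assumptions.
 */
#ifdef NOT_USED
static void
pin_locked_header_example(int buf_id)
{
	BufferDesc *bufHdr = GetBufferDescriptor(buf_id);

	ReservePrivateRefCountEntry();
	ResourceOwnerEnlarge(CurrentResourceOwner);

	(void) LockBufHdr(bufHdr);
	PinBuffer_Locked(bufHdr);	/* releases the buffer header spinlock */
}
#endif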
3187 : :
3188 : : /*
3189 : : * Support for waking up another backend that is waiting for the cleanup lock
3190 : : * to be released using BM_PIN_COUNT_WAITER.
3191 : : *
3192 : : * See LockBufferForCleanup().
3193 : : *
3194 : : * Expected to be called just after releasing a buffer pin (in a BufferDesc,
3195 : : * not just reducing the backend-local pincount for the buffer).
3196 : : */
3197 : : static void
160 andres@anarazel.de 3198 : 89 : WakePinCountWaiter(BufferDesc *buf)
3199 : : {
3200 : : /*
3201 : : * Acquire the buffer header lock, re-check that there's a waiter. Another
3202 : : * backend could have unpinned this buffer, and already woken up the
3203 : : * waiter.
3204 : : *
3205 : : * There's no danger of the buffer being replaced after we unpinned it
3206 : : * above, as it's pinned by the waiter. The waiter removes
3207 : : * BM_PIN_COUNT_WAITER if it stops waiting for a reason other than this
3208 : : * backend waking it up.
3209 : : */
3210 : 89 : uint32 buf_state = LockBufHdr(buf);
3211 : :
3212 [ + - ]: 89 : if ((buf_state & BM_PIN_COUNT_WAITER) &&
3213 [ + - ]: 89 : BUF_STATE_GET_REFCOUNT(buf_state) == 1)
3214 : 89 : {
3215 : : /* we just released the last pin other than the waiter's */
3216 : 89 : int wait_backend_pgprocno = buf->wait_backend_pgprocno;
3217 : :
3218 : 89 : buf_state &= ~BM_PIN_COUNT_WAITER;
3219 : 89 : UnlockBufHdr(buf, buf_state);
3220 : 89 : ProcSendSignal(wait_backend_pgprocno);
3221 : : }
3222 : : else
160 andres@anarazel.de 3223 :UBC 0 : UnlockBufHdr(buf, buf_state);
160 andres@anarazel.de 3224 :CBC 89 : }
3225 : :
3226 : : /*
3227 : : * UnpinBuffer -- make buffer available for replacement.
3228 : : *
3229 : : * This should be applied only to shared buffers, never local ones. This
3230 : : * always adjusts CurrentResourceOwner.
3231 : : */
3232 : : static void
1072 michael@paquier.xyz 3233 : 65992046 : UnpinBuffer(BufferDesc *buf)
3234 : : {
668 heikki.linnakangas@i 3235 : 65992046 : Buffer b = BufferDescriptorGetBuffer(buf);
3236 : :
3237 : 65992046 : ResourceOwnerForgetBuffer(CurrentResourceOwner, b);
3238 : 65992046 : UnpinBufferNoOwner(buf);
3239 : 65992046 : }
3240 : :
3241 : : static void
3242 : 65996476 : UnpinBufferNoOwner(BufferDesc *buf)
3243 : : {
3244 : : PrivateRefCountEntry *ref;
3678 andres@anarazel.de 3245 : 65996476 : Buffer b = BufferDescriptorGetBuffer(buf);
3246 : :
885 3247 [ - + ]: 65996476 : Assert(!BufferIsLocal(b));
3248 : :
3249 : : /* not moving as we're likely deleting it soon anyway */
3678 3250 : 65996476 : ref = GetPrivateRefCountEntry(b, false);
4025 3251 [ - + ]: 65996476 : Assert(ref != NULL);
3252 [ - + ]: 65996476 : Assert(ref->refcount > 0);
3253 : 65996476 : ref->refcount--;
3254 [ + + ]: 65996476 : if (ref->refcount == 0)
3255 : : {
3256 : : uint32 buf_state;
3257 : : uint32 old_buf_state;
3258 : :
3259 : : /*
3260 : : * Mark buffer non-accessible to Valgrind.
3261 : : *
3262 : : * Note that the buffer may have already been marked non-accessible
3263 : : * within access method code that enforces that buffers are only
3264 : : * accessed while a buffer lock is held.
3265 : : */
3266 : : VALGRIND_MAKE_MEM_NOACCESS(BufHdrGetBlock(buf), BLCKSZ);
3267 : :
3268 : : /* I'd better not still hold the buffer content lock */
3553 rhaas@postgresql.org 3269 [ - + ]: 53807490 : Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
3270 : :
3271 : : /*
3272 : : * Decrement the shared reference count.
3273 : : *
3274 : : * Since the buffer spinlock holder can update the status using just a write,
3275 : : * it's not safe to use atomic decrement here; thus use a CAS loop.
3276 : : */
3436 andres@anarazel.de 3277 : 53807490 : old_buf_state = pg_atomic_read_u32(&buf->state);
3278 : : for (;;)
3279 : : {
3280 [ + + ]: 53813324 : if (old_buf_state & BM_LOCKED)
3281 : 204 : old_buf_state = WaitBufHdrUnlocked(buf);
3282 : :
3283 : 53813324 : buf_state = old_buf_state;
3284 : :
3285 : 53813324 : buf_state -= BUF_REFCOUNT_ONE;
3286 : :
3287 [ + + ]: 53813324 : if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
3288 : : buf_state))
3289 : 53807490 : break;
3290 : : }
3291 : :
3292 : : /* Support LockBufferForCleanup() */
3293 [ + + ]: 53807490 : if (buf_state & BM_PIN_COUNT_WAITER)
160 3294 : 89 : WakePinCountWaiter(buf);
3295 : :
4025 3296 : 53807490 : ForgetPrivateRefCountEntry(ref);
3297 : : }
7810 tgl@sss.pgh.pa.us 3298 : 65996476 : }
3299 : :
3300 : : #define ST_SORT sort_checkpoint_bufferids
3301 : : #define ST_ELEMENT_TYPE CkptSortItem
3302 : : #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
3303 : : #define ST_SCOPE static
3304 : : #define ST_DEFINE
3305 : : #include "lib/sort_template.h"
3306 : :
3307 : : /*
3308 : : * BufferSync -- Write out all dirty buffers in the pool.
3309 : : *
3310 : : * This is called at checkpoint time to write out all dirty shared buffers.
3311 : : * The checkpoint request flags should be passed in. If CHECKPOINT_FAST is
3312 : : * set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
3313 : : * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_UNLOGGED is set, we write
3314 : : * even unlogged buffers, which are otherwise skipped. The remaining flags
3315 : : * currently have no effect here.
3316 : : */
3317 : : static void
6645 3318 : 1677 : BufferSync(int flags)
3319 : : {
3320 : : uint32 buf_state;
3321 : : int buf_id;
3322 : : int num_to_scan;
3323 : : int num_spaces;
3324 : : int num_processed;
3325 : : int num_written;
3487 andres@anarazel.de 3326 : 1677 : CkptTsStatus *per_ts_stat = NULL;
3327 : : Oid last_tsid;
3328 : : binaryheap *ts_heap;
3329 : : int i;
5365 rhaas@postgresql.org 3330 : 1677 : int mask = BM_DIRTY;
3331 : : WritebackContext wb_context;
3332 : :
3333 : : /*
3334 : : * Unless this is a shutdown checkpoint or we have been explicitly told,
3335 : : * we write only permanent, dirty buffers. But at shutdown or end of
3336 : : * recovery, we write all dirty buffers.
3337 : : */
3974 andres@anarazel.de 3338 [ + + ]: 1677 : if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
3339 : : CHECKPOINT_FLUSH_UNLOGGED))))
4992 rhaas@postgresql.org 3340 : 967 : mask |= BM_PERMANENT;
3341 : :
3342 : : /*
3343 : : * Loop over all buffers, and mark the ones that need to be written with
3344 : : * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
3345 : : * can estimate how much work needs to be done.
3346 : : *
3347 : : * This allows us to write only those pages that were dirty when the
3348 : : * checkpoint began, and not those that get dirtied while it proceeds.
3349 : : * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
3350 : : * later in this function, or by normal backends or the bgwriter cleaning
3351 : : * scan, the flag is cleared. Any buffer dirtied after this point won't
3352 : : * have the flag set.
3353 : : *
3354 : : * Note that if we fail to write some buffer, we may leave buffers with
3355 : : * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
3356 : : * certainly need to be written for the next checkpoint attempt, too.
3357 : : */
3487 andres@anarazel.de 3358 : 1677 : num_to_scan = 0;
6645 tgl@sss.pgh.pa.us 3359 [ + + ]: 11302621 : for (buf_id = 0; buf_id < NBuffers; buf_id++)
3360 : : {
3582 rhaas@postgresql.org 3361 : 11300944 : BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
3362 : :
3363 : : /*
3364 : : * Header spinlock is enough to examine BM_DIRTY, see comment in
3365 : : * SyncOneBuffer.
3366 : : */
3436 andres@anarazel.de 3367 : 11300944 : buf_state = LockBufHdr(bufHdr);
3368 : :
3369 [ + + ]: 11300944 : if ((buf_state & mask) == mask)
3370 : : {
3371 : : CkptSortItem *item;
3372 : :
3373 : 287041 : buf_state |= BM_CHECKPOINT_NEEDED;
3374 : :
3487 3375 : 287041 : item = &CkptBufferIds[num_to_scan++];
3376 : 287041 : item->buf_id = buf_id;
1109 rhaas@postgresql.org 3377 : 287041 : item->tsId = bufHdr->tag.spcOid;
3378 : 287041 : item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
3379 : 287041 : item->forkNum = BufTagGetForkNum(&bufHdr->tag);
3487 andres@anarazel.de 3380 : 287041 : item->blockNum = bufHdr->tag.blockNum;
3381 : : }
3382 : :
3436 3383 : 11300944 : UnlockBufHdr(bufHdr, buf_state);
3384 : :
3385 : : /* Check for barrier events in case NBuffers is large. */
2088 rhaas@postgresql.org 3386 [ - + ]: 11300944 : if (ProcSignalBarrierPending)
2088 rhaas@postgresql.org 3387 :UBC 0 : ProcessProcSignalBarrier();
3388 : : }
3389 : :
3487 andres@anarazel.de 3390 [ + + ]:CBC 1677 : if (num_to_scan == 0)
6645 tgl@sss.pgh.pa.us 3391 : 638 : return; /* nothing to do */
3392 : :
3487 andres@anarazel.de 3393 : 1039 : WritebackContextInit(&wb_context, &checkpoint_flush_after);
3394 : :
3395 : : TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
3396 : :
3397 : : /*
3398 : : * Sort buffers that need to be written to reduce the likelihood of random
3399 : : * IO. The sorting is also important for the implementation of balancing
3400 : : * writes between tablespaces. Without balancing writes we'd potentially
3401 : : * end up writing to the tablespaces one-by-one; possibly overloading the
3402 : : * underlying system.
3403 : : */
1639 tmunro@postgresql.or 3404 : 1039 : sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
3405 : :
3487 andres@anarazel.de 3406 : 1039 : num_spaces = 0;
3407 : :
3408 : : /*
3409 : : * Allocate progress status for each tablespace with buffers that need to
3410 : : * be flushed. This requires the to-be-flushed array to be sorted.
3411 : : */
3412 : 1039 : last_tsid = InvalidOid;
3413 [ + + ]: 288080 : for (i = 0; i < num_to_scan; i++)
3414 : : {
3415 : : CkptTsStatus *s;
3416 : : Oid cur_tsid;
3417 : :
3418 : 287041 : cur_tsid = CkptBufferIds[i].tsId;
3419 : :
3420 : : /*
3421 : : * Grow array of per-tablespace status structs, every time a new
3422 : : * tablespace is found.
3423 : : */
3424 [ + + + + ]: 287041 : if (last_tsid == InvalidOid || last_tsid != cur_tsid)
3425 : 1570 : {
3426 : : Size sz;
3427 : :
3428 : 1570 : num_spaces++;
3429 : :
3430 : : /*
3431 : : * Not worth adding grow-by-power-of-2 logic here - even with a
3432 : : * few hundred tablespaces this should be fine.
3433 : : */
3434 : 1570 : sz = sizeof(CkptTsStatus) * num_spaces;
3435 : :
3436 [ + + ]: 1570 : if (per_ts_stat == NULL)
3437 : 1039 : per_ts_stat = (CkptTsStatus *) palloc(sz);
3438 : : else
3439 : 531 : per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
3440 : :
3441 : 1570 : s = &per_ts_stat[num_spaces - 1];
3442 : 1570 : memset(s, 0, sizeof(*s));
3443 : 1570 : s->tsId = cur_tsid;
3444 : :
3445 : : /*
3446 : : * The first buffer in this tablespace. As CkptBufferIds is sorted
3447 : : * by tablespace all (s->num_to_scan) buffers in this tablespace
3448 : : * will follow afterwards.
3449 : : */
3450 : 1570 : s->index = i;
3451 : :
3452 : : /*
3453 : : * progress_slice will be determined once we know how many buffers
3454 : : * are in each tablespace, i.e. after this loop.
3455 : : */
3456 : :
3457 : 1570 : last_tsid = cur_tsid;
3458 : : }
3459 : : else
3460 : : {
3461 : 285471 : s = &per_ts_stat[num_spaces - 1];
3462 : : }
3463 : :
3464 : 287041 : s->num_to_scan++;
3465 : :
3466 : : /* Check for barrier events. */
2088 rhaas@postgresql.org 3467 [ - + ]: 287041 : if (ProcSignalBarrierPending)
2088 rhaas@postgresql.org 3468 :UBC 0 : ProcessProcSignalBarrier();
3469 : : }
3470 : :
3487 andres@anarazel.de 3471 [ - + ]:CBC 1039 : Assert(num_spaces > 0);
3472 : :
3473 : : /*
3474 : : * Build a min-heap over the write-progress in the individual tablespaces,
3475 : : * and compute how large a portion of the total progress a single
3476 : : * processed buffer is.
3477 : : */
3478 : 1039 : ts_heap = binaryheap_allocate(num_spaces,
3479 : : ts_ckpt_progress_comparator,
3480 : : NULL);
3481 : :
3482 [ + + ]: 2609 : for (i = 0; i < num_spaces; i++)
3483 : : {
3484 : 1570 : CkptTsStatus *ts_stat = &per_ts_stat[i];
3485 : :
3486 : 1570 : ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
3487 : :
3488 : 1570 : binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
3489 : : }
3490 : :
3491 : 1039 : binaryheap_build(ts_heap);
3492 : :
3493 : : /*
3494 : : * Iterate through to-be-checkpointed buffers and write the ones (still)
3495 : : * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
3496 : : * tablespaces; otherwise the sorting would lead to only one tablespace
3497 : : * receiving writes at a time, making inefficient use of the hardware.
3498 : : */
3499 : 1039 : num_processed = 0;
6645 tgl@sss.pgh.pa.us 3500 : 1039 : num_written = 0;
3487 andres@anarazel.de 3501 [ + + ]: 288080 : while (!binaryheap_empty(ts_heap))
3502 : : {
3503 : 287041 : BufferDesc *bufHdr = NULL;
3504 : : CkptTsStatus *ts_stat = (CkptTsStatus *)
841 tgl@sss.pgh.pa.us 3505 : 287041 : DatumGetPointer(binaryheap_first(ts_heap));
3506 : :
3487 andres@anarazel.de 3507 : 287041 : buf_id = CkptBufferIds[ts_stat->index].buf_id;
3508 [ - + ]: 287041 : Assert(buf_id != -1);
3509 : :
3510 : 287041 : bufHdr = GetBufferDescriptor(buf_id);
3511 : :
3512 : 287041 : num_processed++;
3513 : :
3514 : : /*
3515 : : * We don't need to acquire the lock here, because we're only looking
3516 : : * at a single bit. It's possible that someone else writes the buffer
3517 : : * and clears the flag right after we check, but that doesn't matter
3518 : : * since SyncOneBuffer will then do nothing. However, there is a
3519 : : * further race condition: it's conceivable that between the time we
3520 : : * examine the bit here and the time SyncOneBuffer acquires the lock,
3521 : : * someone else not only wrote the buffer but replaced it with another
3522 : : * page and dirtied it. In that improbable case, SyncOneBuffer will
3523 : : * write the buffer though we didn't need to. It doesn't seem worth
3524 : : * guarding against this, though.
3525 : : */
3436 3526 [ + + ]: 287041 : if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
3527 : : {
3487 3528 [ + - ]: 266325 : if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
3529 : : {
3530 : : TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
677 michael@paquier.xyz 3531 : 266325 : PendingCheckpointerStats.buffers_written++;
6645 tgl@sss.pgh.pa.us 3532 : 266325 : num_written++;
3533 : : }
3534 : : }
3535 : :
3536 : : /*
3537 : : * Measure progress independently of actually having to flush the buffer
3538 : : * - otherwise writes become unbalanced.
3539 : : */
3487 andres@anarazel.de 3540 : 287041 : ts_stat->progress += ts_stat->progress_slice;
3541 : 287041 : ts_stat->num_scanned++;
3542 : 287041 : ts_stat->index++;
3543 : :
3544 : : /* Have all the buffers from the tablespace been processed? */
3545 [ + + ]: 287041 : if (ts_stat->num_scanned == ts_stat->num_to_scan)
3546 : : {
3547 : 1570 : binaryheap_remove_first(ts_heap);
3548 : : }
3549 : : else
3550 : : {
3551 : : /* update heap with the new progress */
3552 : 285471 : binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
3553 : : }
3554 : :
3555 : : /*
3556 : : * Sleep to throttle our I/O rate.
3557 : : *
3558 : : * (This will check for barrier events even if it doesn't sleep.)
3559 : : */
3560 : 287041 : CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
3561 : : }
3562 : :
3563 : : /*
3564 : : * Issue all pending flushes. Only checkpointer calls BufferSync(), so
3565 : : * IOContext will always be IOCONTEXT_NORMAL.
3566 : : */
843 3567 : 1039 : IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
3568 : :
3487 3569 : 1039 : pfree(per_ts_stat);
3570 : 1039 : per_ts_stat = NULL;
3571 : 1039 : binaryheap_free(ts_heap);
3572 : :
3573 : : /*
3574 : : * Update checkpoint statistics. As noted above, this doesn't include
3575 : : * buffers written by other backends or bgwriter scan.
3576 : : */
6643 tgl@sss.pgh.pa.us 3577 : 1039 : CheckpointStats.ckpt_bufs_written += num_written;
3578 : :
3579 : : TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
3580 : : }
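/*
 * A worked example of the tablespace balancing above (numbers invented for
 * illustration): with num_to_scan = 400 checkpoint buffers split between
 * tablespace A (100 buffers) and tablespace B (300 buffers), the slices are
 *
 *     A: progress_slice = 400 / 100 = 4.0
 *     B: progress_slice = 400 / 300 ~= 1.33
 *
 * Each buffer written advances its tablespace's progress by its slice, and
 * the min-heap always selects the tablespace with the least progress, so
 * the writes interleave roughly 1:3 (one A write for every three B writes)
 * rather than draining A completely before touching B.
 */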
3581 : :
3582 : : /*
3583 : : * BgBufferSync -- Write out some dirty buffers in the pool.
3584 : : *
3585 : : * This is called periodically by the background writer process.
3586 : : *
3587 : : * Returns true if it's appropriate for the bgwriter process to go into
3588 : : * low-power hibernation mode. (This happens if the strategy clock-sweep
3589 : : * has been "lapped" and no buffer allocations have occurred recently,
3590 : : * or if the bgwriter has been effectively disabled by setting
3591 : : * bgwriter_lru_maxpages to 0.)
3592 : : */
3593 : : bool
3487 andres@anarazel.de 3594 : 11160 : BgBufferSync(WritebackContext *wb_context)
3595 : : {
3596 : : /* info obtained from freelist.c */
3597 : : int strategy_buf_id;
3598 : : uint32 strategy_passes;
3599 : : uint32 recent_alloc;
3600 : :
3601 : : /*
3602 : : * Information saved between calls so we can determine the strategy
3603 : : * point's advance rate and avoid scanning already-cleaned buffers.
3604 : : */
3605 : : static bool saved_info_valid = false;
3606 : : static int prev_strategy_buf_id;
3607 : : static uint32 prev_strategy_passes;
3608 : : static int next_to_clean;
3609 : : static uint32 next_passes;
3610 : :
3611 : : /* Moving averages of allocation rate and clean-buffer density */
3612 : : static float smoothed_alloc = 0;
3613 : : static float smoothed_density = 10.0;
3614 : :
3615 : : /* Potentially these could be tunables, but for now, not */
6556 tgl@sss.pgh.pa.us 3616 : 11160 : float smoothing_samples = 16;
3617 : 11160 : float scan_whole_pool_milliseconds = 120000.0;
3618 : :
3619 : : /* Used to compute how far we scan ahead */
3620 : : long strategy_delta;
3621 : : int bufs_to_lap;
3622 : : int bufs_ahead;
3623 : : float scans_per_alloc;
3624 : : int reusable_buffers_est;
3625 : : int upcoming_alloc_est;
3626 : : int min_scan_buffers;
3627 : :
3628 : : /* Variables for the scanning loop proper */
3629 : : int num_to_scan;
3630 : : int num_written;
3631 : : int reusable_buffers;
3632 : :
3633 : : /* Variables for final smoothed_density update */
3634 : : long new_strategy_delta;
3635 : : uint32 new_recent_alloc;
3636 : :
3637 : : /*
3638 : : * Find out where the clock-sweep currently is, and how many buffer
3639 : : * allocations have happened since our last call.
3640 : : */
3641 : 11160 : strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
3642 : :
3643 : : /* Report buffer alloc counts to pgstat */
1249 andres@anarazel.de 3644 : 11160 : PendingBgWriterStats.buf_alloc += recent_alloc;
3645 : :
3646 : : /*
3647 : : * If we're not running the LRU scan, just stop after doing the stats
3648 : : * stuff. We mark the saved state invalid so that we can recover sanely
3649 : : * if LRU scan is turned back on later.
3650 : : */
6556 tgl@sss.pgh.pa.us 3651 [ + + ]: 11160 : if (bgwriter_lru_maxpages <= 0)
3652 : : {
3653 : 41 : saved_info_valid = false;
4972 heikki.linnakangas@i 3654 : 41 : return true;
3655 : : }
3656 : :
3657 : : /*
3658 : : * Compute strategy_delta = how many buffers have been scanned by the
3659 : : * clock-sweep since last time. If first time through, assume none. Then
3660 : : * see if we are still ahead of the clock-sweep, and if so, how many
3661 : : * buffers we could scan before we'd catch up with it and "lap" it. Note:
3662 : : * the weird-looking coding of xxx_passes comparisons is to avoid bogus
3663 : : * behavior when the passes counts wrap around.
3664 : : */
6556 tgl@sss.pgh.pa.us 3665 [ + + ]: 11119 : if (saved_info_valid)
3666 : : {
6505 bruce@momjian.us 3667 : 10634 : int32 passes_delta = strategy_passes - prev_strategy_passes;
3668 : :
6556 tgl@sss.pgh.pa.us 3669 : 10634 : strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2999 3670 : 10634 : strategy_delta += (long) passes_delta * NBuffers;
3671 : :
6556 3672 [ - + ]: 10634 : Assert(strategy_delta >= 0);
3673 : :
3674 [ + + ]: 10634 : if ((int32) (next_passes - strategy_passes) > 0)
3675 : : {
3676 : : /* we're one pass ahead of the strategy point */
3677 : 1933 : bufs_to_lap = strategy_buf_id - next_to_clean;
3678 : : #ifdef BGW_DEBUG
3679 : : elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3680 : : next_passes, next_to_clean,
3681 : : strategy_passes, strategy_buf_id,
3682 : : strategy_delta, bufs_to_lap);
3683 : : #endif
3684 : : }
3685 [ + + ]: 8701 : else if (next_passes == strategy_passes &&
3686 [ + + ]: 6136 : next_to_clean >= strategy_buf_id)
3687 : : {
3688 : : /* on same pass, but ahead or at least not behind */
3689 : 5399 : bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
3690 : : #ifdef BGW_DEBUG
3691 : : elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3692 : : next_passes, next_to_clean,
3693 : : strategy_passes, strategy_buf_id,
3694 : : strategy_delta, bufs_to_lap);
3695 : : #endif
3696 : : }
3697 : : else
3698 : : {
3699 : : /*
3700 : : * We're behind, so skip forward to the strategy point and start
3701 : : * cleaning from there.
3702 : : */
3703 : : #ifdef BGW_DEBUG
3704 : : elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
3705 : : next_passes, next_to_clean,
3706 : : strategy_passes, strategy_buf_id,
3707 : : strategy_delta);
3708 : : #endif
3709 : 3302 : next_to_clean = strategy_buf_id;
3710 : 3302 : next_passes = strategy_passes;
3711 : 3302 : bufs_to_lap = NBuffers;
3712 : : }
3713 : : }
3714 : : else
3715 : : {
3716 : : /*
3717 : : * Initializing at startup or after LRU scanning had been off. Always
3718 : : * start at the strategy point.
3719 : : */
3720 : : #ifdef BGW_DEBUG
3721 : : elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
3722 : : strategy_passes, strategy_buf_id);
3723 : : #endif
3724 : 485 : strategy_delta = 0;
3725 : 485 : next_to_clean = strategy_buf_id;
3726 : 485 : next_passes = strategy_passes;
3727 : 485 : bufs_to_lap = NBuffers;
3728 : : }
3729 : :
3730 : : /* Update saved info for next time */
3731 : 11119 : prev_strategy_buf_id = strategy_buf_id;
3732 : 11119 : prev_strategy_passes = strategy_passes;
3733 : 11119 : saved_info_valid = true;
3734 : :
3735 : : /*
3736 : : * Compute how many buffers had to be scanned for each new allocation, ie,
3737 : : * 1/density of reusable buffers, and track a moving average of that.
3738 : : *
3739 : : * If the strategy point didn't move, we don't update the density estimate.
3740 : : */
3741 [ + + + - ]: 11119 : if (strategy_delta > 0 && recent_alloc > 0)
3742 : : {
3743 : 7729 : scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
3744 : 7729 : smoothed_density += (scans_per_alloc - smoothed_density) /
3745 : : smoothing_samples;
3746 : : }
3747 : :
3748 : : /*
3749 : : * Estimate how many reusable buffers there are between the current
3750 : : * strategy point and where we've scanned ahead to, based on the smoothed
3751 : : * density estimate.
3752 : : */
3753 : 11119 : bufs_ahead = NBuffers - bufs_to_lap;
3754 : 11119 : reusable_buffers_est = (float) bufs_ahead / smoothed_density;
3755 : :
3756 : : /*
3757 : : * Track a moving average of recent buffer allocations. Here, rather than
3758 : : * a true average we want a fast-attack, slow-decline behavior: we
3759 : : * immediately follow any increase.
3760 : : */
3761 [ + + ]: 11119 : if (smoothed_alloc <= (float) recent_alloc)
3762 : 2071 : smoothed_alloc = recent_alloc;
3763 : : else
3764 : 9048 : smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
3765 : : smoothing_samples;
3766 : :
3767 : : /* Scale the estimate by a GUC to allow more aggressive tuning. */
5040 3768 : 11119 : upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
3769 : :
3770 : : /*
3771 : : * If recent_alloc remains at zero for many cycles, smoothed_alloc will
3772 : : * eventually underflow to zero, and the underflows produce annoying
3773 : : * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
3774 : : * zero, there's no point in tracking smaller and smaller values of
3775 : : * smoothed_alloc, so just reset it to exactly zero to avoid this
3776 : : * syndrome. It will pop back up as soon as recent_alloc increases.
3777 : : */
3778 [ + + ]: 11119 : if (upcoming_alloc_est == 0)
3779 : 658 : smoothed_alloc = 0;
3780 : :
3781 : : /*
3782 : : * Even in cases where there's been little or no buffer allocation
3783 : : * activity, we want to make a small amount of progress through the buffer
3784 : : * cache so that as many reusable buffers as possible are clean after an
3785 : : * idle period.
3786 : : *
3787 : : * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
3788 : : * the BGW will be called during the scan_whole_pool time; slice the
3789 : : * buffer pool into that many sections.
3790 : : */
6556 3791 : 11119 : min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
3792 : :
3793 [ + + ]: 11119 : if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
3794 : : {
3795 : : #ifdef BGW_DEBUG
3796 : : elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
3797 : : upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
3798 : : #endif
3799 : 5198 : upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
3800 : : }
3801 : :
3802 : : /*
3803 : : * Now write out dirty reusable buffers, working forward from the
3804 : : * next_to_clean point, until we have lapped the strategy scan, or cleaned
3805 : : * enough buffers to match our estimate of the next cycle's allocation
3806 : : * requirements, or hit the bgwriter_lru_maxpages limit.
3807 : : */
3808 : :
3809 : 11119 : num_to_scan = bufs_to_lap;
3810 : 11119 : num_written = 0;
3811 : 11119 : reusable_buffers = reusable_buffers_est;
3812 : :
3813 : : /* Execute the LRU scan */
3814 [ + + + + ]: 1688994 : while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
3815 : : {
3436 andres@anarazel.de 3816 : 1677876 : int sync_state = SyncOneBuffer(next_to_clean, true,
3817 : : wb_context);
3818 : :
6556 tgl@sss.pgh.pa.us 3819 [ + + ]: 1677876 : if (++next_to_clean >= NBuffers)
3820 : : {
3821 : 3149 : next_to_clean = 0;
3822 : 3149 : next_passes++;
3823 : : }
3824 : 1677876 : num_to_scan--;
3825 : :
3436 andres@anarazel.de 3826 [ + + ]: 1677876 : if (sync_state & BUF_WRITTEN)
3827 : : {
6556 tgl@sss.pgh.pa.us 3828 : 25584 : reusable_buffers++;
3829 [ + + ]: 25584 : if (++num_written >= bgwriter_lru_maxpages)
3830 : : {
1249 andres@anarazel.de 3831 : 1 : PendingBgWriterStats.maxwritten_clean++;
6556 tgl@sss.pgh.pa.us 3832 : 1 : break;
3833 : : }
3834 : : }
3436 andres@anarazel.de 3835 [ + + ]: 1652292 : else if (sync_state & BUF_REUSABLE)
6556 tgl@sss.pgh.pa.us 3836 : 1239614 : reusable_buffers++;
3837 : : }
3838 : :
1249 andres@anarazel.de 3839 : 11119 : PendingBgWriterStats.buf_written_clean += num_written;
3840 : :
3841 : : #ifdef BGW_DEBUG
3842 : : elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3843 : : recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3844 : : smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3845 : : bufs_to_lap - num_to_scan,
3846 : : num_written,
3847 : : reusable_buffers - reusable_buffers_est);
3848 : : #endif
3849 : :
3850 : : /*
3851 : : * Consider the above scan as being like a new allocation scan.
3852 : : * Characterize its density and update the smoothed one based on it. This
3853 : : * effectively halves the moving average period in cases where both the
3854 : : * strategy and the background writer are doing some useful scanning,
3855 : : * which is helpful because a long memory isn't as desirable on the
3856 : : * density estimates.
3857 : : */
4868 tgl@sss.pgh.pa.us 3858 : 11119 : new_strategy_delta = bufs_to_lap - num_to_scan;
3859 : 11119 : new_recent_alloc = reusable_buffers - reusable_buffers_est;
3860 [ + + + + ]: 11119 : if (new_strategy_delta > 0 && new_recent_alloc > 0)
3861 : : {
3862 : 9979 : scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
6556 3863 : 9979 : smoothed_density += (scans_per_alloc - smoothed_density) /
3864 : : smoothing_samples;
3865 : :
3866 : : #ifdef BGW_DEBUG
3867 : : elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
3868 : : new_recent_alloc, new_strategy_delta,
3869 : : scans_per_alloc, smoothed_density);
3870 : : #endif
3871 : : }
3872 : :
3873 : : /* Return true if OK to hibernate */
4868 3874 [ + + + - ]: 11119 : return (bufs_to_lap == 0 && recent_alloc == 0);
3875 : : }
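/*
 * A worked example of the estimates above (all numbers invented for
 * illustration).  Suppose recent_alloc = 320 buffers were allocated since
 * the previous call and the clock-sweep advanced by strategy_delta = 1600
 * buffers; then scans_per_alloc = 1600 / 320 = 5, i.e. roughly one reusable
 * buffer per five buffers scanned, and smoothed_density drifts toward 5 at
 * 1/16th per call.  If we are bufs_ahead = 2000 buffers ahead of the
 * strategy point, reusable_buffers_est = 2000 / 5 = 400.  With
 * smoothed_alloc = 320 and bgwriter_lru_multiplier = 2.0,
 * upcoming_alloc_est = 640, so the LRU scan keeps writing until it believes
 * about 640 reusable buffers lie between the strategy point and
 * next_to_clean, or it hits the bgwriter_lru_maxpages cap.
 */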
3876 : :
3877 : : /*
3878 : : * SyncOneBuffer -- process a single buffer during syncing.
3879 : : *
3880 : : * If skip_recently_used is true, we don't write currently-pinned buffers, nor
3881 : : * buffers marked recently used, as these are not replacement candidates.
3882 : : *
3883 : : * Returns a bitmask containing the following flag bits:
3884 : : * BUF_WRITTEN: we wrote the buffer.
3885 : : * BUF_REUSABLE: buffer is available for replacement, ie, it has
3886 : : * pin count 0 and usage count 0.
3887 : : *
3888 : : * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
3889 : : * after locking it, but we don't care all that much.)
3890 : : */
3891 : : static int
3487 andres@anarazel.de 3892 : 1944201 : SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
3893 : : {
3582 rhaas@postgresql.org 3894 : 1944201 : BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
6505 bruce@momjian.us 3895 : 1944201 : int result = 0;
3896 : : uint32 buf_state;
3897 : : BufferTag tag;
3898 : :
3899 : : /* Make sure we can handle the pin */
3883 andres@anarazel.de 3900 : 1944201 : ReservePrivateRefCountEntry();
668 heikki.linnakangas@i 3901 : 1944201 : ResourceOwnerEnlarge(CurrentResourceOwner);
3902 : :
3903 : : /*
3904 : : * Check whether buffer needs writing.
3905 : : *
3906 : : * We can make this check without taking the buffer content lock so long
3907 : : * as we mark pages dirty in access methods *before* logging changes with
3908 : : * XLogInsert(): if someone marks the buffer dirty just after our check we
3909 : : * don't worry, because our checkpoint.redo points before the log record for
3910 : : * the upcoming changes and so we are not required to write such a dirty buffer.
3911 : : */
3436 andres@anarazel.de 3912 : 1944201 : buf_state = LockBufHdr(bufHdr);
3913 : :
3914 [ + + ]: 1944201 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
3915 [ + + ]: 1939317 : BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
3916 : : {
6556 tgl@sss.pgh.pa.us 3917 : 1267782 : result |= BUF_REUSABLE;
3918 : : }
3919 [ + + ]: 676419 : else if (skip_recently_used)
3920 : : {
3921 : : /* Caller told us not to write recently-used buffers */
3436 andres@anarazel.de 3922 : 412678 : UnlockBufHdr(bufHdr, buf_state);
6556 tgl@sss.pgh.pa.us 3923 : 412678 : return result;
3924 : : }
3925 : :
3436 andres@anarazel.de 3926 [ + + + + ]: 1531523 : if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
3927 : : {
3928 : : /* It's clean, so nothing to do */
3929 : 1239614 : UnlockBufHdr(bufHdr, buf_state);
6556 tgl@sss.pgh.pa.us 3930 : 1239614 : return result;
3931 : : }
3932 : :
3933 : : /*
3934 : : * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
3935 : : * buffer is clean by the time we've locked it.)
3936 : : */
7491 3937 : 291909 : PinBuffer_Locked(bufHdr);
3553 rhaas@postgresql.org 3938 : 291909 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3939 : :
940 andres@anarazel.de 3940 : 291909 : FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
3941 : :
3553 rhaas@postgresql.org 3942 : 291909 : LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3943 : :
3487 andres@anarazel.de 3944 : 291909 : tag = bufHdr->tag;
3945 : :
1072 michael@paquier.xyz 3946 : 291909 : UnpinBuffer(bufHdr);
3947 : :
3948 : : /*
3949 : : * SyncOneBuffer() is only called by checkpointer and bgwriter, so
3950 : : * IOContext will always be IOCONTEXT_NORMAL.
3951 : : */
843 andres@anarazel.de 3952 : 291909 : ScheduleBufferTagForWriteback(wb_context, IOCONTEXT_NORMAL, &tag);
3953 : :
6556 tgl@sss.pgh.pa.us 3954 : 291909 : return result | BUF_WRITTEN;
3955 : : }
3956 : :
3957 : : /*
3958 : : * AtEOXact_Buffers - clean up at end of transaction.
3959 : : *
3960 : : * As of PostgreSQL 8.0, buffer pins should get released by the
3961 : : * ResourceOwner mechanism. This routine is just a debugging
3962 : : * cross-check that no pins remain.
3963 : : */
3964 : : void
8432 3965 : 318597 : AtEOXact_Buffers(bool isCommit)
3966 : : {
4096 andres@anarazel.de 3967 : 318597 : CheckForBufferLeaks();
3968 : :
7630 tgl@sss.pgh.pa.us 3969 : 318597 : AtEOXact_LocalBuffers(isCommit);
3970 : :
4025 andres@anarazel.de 3971 [ - + ]: 318597 : Assert(PrivateRefCountOverflowed == 0);
3972 : 318597 : }
3973 : :
3974 : : /*
3975 : : * Initialize access to shared buffer pool
3976 : : *
3977 : : * This is called during backend startup (whether standalone or under the
3978 : : * postmaster). It sets up for this backend's access to the already-existing
3979 : : * buffer pool.
3980 : : */
3981 : : void
373 heikki.linnakangas@i 3982 : 18761 : InitBufferManagerAccess(void)
3983 : : {
3984 : : HASHCTL hash_ctl;
3985 : :
3986 : : /*
3987 : : * An advisory limit on the number of pins each backend should hold, based
3988 : : * on shared_buffers and the maximum number of connections possible.
3989 : : * That's very pessimistic, but outside toy-sized shared_buffers it should
3990 : : * allow plenty of pins. LimitAdditionalPins() and
3991 : : * GetAdditionalPinLimit() can be used to check the remaining balance.
3992 : : */
176 tmunro@postgresql.or 3993 : 18761 : MaxProportionalPins = NBuffers / (MaxBackends + NUM_AUXILIARY_PROCS);
3994 : :
4025 andres@anarazel.de 3995 : 18761 : memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
3996 : :
3997 : 18761 : hash_ctl.keysize = sizeof(int32);
3485 3998 : 18761 : hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
3999 : :
4025 4000 : 18761 : PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
4001 : : HASH_ELEM | HASH_BLOBS);
4002 : :
4003 : : /*
4004 : : * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
4005 : : * the corresponding phase of backend shutdown.
4006 : : */
1493 4007 [ - + ]: 18761 : Assert(MyProc != NULL);
7334 tgl@sss.pgh.pa.us 4008 : 18761 : on_shmem_exit(AtProcExit_Buffers, 0);
4009 : 18761 : }
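/*
 * For example (figures invented for illustration): with shared_buffers set
 * to 128MB (NBuffers = 16384) and about 110 possible backends plus
 * auxiliary processes, MaxProportionalPins works out to 16384 / 110 = 148
 * pins per backend -- pessimistic, but far more than ordinary code paths
 * ever hold at once.
 */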
4010 : :
4011 : : /*
4012 : : * During backend exit, ensure that we released all shared-buffer locks and
4013 : : * assert that we have no remaining pins.
4014 : : */
4015 : : static void
4016 : 18761 : AtProcExit_Buffers(int code, Datum arg)
4017 : : {
7630 4018 : 18761 : UnlockBuffers();
4019 : :
4096 andres@anarazel.de 4020 : 18761 : CheckForBufferLeaks();
4021 : :
4022 : : /* localbuf.c needs a chance too */
4023 : 18761 : AtProcExit_LocalBuffers();
4024 : 18761 : }
4025 : :
4026 : : /*
4027 : : * CheckForBufferLeaks - ensure this backend holds no buffer pins
4028 : : *
4029 : : * As of PostgreSQL 8.0, buffer pins should get released by the
4030 : : * ResourceOwner mechanism. This routine is just a debugging
4031 : : * cross-check that no pins remain.
4032 : : */
4033 : : static void
4034 : 337358 : CheckForBufferLeaks(void)
4035 : : {
4036 : : #ifdef USE_ASSERT_CHECKING
4037 : 337358 : int RefCountErrors = 0;
4038 : : PrivateRefCountEntry *res;
4039 : : int i;
4040 : : char *s;
4041 : :
4042 : : /* check the array */
4025 4043 [ + + ]: 3036222 : for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
4044 : : {
4045 : 2698864 : res = &PrivateRefCountArray[i];
4046 : :
4047 [ - + ]: 2698864 : if (res->buffer != InvalidBuffer)
4048 : : {
668 heikki.linnakangas@i 4049 :UBC 0 : s = DebugPrintBufferRefcount(res->buffer);
4050 [ # # ]: 0 : elog(WARNING, "buffer refcount leak: %s", s);
4051 : 0 : pfree(s);
4052 : :
4025 andres@anarazel.de 4053 : 0 : RefCountErrors++;
4054 : : }
4055 : : }
4056 : :
4057 : : /* if necessary search the hash */
4025 andres@anarazel.de 4058 [ - + ]:CBC 337358 : if (PrivateRefCountOverflowed)
4059 : : {
4060 : : HASH_SEQ_STATUS hstat;
4061 : :
4025 andres@anarazel.de 4062 :UBC 0 : hash_seq_init(&hstat, PrivateRefCountHash);
4063 [ # # ]: 0 : while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
4064 : : {
668 heikki.linnakangas@i 4065 : 0 : s = DebugPrintBufferRefcount(res->buffer);
4066 [ # # ]: 0 : elog(WARNING, "buffer refcount leak: %s", s);
4067 : 0 : pfree(s);
4096 andres@anarazel.de 4068 : 0 : RefCountErrors++;
4069 : : }
4070 : : }
4071 : :
4096 andres@anarazel.de 4072 [ - + ]:CBC 337358 : Assert(RefCountErrors == 0);
4073 : : #endif
7630 tgl@sss.pgh.pa.us 4074 : 337358 : }
4075 : :
4076 : : #ifdef USE_ASSERT_CHECKING
4077 : : /*
4078 : : * Check for exclusive-locked catalog buffers. This is the core of
4079 : : * AssertCouldGetRelation().
4080 : : *
4081 : : * A backend would self-deadlock on LWLocks if the catalog scan read the
4082 : : * exclusive-locked buffer. The main threat is exclusive-locked buffers of
4083 : : * catalogs used in relcache, because a catcache search on any catalog may
4084 : : * build that catalog's relcache entry. We don't have an inventory of
4085 : : * catalogs relcache uses, so just check buffers of most catalogs.
4086 : : *
4087 : : * It's better to minimize waits while holding an exclusive buffer lock, so it
4088 : : * would be nice to broaden this check not to be catalog-specific. However,
4089 : : * bttextcmp() accesses pg_collation, and non-core opclasses might similarly
4090 : : * read tables. That is deadlock-free as long as there's no loop in the
4091 : : * dependency graph: modifying table A may cause an opclass to read table B,
4092 : : * but it must not cause a read of table A.
4093 : : */
4094 : : void
142 noah@leadboat.com 4095 : 101208092 : AssertBufferLocksPermitCatalogRead(void)
4096 : : {
4097 : 101208092 : ForEachLWLockHeldByMe(AssertNotCatalogBufferLock, NULL);
4098 : 101208092 : }
4099 : :
4100 : : static void
4101 : 127442 : AssertNotCatalogBufferLock(LWLock *lock, LWLockMode mode,
4102 : : void *unused_context)
4103 : : {
4104 : : BufferDesc *bufHdr;
4105 : : BufferTag tag;
4106 : : Oid relid;
4107 : :
4108 [ + + ]: 127442 : if (mode != LW_EXCLUSIVE)
4109 : 96964 : return;
4110 : :
4111 [ + + ]: 35085 : if (!((BufferDescPadded *) lock > BufferDescriptors &&
4112 [ - + ]: 31903 : (BufferDescPadded *) lock < BufferDescriptors + NBuffers))
4113 : 3182 : return; /* not a buffer lock */
4114 : :
4115 : 31903 : bufHdr = (BufferDesc *)
4116 : : ((char *) lock - offsetof(BufferDesc, content_lock));
4117 : 31903 : tag = bufHdr->tag;
4118 : :
4119 : : /*
4120 : : * This relNumber==relid assumption holds until a catalog experiences
4121 : : * VACUUM FULL or similar. After a command like that, relNumber will be
4122 : : * in the normal (non-catalog) range, and we lose the ability to detect
4123 : : * hazardous access to that catalog. Calling RelidByRelfilenumber() would
4124 : : * close that gap, but RelidByRelfilenumber() might then deadlock with a
4125 : : * held lock.
4126 : : */
4127 : 31903 : relid = tag.relNumber;
4128 : :
4129 [ + + ]: 31903 : if (IsCatalogTextUniqueIndexOid(relid)) /* see comments at the callee */
4130 : 1425 : return;
4131 : :
4132 [ - + ]: 30478 : Assert(!IsCatalogRelationOid(relid));
4133 : : }
4134 : : #endif
4135 : :
4136 : :
4137 : : /*
4138 : : * Helper routine to describe an unexpectedly pinned buffer, for use in warnings
4139 : : */
4140 : : char *
668 heikki.linnakangas@i 4141 : 60 : DebugPrintBufferRefcount(Buffer buffer)
4142 : : {
4143 : : BufferDesc *buf;
4144 : : int32 loccount;
4145 : : char *result;
4146 : : ProcNumber backend;
4147 : : uint32 buf_state;
4148 : :
7630 tgl@sss.pgh.pa.us 4149 [ - + ]: 60 : Assert(BufferIsValid(buffer));
4150 [ + + ]: 60 : if (BufferIsLocal(buffer))
4151 : : {
3873 andres@anarazel.de 4152 : 24 : buf = GetLocalBufferDescriptor(-buffer - 1);
7630 tgl@sss.pgh.pa.us 4153 : 24 : loccount = LocalRefCount[-buffer - 1];
552 heikki.linnakangas@i 4154 : 24 : backend = MyProcNumber;
4155 : : }
4156 : : else
4157 : : {
3873 andres@anarazel.de 4158 : 36 : buf = GetBufferDescriptor(buffer - 1);
4025 4159 : 36 : loccount = GetPrivateRefCount(buffer);
552 heikki.linnakangas@i 4160 : 36 : backend = INVALID_PROC_NUMBER;
4161 : : }
4162 : :
4163 : : /* theoretically we should lock the bufhdr here */
3436 andres@anarazel.de 4164 : 60 : buf_state = pg_atomic_read_u32(&buf->state);
4165 : :
668 heikki.linnakangas@i 4166 : 60 : result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
4167 : : buffer,
193 andres@anarazel.de 4168 : 60 : relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
4169 : : BufTagGetForkNum(&buf->tag)).str,
4170 : : buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
4171 : : BUF_STATE_GET_REFCOUNT(buf_state), loccount);
668 heikki.linnakangas@i 4172 : 60 : return result;
4173 : : }
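/*
 * Illustrative sketch of the intended usage, mirroring CheckForBufferLeaks()
 * above; the returned string is palloc'd, so the caller should free it:
 *
 *		s = DebugPrintBufferRefcount(res->buffer);
 *		elog(WARNING, "buffer refcount leak: %s", s);
 *		pfree(s);
 */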
4174 : :
4175 : : /*
4176 : : * CheckPointBuffers
4177 : : *
4178 : : * Flush all dirty blocks in buffer pool to disk at checkpoint time.
4179 : : *
4180 : : * Note: temporary relations do not participate in checkpoints, so they don't
4181 : : * need to be flushed.
4182 : : */
4183 : : void
6645 tgl@sss.pgh.pa.us 4184 : 1677 : CheckPointBuffers(int flags)
4185 : : {
4186 : 1677 : BufferSync(flags);
9046 vadim4o@yahoo.com 4187 : 1677 : }
4188 : :
4189 : : /*
4190 : : * BufferGetBlockNumber
4191 : : * Returns the block number associated with a buffer.
4192 : : *
4193 : : * Note:
4194 : : * Assumes that the buffer is valid and pinned, else the
4195 : : * value may be obsolete immediately...
4196 : : */
4197 : : BlockNumber
10651 scrappy@hub.org 4198 : 139151184 : BufferGetBlockNumber(Buffer buffer)
4199 : : {
4200 : : BufferDesc *bufHdr;
4201 : :
8545 bruce@momjian.us 4202 [ - + + + : 139151184 : Assert(BufferIsPinned(buffer));
- + ]
4203 : :
10226 4204 [ + + ]: 139151184 : if (BufferIsLocal(buffer))
3873 andres@anarazel.de 4205 : 4243822 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
4206 : : else
4207 : 134907362 : bufHdr = GetBufferDescriptor(buffer - 1);
4208 : :
4209 : : /* pinned, so OK to read tag without spinlock */
7491 tgl@sss.pgh.pa.us 4210 : 139151184 : return bufHdr->tag.blockNum;
4211 : : }
4212 : :
4213 : : /*
4214 : : * BufferGetTag
4215 : : * Returns the relfilelocator, fork number and block number associated with
4216 : : * a buffer.
4217 : : */
4218 : : void
1158 rhaas@postgresql.org 4219 : 14750896 : BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
4220 : : BlockNumber *blknum)
4221 : : {
4222 : : BufferDesc *bufHdr;
4223 : :
4224 : : /* Do the same checks as BufferGetBlockNumber. */
6235 heikki.linnakangas@i 4225 [ - + - + : 14750896 : Assert(BufferIsPinned(buffer));
- + ]
4226 : :
7810 tgl@sss.pgh.pa.us 4227 [ - + ]: 14750896 : if (BufferIsLocal(buffer))
3873 andres@anarazel.de 4228 :UBC 0 : bufHdr = GetLocalBufferDescriptor(-buffer - 1);
4229 : : else
3873 andres@anarazel.de 4230 :CBC 14750896 : bufHdr = GetBufferDescriptor(buffer - 1);
4231 : :
4232 : : /* pinned, so OK to read tag without spinlock */
1109 rhaas@postgresql.org 4233 : 14750896 : *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
4234 : 14750896 : *forknum = BufTagGetForkNum(&bufHdr->tag);
6235 heikki.linnakangas@i 4235 : 14750896 : *blknum = bufHdr->tag.blockNum;
7810 tgl@sss.pgh.pa.us 4236 : 14750896 : }
4237 : :
4238 : : /*
4239 : : * FlushBuffer
4240 : : * Physically write out a shared buffer.
4241 : : *
4242 : : * NOTE: this actually just passes the buffer contents to the kernel; the
4243 : : * real write to disk won't happen until the kernel feels like it. This
4244 : : * is okay from our point of view since we can redo the changes from WAL.
4245 : : * However, we will need to force the changes to disk via fsync before
4246 : : * we can checkpoint WAL.
4247 : : *
4248 : : * The caller must hold a pin on the buffer and have share-locked the
4249 : : * buffer contents. (Note: a share-lock does not prevent updates of
4250 : : * hint bits in the buffer, so the page could change while the write
4251 : : * is in progress, but we assume that that will not invalidate the data
4252 : : * written.)
4253 : : *
4254 : : * If the caller has an smgr reference for the buffer's relation, pass it
4255 : : * as the second parameter. If not, pass NULL.
4256 : : */
4257 : : static void
940 andres@anarazel.de 4258 : 552721 : FlushBuffer(BufferDesc *buf, SMgrRelation reln, IOObject io_object,
4259 : : IOContext io_context)
4260 : : {
4261 : : XLogRecPtr recptr;
4262 : : ErrorContextCallback errcallback;
4263 : : instr_time io_start;
4264 : : Block bufBlock;
4265 : : char *bufToWrite;
4266 : : uint32 buf_state;
4267 : :
4268 : : /*
4269 : : * Try to start an I/O operation. If StartBufferIO returns false, then
4270 : : * someone else flushed the buffer before we could, so we need not do
4271 : : * anything.
4272 : : */
521 tmunro@postgresql.or 4273 [ + + ]: 552721 : if (!StartBufferIO(buf, false, false))
7491 tgl@sss.pgh.pa.us 4274 : 11 : return;
4275 : :
4276 : : /* Setup error traceback support for ereport() */
4681 heikki.linnakangas@i 4277 : 552710 : errcallback.callback = shared_buffer_write_error_callback;
282 peter@eisentraut.org 4278 : 552710 : errcallback.arg = buf;
4681 heikki.linnakangas@i 4279 : 552710 : errcallback.previous = error_context_stack;
4280 : 552710 : error_context_stack = &errcallback;
4281 : :
4282 : : /* Find smgr relation for buffer */
7808 tgl@sss.pgh.pa.us 4283 [ + + ]: 552710 : if (reln == NULL)
552 heikki.linnakangas@i 4284 : 548280 : reln = smgropen(BufTagGetRelFileLocator(&buf->tag), INVALID_PROC_NUMBER);
4285 : :
4286 : : TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
4287 : : buf->tag.blockNum,
4288 : : reln->smgr_rlocator.locator.spcOid,
4289 : : reln->smgr_rlocator.locator.dbOid,
4290 : : reln->smgr_rlocator.locator.relNumber);
4291 : :
3436 andres@anarazel.de 4292 : 552710 : buf_state = LockBufHdr(buf);
4293 : :
4294 : : /*
4295 : : * Run PageGetLSN while holding header lock, since we don't have the
4296 : : * buffer locked exclusively in all cases.
4297 : : */
4551 simon@2ndQuadrant.co 4298 : 552710 : recptr = BufferGetLSN(buf);
4299 : :
4300 : : /* To check if block content changes while flushing. - vadim 01/17/97 */
3436 andres@anarazel.de 4301 : 552710 : buf_state &= ~BM_JUST_DIRTIED;
4302 : 552710 : UnlockBufHdr(buf, buf_state);
4303 : :
4304 : : /*
4305 : : * Force XLOG flush up to buffer's LSN. This implements the basic WAL
4306 : : * rule that log updates must hit disk before any of the data-file changes
4307 : : * they describe do.
4308 : : *
4309 : : * However, this rule does not apply to unlogged relations, which will be
4310 : : * lost after a crash anyway. Most unlogged relation pages do not bear
4311 : : * LSNs since we never emit WAL records for them, and therefore flushing
4312 : : * up through the buffer LSN would be useless, but harmless. However,
4313 : : * GiST indexes use LSNs internally to track page-splits, and therefore
4314 : : * unlogged GiST pages bear "fake" LSNs generated by
4315 : : * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
4316 : : * LSN counter could advance past the WAL insertion point; and if it did
4317 : : * happen, attempting to flush WAL through that location would fail, with
4318 : : * disastrous system-wide consequences. To make sure that can't happen,
4319 : : * skip the flush if the buffer isn't permanent.
4320 : : */
4321 [ + + ]: 552710 : if (buf_state & BM_PERMANENT)
4590 heikki.linnakangas@i 4322 : 550934 : XLogFlush(recptr);
4323 : :
4324 : : /*
4325 : : * Now it's safe to write the buffer to disk. Note that no one else should
4326 : : * have been able to write it while we were busy with log flushing,
4327 : : * because we got the exclusive right to perform I/O by setting the
4328 : : * BM_IO_IN_PROGRESS bit.
4329 : : */
4551 simon@2ndQuadrant.co 4330 : 552710 : bufBlock = BufHdrGetBlock(buf);
4331 : :
4332 : : /*
4333 : : * Update page checksum if desired. Since we have only shared lock on the
4334 : : * buffer, other processes might be updating hint bits in it, so we must
4335 : : * copy the page to private storage if we do checksumming.
4336 : : */
4337 : 552710 : bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
4338 : :
192 michael@paquier.xyz 4339 : 552710 : io_start = pgstat_prepare_io_time(track_io_timing);
4340 : :
4341 : : /*
4342 : : * bufToWrite is either the shared buffer or a copy, as appropriate.
4343 : : */
7879 tgl@sss.pgh.pa.us 4344 : 552710 : smgrwrite(reln,
1109 rhaas@postgresql.org 4345 : 552710 : BufTagGetForkNum(&buf->tag),
4346 : : buf->tag.blockNum,
4347 : : bufToWrite,
4348 : : false);
4349 : :
4350 : : /*
4351 : : * When a strategy is in use, only flushes of dirty buffers already in the
4352 : : * strategy ring are counted as strategy writes (IOCONTEXT
4353 : : * [BULKREAD|BULKWRITE|VACUUM] IOOP_WRITE) for the purpose of IO
4354 : : * statistics tracking.
4355 : : *
4356 : : * If a shared buffer initially added to the ring must be flushed before
4357 : : * being used, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE.
4358 : : *
4359 : : * If a shared buffer that was added to the ring later, because the
4360 : : * current strategy buffer was pinned or in use, or because all strategy
4361 : : * buffers were dirty and rejected (for BAS_BULKREAD operations only),
4362 : : * requires flushing, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE
4363 : : * (from_ring will be false).
4364 : : *
4365 : : * When a strategy is not in use, the write can only be a "regular" write
4366 : : * of a dirty shared buffer (IOCONTEXT_NORMAL IOOP_WRITE).
4367 : : */
883 andres@anarazel.de 4368 : 552710 : pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
4369 : : IOOP_WRITE, io_start, 1, BLCKSZ);
4370 : :
5744 rhaas@postgresql.org 4371 : 552710 : pgBufferUsage.shared_blks_written++;
4372 : :
4373 : : /*
4374 : : * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
4375 : : * end the BM_IO_IN_PROGRESS state.
4376 : : */
160 andres@anarazel.de 4377 : 552710 : TerminateBufferIO(buf, true, 0, true, false);
4378 : :
4379 : : TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
4380 : : buf->tag.blockNum,
4381 : : reln->smgr_rlocator.locator.spcOid,
4382 : : reln->smgr_rlocator.locator.dbOid,
4383 : : reln->smgr_rlocator.locator.relNumber);
4384 : :
4385 : : /* Pop the error context stack */
4681 heikki.linnakangas@i 4386 : 552710 : error_context_stack = errcallback.previous;
4387 : : }
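/*
 * Illustrative sketch of the calling convention FlushBuffer() relies on, in
 * the form used by FlushRelationBuffers() and FlushDatabaseBuffers() below.
 * PinBuffer_Locked() expects the buffer header spinlock to be held and
 * releases it while taking the pin.
 *
 *		ReservePrivateRefCountEntry();
 *		ResourceOwnerEnlarge(CurrentResourceOwner);
 *
 *		buf_state = LockBufHdr(bufHdr);
 *		if ((buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 *		{
 *			PinBuffer_Locked(bufHdr);
 *			LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
 *			FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
 *			LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
 *			UnpinBuffer(bufHdr);
 *		}
 *		else
 *			UnlockBufHdr(bufHdr, buf_state);
 */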
4388 : :
4389 : : /*
4390 : : * RelationGetNumberOfBlocksInFork
4391 : : * Determines the current number of pages in the specified relation fork.
4392 : : *
4393 : : * Note that the accuracy of the result will depend on the details of the
4394 : : * relation's storage. For builtin AMs it'll be accurate, but for external AMs
4395 : : * it might not be.
4396 : : */
4397 : : BlockNumber
5365 rhaas@postgresql.org 4398 : 1716895 : RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
4399 : : {
1373 peter@eisentraut.org 4400 [ + + + + : 1716895 : if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
+ + ]
4401 : : {
4402 : : /*
4403 : : * Not every table AM uses BLCKSZ-wide fixed-size blocks. The tableam
4404 : : * API therefore returns the size in bytes, but for the purpose of this
4405 : : * routine we want the number of blocks, so divide the byte count by
4406 : : * BLCKSZ, rounding up.
4407 : : */
4408 : : uint64 szbytes;
4409 : :
4410 : 1277756 : szbytes = table_relation_size(relation, forkNum);
4411 : :
4412 : 1277737 : return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
4413 : : }
4414 [ + - + + : 439139 : else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
- + - - -
- ]
4415 : : {
1213 tgl@sss.pgh.pa.us 4416 : 439139 : return smgrnblocks(RelationGetSmgr(relation), forkNum);
4417 : : }
4418 : : else
1373 peter@eisentraut.org 4419 :UBC 0 : Assert(false);
4420 : :
4421 : : return 0; /* keep compiler quiet */
4422 : : }
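/*
 * Worked example of the round-up above, assuming the default BLCKSZ of 8192:
 * a table AM reporting szbytes = 16384 yields (16384 + 8191) / 8192 = 2
 * blocks, while szbytes = 16385 yields 3 blocks, i.e. any partial trailing
 * block counts as a whole block.
 */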
4423 : :
4424 : : /*
4425 : : * BufferIsPermanent
4426 : : * Determines whether a buffer will potentially still be around after
4427 : : * a crash. Caller must hold a buffer pin.
4428 : : */
4429 : : bool
5062 rhaas@postgresql.org 4430 :CBC 9468036 : BufferIsPermanent(Buffer buffer)
4431 : : {
4432 : : BufferDesc *bufHdr;
4433 : :
4434 : : /* Local buffers are used only for temp relations. */
4435 [ + + ]: 9468036 : if (BufferIsLocal(buffer))
4436 : 736030 : return false;
4437 : :
4438 : : /* Make sure we've got a real buffer, and that we hold a pin on it. */
4439 [ - + ]: 8732006 : Assert(BufferIsValid(buffer));
4440 [ - + - + : 8732006 : Assert(BufferIsPinned(buffer));
- + ]
4441 : :
4442 : : /*
4443 : : * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
4444 : : * need not bother with the buffer header spinlock. Even if someone else
4445 : : * changes the buffer header state while we're doing this, the state is
4446 : : * changed atomically, so we'll read the old value or the new value, but
4447 : : * not random garbage.
4448 : : */
3873 andres@anarazel.de 4449 : 8732006 : bufHdr = GetBufferDescriptor(buffer - 1);
3436 4450 : 8732006 : return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
4451 : : }
4452 : :
4453 : : /*
4454 : : * BufferGetLSNAtomic
4455 : : * Retrieves the LSN of the buffer atomically using a buffer header lock.
4456 : : * This is necessary for some callers who may not have an exclusive lock
4457 : : * on the buffer.
4458 : : */
4459 : : XLogRecPtr
4551 simon@2ndQuadrant.co 4460 : 5886733 : BufferGetLSNAtomic(Buffer buffer)
4461 : : {
3426 kgrittn@postgresql.o 4462 : 5886733 : char *page = BufferGetPage(buffer);
4463 : : BufferDesc *bufHdr;
4464 : : XLogRecPtr lsn;
4465 : : uint32 buf_state;
4466 : :
4467 : : /*
4468 : : * If we don't need locking for correctness, fastpath out.
4469 : : */
3725 heikki.linnakangas@i 4470 [ + + - + : 5886733 : if (!XLogHintBitIsNeeded() || BufferIsLocal(buffer))
+ + ]
4551 simon@2ndQuadrant.co 4471 : 238852 : return PageGetLSN(page);
4472 : :
4473 : : /* Make sure we've got a real buffer, and that we hold a pin on it. */
4474 [ - + ]: 5647881 : Assert(BufferIsValid(buffer));
4475 [ - + - + : 5647881 : Assert(BufferIsPinned(buffer));
- + ]
4476 : :
199 rguo@postgresql.org 4477 : 5647881 : bufHdr = GetBufferDescriptor(buffer - 1);
3436 andres@anarazel.de 4478 : 5647881 : buf_state = LockBufHdr(bufHdr);
4551 simon@2ndQuadrant.co 4479 : 5647881 : lsn = PageGetLSN(page);
3436 andres@anarazel.de 4480 : 5647881 : UnlockBufHdr(bufHdr, buf_state);
4481 : :
4551 simon@2ndQuadrant.co 4482 : 5647881 : return lsn;
4483 : : }
4484 : :
4485 : : /* ---------------------------------------------------------------------
4486 : : * DropRelationBuffers
4487 : : *
4488 : : * This function removes from the buffer pool all the pages of the
4489 : : * specified relation forks that have block numbers >= firstDelBlock.
4490 : : * (In particular, with firstDelBlock = 0, all pages are removed.)
4491 : : * Dirty pages are simply dropped, without bothering to write them
4492 : : * out first. Therefore, this is NOT rollback-able, and so should be
4493 : : * used only with extreme caution!
4494 : : *
4495 : : * Currently, this is called only from smgr.c when the underlying file
4496 : : * is about to be deleted or truncated (firstDelBlock is needed for
4497 : : * the truncation case). The data in the affected pages would therefore
4498 : : * be deleted momentarily anyway, and there is no point in writing it.
4499 : : * It is the responsibility of higher-level code to ensure that the
4500 : : * deletion or truncation does not lose any data that could be needed
4501 : : * later. It is also the responsibility of higher-level code to ensure
4502 : : * that no other process could be trying to load more pages of the
4503 : : * relation into buffers.
4504 : : * --------------------------------------------------------------------
4505 : : */
4506 : : void
1152 rhaas@postgresql.org 4507 : 627 : DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
4508 : : int nforks, BlockNumber *firstDelBlock)
4509 : : {
4510 : : int i;
4511 : : int j;
4512 : : RelFileLocatorBackend rlocator;
4513 : : BlockNumber nForkBlock[MAX_FORKNUM];
1578 tgl@sss.pgh.pa.us 4514 : 627 : uint64 nBlocksToInvalidate = 0;
4515 : :
1158 rhaas@postgresql.org 4516 : 627 : rlocator = smgr_reln->smgr_rlocator;
4517 : :
4518 : : /* If it's a local relation, it's localbuf.c's problem. */
4519 [ + + ]: 627 : if (RelFileLocatorBackendIsTemp(rlocator))
4520 : : {
552 heikki.linnakangas@i 4521 [ + - ]: 374 : if (rlocator.backend == MyProcNumber)
64 fujii@postgresql.org 4522 :GNC 374 : DropRelationLocalBuffers(rlocator.locator, forkNum, nforks,
4523 : : firstDelBlock);
4524 : :
8432 tgl@sss.pgh.pa.us 4525 :CBC 417 : return;
4526 : : }
4527 : :
4528 : : /*
4529 : : * To remove all the pages of the specified relation forks from the buffer
4530 : : * pool, we need to scan the entire buffer pool but we can optimize it by
4531 : : * finding the buffers from BufMapping table provided we know the exact
4532 : : * size of each fork of the relation. The exact size is required to ensure
4533 : : * that we don't leave any buffer for the relation being dropped as
4534 : : * otherwise the background writer or checkpointer can lead to a PANIC
4535 : : * error while flushing buffers corresponding to files that don't exist.
4536 : : *
4537 : : * To know the exact size, we rely on the size cached for each fork by us
4538 : : * during recovery which limits the optimization to recovery and on
4539 : : * standbys but we can easily extend it once we have shared cache for
4540 : : * relation size.
4541 : : *
4542 : : * In recovery, we cache the value returned by the first lseek(SEEK_END)
4543 : : * and the future writes keeps the cached value up-to-date. See
4544 : : * smgrextend. It is possible that the value of the first lseek is smaller
4545 : : * than the actual number of existing blocks in the file due to buggy
4546 : : * Linux kernels that might not have accounted for the recent write. But
4547 : : * that should be fine because there must not be any buffers after that
4548 : : * file size.
4549 : : */
1698 akapila@postgresql.o 4550 [ + + ]: 353 : for (i = 0; i < nforks; i++)
4551 : : {
4552 : : /* Get the number of blocks for a relation's fork */
4553 : 300 : nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
4554 : :
4555 [ + + ]: 300 : if (nForkBlock[i] == InvalidBlockNumber)
4556 : : {
4557 : 200 : nBlocksToInvalidate = InvalidBlockNumber;
4558 : 200 : break;
4559 : : }
4560 : :
4561 : : /* calculate the number of blocks to be invalidated */
4562 : 100 : nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
4563 : : }
4564 : :
4565 : : /*
4566 : : * We apply the optimization iff the total number of blocks to invalidate
4567 : : * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
4568 : : */
4569 [ + + ]: 253 : if (BlockNumberIsValid(nBlocksToInvalidate) &&
4570 [ + + ]: 53 : nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
4571 : : {
4572 [ + + ]: 120 : for (j = 0; j < nforks; j++)
1152 rhaas@postgresql.org 4573 : 77 : FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
4574 : 77 : nForkBlock[j], firstDelBlock[j]);
1698 akapila@postgresql.o 4575 : 43 : return;
4576 : : }
4577 : :
7491 tgl@sss.pgh.pa.us 4578 [ + + ]: 2725586 : for (i = 0; i < NBuffers; i++)
4579 : : {
3582 rhaas@postgresql.org 4580 : 2725376 : BufferDesc *bufHdr = GetBufferDescriptor(i);
4581 : : uint32 buf_state;
4582 : :
4583 : : /*
4584 : : * We can make this a tad faster by prechecking the buffer tag before
4585 : : * we attempt to lock the buffer; this saves a lot of lock
4586 : : * acquisitions in typical cases. It should be safe because the
4587 : : * caller must have AccessExclusiveLock on the relation, or some other
4588 : : * reason to be certain that no one is loading new pages of the rel
4589 : : * into the buffer pool. (Otherwise we might well miss such pages
4590 : : * entirely.) Therefore, while the tag might be changing while we
4591 : : * look at it, it can't be changing *to* a value we care about, only
4592 : : * *away* from such a value. So false negatives are impossible, and
4593 : : * false positives are safe because we'll recheck after getting the
4594 : : * buffer lock.
4595 : : *
4596 : : * We could check forkNum and blockNum as well as the rlocator, but
4597 : : * the incremental win from doing so seems small.
4598 : : */
1109 4599 [ + + ]: 2725376 : if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
4839 tgl@sss.pgh.pa.us 4600 : 2716357 : continue;
4601 : :
3436 andres@anarazel.de 4602 : 9019 : buf_state = LockBufHdr(bufHdr);
4603 : :
2174 fujii@postgresql.org 4604 [ + + ]: 22473 : for (j = 0; j < nforks; j++)
4605 : : {
1109 rhaas@postgresql.org 4606 [ + - ]: 15879 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
4607 [ + + ]: 15879 : BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
2174 fujii@postgresql.org 4608 [ + + ]: 8900 : bufHdr->tag.blockNum >= firstDelBlock[j])
4609 : : {
1941 tgl@sss.pgh.pa.us 4610 : 2425 : InvalidateBuffer(bufHdr); /* releases spinlock */
2174 fujii@postgresql.org 4611 : 2425 : break;
4612 : : }
4613 : : }
4614 [ + + ]: 9019 : if (j >= nforks)
3436 andres@anarazel.de 4615 : 6594 : UnlockBufHdr(bufHdr, buf_state);
4616 : : }
4617 : : }
4618 : :
4619 : : /* ---------------------------------------------------------------------
4620 : : * DropRelationsAllBuffers
4621 : : *
4622 : : * This function removes from the buffer pool all the pages of all
4623 : : * forks of the specified relations. It's equivalent to calling
4624 : : * DropRelationBuffers once per fork per relation with firstDelBlock = 0.
4625 : : * --------------------------------------------------------------------
4626 : : */
4627 : : void
1152 rhaas@postgresql.org 4628 : 13336 : DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
4629 : : {
4630 : : int i;
1697 akapila@postgresql.o 4631 : 13336 : int n = 0;
4632 : : SMgrRelation *rels;
4633 : : BlockNumber (*block)[MAX_FORKNUM + 1];
1578 tgl@sss.pgh.pa.us 4634 : 13336 : uint64 nBlocksToInvalidate = 0;
4635 : : RelFileLocator *locators;
1697 akapila@postgresql.o 4636 : 13336 : bool cached = true;
4637 : : bool use_bsearch;
4638 : :
1158 rhaas@postgresql.org 4639 [ - + ]: 13336 : if (nlocators == 0)
4615 alvherre@alvh.no-ip. 4640 :UBC 0 : return;
4641 : :
1158 rhaas@postgresql.org 4642 :CBC 13336 : rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
4643 : :
4644 : : /* If it's a local relation, it's localbuf.c's problem. */
4645 [ + + ]: 58532 : for (i = 0; i < nlocators; i++)
4646 : : {
4647 [ + + ]: 45196 : if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
4648 : : {
552 heikki.linnakangas@i 4649 [ + - ]: 3162 : if (smgr_reln[i]->smgr_rlocator.backend == MyProcNumber)
1152 rhaas@postgresql.org 4650 : 3162 : DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
4651 : : }
4652 : : else
1697 akapila@postgresql.o 4653 : 42034 : rels[n++] = smgr_reln[i];
4654 : : }
4655 : :
4656 : : /*
4657 : : * If there are no non-local relations, then we're done. Release the
4658 : : * memory and return.
4659 : : */
4615 alvherre@alvh.no-ip. 4660 [ + + ]: 13336 : if (n == 0)
4661 : : {
1697 akapila@postgresql.o 4662 : 822 : pfree(rels);
4839 tgl@sss.pgh.pa.us 4663 : 822 : return;
4664 : : }
4665 : :
4666 : : /*
4667 : : * This is used to remember the number of blocks for all forks of all the
4668 : : * relations.
4669 : : */
4670 : : block = (BlockNumber (*)[MAX_FORKNUM + 1])
1697 akapila@postgresql.o 4671 : 12514 : palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
4672 : :
4673 : : /*
4674 : : * We can avoid scanning the entire buffer pool if we know the exact size
4675 : : * of each of the given relation forks. See DropRelationBuffers.
4676 : : */
4677 [ + + + + ]: 26376 : for (i = 0; i < n && cached; i++)
4678 : : {
1109 drowley@postgresql.o 4679 [ + + ]: 22634 : for (int j = 0; j <= MAX_FORKNUM; j++)
4680 : : {
4681 : : /* Get the number of blocks for a relation's fork. */
1697 akapila@postgresql.o 4682 : 20452 : block[i][j] = smgrnblocks_cached(rels[i], j);
4683 : :
4684 : : /* We only need to consider relation forks that exist. */
4685 [ + + ]: 20452 : if (block[i][j] == InvalidBlockNumber)
4686 : : {
4687 [ + + ]: 18068 : if (!smgrexists(rels[i], j))
4688 : 6388 : continue;
4689 : 11680 : cached = false;
4690 : 11680 : break;
4691 : : }
4692 : :
4693 : : /* calculate the total number of blocks to be invalidated */
4694 : 2384 : nBlocksToInvalidate += block[i][j];
4695 : : }
4696 : : }
4697 : :
4698 : : /*
4699 : : * We apply the optimization iff the total number of blocks to invalidate
4700 : : * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
4701 : : */
4702 [ + + + + ]: 12514 : if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
4703 : : {
4704 [ + + ]: 1367 : for (i = 0; i < n; i++)
4705 : : {
1109 drowley@postgresql.o 4706 [ + + ]: 3765 : for (int j = 0; j <= MAX_FORKNUM; j++)
4707 : : {
4708 : : /* ignore relation forks that don't exist */
1697 akapila@postgresql.o 4709 [ + + ]: 3012 : if (!BlockNumberIsValid(block[i][j]))
4710 : 2250 : continue;
4711 : :
4712 : : /* drop all the buffers for a particular relation fork */
1152 rhaas@postgresql.org 4713 : 762 : FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
4714 : 762 : j, block[i][j], 0);
4715 : : }
4716 : : }
4717 : :
1697 akapila@postgresql.o 4718 : 614 : pfree(block);
4719 : 614 : pfree(rels);
4720 : 614 : return;
4721 : : }
4722 : :
4723 : 11900 : pfree(block);
1158 rhaas@postgresql.org 4724 : 11900 : locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
1697 akapila@postgresql.o 4725 [ + + ]: 53181 : for (i = 0; i < n; i++)
1158 rhaas@postgresql.org 4726 : 41281 : locators[i] = rels[i]->smgr_rlocator.locator;
4727 : :
4728 : : /*
4729 : : * For a small number of relations to drop, just use a simple walk-through
4730 : : * to save the bsearch overhead. The threshold is more of a guess than
4731 : : * an exactly determined value, as it depends on many factors (CPU and RAM
4732 : : * speeds, amount of shared buffers etc.).
4733 : : */
1981 noah@leadboat.com 4734 : 11900 : use_bsearch = n > RELS_BSEARCH_THRESHOLD;
4735 : :
4736 : : /* sort the list of rlocators if necessary */
4615 alvherre@alvh.no-ip. 4737 [ + + ]: 11900 : if (use_bsearch)
568 nathan@postgresql.or 4738 : 167 : qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
4739 : :
4839 tgl@sss.pgh.pa.us 4740 [ + + ]: 128738300 : for (i = 0; i < NBuffers; i++)
4741 : : {
1158 rhaas@postgresql.org 4742 : 128726400 : RelFileLocator *rlocator = NULL;
3582 4743 : 128726400 : BufferDesc *bufHdr = GetBufferDescriptor(i);
4744 : : uint32 buf_state;
4745 : :
4746 : : /*
4747 : : * As in DropRelationBuffers, an unlocked precheck should be safe and
4748 : : * saves some cycles.
4749 : : */
4750 : :
4615 alvherre@alvh.no-ip. 4751 [ + + ]: 128726400 : if (!use_bsearch)
4752 : : {
4753 : : int j;
4754 : :
4755 [ + + ]: 515490020 : for (j = 0; j < n; j++)
4756 : : {
1109 rhaas@postgresql.org 4757 [ + + ]: 388612952 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
4758 : : {
1158 4759 : 88564 : rlocator = &locators[j];
4615 alvherre@alvh.no-ip. 4760 : 88564 : break;
4761 : : }
4762 : : }
4763 : : }
4764 : : else
4765 : : {
4766 : : RelFileLocator locator;
4767 : :
1109 rhaas@postgresql.org 4768 : 1760768 : locator = BufTagGetRelFileLocator(&bufHdr->tag);
274 peter@eisentraut.org 4769 : 1760768 : rlocator = bsearch(&locator,
4770 : : locators, n, sizeof(RelFileLocator),
4771 : : rlocator_comparator);
4772 : : }
4773 : :
4774 : : /* buffer doesn't belong to any of the given relfilelocators; skip it */
1158 rhaas@postgresql.org 4775 [ + + ]: 128726400 : if (rlocator == NULL)
4839 tgl@sss.pgh.pa.us 4776 : 128636151 : continue;
4777 : :
3436 andres@anarazel.de 4778 : 90249 : buf_state = LockBufHdr(bufHdr);
1109 rhaas@postgresql.org 4779 [ + - ]: 90249 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
4839 tgl@sss.pgh.pa.us 4780 : 90249 : InvalidateBuffer(bufHdr); /* releases spinlock */
4781 : : else
3436 andres@anarazel.de 4782 :UBC 0 : UnlockBufHdr(bufHdr, buf_state);
4783 : : }
4784 : :
1158 rhaas@postgresql.org 4785 :CBC 11900 : pfree(locators);
1697 akapila@postgresql.o 4786 : 11900 : pfree(rels);
4787 : : }
4788 : :
4789 : : /* ---------------------------------------------------------------------
4790 : : * FindAndDropRelationBuffers
4791 : : *
4792 : : * This function performs look up in BufMapping table and removes from the
4793 : : * buffer pool all the pages of the specified relation fork that have block
4794 : : * number >= firstDelBlock. (In particular, with firstDelBlock = 0, all
4795 : : * pages are removed.)
4796 : : * --------------------------------------------------------------------
4797 : : */
4798 : : static void
1152 rhaas@postgresql.org 4799 : 839 : FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
4800 : : BlockNumber nForkBlock,
4801 : : BlockNumber firstDelBlock)
4802 : : {
4803 : : BlockNumber curBlock;
4804 : :
1698 akapila@postgresql.o 4805 [ + + ]: 2023 : for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
4806 : : {
4807 : : uint32 bufHash; /* hash value for tag */
4808 : : BufferTag bufTag; /* identity of requested block */
4809 : : LWLock *bufPartitionLock; /* buffer partition lock for it */
4810 : : int buf_id;
4811 : : BufferDesc *bufHdr;
4812 : : uint32 buf_state;
4813 : :
4814 : : /* create a tag so we can lookup the buffer */
1137 rhaas@postgresql.org 4815 : 1184 : InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
4816 : :
4817 : : /* determine its hash code and partition lock ID */
1698 akapila@postgresql.o 4818 : 1184 : bufHash = BufTableHashCode(&bufTag);
4819 : 1184 : bufPartitionLock = BufMappingPartitionLock(bufHash);
4820 : :
4821 : : /* Check that it is in the buffer pool. If not, do nothing. */
4822 : 1184 : LWLockAcquire(bufPartitionLock, LW_SHARED);
4823 : 1184 : buf_id = BufTableLookup(&bufTag, bufHash);
4824 : 1184 : LWLockRelease(bufPartitionLock);
4825 : :
4826 [ + + ]: 1184 : if (buf_id < 0)
4827 : 157 : continue;
4828 : :
4829 : 1027 : bufHdr = GetBufferDescriptor(buf_id);
4830 : :
4831 : : /*
4832 : : * We need to lock the buffer header and recheck if the buffer is
4833 : : * still associated with the same block because the buffer could be
4834 : : * evicted by some other backend loading blocks for a different
4835 : : * relation after we release the lock on the BufMapping table.
4836 : : */
4837 : 1027 : buf_state = LockBufHdr(bufHdr);
4838 : :
1109 rhaas@postgresql.org 4839 [ + - + - ]: 2054 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
4840 : 1027 : BufTagGetForkNum(&bufHdr->tag) == forkNum &&
1698 akapila@postgresql.o 4841 [ + - ]: 1027 : bufHdr->tag.blockNum >= firstDelBlock)
4842 : 1027 : InvalidateBuffer(bufHdr); /* releases spinlock */
4843 : : else
1698 akapila@postgresql.o 4844 :UBC 0 : UnlockBufHdr(bufHdr, buf_state);
4845 : : }
1698 akapila@postgresql.o 4846 :CBC 839 : }
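/*
 * Illustrative sketch of the buffer-mapping lookup protocol used above: hash
 * the tag to pick a mapping partition lock, perform the lookup under that
 * lock in shared mode, then recheck under the buffer header spinlock, since
 * the buffer can be evicted and reused for another page as soon as the
 * partition lock is released.
 *
 *		InitBufferTag(&bufTag, &rlocator, forkNum, blockNum);
 *		bufHash = BufTableHashCode(&bufTag);
 *		bufPartitionLock = BufMappingPartitionLock(bufHash);
 *
 *		LWLockAcquire(bufPartitionLock, LW_SHARED);
 *		buf_id = BufTableLookup(&bufTag, bufHash);
 *		LWLockRelease(bufPartitionLock);
 *
 *		if (buf_id >= 0)
 *		{
 *			bufHdr = GetBufferDescriptor(buf_id);
 *			buf_state = LockBufHdr(bufHdr);
 *			... recheck the tag, then InvalidateBuffer() or UnlockBufHdr() ...
 *		}
 */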
4847 : :
4848 : : /* ---------------------------------------------------------------------
4849 : : * DropDatabaseBuffers
4850 : : *
4851 : : * This function removes all the buffers in the buffer cache for a
4852 : : * particular database. Dirty pages are simply dropped, without
4853 : : * bothering to write them out first. This is used when we destroy a
4854 : : * database, to avoid trying to flush data to disk when the directory
4855 : : * tree no longer exists. Implementation is pretty similar to
4856 : : * DropRelationBuffers() which is for destroying just one relation.
4857 : : * --------------------------------------------------------------------
4858 : : */
4859 : : void
7101 tgl@sss.pgh.pa.us 4860 : 67 : DropDatabaseBuffers(Oid dbid)
4861 : : {
4862 : : int i;
4863 : :
4864 : : /*
4865 : : * We needn't consider local buffers, since by assumption the target
4866 : : * database isn't our own.
4867 : : */
4868 : :
7491 4869 [ + + ]: 463811 : for (i = 0; i < NBuffers; i++)
4870 : : {
3582 rhaas@postgresql.org 4871 : 463744 : BufferDesc *bufHdr = GetBufferDescriptor(i);
4872 : : uint32 buf_state;
4873 : :
4874 : : /*
4875 : : * As in DropRelationBuffers, an unlocked precheck should be safe and
4876 : : * saves some cycles.
4877 : : */
1109 4878 [ + + ]: 463744 : if (bufHdr->tag.dbOid != dbid)
4839 tgl@sss.pgh.pa.us 4879 : 450943 : continue;
4880 : :
3436 andres@anarazel.de 4881 : 12801 : buf_state = LockBufHdr(bufHdr);
1109 rhaas@postgresql.org 4882 [ + - ]: 12801 : if (bufHdr->tag.dbOid == dbid)
7266 bruce@momjian.us 4883 : 12801 : InvalidateBuffer(bufHdr); /* releases spinlock */
4884 : : else
3436 andres@anarazel.de 4885 :UBC 0 : UnlockBufHdr(bufHdr, buf_state);
4886 : : }
10651 scrappy@hub.org 4887 :CBC 67 : }
4888 : :
4889 : : /* ---------------------------------------------------------------------
4890 : : * FlushRelationBuffers
4891 : : *
4892 : : * This function writes all dirty pages of a relation out to disk
4893 : : * (or more accurately, out to kernel disk buffers), ensuring that the
4894 : : * kernel has an up-to-date view of the relation.
4895 : : *
4896 : : * Generally, the caller should be holding AccessExclusiveLock on the
4897 : : * target relation to ensure that no other backend is busy dirtying
4898 : : * more blocks of the relation; the effects can't be expected to last
4899 : : * after the lock is released.
4900 : : *
4901 : : * XXX currently it sequentially searches the buffer pool, should be
4902 : : * changed to more clever ways of searching. This routine is not
4903 : : * used in any performance-critical code paths, so it's not worth
4904 : : * adding additional overhead to normal paths to make it go faster.
4905 : : * --------------------------------------------------------------------
4906 : : */
4907 : : void
7475 tgl@sss.pgh.pa.us 4908 : 137 : FlushRelationBuffers(Relation rel)
4909 : : {
4910 : : int i;
4911 : : BufferDesc *bufHdr;
584 heikki.linnakangas@i 4912 : 137 : SMgrRelation srel = RelationGetSmgr(rel);
4913 : :
5381 rhaas@postgresql.org 4914 [ + + ]: 137 : if (RelationUsesLocalBuffers(rel))
4915 : : {
10211 vadim4o@yahoo.com 4916 [ + + ]: 909 : for (i = 0; i < NLocBuffer; i++)
4917 : : {
4918 : : uint32 buf_state;
4919 : :
3873 andres@anarazel.de 4920 : 900 : bufHdr = GetLocalBufferDescriptor(i);
1109 rhaas@postgresql.org 4921 [ + + ]: 900 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
3436 andres@anarazel.de 4922 [ + - ]: 300 : ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
4923 : : (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4924 : : {
4925 : : ErrorContextCallback errcallback;
4926 : :
4927 : : /* Setup error traceback support for ereport() */
4681 heikki.linnakangas@i 4928 : 300 : errcallback.callback = local_buffer_write_error_callback;
282 peter@eisentraut.org 4929 : 300 : errcallback.arg = bufHdr;
4681 heikki.linnakangas@i 4930 : 300 : errcallback.previous = error_context_stack;
4931 : 300 : error_context_stack = &errcallback;
4932 : :
4933 : : /* Make sure we can handle the pin */
152 andres@anarazel.de 4934 : 300 : ReservePrivateRefCountEntry();
4935 : 300 : ResourceOwnerEnlarge(CurrentResourceOwner);
4936 : :
4937 : : /*
4938 : : * Pin/unpin mostly to make valgrind work, but it also seems
4939 : : * like the right thing to do.
4940 : : */
4941 : 300 : PinLocalBuffer(bufHdr, false);
4942 : :
4943 : :
175 4944 : 300 : FlushLocalBuffer(bufHdr, srel);
4945 : :
152 4946 : 300 : UnpinLocalBuffer(BufferDescriptorGetBuffer(bufHdr));
4947 : :
4948 : : /* Pop the error context stack */
4681 heikki.linnakangas@i 4949 : 300 : error_context_stack = errcallback.previous;
4950 : : }
4951 : : }
4952 : :
7768 tgl@sss.pgh.pa.us 4953 : 9 : return;
4954 : : }
4955 : :
10211 vadim4o@yahoo.com 4956 [ + + ]: 1495808 : for (i = 0; i < NBuffers; i++)
4957 : : {
4958 : : uint32 buf_state;
4959 : :
3873 andres@anarazel.de 4960 : 1495680 : bufHdr = GetBufferDescriptor(i);
4961 : :
4962 : : /*
4963 : : * As in DropRelationBuffers, an unlocked precheck should be safe and
4964 : : * saves some cycles.
4965 : : */
1109 rhaas@postgresql.org 4966 [ + + ]: 1495680 : if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
4839 tgl@sss.pgh.pa.us 4967 : 1495459 : continue;
4968 : :
4969 : : /* Make sure we can handle the pin */
3883 andres@anarazel.de 4970 : 221 : ReservePrivateRefCountEntry();
668 heikki.linnakangas@i 4971 : 221 : ResourceOwnerEnlarge(CurrentResourceOwner);
4972 : :
3436 andres@anarazel.de 4973 : 221 : buf_state = LockBufHdr(bufHdr);
1109 rhaas@postgresql.org 4974 [ + - ]: 221 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
3436 andres@anarazel.de 4975 [ + + ]: 221 : (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4976 : : {
7475 tgl@sss.pgh.pa.us 4977 : 178 : PinBuffer_Locked(bufHdr);
3553 rhaas@postgresql.org 4978 : 178 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
584 heikki.linnakangas@i 4979 : 178 : FlushBuffer(bufHdr, srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
3553 rhaas@postgresql.org 4980 : 178 : LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
1072 michael@paquier.xyz 4981 : 178 : UnpinBuffer(bufHdr);
4982 : : }
4983 : : else
3436 andres@anarazel.de 4984 : 43 : UnlockBufHdr(bufHdr, buf_state);
4985 : : }
4986 : : }
4987 : :
4988 : : /* ---------------------------------------------------------------------
4989 : : * FlushRelationsAllBuffers
4990 : : *
4991 : : * This function flushes out of the buffer pool all the pages of all
4992 : : * forks of the specified smgr relations. It's equivalent to calling
4993 : : * FlushRelationBuffers once per relation. The relations are assumed not
4994 : : * to use local buffers.
4995 : : * --------------------------------------------------------------------
4996 : : */
4997 : : void
1981 noah@leadboat.com 4998 : 13 : FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
4999 : : {
5000 : : int i;
5001 : : SMgrSortArray *srels;
5002 : : bool use_bsearch;
5003 : :
5004 [ - + ]: 13 : if (nrels == 0)
1981 noah@leadboat.com 5005 :UBC 0 : return;
5006 : :
5007 : : /* fill-in array for qsort */
1981 noah@leadboat.com 5008 :CBC 13 : srels = palloc(sizeof(SMgrSortArray) * nrels);
5009 : :
5010 [ + + ]: 26 : for (i = 0; i < nrels; i++)
5011 : : {
1158 rhaas@postgresql.org 5012 [ - + ]: 13 : Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
5013 : :
5014 : 13 : srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
1981 noah@leadboat.com 5015 : 13 : srels[i].srel = smgrs[i];
5016 : : }
5017 : :
5018 : : /*
5019 : : * Save the bsearch overhead for a small number of relations to sync. See
5020 : : * DropRelationsAllBuffers for details.
5021 : : */
5022 : 13 : use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
5023 : :
5024 : : /* sort the list of SMgrRelations if necessary */
5025 [ - + ]: 13 : if (use_bsearch)
568 nathan@postgresql.or 5026 :UBC 0 : qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
5027 : :
1981 noah@leadboat.com 5028 [ + + ]:CBC 213005 : for (i = 0; i < NBuffers; i++)
5029 : : {
5030 : 212992 : SMgrSortArray *srelent = NULL;
5031 : 212992 : BufferDesc *bufHdr = GetBufferDescriptor(i);
5032 : : uint32 buf_state;
5033 : :
5034 : : /*
5035 : : * As in DropRelationBuffers, an unlocked precheck should be safe and
5036 : : * saves some cycles.
5037 : : */
5038 : :
5039 [ + - ]: 212992 : if (!use_bsearch)
5040 : : {
5041 : : int j;
5042 : :
5043 [ + + ]: 421272 : for (j = 0; j < nrels; j++)
5044 : : {
1109 rhaas@postgresql.org 5045 [ + + ]: 212992 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
5046 : : {
1981 noah@leadboat.com 5047 : 4712 : srelent = &srels[j];
5048 : 4712 : break;
5049 : : }
5050 : : }
5051 : : }
5052 : : else
5053 : : {
5054 : : RelFileLocator rlocator;
5055 : :
1109 rhaas@postgresql.org 5056 :UBC 0 : rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
274 peter@eisentraut.org 5057 : 0 : srelent = bsearch(&rlocator,
5058 : : srels, nrels, sizeof(SMgrSortArray),
5059 : : rlocator_comparator);
5060 : : }
5061 : :
5062 : : /* buffer doesn't belong to any of the given relfilelocators; skip it */
1981 noah@leadboat.com 5063 [ + + ]:CBC 212992 : if (srelent == NULL)
5064 : 208280 : continue;
5065 : :
5066 : : /* Make sure we can handle the pin */
5067 : 4712 : ReservePrivateRefCountEntry();
668 heikki.linnakangas@i 5068 : 4712 : ResourceOwnerEnlarge(CurrentResourceOwner);
5069 : :
1981 noah@leadboat.com 5070 : 4712 : buf_state = LockBufHdr(bufHdr);
1109 rhaas@postgresql.org 5071 [ + - ]: 4712 : if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
1981 noah@leadboat.com 5072 [ + + ]: 4712 : (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
5073 : : {
5074 : 4252 : PinBuffer_Locked(bufHdr);
5075 : 4252 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
940 andres@anarazel.de 5076 : 4252 : FlushBuffer(bufHdr, srelent->srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
1981 noah@leadboat.com 5077 : 4252 : LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
1072 michael@paquier.xyz 5078 : 4252 : UnpinBuffer(bufHdr);
5079 : : }
5080 : : else
1981 noah@leadboat.com 5081 : 460 : UnlockBufHdr(bufHdr, buf_state);
5082 : : }
5083 : :
5084 : 13 : pfree(srels);
5085 : : }
5086 : :
5087 : : /* ---------------------------------------------------------------------
5088 : : * RelationCopyStorageUsingBuffer
5089 : : *
5090 : : * Copy a fork's data using bufmgr. Same as RelationCopyStorage, but
5091 : : * instead of using smgrread and smgrextend this copies via the bufmgr APIs.
5092 : : *
5093 : : * See the comments atop CreateAndCopyRelationData() for details about
5094 : : * the 'permanent' parameter.
5095 : : * --------------------------------------------------------------------
5096 : : */
5097 : : static void
1121 rhaas@postgresql.org 5098 : 71249 : RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
5099 : : RelFileLocator dstlocator,
5100 : : ForkNumber forkNum, bool permanent)
5101 : : {
5102 : : Buffer srcBuf;
5103 : : Buffer dstBuf;
5104 : : Page srcPage;
5105 : : Page dstPage;
5106 : : bool use_wal;
5107 : : BlockNumber nblocks;
5108 : : BlockNumber blkno;
5109 : : PGIOAlignedBlock buf;
5110 : : BufferAccessStrategy bstrategy_src;
5111 : : BufferAccessStrategy bstrategy_dst;
5112 : : BlockRangeReadStreamPrivate p;
5113 : : ReadStream *src_stream;
5114 : : SMgrRelation src_smgr;
5115 : :
5116 : : /*
5117 : : * In general, we want to write WAL whenever wal_level > 'minimal', but we
5118 : : * can skip it when copying any fork of an unlogged relation other than
5119 : : * the init fork.
5120 : : */
1257 5121 [ + + - + : 71249 : use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
- - ]
5122 : :
5123 : : /* Get number of blocks in the source relation. */
552 heikki.linnakangas@i 5124 : 71249 : nblocks = smgrnblocks(smgropen(srclocator, INVALID_PROC_NUMBER),
5125 : : forkNum);
5126 : :
5127 : : /* Nothing to copy; just return. */
1257 rhaas@postgresql.org 5128 [ + + ]: 71249 : if (nblocks == 0)
5129 : 12427 : return;
5130 : :
5131 : : /*
5132 : : * Bulk extend the destination relation of the same size as the source
5133 : : * relation before starting to copy block by block.
5134 : : */
1115 5135 : 58822 : memset(buf.data, 0, BLCKSZ);
552 heikki.linnakangas@i 5136 : 58822 : smgrextend(smgropen(dstlocator, INVALID_PROC_NUMBER), forkNum, nblocks - 1,
5137 : : buf.data, true);
5138 : :
5139 : : /* This is a bulk operation, so use buffer access strategies. */
1257 rhaas@postgresql.org 5140 : 58822 : bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
5141 : 58822 : bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);
5142 : :
5143 : : /* Initialize streaming read */
368 noah@leadboat.com 5144 : 58822 : p.current_blocknum = 0;
5145 : 58822 : p.last_exclusive = nblocks;
413 5146 : 58822 : src_smgr = smgropen(srclocator, INVALID_PROC_NUMBER);
5147 : :
5148 : : /*
5149 : : * It is safe to use batchmode as block_range_read_stream_cb takes no
5150 : : * locks.
5151 : : */
160 andres@anarazel.de 5152 [ + - ]: 58822 : src_stream = read_stream_begin_smgr_relation(READ_STREAM_FULL |
5153 : : READ_STREAM_USE_BATCHING,
5154 : : bstrategy_src,
5155 : : src_smgr,
5156 : : permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
5157 : : forkNum,
5158 : : block_range_read_stream_cb,
5159 : : &p,
5160 : : 0);
5161 : :
5162 : : /* Iterate over each block of the source relation file. */
1257 rhaas@postgresql.org 5163 [ + + ]: 288629 : for (blkno = 0; blkno < nblocks; blkno++)
5164 : : {
5165 [ - + ]: 229810 : CHECK_FOR_INTERRUPTS();
5166 : :
5167 : : /* Read block from source relation. */
413 noah@leadboat.com 5168 : 229810 : srcBuf = read_stream_next_buffer(src_stream, NULL);
1127 tgl@sss.pgh.pa.us 5169 : 229807 : LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
1257 rhaas@postgresql.org 5170 : 229807 : srcPage = BufferGetPage(srcBuf);
5171 : :
413 noah@leadboat.com 5172 : 229807 : dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum,
5173 : : BufferGetBlockNumber(srcBuf),
5174 : : RBM_ZERO_AND_LOCK, bstrategy_dst,
5175 : : permanent);
1127 tgl@sss.pgh.pa.us 5176 : 229807 : dstPage = BufferGetPage(dstBuf);
5177 : :
1257 rhaas@postgresql.org 5178 : 229807 : START_CRIT_SECTION();
5179 : :
5180 : : /* Copy page data from the source to the destination. */
5181 : 229807 : memcpy(dstPage, srcPage, BLCKSZ);
5182 : 229807 : MarkBufferDirty(dstBuf);
5183 : :
5184 : : /* WAL-log the copied page. */
5185 [ + + ]: 229807 : if (use_wal)
5186 : 120215 : log_newpage_buffer(dstBuf, true);
5187 : :
5188 [ - + ]: 229807 : END_CRIT_SECTION();
5189 : :
5190 : 229807 : UnlockReleaseBuffer(dstBuf);
1127 tgl@sss.pgh.pa.us 5191 : 229807 : UnlockReleaseBuffer(srcBuf);
5192 : : }
413 noah@leadboat.com 5193 [ - + ]: 58819 : Assert(read_stream_next_buffer(src_stream, NULL) == InvalidBuffer);
5194 : 58819 : read_stream_end(src_stream);
5195 : :
901 andres@anarazel.de 5196 : 58819 : FreeAccessStrategy(bstrategy_src);
5197 : 58819 : FreeAccessStrategy(bstrategy_dst);
5198 : : }
5199 : :
5200 : : /* ---------------------------------------------------------------------
5201 : : * CreateAndCopyRelationData
5202 : : *
5203 : : * Create destination relation storage and copy all forks from the
5204 : : * source relation to the destination.
5205 : : *
5206 : : * Pass permanent as true for permanent relations and false for
5207 : : * unlogged relations. Currently this API is not supported for
5208 : : * temporary relations.
5209 : : * --------------------------------------------------------------------
5210 : : */
5211 : : void
1158 rhaas@postgresql.org 5212 : 53558 : CreateAndCopyRelationData(RelFileLocator src_rlocator,
5213 : : RelFileLocator dst_rlocator, bool permanent)
5214 : : {
5215 : : char relpersistence;
5216 : : SMgrRelation src_rel;
5217 : : SMgrRelation dst_rel;
5218 : :
5219 : : /* Set the relpersistence. */
1257 5220 [ + - ]: 53558 : relpersistence = permanent ?
5221 : : RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
5222 : :
552 heikki.linnakangas@i 5223 : 53558 : src_rel = smgropen(src_rlocator, INVALID_PROC_NUMBER);
5224 : 53558 : dst_rel = smgropen(dst_rlocator, INVALID_PROC_NUMBER);
5225 : :
5226 : : /*
5227 : : * Create and copy all forks of the relation. During create database we
5228 : : * have a separate cleanup mechanism which deletes the complete database
5229 : : * directory. Therefore, each individual relation doesn't need to be
5230 : : * registered for cleanup.
5231 : : */
1158 rhaas@postgresql.org 5232 : 53558 : RelationCreateStorage(dst_rlocator, relpersistence, false);
5233 : :
5234 : : /* copy main fork. */
1121 5235 : 53558 : RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
5236 : : permanent);
5237 : :
5238 : : /* copy those extra forks that exist */
1257 5239 : 53555 : for (ForkNumber forkNum = MAIN_FORKNUM + 1;
5240 [ + + ]: 214220 : forkNum <= MAX_FORKNUM; forkNum++)
5241 : : {
584 heikki.linnakangas@i 5242 [ + + ]: 160665 : if (smgrexists(src_rel, forkNum))
5243 : : {
5244 : 17691 : smgrcreate(dst_rel, forkNum, false);
5245 : :
5246 : : /*
5247 : : * WAL log creation if the relation is persistent, or this is the
5248 : : * init fork of an unlogged relation.
5249 : : */
1257 rhaas@postgresql.org 5250 [ - + - - ]: 17691 : if (permanent || forkNum == INIT_FORKNUM)
1158 5251 : 17691 : log_smgrcreate(&dst_rlocator, forkNum);
5252 : :
5253 : : /* Copy a fork's data, block by block. */
1121 5254 : 17691 : RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
5255 : : permanent);
5256 : : }
5257 : : }
1257 5258 : 53555 : }
5259 : :
5260 : : /* ---------------------------------------------------------------------
5261 : : * FlushDatabaseBuffers
5262 : : *
5263 : : * This function writes all dirty pages of a database out to disk
5264 : : * (or more accurately, out to kernel disk buffers), ensuring that the
5265 : : * kernel has an up-to-date view of the database.
5266 : : *
5267 : : * Generally, the caller should be holding an appropriate lock to ensure
5268 : : * no other backend is active in the target database; otherwise more
5269 : : * pages could get dirtied.
5270 : : *
5271 : : * Note we don't worry about flushing any pages of temporary relations.
5272 : : * It's assumed these wouldn't be interesting.
5273 : : * --------------------------------------------------------------------
5274 : : */
5275 : : void
6645 tgl@sss.pgh.pa.us 5276 : 4 : FlushDatabaseBuffers(Oid dbid)
5277 : : {
5278 : : int i;
5279 : : BufferDesc *bufHdr;
5280 : :
5281 [ + + ]: 516 : for (i = 0; i < NBuffers; i++)
5282 : : {
5283 : : uint32 buf_state;
5284 : :
3873 andres@anarazel.de 5285 : 512 : bufHdr = GetBufferDescriptor(i);
5286 : :
5287 : : /*
5288 : : * As in DropRelationBuffers, an unlocked precheck should be safe and
5289 : : * saves some cycles.
5290 : : */
1109 rhaas@postgresql.org 5291 [ + + ]: 512 : if (bufHdr->tag.dbOid != dbid)
4839 tgl@sss.pgh.pa.us 5292 : 308 : continue;
5293 : :
5294 : : /* Make sure we can handle the pin */
3883 andres@anarazel.de 5295 : 204 : ReservePrivateRefCountEntry();
668 heikki.linnakangas@i 5296 : 204 : ResourceOwnerEnlarge(CurrentResourceOwner);
5297 : :
3436 andres@anarazel.de 5298 : 204 : buf_state = LockBufHdr(bufHdr);
1109 rhaas@postgresql.org 5299 [ + - ]: 204 : if (bufHdr->tag.dbOid == dbid &&
3436 andres@anarazel.de 5300 [ + + ]: 204 : (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
5301 : : {
6645 tgl@sss.pgh.pa.us 5302 : 34 : PinBuffer_Locked(bufHdr);
3553 rhaas@postgresql.org 5303 : 34 : LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
940 andres@anarazel.de 5304 : 34 : FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
3553 rhaas@postgresql.org 5305 : 34 : LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
1072 michael@paquier.xyz 5306 : 34 : UnpinBuffer(bufHdr);
5307 : : }
5308 : : else
3436 andres@anarazel.de 5309 : 170 : UnlockBufHdr(bufHdr, buf_state);
5310 : : }
6645 tgl@sss.pgh.pa.us 5311 : 4 : }
5312 : :
5313 : : /*
5314 : : * Flush a previously locked (shared or exclusive) and pinned buffer to the
5315 : : * OS.
5316 : : */
5317 : : void
3558 andres@anarazel.de 5318 : 74 : FlushOneBuffer(Buffer buffer)
5319 : : {
5320 : : BufferDesc *bufHdr;
5321 : :
5322 : : /* currently not needed, but no fundamental reason not to support */
5323 [ - + ]: 74 : Assert(!BufferIsLocal(buffer));
5324 : :
5325 [ - + - + : 74 : Assert(BufferIsPinned(buffer));
- + ]
5326 : :
5327 : 74 : bufHdr = GetBufferDescriptor(buffer - 1);
5328 : :
3553 rhaas@postgresql.org 5329 [ - + ]: 74 : Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
5330 : :
940 andres@anarazel.de 5331 : 74 : FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
3558 5332 : 74 : }
5333 : :
5334 : : /*
5335 : : * ReleaseBuffer -- release the pin on a buffer
5336 : : */
5337 : : void
8432 tgl@sss.pgh.pa.us 5338 : 57094766 : ReleaseBuffer(Buffer buffer)
5339 : : {
7631 5340 [ - + ]: 57094766 : if (!BufferIsValid(buffer))
5193 peter_e@gmx.net 5341 [ # # ]:UBC 0 : elog(ERROR, "bad buffer ID: %d", buffer);
5342 : :
10226 bruce@momjian.us 5343 [ + + ]:CBC 57094766 : if (BufferIsLocal(buffer))
885 andres@anarazel.de 5344 : 1771642 : UnpinLocalBuffer(buffer);
5345 : : else
5346 : 55323124 : UnpinBuffer(GetBufferDescriptor(buffer - 1));
10651 scrappy@hub.org 5347 : 57094766 : }
5348 : :
5349 : : /*
5350 : : * UnlockReleaseBuffer -- release the content lock and pin on a buffer
5351 : : *
5352 : : * This is just a shorthand for a common combination.
5353 : : */
5354 : : void
7099 tgl@sss.pgh.pa.us 5355 : 17952279 : UnlockReleaseBuffer(Buffer buffer)
5356 : : {
5357 : 17952279 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5358 : 17952279 : ReleaseBuffer(buffer);
5359 : 17952279 : }
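/*
 * Illustrative sketch of the common combination this shorthand covers; the
 * relation and block number here are placeholders:
 *
 *		buf = ReadBuffer(rel, blkno);
 *		LockBuffer(buf, BUFFER_LOCK_SHARE);
 *		... examine the page ...
 *		UnlockReleaseBuffer(buf);
 *
 * The final call is exactly LockBuffer(buf, BUFFER_LOCK_UNLOCK) followed by
 * ReleaseBuffer(buf).
 */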
5360 : :
5361 : : /*
5362 : : * IncrBufferRefCount
5363 : : * Increment the pin count on a buffer that we have *already* pinned
5364 : : * at least once.
5365 : : *
5366 : : * This function cannot be used on a buffer we do not have pinned,
5367 : : * because it doesn't change the shared buffer state.
5368 : : */
5369 : : void
7721 5370 : 10494307 : IncrBufferRefCount(Buffer buffer)
5371 : : {
7591 neilc@samurai.com 5372 [ - + + + : 10494307 : Assert(BufferIsPinned(buffer));
- + ]
668 heikki.linnakangas@i 5373 : 10494307 : ResourceOwnerEnlarge(CurrentResourceOwner);
7721 tgl@sss.pgh.pa.us 5374 [ + + ]: 10494307 : if (BufferIsLocal(buffer))
5375 : 356061 : LocalRefCount[-buffer - 1]++;
5376 : : else
5377 : : {
5378 : : PrivateRefCountEntry *ref;
5379 : :
3883 andres@anarazel.de 5380 : 10138246 : ref = GetPrivateRefCountEntry(buffer, true);
4025 5381 [ - + ]: 10138246 : Assert(ref != NULL);
5382 : 10138246 : ref->refcount++;
5383 : : }
2859 tgl@sss.pgh.pa.us 5384 : 10494307 : ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
7721 5385 : 10494307 : }
5386 : :
5387 : : /*
5388 : : * MarkBufferDirtyHint
5389 : : *
5390 : : * Mark a buffer dirty for non-critical changes.
5391 : : *
5392 : : * This is essentially the same as MarkBufferDirty, except:
5393 : : *
5394 : : * 1. The caller does not write WAL; so if checksums are enabled, we may need
5395 : : * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
5396 : : * 2. The caller might have only share-lock instead of exclusive-lock on the
5397 : : * buffer's content lock.
5398 : : * 3. This function does not guarantee that the buffer is always marked dirty
5399 : : * (due to a race condition), so it cannot be used for important changes.
5400 : : */
5401 : : void
4464 jdavis@postgresql.or 5402 : 9994987 : MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
5403 : : {
5404 : : BufferDesc *bufHdr;
3426 kgrittn@postgresql.o 5405 : 9994987 : Page page = BufferGetPage(buffer);
5406 : :
7631 tgl@sss.pgh.pa.us 5407 [ - + ]: 9994987 : if (!BufferIsValid(buffer))
5193 peter_e@gmx.net 5408 [ # # ]:UBC 0 : elog(ERROR, "bad buffer ID: %d", buffer);
5409 : :
9281 tgl@sss.pgh.pa.us 5410 [ + + ]:CBC 9994987 : if (BufferIsLocal(buffer))
5411 : : {
7099 5412 : 745651 : MarkLocalBufferDirty(buffer);
9281 5413 : 745651 : return;
5414 : : }
5415 : :
3873 andres@anarazel.de 5416 : 9249336 : bufHdr = GetBufferDescriptor(buffer - 1);
5417 : :
4025 5418 [ - + ]: 9249336 : Assert(GetPrivateRefCount(buffer) > 0);
5419 : : /* here, either share or exclusive lock is OK */
3553 rhaas@postgresql.org 5420 [ - + ]: 9249336 : Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
5421 : :
5422 : : /*
5423 : : * This routine might get called many times on the same page, if we are
5424 : : * making the first scan after commit of an xact that added/deleted many
5425 : : * tuples. So, be as quick as we can if the buffer is already dirty. We
5426 : : * do this by not acquiring spinlock if it looks like the status bits are
5427 : : * already set. Since we make this test unlocked, there's a chance we
5428 : : * might fail to notice that the flags have just been cleared, and fail
5429 : : * to set them again, due to memory-ordering issues. But since this function
5430 : : * is only intended to be used in cases where failing to write out the
5431 : : * data would be harmless anyway, it doesn't really matter.
5432 : : */
3436 andres@anarazel.de 5433 [ + + ]: 9249336 : if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
5434 : : (BM_DIRTY | BM_JUST_DIRTIED))
5435 : : {
4551 simon@2ndQuadrant.co 5436 : 899569 : XLogRecPtr lsn = InvalidXLogRecPtr;
5437 : 899569 : bool dirtied = false;
1247 rhaas@postgresql.org 5438 : 899569 : bool delayChkptFlags = false;
5439 : : uint32 buf_state;
5440 : :
5441 : : /*
5442 : : * If we need to protect hint bit updates from torn writes, WAL-log a
5443 : : * full page image of the page. This full page image is only necessary
5444 : : * if the hint bit update is the first change to the page since the
5445 : : * last checkpoint.
5446 : : *
5447 : : * We don't check full_page_writes here because that logic is included
5448 : : * when we call XLogInsert() since the value changes dynamically.
5449 : : */
3436 andres@anarazel.de 5450 [ + + - + : 1798046 : if (XLogHintBitIsNeeded() &&
+ + ]
5451 : 898477 : (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
5452 : : {
5453 : : /*
5454 : : * If we must not write WAL, due to a relfilelocator-specific
5455 : : * condition or being in recovery, don't dirty the page. We can
5456 : : * still set the hint, we just don't dirty the page as a result, so the
5457 : : * hint is lost when we evict the page or shut down.
5458 : : *
5459 : : * See src/backend/storage/page/README for longer discussion.
5460 : : */
1981 noah@leadboat.com 5461 [ + + + + ]: 961149 : if (RecoveryInProgress() ||
1109 rhaas@postgresql.org 5462 : 62706 : RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
4551 simon@2ndQuadrant.co 5463 : 839169 : return;
5464 : :
5465 : : /*
5466 : : * If the block is already dirty because we either made a change
5467 : : * or set a hint already, then we don't need to write a full page
5468 : : * image. Note that aggressive cleaning of blocks dirtied by hint
5469 : : * bit setting would increase the call rate. Bulk setting of hint
5470 : : * bits would reduce the call rate...
5471 : : *
5472 : : * We must issue the WAL record before we mark the buffer dirty.
5473 : : * Otherwise we might write the page before we write the WAL. That
5474 : : * causes a race condition, since a checkpoint might occur between
5475 : : * writing the WAL record and marking the buffer dirty. We solve
5476 : : * that with a kluge, but one that is already in use during
5477 : : * transaction commit to prevent race conditions. Basically, we
5478 : : * simply prevent the checkpoint WAL record from being written
5479 : : * until we have marked the buffer dirty. We don't start the
5480 : : * checkpoint flush until we have marked the buffer dirty, so our
5481 : : * checkpoint must flush the change to disk successfully, or the
5482 : : * checkpoint never gets written and crash recovery will fix things up.
5483 : : *
5484 : : * It's possible we may enter here without an xid, so it is
5485 : : * essential that CreateCheckPoint waits for virtual transactions
5486 : : * rather than full transaction IDs.
5487 : : */
1247 rhaas@postgresql.org 5488 [ - + ]: 59274 : Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
5489 : 59274 : MyProc->delayChkptFlags |= DELAY_CHKPT_START;
5490 : 59274 : delayChkptFlags = true;
4464 jdavis@postgresql.or 5491 : 59274 : lsn = XLogSaveBufferForHint(buffer, buffer_std);
5492 : : }
5493 : :
3436 andres@anarazel.de 5494 : 60400 : buf_state = LockBufHdr(bufHdr);
5495 : :
5496 [ - + ]: 60400 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5497 : :
5498 [ + + ]: 60400 : if (!(buf_state & BM_DIRTY))
5499 : : {
4551 simon@2ndQuadrant.co 5500 : 60353 : dirtied = true; /* Means "will be dirtied by this action" */
5501 : :
5502 : : /*
5503 : : * Set the page LSN if we wrote a backup block. We aren't supposed
5504 : : * to set this when only holding a share lock but as long as we
5505 : : * serialise it somehow we're OK. We choose to set LSN while
5506 : : * holding the buffer header lock, which causes any reader of an
5507 : : * LSN who holds only a share lock to also obtain a buffer header
5508 : : * lock before using PageGetLSN(), which is enforced in
5509 : : * BufferGetLSNAtomic().
5510 : : *
5511 : : * If checksums are enabled, you might think we should reset the
5512 : : * checksum here. That will happen when the page is written
5513 : : * sometime later in this checkpoint cycle.
5514 : : */
5515 [ + + ]: 60353 : if (!XLogRecPtrIsInvalid(lsn))
5516 : 31213 : PageSetLSN(page, lsn);
5517 : : }
5518 : :
3436 andres@anarazel.de 5519 : 60400 : buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
5520 : 60400 : UnlockBufHdr(bufHdr, buf_state);
5521 : :
1247 rhaas@postgresql.org 5522 [ + + ]: 60400 : if (delayChkptFlags)
5523 : 59274 : MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
5524 : :
4551 simon@2ndQuadrant.co 5525 [ + + ]: 60400 : if (dirtied)
5526 : : {
4177 rhaas@postgresql.org 5527 : 60353 : pgBufferUsage.shared_blks_dirtied++;
5034 alvherre@alvh.no-ip. 5528 [ + + ]: 60353 : if (VacuumCostActive)
5529 : 2149 : VacuumCostBalance += VacuumCostPageDirty;
5530 : : }
5531 : : }
5532 : : }
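/*
 * Sketch of a typical MarkBufferDirtyHint() caller: set a non-critical hint
 * on the page while holding only a share lock, then request the dirty.  The
 * helper "set_some_hint_bit" is hypothetical; buffer_std=true assumes the
 * page uses the standard layout.
 */
static void
set_hint_example(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);

    LockBuffer(buf, BUFFER_LOCK_SHARE);
    set_some_hint_bit(BufferGetPage(buf));  /* change is harmless if lost */
    MarkBufferDirtyHint(buf, true);
    UnlockReleaseBuffer(buf);
}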
5533 : :
5534 : : /*
5535 : : * Release buffer content locks for shared buffers.
5536 : : *
5537 : : * Used to clean up after errors.
5538 : : *
5539 : : * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
5540 : : * of releasing buffer content locks per se; the only thing we need to deal
5541 : : * with here is clearing any BM_PIN_COUNT_WAITER request that was in progress.
5542 : : */
5543 : : void
9003 tgl@sss.pgh.pa.us 5544 : 48379 : UnlockBuffers(void)
5545 : : {
3582 rhaas@postgresql.org 5546 : 48379 : BufferDesc *buf = PinCountWaitBuf;
5547 : :
7630 tgl@sss.pgh.pa.us 5548 [ - + ]: 48379 : if (buf)
5549 : : {
5550 : : uint32 buf_state;
5551 : :
3436 andres@anarazel.de 5552 :UBC 0 : buf_state = LockBufHdr(buf);
5553 : :
5554 : : /*
5555 : : * Don't complain if flag bit not set; it could have been reset but we
5556 : : * got a cancel/die interrupt before getting the signal.
5557 : : */
5558 [ # # ]: 0 : if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
562 heikki.linnakangas@i 5559 [ # # ]: 0 : buf->wait_backend_pgprocno == MyProcNumber)
3436 andres@anarazel.de 5560 : 0 : buf_state &= ~BM_PIN_COUNT_WAITER;
5561 : :
5562 : 0 : UnlockBufHdr(buf, buf_state);
5563 : :
7491 tgl@sss.pgh.pa.us 5564 : 0 : PinCountWaitBuf = NULL;
5565 : : }
9762 vadim4o@yahoo.com 5566 :CBC 48379 : }
5567 : :
5568 : : /*
5569 : : * Acquire or release the content_lock for the buffer.
5570 : : */
5571 : : void
9601 bruce@momjian.us 5572 : 161756957 : LockBuffer(Buffer buffer, int mode)
5573 : : {
5574 : : BufferDesc *buf;
5575 : :
1874 pg@bowt.ie 5576 [ - + + + : 161756957 : Assert(BufferIsPinned(buffer));
- + ]
9762 vadim4o@yahoo.com 5577 [ + + ]: 161756957 : if (BufferIsLocal(buffer))
7099 tgl@sss.pgh.pa.us 5578 : 10419273 : return; /* local buffers need no lock */
5579 : :
3873 andres@anarazel.de 5580 : 151337684 : buf = GetBufferDescriptor(buffer - 1);
5581 : :
9762 vadim4o@yahoo.com 5582 [ + + ]: 151337684 : if (mode == BUFFER_LOCK_UNLOCK)
3553 rhaas@postgresql.org 5583 : 76397664 : LWLockRelease(BufferDescriptorGetContentLock(buf));
9762 vadim4o@yahoo.com 5584 [ + + ]: 74940020 : else if (mode == BUFFER_LOCK_SHARE)
3553 rhaas@postgresql.org 5585 : 52658579 : LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
9762 vadim4o@yahoo.com 5586 [ + - ]: 22281441 : else if (mode == BUFFER_LOCK_EXCLUSIVE)
3553 rhaas@postgresql.org 5587 : 22281441 : LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
5588 : : else
8080 tgl@sss.pgh.pa.us 5589 [ # # ]:UBC 0 : elog(ERROR, "unrecognized buffer lock mode: %d", mode);
5590 : : }
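/*
 * Sketch of the usual modify pattern built on LockBuffer() (illustrative
 * caller, not part of bufmgr.c; WAL-logging of the change is elided):
 */
static void
modify_page_example(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);

    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
    /* ... modify BufferGetPage(buf), normally inside a critical section ... */
    MarkBufferDirty(buf);
    UnlockReleaseBuffer(buf);
}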
5591 : :
5592 : : /*
5593 : : * Acquire the content_lock for the buffer, but only if we don't have to wait.
5594 : : *
5595 : : * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
5596 : : */
5597 : : bool
8063 tgl@sss.pgh.pa.us 5598 :CBC 1062614 : ConditionalLockBuffer(Buffer buffer)
5599 : : {
5600 : : BufferDesc *buf;
5601 : :
1874 pg@bowt.ie 5602 [ - + + + : 1062614 : Assert(BufferIsPinned(buffer));
- + ]
8063 tgl@sss.pgh.pa.us 5603 [ + + ]: 1062614 : if (BufferIsLocal(buffer))
5604 : 64647 : return true; /* act as though we got it */
5605 : :
3873 andres@anarazel.de 5606 : 997967 : buf = GetBufferDescriptor(buffer - 1);
5607 : :
3553 rhaas@postgresql.org 5608 : 997967 : return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
5609 : : LW_EXCLUSIVE);
5610 : : }
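/*
 * Sketch of an opportunistic ConditionalLockBuffer() caller: give up instead
 * of blocking if the exclusive content lock is busy (illustrative; "buf" is
 * assumed to be pinned by the caller already).
 */
static bool
try_exclusive_lock_example(Buffer buf)
{
    if (!ConditionalLockBuffer(buf))
        return false;           /* lock not free; caller can retry later */

    /* ... we now hold the content lock in exclusive mode ... */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    return true;
}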
5611 : :
5612 : : /*
5613 : : * Verify that this backend is pinning the buffer exactly once.
5614 : : *
5615 : : * NOTE: Like in BufferIsPinned(), what we check here is that *this* backend
5616 : : * holds a pin on the buffer. We do not care whether some other backend does.
5617 : : */
5618 : : void
885 andres@anarazel.de 5619 : 3877410 : CheckBufferIsPinnedOnce(Buffer buffer)
5620 : : {
5621 [ + + ]: 3877410 : if (BufferIsLocal(buffer))
5622 : : {
5623 [ - + ]: 789 : if (LocalRefCount[-buffer - 1] != 1)
885 andres@anarazel.de 5624 [ # # ]:UBC 0 : elog(ERROR, "incorrect local pin count: %d",
5625 : : LocalRefCount[-buffer - 1]);
5626 : : }
5627 : : else
5628 : : {
885 andres@anarazel.de 5629 [ - + ]:CBC 3876621 : if (GetPrivateRefCount(buffer) != 1)
885 andres@anarazel.de 5630 [ # # ]:UBC 0 : elog(ERROR, "incorrect local pin count: %d",
5631 : : GetPrivateRefCount(buffer));
5632 : : }
885 andres@anarazel.de 5633 :CBC 3877410 : }
5634 : :
5635 : : /*
5636 : : * LockBufferForCleanup - lock a buffer in preparation for deleting items
5637 : : *
5638 : : * Items may be deleted from a disk page only when the caller (a) holds an
5639 : : * exclusive lock on the buffer and (b) has observed that no other backend
5640 : : * holds a pin on the buffer. If there is a pin, then the other backend
5641 : : * might have a pointer into the buffer (for example, a heapscan reference
5642 : : * to an item --- see README for more details). It's OK if a pin is added
5643 : : * after the cleanup starts, however; the newly-arrived backend will be
5644 : : * unable to look at the page until we release the exclusive lock.
5645 : : *
5646 : : * To implement this protocol, a would-be deleter must pin the buffer and
5647 : : * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
5648 : : * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
5649 : : * it has successfully observed pin count = 1.
5650 : : */
5651 : : void
8828 tgl@sss.pgh.pa.us 5652 : 23131 : LockBufferForCleanup(Buffer buffer)
5653 : : {
5654 : : BufferDesc *bufHdr;
1702 fujii@postgresql.org 5655 : 23131 : TimestampTz waitStart = 0;
929 drowley@postgresql.o 5656 : 23131 : bool waiting = false;
1702 fujii@postgresql.org 5657 : 23131 : bool logged_recovery_conflict = false;
5658 : :
1874 pg@bowt.ie 5659 [ - + + + : 23131 : Assert(BufferIsPinned(buffer));
- + ]
7630 tgl@sss.pgh.pa.us 5660 [ - + ]: 23131 : Assert(PinCountWaitBuf == NULL);
5661 : :
885 andres@anarazel.de 5662 : 23131 : CheckBufferIsPinnedOnce(buffer);
5663 : :
5664 : : /*
5665 : : * We do not yet need to worry about in-progress AIOs holding a pin:
5666 : : * so far we only support doing reads via AIO, and this function can
5667 : : * only be called once the buffer is valid (i.e. no read can be in
5668 : : * flight).
5669 : : */
5670 : :
5671 : : /* Nobody else to wait for */
8828 tgl@sss.pgh.pa.us 5672 [ + + ]: 23131 : if (BufferIsLocal(buffer))
5673 : 16 : return;
5674 : :
3873 andres@anarazel.de 5675 : 23115 : bufHdr = GetBufferDescriptor(buffer - 1);
5676 : :
5677 : : for (;;)
8828 tgl@sss.pgh.pa.us 5678 : 96 : {
5679 : : uint32 buf_state;
5680 : :
5681 : : /* Try to acquire lock */
5682 : 23211 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3436 andres@anarazel.de 5683 : 23211 : buf_state = LockBufHdr(bufHdr);
5684 : :
5685 [ - + ]: 23211 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5686 [ + + ]: 23211 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5687 : : {
5688 : : /* Successfully acquired exclusive lock with pincount 1 */
5689 : 23115 : UnlockBufHdr(bufHdr, buf_state);
5690 : :
5691 : : /*
5692 : : * Emit the log message if recovery conflict on buffer pin was
5693 : : * resolved but the startup process waited longer than
5694 : : * deadlock_timeout for it.
5695 : : */
1697 fujii@postgresql.org 5696 [ + + ]: 23115 : if (logged_recovery_conflict)
5697 : 2 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
5698 : : waitStart, GetCurrentTimestamp(),
5699 : : NULL, false);
5700 : :
929 drowley@postgresql.o 5701 [ + + ]: 23115 : if (waiting)
5702 : : {
5703 : : /* reset ps display to remove the suffix if we added one */
5704 : 2 : set_ps_display_remove_suffix();
5705 : 2 : waiting = false;
5706 : : }
8828 tgl@sss.pgh.pa.us 5707 : 23115 : return;
5708 : : }
5709 : : /* Failed, so mark myself as waiting for pincount 1 */
3436 andres@anarazel.de 5710 [ - + ]: 96 : if (buf_state & BM_PIN_COUNT_WAITER)
5711 : : {
3436 andres@anarazel.de 5712 :UBC 0 : UnlockBufHdr(bufHdr, buf_state);
8828 tgl@sss.pgh.pa.us 5713 : 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
8080 5714 [ # # ]: 0 : elog(ERROR, "multiple backends attempting to wait for pincount 1");
5715 : : }
562 heikki.linnakangas@i 5716 :CBC 96 : bufHdr->wait_backend_pgprocno = MyProcNumber;
7630 tgl@sss.pgh.pa.us 5717 : 96 : PinCountWaitBuf = bufHdr;
3436 andres@anarazel.de 5718 : 96 : buf_state |= BM_PIN_COUNT_WAITER;
5719 : 96 : UnlockBufHdr(bufHdr, buf_state);
8828 tgl@sss.pgh.pa.us 5720 : 96 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5721 : :
5722 : : /* Wait to be signaled by UnpinBuffer() */
5705 simon@2ndQuadrant.co 5723 [ + + ]: 96 : if (InHotStandby)
5724 : : {
929 drowley@postgresql.o 5725 [ + + ]: 9 : if (!waiting)
5726 : : {
5727 : : /* adjust the process title to indicate that it's waiting */
5728 : 2 : set_ps_display_suffix("waiting");
5729 : 2 : waiting = true;
5730 : : }
5731 : :
5732 : : /*
5733 : : * Emit the log message if the startup process is waiting longer
5734 : : * than deadlock_timeout for recovery conflict on buffer pin.
5735 : : *
5736 : : * Skip this if first time through because the startup process has
5737 : : * not started waiting yet in this case. So, the wait start
5738 : : * timestamp is set after this logic.
5739 : : */
1702 fujii@postgresql.org 5740 [ + + + + ]: 9 : if (waitStart != 0 && !logged_recovery_conflict)
5741 : : {
5742 : 3 : TimestampTz now = GetCurrentTimestamp();
5743 : :
5744 [ + + ]: 3 : if (TimestampDifferenceExceeds(waitStart, now,
5745 : : DeadlockTimeout))
5746 : : {
5747 : 2 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
5748 : : waitStart, now, NULL, true);
5749 : 2 : logged_recovery_conflict = true;
5750 : : }
5751 : : }
5752 : :
5753 : : /*
5754 : : * Set the wait start timestamp if logging is enabled and first
5755 : : * time through.
5756 : : */
5757 [ + - + + ]: 9 : if (log_recovery_conflict_waits && waitStart == 0)
5758 : 2 : waitStart = GetCurrentTimestamp();
5759 : :
5760 : : /* Publish the bufid that Startup process waits on */
5705 simon@2ndQuadrant.co 5761 : 9 : SetStartupBufferPinWaitBufId(buffer - 1);
5762 : : /* Set alarm and then wait to be signaled by UnpinBuffer() */
5763 : 9 : ResolveRecoveryConflictWithBufferPin();
5764 : : /* Reset the published bufid */
5765 : 9 : SetStartupBufferPinWaitBufId(-1);
5766 : : }
5767 : : else
796 michael@paquier.xyz 5768 : 87 : ProcWaitForSignal(WAIT_EVENT_BUFFER_PIN);
5769 : :
5770 : : /*
5771 : : * Remove flag marking us as waiter. Normally this will not be set
5772 : : * anymore, but ProcWaitForSignal() can return for other signals as
5773 : : * well. We take care to only reset the flag if we're the waiter, as
5774 : : * theoretically another backend could have started waiting. That's
5775 : : * impossible with the current usages due to table level locking, but
5776 : : * better be safe.
5777 : : */
3436 andres@anarazel.de 5778 : 96 : buf_state = LockBufHdr(bufHdr);
5779 [ + + ]: 96 : if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
562 heikki.linnakangas@i 5780 [ + - ]: 7 : bufHdr->wait_backend_pgprocno == MyProcNumber)
3436 andres@anarazel.de 5781 : 7 : buf_state &= ~BM_PIN_COUNT_WAITER;
5782 : 96 : UnlockBufHdr(bufHdr, buf_state);
5783 : :
7630 tgl@sss.pgh.pa.us 5784 : 96 : PinCountWaitBuf = NULL;
5785 : : /* Loop back and try again */
5786 : : }
5787 : : }
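/*
 * Sketch of the cleanup-lock protocol described above, roughly as a
 * VACUUM-style caller would use it (illustrative, not part of bufmgr.c):
 */
static void
cleanup_page_example(Relation rel, BlockNumber blkno)
{
    Buffer      buf = ReadBuffer(rel, blkno);

    /* pin first, then wait until we hold the exclusive lock with pin count 1 */
    LockBufferForCleanup(buf);
    /* ... now safe to remove line pointers / defragment the page ... */
    UnlockReleaseBuffer(buf);
}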
5788 : :
5789 : : /*
5790 : : * Check called from ProcessRecoveryConflictInterrupts() when Startup process
5791 : : * requests cancellation of all pin holders that are blocking it.
5792 : : */
5793 : : bool
5705 simon@2ndQuadrant.co 5794 : 3 : HoldingBufferPinThatDelaysRecovery(void)
5795 : : {
5671 bruce@momjian.us 5796 : 3 : int bufid = GetStartupBufferPinWaitBufId();
5797 : :
5798 : : /*
5799 : : * If we get woken slowly then it's possible that the Startup process was
5800 : : * already woken by other backends before we got here. It is also possible
5801 : : * that we get here via multiple interrupts or interrupts at inappropriate
5802 : : * times, so make sure we do nothing if the bufid is not set.
5803 : : */
5705 simon@2ndQuadrant.co 5804 [ + + ]: 3 : if (bufid < 0)
5805 : 1 : return false;
5806 : :
4025 andres@anarazel.de 5807 [ + - ]: 2 : if (GetPrivateRefCount(bufid + 1) > 0)
5705 simon@2ndQuadrant.co 5808 : 2 : return true;
5809 : :
5705 simon@2ndQuadrant.co 5810 :UBC 0 : return false;
5811 : : }
5812 : :
5813 : : /*
5814 : : * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
5815 : : *
5816 : : * We won't loop, but just check once to see if the pin count is OK. If
5817 : : * not, return false with no lock held.
5818 : : */
5819 : : bool
6561 tgl@sss.pgh.pa.us 5820 :CBC 112799 : ConditionalLockBufferForCleanup(Buffer buffer)
5821 : : {
5822 : : BufferDesc *bufHdr;
5823 : : uint32 buf_state,
5824 : : refcount;
5825 : :
5826 [ - + ]: 112799 : Assert(BufferIsValid(buffer));
5827 : :
5828 : : /* see AIO related comment in LockBufferForCleanup() */
5829 : :
5830 [ + + ]: 112799 : if (BufferIsLocal(buffer))
5831 : : {
3436 andres@anarazel.de 5832 : 804 : refcount = LocalRefCount[-buffer - 1];
5833 : : /* There should be exactly one pin */
5834 [ - + ]: 804 : Assert(refcount > 0);
5835 [ + + ]: 804 : if (refcount != 1)
6561 tgl@sss.pgh.pa.us 5836 : 21 : return false;
5837 : : /* Nobody else to wait for */
5838 : 783 : return true;
5839 : : }
5840 : :
5841 : : /* There should be exactly one local pin */
3436 andres@anarazel.de 5842 : 111995 : refcount = GetPrivateRefCount(buffer);
5843 [ - + ]: 111995 : Assert(refcount);
5844 [ + + ]: 111995 : if (refcount != 1)
6561 tgl@sss.pgh.pa.us 5845 : 234 : return false;
5846 : :
5847 : : /* Try to acquire lock */
5848 [ + + ]: 111761 : if (!ConditionalLockBuffer(buffer))
5849 : 27 : return false;
5850 : :
3873 andres@anarazel.de 5851 : 111734 : bufHdr = GetBufferDescriptor(buffer - 1);
3436 5852 : 111734 : buf_state = LockBufHdr(bufHdr);
5853 : 111734 : refcount = BUF_STATE_GET_REFCOUNT(buf_state);
5854 : :
5855 [ - + ]: 111734 : Assert(refcount > 0);
5856 [ + + ]: 111734 : if (refcount == 1)
5857 : : {
5858 : : /* Successfully acquired exclusive lock with pincount 1 */
5859 : 111525 : UnlockBufHdr(bufHdr, buf_state);
6561 tgl@sss.pgh.pa.us 5860 : 111525 : return true;
5861 : : }
5862 : :
5863 : : /* Failed, so release the lock */
3436 andres@anarazel.de 5864 : 209 : UnlockBufHdr(bufHdr, buf_state);
6561 tgl@sss.pgh.pa.us 5865 : 209 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5866 : 209 : return false;
5867 : : }
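/*
 * Sketch of the non-blocking variant: skip the page rather than wait when a
 * cleanup lock is not immediately available (illustrative caller; "buf" is
 * assumed to be the only pin this backend holds on the buffer).
 */
static bool
try_cleanup_page_example(Buffer buf)
{
    if (!ConditionalLockBufferForCleanup(buf))
        return false;           /* another pin exists; caller skips this page */

    /* ... perform cleanup on BufferGetPage(buf) ... */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    return true;
}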
5868 : :
5869 : : /*
5870 : : * IsBufferCleanupOK - as above, but we already have the lock
5871 : : *
5872 : : * Check whether it's OK to perform cleanup on a buffer we've already
5873 : : * locked. If we observe that the pin count is 1, our exclusive lock
5874 : : * happens to be a cleanup lock, and we can proceed with anything that
5875 : : * would have been allowable had we sought a cleanup lock originally.
5876 : : */
5877 : : bool
3228 rhaas@postgresql.org 5878 : 2042 : IsBufferCleanupOK(Buffer buffer)
5879 : : {
5880 : : BufferDesc *bufHdr;
5881 : : uint32 buf_state;
5882 : :
5883 [ - + ]: 2042 : Assert(BufferIsValid(buffer));
5884 : :
5885 : : /* see AIO related comment in LockBufferForCleanup() */
5886 : :
5887 [ - + ]: 2042 : if (BufferIsLocal(buffer))
5888 : : {
5889 : : /* There should be exactly one pin */
3228 rhaas@postgresql.org 5890 [ # # ]:UBC 0 : if (LocalRefCount[-buffer - 1] != 1)
5891 : 0 : return false;
5892 : : /* Nobody else to wait for */
5893 : 0 : return true;
5894 : : }
5895 : :
5896 : : /* There should be exactly one local pin */
3228 rhaas@postgresql.org 5897 [ - + ]:CBC 2042 : if (GetPrivateRefCount(buffer) != 1)
3228 rhaas@postgresql.org 5898 :UBC 0 : return false;
5899 : :
3228 rhaas@postgresql.org 5900 :CBC 2042 : bufHdr = GetBufferDescriptor(buffer - 1);
5901 : :
5902 : : /* caller must hold exclusive lock on buffer */
5903 [ - + ]: 2042 : Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
5904 : : LW_EXCLUSIVE));
5905 : :
5906 : 2042 : buf_state = LockBufHdr(bufHdr);
5907 : :
5908 [ - + ]: 2042 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5909 [ + - ]: 2042 : if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5910 : : {
5911 : : /* pincount is OK. */
5912 : 2042 : UnlockBufHdr(bufHdr, buf_state);
5913 : 2042 : return true;
5914 : : }
5915 : :
3228 rhaas@postgresql.org 5916 :UBC 0 : UnlockBufHdr(bufHdr, buf_state);
5917 : 0 : return false;
5918 : : }
5919 : :
5920 : :
5921 : : /*
5922 : : * Functions for buffer I/O handling
5923 : : *
5924 : : * Also note that these are used only for shared buffers, not local ones.
5925 : : */
5926 : :
5927 : : /*
5928 : : * WaitIO -- Block until the BM_IO_IN_PROGRESS flag on 'buf' is cleared.
5929 : : */
5930 : : static void
3582 rhaas@postgresql.org 5931 :CBC 1558 : WaitIO(BufferDesc *buf)
5932 : : {
1640 tmunro@postgresql.or 5933 : 1558 : ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
5934 : :
5935 : 1558 : ConditionVariablePrepareToSleep(cv);
5936 : : for (;;)
7491 tgl@sss.pgh.pa.us 5937 : 1547 : {
5938 : : uint32 buf_state;
5939 : : PgAioWaitRef iow;
5940 : :
5941 : : /*
5942 : : * It may not be necessary to acquire the spinlock to check the flag
5943 : : * here, but since this test is essential for correctness, we'd better
5944 : : * play it safe.
5945 : : */
3436 andres@anarazel.de 5946 : 3105 : buf_state = LockBufHdr(buf);
5947 : :
5948 : : /*
5949 : : * Copy the wait reference while holding the spinlock. This protects
5950 : : * against a concurrent TerminateBufferIO() in another backend from
5951 : : * clearing the wref while it's being read.
5952 : : */
160 5953 : 3105 : iow = buf->io_wref;
3436 5954 : 3105 : UnlockBufHdr(buf, buf_state);
5955 : :
5956 : : /* no IO in progress, we don't need to wait */
5957 [ + + ]: 3105 : if (!(buf_state & BM_IO_IN_PROGRESS))
7491 tgl@sss.pgh.pa.us 5958 : 1558 : break;
5959 : :
5960 : : /*
5961 : : * The buffer has asynchronous IO in progress, wait for it to
5962 : : * complete.
5963 : : */
160 andres@anarazel.de 5964 [ + + ]: 1547 : if (pgaio_wref_valid(&iow))
5965 : : {
5966 : 1393 : pgaio_wref_wait(&iow);
5967 : :
5968 : : /*
5969 : : * The AIO subsystem internally uses condition variables and thus
5970 : : * might remove this backend from the BufferDesc's CV. While that
5971 : : * wouldn't cause a correctness issue (the first CV sleep just
5972 : : * immediately returns if not already registered), it seems worth
5973 : : * avoiding unnecessary loop iterations, given that we take care
5974 : : * to do so at the start of the function.
5975 : : */
5976 : 1393 : ConditionVariablePrepareToSleep(cv);
5977 : 1393 : continue;
5978 : : }
5979 : :
5980 : : /* wait on BufferDesc->cv, e.g. for concurrent synchronous IO */
1640 tmunro@postgresql.or 5981 : 154 : ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
5982 : : }
5983 : 1558 : ConditionVariableCancelSleep();
7491 tgl@sss.pgh.pa.us 5984 : 1558 : }
5985 : :
5986 : : /*
5987 : : * StartBufferIO: begin I/O on this buffer
5988 : : * (Assumptions)
5989 : : * My process is executing no IO on this buffer
5990 : : * The buffer is Pinned
5991 : : *
5992 : : * In some scenarios multiple backends could attempt the same I/O operation
5993 : : * concurrently. If someone else has already started I/O on this buffer then
5994 : : * we will wait for completion of the IO using WaitIO().
5995 : : *
5996 : : * Input operations are only attempted on buffers that are not BM_VALID,
5997 : : * and output operations only on buffers that are BM_VALID and BM_DIRTY,
5998 : : * so we can always tell if the work is already done.
5999 : : *
6000 : : * Returns true if we successfully marked the buffer as I/O busy,
6001 : : * false if someone else already did the work.
6002 : : *
6003 : : * If nowait is true, then we don't wait for an I/O to be finished by another
6004 : : * backend. In that case, false indicates either that the I/O was already
6005 : : * finished, or is still in progress. This is useful for callers that want to
6006 : : * find out if they can perform the I/O as part of a larger operation, without
6007 : : * waiting for the answer or distinguishing the reasons why not.
6008 : : */
6009 : : bool
521 tmunro@postgresql.or 6010 : 2443066 : StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
6011 : : {
6012 : : uint32 buf_state;
6013 : :
668 heikki.linnakangas@i 6014 : 2443066 : ResourceOwnerEnlarge(CurrentResourceOwner);
6015 : :
6016 : : for (;;)
6017 : : {
3436 andres@anarazel.de 6018 : 2444623 : buf_state = LockBufHdr(buf);
6019 : :
6020 [ + + ]: 2444623 : if (!(buf_state & BM_IO_IN_PROGRESS))
7491 tgl@sss.pgh.pa.us 6021 : 2443060 : break;
3436 andres@anarazel.de 6022 : 1563 : UnlockBufHdr(buf, buf_state);
521 tmunro@postgresql.or 6023 [ + + ]: 1563 : if (nowait)
6024 : 6 : return false;
7491 tgl@sss.pgh.pa.us 6025 : 1557 : WaitIO(buf);
6026 : : }
6027 : :
6028 : : /* Once we get here, there is definitely no I/O active on this buffer */
6029 : :
6030 : : /* Check if someone else already did the I/O */
3436 andres@anarazel.de 6031 [ + + + + ]: 2443060 : if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
6032 : : {
6033 : 1958 : UnlockBufHdr(buf, buf_state);
7491 tgl@sss.pgh.pa.us 6034 : 1958 : return false;
6035 : : }
6036 : :
3436 andres@anarazel.de 6037 : 2441102 : buf_state |= BM_IO_IN_PROGRESS;
6038 : 2441102 : UnlockBufHdr(buf, buf_state);
6039 : :
885 6040 : 2441102 : ResourceOwnerRememberBufferIO(CurrentResourceOwner,
6041 : : BufferDescriptorGetBuffer(buf));
6042 : :
7491 tgl@sss.pgh.pa.us 6043 : 2441102 : return true;
6044 : : }
6045 : :
6046 : : /*
6047 : : * TerminateBufferIO: release a buffer we were doing I/O on
6048 : : * (Assumptions)
6049 : : * My process is executing IO for the buffer
6050 : : * BM_IO_IN_PROGRESS bit is set for the buffer
6051 : : * The buffer is Pinned
6052 : : *
6053 : : * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
6054 : : * buffer's BM_DIRTY flag. This is appropriate when terminating a
6055 : : * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
6056 : : * marking the buffer clean if it was re-dirtied while we were writing.
6057 : : *
6058 : : * set_flag_bits gets ORed into the buffer's flags. It must include
6059 : : * BM_IO_ERROR in a failure case. For successful completion it could
6060 : : * be 0, or BM_VALID if we just finished reading in the page.
6061 : : *
6062 : : * If forget_owner is true, we release the buffer I/O from the current
6063 : : * resource owner. (forget_owner=false is used when the resource owner itself
6064 : : * is being released)
6065 : : */
6066 : : void
668 heikki.linnakangas@i 6067 : 2304634 : TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
6068 : : bool forget_owner, bool release_aio)
6069 : : {
6070 : : uint32 buf_state;
6071 : :
3436 andres@anarazel.de 6072 : 2304634 : buf_state = LockBufHdr(buf);
6073 : :
6074 [ - + ]: 2304634 : Assert(buf_state & BM_IO_IN_PROGRESS);
175 6075 : 2304634 : buf_state &= ~BM_IO_IN_PROGRESS;
6076 : :
6077 : : /* Clear earlier errors, if this IO failed, it'll be marked again */
6078 : 2304634 : buf_state &= ~BM_IO_ERROR;
6079 : :
3436 6080 [ + + + + ]: 2304634 : if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
6081 : 552666 : buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
6082 : :
160 6083 [ + + ]: 2304634 : if (release_aio)
6084 : : {
6085 : : /* release ownership by the AIO subsystem */
6086 [ - + ]: 1260646 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
6087 : 1260646 : buf_state -= BUF_REFCOUNT_ONE;
6088 : 1260646 : pgaio_wref_clear(&buf->io_wref);
6089 : : }
6090 : :
3436 6091 : 2304634 : buf_state |= set_flag_bits;
6092 : 2304634 : UnlockBufHdr(buf, buf_state);
6093 : :
668 heikki.linnakangas@i 6094 [ + + ]: 2304634 : if (forget_owner)
6095 : 1043964 : ResourceOwnerForgetBufferIO(CurrentResourceOwner,
6096 : : BufferDescriptorGetBuffer(buf));
6097 : :
1640 tmunro@postgresql.or 6098 : 2304634 : ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
6099 : :
6100 : : /*
6101 : : * Support LockBufferForCleanup()
6102 : : *
6103 : : * We may have just released the last pin other than the waiter's. In most
6104 : : * cases, this backend holds another pin on the buffer. But if, for
6105 : : * example, this backend is completing an IO issued by another backend, it
6106 : : * may be time to wake the waiter.
6107 : : */
160 andres@anarazel.de 6108 [ + + - + ]: 2304634 : if (release_aio && (buf_state & BM_PIN_COUNT_WAITER))
160 andres@anarazel.de 6109 :UBC 0 : WakePinCountWaiter(buf);
9364 inoue@tpf.co.jp 6110 :CBC 2304634 : }
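/*
 * Sketch of the synchronous write-side I/O protocol, simplified from what
 * FlushBuffer() does (illustrative; the actual smgr write, error handling,
 * and the content lock the caller must hold are elided; the buffer is
 * assumed to be pinned):
 */
static void
write_buffer_io_example(BufferDesc *buf)
{
    /* forInput=false: we intend to write; false means it's already clean */
    if (!StartBufferIO(buf, false, false))
        return;

    /* ... write the block out via smgr here ... */

    /* success: clear BM_DIRTY unless the page was re-dirtied meanwhile */
    TerminateBufferIO(buf, true, 0, true, false);
}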
6111 : :
6112 : : /*
6113 : : * AbortBufferIO: Clean up active buffer I/O after an error.
6114 : : *
6115 : : * All LWLocks we might have held have been released,
6116 : : * but we haven't yet released buffer pins, so the buffer is still pinned.
6117 : : *
6118 : : * If I/O was in progress, we always set BM_IO_ERROR, even though it's
6119 : : * possible the error condition wasn't related to the I/O.
6120 : : *
6121 : : * Note: this does not remove the buffer I/O from the resource owner.
6122 : : * That's correct when we're releasing the whole resource owner, but
6123 : : * beware if you use this in other contexts.
6124 : : */
6125 : : static void
877 pg@bowt.ie 6126 : 15 : AbortBufferIO(Buffer buffer)
6127 : : {
6128 : 15 : BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
6129 : : uint32 buf_state;
6130 : :
885 andres@anarazel.de 6131 : 15 : buf_state = LockBufHdr(buf_hdr);
6132 [ - + ]: 15 : Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));
6133 : :
6134 [ + - ]: 15 : if (!(buf_state & BM_VALID))
6135 : : {
6136 [ - + ]: 15 : Assert(!(buf_state & BM_DIRTY));
6137 : 15 : UnlockBufHdr(buf_hdr, buf_state);
6138 : : }
6139 : : else
6140 : : {
883 andres@anarazel.de 6141 [ # # ]:UBC 0 : Assert(buf_state & BM_DIRTY);
885 6142 : 0 : UnlockBufHdr(buf_hdr, buf_state);
6143 : :
6144 : : /* Issue notice if this is not the first failure... */
6145 [ # # ]: 0 : if (buf_state & BM_IO_ERROR)
6146 : : {
6147 : : /* Buffer is pinned, so we can read tag without spinlock */
6148 [ # # ]: 0 : ereport(WARNING,
6149 : : (errcode(ERRCODE_IO_ERROR),
6150 : : errmsg("could not write block %u of %s",
6151 : : buf_hdr->tag.blockNum,
6152 : : relpathperm(BufTagGetRelFileLocator(&buf_hdr->tag),
6153 : : BufTagGetForkNum(&buf_hdr->tag)).str),
6154 : : errdetail("Multiple failures --- write error might be permanent.")));
6155 : : }
6156 : : }
6157 : :
160 andres@anarazel.de 6158 :CBC 15 : TerminateBufferIO(buf_hdr, false, BM_IO_ERROR, false, false);
9364 inoue@tpf.co.jp 6159 : 15 : }
6160 : :
6161 : : /*
6162 : : * Error context callback for errors occurring during shared buffer writes.
6163 : : */
6164 : : static void
5503 rhaas@postgresql.org 6165 : 37 : shared_buffer_write_error_callback(void *arg)
6166 : : {
3582 6167 : 37 : BufferDesc *bufHdr = (BufferDesc *) arg;
6168 : :
6169 : : /* Buffer is pinned, so we can read the tag without locking the spinlock */
8155 tgl@sss.pgh.pa.us 6170 [ + - ]: 37 : if (bufHdr != NULL)
11 peter@eisentraut.org 6171 : 74 : errcontext("writing block %u of relation \"%s\"",
6172 : : bufHdr->tag.blockNum,
193 andres@anarazel.de 6173 : 37 : relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
6174 : : BufTagGetForkNum(&bufHdr->tag)).str);
5503 rhaas@postgresql.org 6175 : 37 : }
6176 : :
6177 : : /*
6178 : : * Error context callback for errors occurring during local buffer writes.
6179 : : */
6180 : : static void
5503 rhaas@postgresql.org 6181 :UBC 0 : local_buffer_write_error_callback(void *arg)
6182 : : {
3582 6183 : 0 : BufferDesc *bufHdr = (BufferDesc *) arg;
6184 : :
5503 6185 [ # # ]: 0 : if (bufHdr != NULL)
11 peter@eisentraut.org 6186 : 0 : errcontext("writing block %u of relation \"%s\"",
6187 : : bufHdr->tag.blockNum,
193 andres@anarazel.de 6188 : 0 : relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
6189 : : MyProcNumber,
6190 : : BufTagGetForkNum(&bufHdr->tag)).str);
8155 tgl@sss.pgh.pa.us 6191 : 0 : }
6192 : :
6193 : : /*
6194 : : * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
6195 : : */
6196 : : static int
1158 rhaas@postgresql.org 6197 :CBC 9216457 : rlocator_comparator(const void *p1, const void *p2)
6198 : : {
6199 : 9216457 : RelFileLocator n1 = *(const RelFileLocator *) p1;
6200 : 9216457 : RelFileLocator n2 = *(const RelFileLocator *) p2;
6201 : :
6202 [ + + ]: 9216457 : if (n1.relNumber < n2.relNumber)
4615 alvherre@alvh.no-ip. 6203 : 9177751 : return -1;
1158 rhaas@postgresql.org 6204 [ + + ]: 38706 : else if (n1.relNumber > n2.relNumber)
4615 alvherre@alvh.no-ip. 6205 : 37021 : return 1;
6206 : :
1158 rhaas@postgresql.org 6207 [ - + ]: 1685 : if (n1.dbOid < n2.dbOid)
4615 alvherre@alvh.no-ip. 6208 :UBC 0 : return -1;
1158 rhaas@postgresql.org 6209 [ - + ]:CBC 1685 : else if (n1.dbOid > n2.dbOid)
4615 alvherre@alvh.no-ip. 6210 :UBC 0 : return 1;
6211 : :
1158 rhaas@postgresql.org 6212 [ - + ]:CBC 1685 : if (n1.spcOid < n2.spcOid)
4615 alvherre@alvh.no-ip. 6213 :UBC 0 : return -1;
1158 rhaas@postgresql.org 6214 [ - + ]:CBC 1685 : else if (n1.spcOid > n2.spcOid)
4615 alvherre@alvh.no-ip. 6215 :UBC 0 : return 1;
6216 : : else
4615 alvherre@alvh.no-ip. 6217 :CBC 1685 : return 0;
6218 : : }
6219 : :
6220 : : /*
6221 : : * Lock buffer header - set BM_LOCKED in buffer state.
6222 : : */
6223 : : uint32
3436 andres@anarazel.de 6224 : 34321126 : LockBufHdr(BufferDesc *desc)
6225 : : {
6226 : : SpinDelayStatus delayStatus;
6227 : : uint32 old_buf_state;
6228 : :
885 6229 [ - + ]: 34321126 : Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));
6230 : :
3432 6231 : 34321126 : init_local_spin_delay(&delayStatus);
6232 : :
6233 : : while (true)
6234 : : {
6235 : : /* set BM_LOCKED flag */
3436 6236 : 34368756 : old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
6237 : : /* if it wasn't set before we're OK */
6238 [ + + ]: 34368756 : if (!(old_buf_state & BM_LOCKED))
6239 : 34321126 : break;
6240 : 47630 : perform_spin_delay(&delayStatus);
6241 : : }
6242 : 34321126 : finish_spin_delay(&delayStatus);
6243 : 34321126 : return old_buf_state | BM_LOCKED;
6244 : : }
6245 : :
6246 : : /*
6247 : : * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
6248 : : * state at that point.
6249 : : *
6250 : : * Obviously the buffer could be locked by the time the value is returned, so
6251 : : * this is primarily useful in CAS style loops.
6252 : : */
6253 : : static uint32
6254 : 453 : WaitBufHdrUnlocked(BufferDesc *buf)
6255 : : {
6256 : : SpinDelayStatus delayStatus;
6257 : : uint32 buf_state;
6258 : :
3432 6259 : 453 : init_local_spin_delay(&delayStatus);
6260 : :
3436 6261 : 453 : buf_state = pg_atomic_read_u32(&buf->state);
6262 : :
6263 [ + + ]: 71798 : while (buf_state & BM_LOCKED)
6264 : : {
6265 : 71345 : perform_spin_delay(&delayStatus);
6266 : 71345 : buf_state = pg_atomic_read_u32(&buf->state);
6267 : : }
6268 : :
6269 : 453 : finish_spin_delay(&delayStatus);
6270 : :
6271 : 453 : return buf_state;
6272 : : }
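/*
 * Sketch of the CAS-style loop mentioned above, similar to what PinBuffer()
 * does: retry the compare-and-exchange until the header is unlocked and our
 * update wins (illustrative; the refcount increment is just an example
 * modification, without the bookkeeping a real pin needs).
 */
static void
cas_loop_example(BufferDesc *buf)
{
    uint32      old_buf_state = pg_atomic_read_u32(&buf->state);
    uint32      buf_state;

    for (;;)
    {
        if (old_buf_state & BM_LOCKED)
            old_buf_state = WaitBufHdrUnlocked(buf);

        buf_state = old_buf_state + BUF_REFCOUNT_ONE;

        if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
                                           buf_state))
            break;              /* on failure, old_buf_state was refreshed */
    }
}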
6273 : :
6274 : : /*
6275 : : * BufferTag comparator.
6276 : : */
6277 : : static inline int
1639 tmunro@postgresql.or 6278 :UBC 0 : buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
6279 : : {
6280 : : int ret;
6281 : : RelFileLocator rlocatora;
6282 : : RelFileLocator rlocatorb;
6283 : :
1109 rhaas@postgresql.org 6284 : 0 : rlocatora = BufTagGetRelFileLocator(ba);
6285 : 0 : rlocatorb = BufTagGetRelFileLocator(bb);
6286 : :
6287 : 0 : ret = rlocator_comparator(&rlocatora, &rlocatorb);
6288 : :
3487 andres@anarazel.de 6289 [ # # ]: 0 : if (ret != 0)
6290 : 0 : return ret;
6291 : :
1109 rhaas@postgresql.org 6292 [ # # ]: 0 : if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
3487 andres@anarazel.de 6293 : 0 : return -1;
1109 rhaas@postgresql.org 6294 [ # # ]: 0 : if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
3487 andres@anarazel.de 6295 : 0 : return 1;
6296 : :
6297 [ # # ]: 0 : if (ba->blockNum < bb->blockNum)
6298 : 0 : return -1;
6299 [ # # ]: 0 : if (ba->blockNum > bb->blockNum)
6300 : 0 : return 1;
6301 : :
6302 : 0 : return 0;
6303 : : }
6304 : :
6305 : : /*
6306 : : * Comparator determining the writeout order in a checkpoint.
6307 : : *
6308 : : * It is important that tablespaces are compared first, the logic balancing
6309 : : * writes between tablespaces relies on it.
6310 : : */
6311 : : static inline int
1639 tmunro@postgresql.or 6312 :CBC 2946943 : ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
6313 : : {
6314 : : /* compare tablespace */
3487 andres@anarazel.de 6315 [ + + ]: 2946943 : if (a->tsId < b->tsId)
6316 : 5727 : return -1;
6317 [ + + ]: 2941216 : else if (a->tsId > b->tsId)
6318 : 26162 : return 1;
6319 : : /* compare relation */
1158 rhaas@postgresql.org 6320 [ + + ]: 2915054 : if (a->relNumber < b->relNumber)
3487 andres@anarazel.de 6321 : 824900 : return -1;
1158 rhaas@postgresql.org 6322 [ + + ]: 2090154 : else if (a->relNumber > b->relNumber)
3487 andres@anarazel.de 6323 : 797825 : return 1;
6324 : : /* compare fork */
6325 [ + + ]: 1292329 : else if (a->forkNum < b->forkNum)
6326 : 54638 : return -1;
6327 [ + + ]: 1237691 : else if (a->forkNum > b->forkNum)
6328 : 59191 : return 1;
6329 : : /* compare block number */
6330 [ + + ]: 1178500 : else if (a->blockNum < b->blockNum)
6331 : 581059 : return -1;
2796 tgl@sss.pgh.pa.us 6332 [ + + ]: 597441 : else if (a->blockNum > b->blockNum)
3487 andres@anarazel.de 6333 : 560342 : return 1;
6334 : : /* equal page IDs are unlikely, but not impossible */
2796 tgl@sss.pgh.pa.us 6335 : 37099 : return 0;
6336 : : }
6337 : :
6338 : : /*
6339 : : * Comparator for a Min-Heap over the per-tablespace checkpoint completion
6340 : : * progress.
6341 : : */
6342 : : static int
3487 andres@anarazel.de 6343 : 237834 : ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
6344 : : {
29 peter@eisentraut.org 6345 :GNC 237834 : CkptTsStatus *sa = (CkptTsStatus *) DatumGetPointer(a);
6346 : 237834 : CkptTsStatus *sb = (CkptTsStatus *) DatumGetPointer(b);
6347 : :
6348 : : /* we want a min-heap, so return 1 for the a < b */
3487 andres@anarazel.de 6349 [ + + ]:CBC 237834 : if (sa->progress < sb->progress)
6350 : 212977 : return 1;
6351 [ + + ]: 24857 : else if (sa->progress == sb->progress)
6352 : 809 : return 0;
6353 : : else
6354 : 24048 : return -1;
6355 : : }
6356 : :
6357 : : /*
6358 : : * Initialize a writeback context, discarding potential previous state.
6359 : : *
6360 : : * *max_pending is a pointer instead of an immediate value, so the coalesce
6361 : : * limits can easily be changed by the GUC mechanism, and so calling code does
6362 : : * not have to check the current configuration. A value of 0 means that no
6363 : : * writeback control will be performed.
6364 : : */
6365 : : void
6366 : 2559 : WritebackContextInit(WritebackContext *context, int *max_pending)
6367 : : {
6368 [ - + ]: 2559 : Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
6369 : :
6370 : 2559 : context->max_pending = max_pending;
6371 : 2559 : context->nr_pending = 0;
6372 : 2559 : }
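/*
 * Sketch of how a checkpoint-style caller initializes a writeback context
 * against a flush-after GUC, so later GUC changes take effect without
 * re-initialization (illustrative; checkpoint_flush_after is assumed to be
 * the relevant GUC variable):
 */
static void
writeback_context_example(void)
{
    WritebackContext wb_context;

    /* a limit of 0 disables writeback control entirely */
    WritebackContextInit(&wb_context, &checkpoint_flush_after);
    /* pass &wb_context to ScheduleBufferTagForWriteback() while writing */
}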
6373 : :
6374 : : /*
6375 : : * Add buffer to list of pending writeback requests.
6376 : : */
6377 : : void
843 6378 : 547184 : ScheduleBufferTagForWriteback(WritebackContext *wb_context, IOContext io_context,
6379 : : BufferTag *tag)
6380 : : {
6381 : : PendingWriteback *pending;
6382 : :
6383 : : /*
6384 : : * As pg_flush_data() doesn't do anything with fsync disabled, there's no
6385 : : * point in tracking writeback requests in that case.
6386 : : */
333 6387 [ + + ]: 547184 : if (io_direct_flags & IO_DIRECT_DATA ||
6388 [ + + ]: 546656 : !enableFsync)
882 tmunro@postgresql.or 6389 : 547182 : return;
6390 : :
6391 : : /*
6392 : : * Add buffer to the pending writeback array, unless writeback control is
6393 : : * disabled.
6394 : : */
843 andres@anarazel.de 6395 [ - + ]: 2 : if (*wb_context->max_pending > 0)
6396 : : {
843 andres@anarazel.de 6397 [ # # ]:UBC 0 : Assert(*wb_context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
6398 : :
6399 : 0 : pending = &wb_context->pending_writebacks[wb_context->nr_pending++];
6400 : :
3487 6401 : 0 : pending->tag = *tag;
6402 : : }
6403 : :
6404 : : /*
6405 : : * Perform pending flushes if the writeback limit is exceeded. This
6406 : : * includes the case where previously an item has been added, but control
6407 : : * is now disabled.
6408 : : */
843 andres@anarazel.de 6409 [ + - ]:CBC 2 : if (wb_context->nr_pending >= *wb_context->max_pending)
6410 : 2 : IssuePendingWritebacks(wb_context, io_context);
6411 : : }
6412 : :
6413 : : #define ST_SORT sort_pending_writebacks
6414 : : #define ST_ELEMENT_TYPE PendingWriteback
6415 : : #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
6416 : : #define ST_SCOPE static
6417 : : #define ST_DEFINE
6418 : : #include "lib/sort_template.h"
6419 : :
6420 : : /*
6421 : : * Issue all pending writeback requests, previously scheduled with
6422 : : * ScheduleBufferTagForWriteback, to the OS.
6423 : : *
6424 : : * Because this is only used to improve the OS's IO scheduling, we try never
6425 : : * to error out - it's just a hint.
6426 : : */
6427 : : void
6428 : 1041 : IssuePendingWritebacks(WritebackContext *wb_context, IOContext io_context)
6429 : : {
6430 : : instr_time io_start;
6431 : : int i;
6432 : :
6433 [ + - ]: 1041 : if (wb_context->nr_pending == 0)
3487 6434 : 1041 : return;
6435 : :
6436 : : /*
6437 : : * Executing the writes in order can make them a lot faster, and allows
6438 : : * merging writeback requests to consecutive blocks into larger writebacks.
6439 : : */
843 andres@anarazel.de 6440 :UBC 0 : sort_pending_writebacks(wb_context->pending_writebacks,
6441 : 0 : wb_context->nr_pending);
6442 : :
192 michael@paquier.xyz 6443 : 0 : io_start = pgstat_prepare_io_time(track_io_timing);
6444 : :
6445 : : /*
6446 : : * Coalesce neighbouring writes, but nothing else. For that we iterate
6447 : : * through the now-sorted array of pending flushes, and look ahead to
6448 : : * find all neighbouring (or identical) writes.
6449 : : */
843 andres@anarazel.de 6450 [ # # ]: 0 : for (i = 0; i < wb_context->nr_pending; i++)
6451 : : {
6452 : : PendingWriteback *cur;
6453 : : PendingWriteback *next;
6454 : : SMgrRelation reln;
6455 : : int ahead;
6456 : : BufferTag tag;
6457 : : RelFileLocator currlocator;
3487 6458 : 0 : Size nblocks = 1;
6459 : :
843 6460 : 0 : cur = &wb_context->pending_writebacks[i];
3487 6461 : 0 : tag = cur->tag;
1109 rhaas@postgresql.org 6462 : 0 : currlocator = BufTagGetRelFileLocator(&tag);
6463 : :
6464 : : /*
6465 : : * Peek ahead, into following writeback requests, to see if they can
6466 : : * be combined with the current one.
6467 : : */
843 andres@anarazel.de 6468 [ # # ]: 0 : for (ahead = 0; i + ahead + 1 < wb_context->nr_pending; ahead++)
6469 : : {
6470 : :
6471 : 0 : next = &wb_context->pending_writebacks[i + ahead + 1];
6472 : :
6473 : : /* different file, stop */
1109 rhaas@postgresql.org 6474 [ # # # # : 0 : if (!RelFileLocatorEquals(currlocator,
# # ]
6475 [ # # ]: 0 : BufTagGetRelFileLocator(&next->tag)) ||
6476 : 0 : BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
6477 : : break;
6478 : :
6479 : : /* ok, block queued twice, skip */
3487 andres@anarazel.de 6480 [ # # ]: 0 : if (cur->tag.blockNum == next->tag.blockNum)
6481 : 0 : continue;
6482 : :
6483 : : /* only merge consecutive writes */
6484 [ # # ]: 0 : if (cur->tag.blockNum + 1 != next->tag.blockNum)
6485 : 0 : break;
6486 : :
6487 : 0 : nblocks++;
6488 : 0 : cur = next;
6489 : : }
6490 : :
6491 : 0 : i += ahead;
6492 : :
6493 : : /* and finally tell the kernel to write the data to storage */
552 heikki.linnakangas@i 6494 : 0 : reln = smgropen(currlocator, INVALID_PROC_NUMBER);
1109 rhaas@postgresql.org 6495 : 0 : smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
6496 : : }
6497 : :
6498 : : /*
6499 : : * Assume that writeback requests are only issued for buffers containing
6500 : : * blocks of permanent relations.
6501 : : */
843 andres@anarazel.de 6502 : 0 : pgstat_count_io_op_time(IOOBJECT_RELATION, io_context,
235 michael@paquier.xyz 6503 : 0 : IOOP_WRITEBACK, io_start, wb_context->nr_pending, 0);
6504 : :
843 andres@anarazel.de 6505 : 0 : wb_context->nr_pending = 0;
6506 : : }
6507 : :
6508 : : /* ResourceOwner callbacks */
6509 : :
6510 : : static void
668 heikki.linnakangas@i 6511 :CBC 15 : ResOwnerReleaseBufferIO(Datum res)
6512 : : {
6513 : 15 : Buffer buffer = DatumGetInt32(res);
6514 : :
6515 : 15 : AbortBufferIO(buffer);
6516 : 15 : }
6517 : :
6518 : : static char *
668 heikki.linnakangas@i 6519 :UBC 0 : ResOwnerPrintBufferIO(Datum res)
6520 : : {
6521 : 0 : Buffer buffer = DatumGetInt32(res);
6522 : :
6523 : 0 : return psprintf("lost track of buffer IO on buffer %d", buffer);
6524 : : }
6525 : :
6526 : : static void
668 heikki.linnakangas@i 6527 :CBC 7420 : ResOwnerReleaseBufferPin(Datum res)
6528 : : {
6529 : 7420 : Buffer buffer = DatumGetInt32(res);
6530 : :
6531 : : /* Like ReleaseBuffer, but don't call ResourceOwnerForgetBuffer */
6532 [ - + ]: 7420 : if (!BufferIsValid(buffer))
668 heikki.linnakangas@i 6533 [ # # ]:UBC 0 : elog(ERROR, "bad buffer ID: %d", buffer);
6534 : :
668 heikki.linnakangas@i 6535 [ + + ]:CBC 7420 : if (BufferIsLocal(buffer))
6536 : 2990 : UnpinLocalBufferNoOwner(buffer);
6537 : : else
6538 : 4430 : UnpinBufferNoOwner(GetBufferDescriptor(buffer - 1));
6539 : 7420 : }
6540 : :
6541 : : static char *
668 heikki.linnakangas@i 6542 :UBC 0 : ResOwnerPrintBufferPin(Datum res)
6543 : : {
6544 : 0 : return DebugPrintBufferRefcount(DatumGetInt32(res));
6545 : : }
6546 : :
6547 : : /*
6548 : : * Helper function to evict unpinned buffer whose buffer header lock is
6549 : : * already acquired.
6550 : : */
6551 : : static bool
151 andres@anarazel.de 6552 :CBC 2185 : EvictUnpinnedBufferInternal(BufferDesc *desc, bool *buffer_flushed)
6553 : : {
6554 : : uint32 buf_state;
6555 : : bool result;
6556 : :
6557 : 2185 : *buffer_flushed = false;
6558 : :
6559 : 2185 : buf_state = pg_atomic_read_u32(&(desc->state));
6560 [ - + ]: 2185 : Assert(buf_state & BM_LOCKED);
6561 : :
517 tmunro@postgresql.or 6562 [ - + ]: 2185 : if ((buf_state & BM_VALID) == 0)
6563 : : {
517 tmunro@postgresql.or 6564 :UBC 0 : UnlockBufHdr(desc, buf_state);
6565 : 0 : return false;
6566 : : }
6567 : :
6568 : : /* Check that it's not pinned already. */
517 tmunro@postgresql.or 6569 [ - + ]:CBC 2185 : if (BUF_STATE_GET_REFCOUNT(buf_state) > 0)
6570 : : {
517 tmunro@postgresql.or 6571 :UBC 0 : UnlockBufHdr(desc, buf_state);
6572 : 0 : return false;
6573 : : }
6574 : :
517 tmunro@postgresql.or 6575 :CBC 2185 : PinBuffer_Locked(desc); /* releases spinlock */
6576 : :
6577 : : /* If it was dirty, try to clean it once. */
6578 [ + + ]: 2185 : if (buf_state & BM_DIRTY)
6579 : : {
6580 : 999 : LWLockAcquire(BufferDescriptorGetContentLock(desc), LW_SHARED);
6581 : 999 : FlushBuffer(desc, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
151 andres@anarazel.de 6582 : 999 : *buffer_flushed = true;
517 tmunro@postgresql.or 6583 : 999 : LWLockRelease(BufferDescriptorGetContentLock(desc));
6584 : : }
6585 : :
6586 : : /* This will return false if it becomes dirty or someone else pins it. */
6587 : 2185 : result = InvalidateVictimBuffer(desc);
6588 : :
6589 : 2185 : UnpinBuffer(desc);
6590 : :
6591 : 2185 : return result;
6592 : : }
6593 : :
6594 : : /*
6595 : : * Try to evict the current block in a shared buffer.
6596 : : *
6597 : : * This function is intended for testing/development use only!
6598 : : *
6599 : : * To succeed, the buffer must not be pinned on entry, so if the caller had a
6600 : : * particular block in mind, it might already have been replaced by some other
6601 : : * block by the time this function runs. It's also unpinned on return, so the
6602 : : * buffer might be occupied again by the time control is returned, potentially
6603 : : * even by the same block. This inherent raciness without other interlocking
6604 : : * makes the function unsuitable for non-testing usage.
6605 : : *
6606 : : * *buffer_flushed is set to true if the buffer was dirty and has been
6607 : : * flushed, false otherwise. However, *buffer_flushed=true does not
6608 : : * necessarily mean that we flushed the buffer, it could have been flushed by
6609 : : * someone else.
6610 : : *
6611 : : * Returns true if the buffer was valid and it has now been made invalid.
6612 : : * Returns false if it wasn't valid, if it couldn't be evicted due to a pin,
6613 : : * or if the buffer becomes dirty again while we're trying to write it out.
6614 : : */
6615 : : bool
151 andres@anarazel.de 6616 : 145 : EvictUnpinnedBuffer(Buffer buf, bool *buffer_flushed)
6617 : : {
6618 : : BufferDesc *desc;
6619 : :
6620 [ + - - + ]: 145 : Assert(BufferIsValid(buf) && !BufferIsLocal(buf));
6621 : :
6622 : : /* Make sure we can pin the buffer. */
6623 : 145 : ResourceOwnerEnlarge(CurrentResourceOwner);
6624 : 145 : ReservePrivateRefCountEntry();
6625 : :
6626 : 145 : desc = GetBufferDescriptor(buf - 1);
6627 : 145 : LockBufHdr(desc);
6628 : :
6629 : 145 : return EvictUnpinnedBufferInternal(desc, buffer_flushed);
6630 : : }
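/*
 * Sketch of a testing/development caller of EvictUnpinnedBuffer(), in the
 * style of contrib/pg_buffercache's eviction function (illustrative; buf is
 * assumed to be a valid shared-buffer ID):
 */
static bool
evict_buffer_example(Buffer buf)
{
    bool        buffer_flushed;
    bool        evicted = EvictUnpinnedBuffer(buf, &buffer_flushed);

    elog(DEBUG1, "buffer %d: evicted=%d, flushed=%d",
         buf, evicted, buffer_flushed);
    return evicted;
}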
6631 : :
6632 : : /*
6633 : : * Try to evict all the shared buffers.
6634 : : *
6635 : : * This function is intended for testing/development use only! See
6636 : : * EvictUnpinnedBuffer().
6637 : : *
6638 : : * The buffers_* parameters are mandatory and indicate the total count of
6639 : : * buffers that:
6640 : : * - buffers_evicted - were evicted
6641 : : * - buffers_flushed - were flushed
6642 : : * - buffers_skipped - could not be evicted
6643 : : */
6644 : : void
6645 : 1 : EvictAllUnpinnedBuffers(int32 *buffers_evicted, int32 *buffers_flushed,
6646 : : int32 *buffers_skipped)
6647 : : {
6648 : 1 : *buffers_evicted = 0;
6649 : 1 : *buffers_skipped = 0;
6650 : 1 : *buffers_flushed = 0;
6651 : :
6652 [ + + ]: 16385 : for (int buf = 1; buf <= NBuffers; buf++)
6653 : : {
6654 : 16384 : BufferDesc *desc = GetBufferDescriptor(buf - 1);
6655 : : uint32 buf_state;
6656 : : bool buffer_flushed;
6657 : :
6658 : 16384 : buf_state = pg_atomic_read_u32(&desc->state);
6659 [ + + ]: 16384 : if (!(buf_state & BM_VALID))
6660 : 14344 : continue;
6661 : :
6662 : 2040 : ResourceOwnerEnlarge(CurrentResourceOwner);
6663 : 2040 : ReservePrivateRefCountEntry();
6664 : :
6665 : 2040 : LockBufHdr(desc);
6666 : :
6667 [ + - ]: 2040 : if (EvictUnpinnedBufferInternal(desc, &buffer_flushed))
6668 : 2040 : (*buffers_evicted)++;
6669 : : else
151 andres@anarazel.de 6670 :UBC 0 : (*buffers_skipped)++;
6671 : :
151 andres@anarazel.de 6672 [ + + ]:CBC 2040 : if (buffer_flushed)
6673 : 971 : (*buffers_flushed)++;
6674 : : }
6675 : 1 : }
6676 : :
6677 : : /*
6678 : : * Try to evict all the shared buffers containing provided relation's pages.
6679 : : *
6680 : : * This function is intended for testing/development use only! See
6681 : : * EvictUnpinnedBuffer().
6682 : : *
6683 : : * The caller must hold at least AccessShareLock on the relation to prevent
6684 : : * the relation from being dropped.
6685 : : *
6686 : : * The buffers_* parameters are mandatory and indicate the total count of
6687 : : * buffers that:
6688 : : * - buffers_evicted - were evicted
6689 : : * - buffers_flushed - were flushed
6690 : : * - buffers_skipped - could not be evicted
6691 : : */
6692 : : void
6693 : 1 : EvictRelUnpinnedBuffers(Relation rel, int32 *buffers_evicted,
6694 : : int32 *buffers_flushed, int32 *buffers_skipped)
6695 : : {
6696 [ - + ]: 1 : Assert(!RelationUsesLocalBuffers(rel));
6697 : :
6698 : 1 : *buffers_skipped = 0;
6699 : 1 : *buffers_evicted = 0;
6700 : 1 : *buffers_flushed = 0;
6701 : :
6702 [ + + ]: 16385 : for (int buf = 1; buf <= NBuffers; buf++)
6703 : : {
6704 : 16384 : BufferDesc *desc = GetBufferDescriptor(buf - 1);
6705 : 16384 : uint32 buf_state = pg_atomic_read_u32(&(desc->state));
6706 : : bool buffer_flushed;
6707 : :
6708 : : /* An unlocked precheck should be safe and saves some cycles. */
6709 [ + + ]: 16384 : if ((buf_state & BM_VALID) == 0 ||
6710 [ + - ]: 27 : !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
6711 : 16384 : continue;
6712 : :
6713 : : /* Make sure we can pin the buffer. */
151 andres@anarazel.de 6714 :UBC 0 : ResourceOwnerEnlarge(CurrentResourceOwner);
6715 : 0 : ReservePrivateRefCountEntry();
6716 : :
6717 : 0 : buf_state = LockBufHdr(desc);
6718 : :
6719 : : /* recheck, could have changed without the lock */
6720 [ # # ]: 0 : if ((buf_state & BM_VALID) == 0 ||
6721 [ # # ]: 0 : !BufTagMatchesRelFileLocator(&desc->tag, &rel->rd_locator))
6722 : : {
6723 : 0 : UnlockBufHdr(desc, buf_state);
6724 : 0 : continue;
6725 : : }
6726 : :
6727 [ # # ]: 0 : if (EvictUnpinnedBufferInternal(desc, &buffer_flushed))
6728 : 0 : (*buffers_evicted)++;
6729 : : else
6730 : 0 : (*buffers_skipped)++;
6731 : :
6732 [ # # ]: 0 : if (buffer_flushed)
6733 : 0 : (*buffers_flushed)++;
6734 : : }
151 andres@anarazel.de 6735 :CBC 1 : }
6736 : :
6737 : : /*
6738 : : * Generic implementation of the AIO handle staging callback for readv/writev
6739 : : * on local/shared buffers.
6740 : : *
6741 : : * Each readv/writev can target multiple buffers. The buffers have already
6742 : : * been registered with the IO handle.
6743 : : *
6744 : : * To make the IO ready for execution ("staging"), we need to ensure that the
6745 : : * targeted buffers are in an appropriate state while the IO is ongoing. For
6746 : : * that the AIO subsystem needs to have its own buffer pin, otherwise an error
6747 : : * in this backend could lead to this backend's buffer pin being released as
6748 : : * part of error handling, which in turn could lead to the buffer being
6749 : : * replaced while IO is ongoing.
6750 : : */
6751 : : static pg_attribute_always_inline void
160 6752 : 1245630 : buffer_stage_common(PgAioHandle *ioh, bool is_write, bool is_temp)
6753 : : {
6754 : : uint64 *io_data;
6755 : : uint8 handle_data_len;
6756 : : PgAioWaitRef io_ref;
6757 : 1245630 : BufferTag first PG_USED_FOR_ASSERTS_ONLY = {0};
6758 : :
6759 : 1245630 : io_data = pgaio_io_get_handle_data(ioh, &handle_data_len);
6760 : :
6761 : 1245630 : pgaio_io_get_wref(ioh, &io_ref);
6762 : :
6763 : : /* iterate over all buffers affected by the vectored readv/writev */
6764 [ + + ]: 2651194 : for (int i = 0; i < handle_data_len; i++)
6765 : : {
6766 : 1405564 : Buffer buffer = (Buffer) io_data[i];
6767 : 1405564 : BufferDesc *buf_hdr = is_temp ?
6768 : 8450 : GetLocalBufferDescriptor(-buffer - 1)
6769 [ + + ]: 1405564 : : GetBufferDescriptor(buffer - 1);
6770 : : uint32 buf_state;
6771 : :
6772 : : /*
6773 : : * Check that all the buffers are actually ones that could conceivably
6774 : : * be done in one IO, i.e. are sequential. This is the last
6775 : : * buffer-aware code before the IO is actually executed, and confusion
6776 : : * about which buffers an IO targets can be hard to debug, so the
6777 : : * extra-paranoid checks are worth it.
6778 : : */
6779 [ + + ]: 1405564 : if (i == 0)
6780 : 1245630 : first = buf_hdr->tag;
6781 : : else
6782 : : {
6783 [ - + ]: 159934 : Assert(buf_hdr->tag.relNumber == first.relNumber);
6784 [ - + ]: 159934 : Assert(buf_hdr->tag.blockNum == first.blockNum + i);
6785 : : }
6786 : :
6787 [ + + ]: 1405564 : if (is_temp)
6788 : 8450 : buf_state = pg_atomic_read_u32(&buf_hdr->state);
6789 : : else
6790 : 1397114 : buf_state = LockBufHdr(buf_hdr);
6791 : :
6792 : : /* verify the buffer is in the expected state */
6793 [ - + ]: 1405564 : Assert(buf_state & BM_TAG_VALID);
6794 [ - + ]: 1405564 : if (is_write)
6795 : : {
160 andres@anarazel.de 6796 [ # # ]:UBC 0 : Assert(buf_state & BM_VALID);
6797 [ # # ]: 0 : Assert(buf_state & BM_DIRTY);
6798 : : }
6799 : : else
6800 : : {
160 andres@anarazel.de 6801 [ - + ]:CBC 1405564 : Assert(!(buf_state & BM_VALID));
6802 [ - + ]: 1405564 : Assert(!(buf_state & BM_DIRTY));
6803 : : }
6804 : :
6805 : : /* temp buffers don't use BM_IO_IN_PROGRESS */
6806 [ + + ]: 1405564 : if (!is_temp)
6807 [ - + ]: 1397114 : Assert(buf_state & BM_IO_IN_PROGRESS);
6808 : :
6809 [ - + ]: 1405564 : Assert(BUF_STATE_GET_REFCOUNT(buf_state) >= 1);
6810 : :
6811 : : /*
6812 : : * Reflect that the buffer is now owned by the AIO subsystem.
6813 : : *
6814 : : * For local buffers: This can't be done just via LocalRefCount, as
6815 : : * one might initially think, because this backend could error out
6816 : : * while AIO is still in progress, which would release all of the
6817 : : * backend's own pins.
6818 : : *
6819 : : * This pin is released again in TerminateBufferIO().
6820 : : */
6821 : 1405564 : buf_state += BUF_REFCOUNT_ONE;
6822 : 1405564 : buf_hdr->io_wref = io_ref;
6823 : :
6824 [ + + ]: 1405564 : if (is_temp)
6825 : 8450 : pg_atomic_unlocked_write_u32(&buf_hdr->state, buf_state);
6826 : : else
6827 : 1397114 : UnlockBufHdr(buf_hdr, buf_state);
6828 : :
6829 : : /*
6830 : : * Ensure the content lock that prevents buffer modifications while
6831 : : * the buffer is being written out is not released early due to an
6832 : : * error.
6833 : : */
6834 [ - + - - ]: 1405564 : if (is_write && !is_temp)
6835 : : {
6836 : : LWLock *content_lock;
6837 : :
160 andres@anarazel.de 6838 :UBC 0 : content_lock = BufferDescriptorGetContentLock(buf_hdr);
6839 : :
6840 [ # # ]: 0 : Assert(LWLockHeldByMe(content_lock));
6841 : :
6842 : : /*
6843 : : * Lock is now owned by AIO subsystem.
6844 : : */
6845 : 0 : LWLockDisown(content_lock);
6846 : : }
6847 : :
6848 : : /*
6849 : : * Stop tracking this buffer via the resowner - the AIO system now
6850 : : * keeps track.
6851 : : */
160 andres@anarazel.de 6852 [ + + ]:CBC 1405564 : if (!is_temp)
6853 : 1397114 : ResourceOwnerForgetBufferIO(CurrentResourceOwner, buffer);
6854 : : }
6855 : 1245630 : }
6856 : :
6857 : : /*
6858 : : * Decode readv errors as encoded by buffer_readv_encode_error().
6859 : : */
6860 : : static inline void
6861 : 765 : buffer_readv_decode_error(PgAioResult result,
6862 : : bool *zeroed_any,
6863 : : bool *ignored_any,
6864 : : uint8 *zeroed_or_error_count,
6865 : : uint8 *checkfail_count,
6866 : : uint8 *first_off)
6867 : : {
6868 : 765 : uint32 rem_error = result.error_data;
6869 : :
6870 : : /* see static asserts in buffer_readv_encode_error */
6871 : : #define READV_COUNT_BITS 7
6872 : : #define READV_COUNT_MASK ((1 << READV_COUNT_BITS) - 1)
6873 : :
6874 : 765 : *zeroed_any = rem_error & 1;
6875 : 765 : rem_error >>= 1;
6876 : :
6877 : 765 : *ignored_any = rem_error & 1;
6878 : 765 : rem_error >>= 1;
6879 : :
6880 : 765 : *zeroed_or_error_count = rem_error & READV_COUNT_MASK;
6881 : 765 : rem_error >>= READV_COUNT_BITS;
6882 : :
6883 : 765 : *checkfail_count = rem_error & READV_COUNT_MASK;
6884 : 765 : rem_error >>= READV_COUNT_BITS;
6885 : :
6886 : 765 : *first_off = rem_error & READV_COUNT_MASK;
6887 : 765 : rem_error >>= READV_COUNT_BITS;
6888 : 765 : }
6889 : :
6890 : : /*
6891 : : * Helper to encode errors for buffer_readv_complete()
6892 : : *
6893 : : * Errors are encoded as follows:
6894 : : * - bit 0 indicates whether any page was zeroed (1) or not (0)
6895 : : * - bit 1 indicates whether any checksum failure was ignored (1) or not (0)
6896 : : * - next READV_COUNT_BITS bits indicate the number of errored or zeroed pages
6897 : : * - next READV_COUNT_BITS bits indicate the number of checksum failures
6898 : : * - next READV_COUNT_BITS bits indicate the offset of the first page
6899 : : * that was errored or zeroed or, if no errors/zeroes, of the first
6900 : : * ignored checksum failure
6901 : : */
6902 : : static inline void
6903 : 282 : buffer_readv_encode_error(PgAioResult *result,
6904 : : bool is_temp,
6905 : : bool zeroed_any,
6906 : : bool ignored_any,
6907 : : uint8 error_count,
6908 : : uint8 zeroed_count,
6909 : : uint8 checkfail_count,
6910 : : uint8 first_error_off,
6911 : : uint8 first_zeroed_off,
6912 : : uint8 first_ignored_off)
6913 : : {
6914 : :
6915 : 282 : uint8 shift = 0;
6916 [ + + ]: 282 : uint8 zeroed_or_error_count =
6917 : : error_count > 0 ? error_count : zeroed_count;
6918 : : uint8 first_off;
6919 : :
6920 : : StaticAssertStmt(PG_IOV_MAX <= 1 << READV_COUNT_BITS,
6921 : : "PG_IOV_MAX is bigger than reserved space for error data");
6922 : : StaticAssertStmt((1 + 1 + 3 * READV_COUNT_BITS) <= PGAIO_RESULT_ERROR_BITS,
6923 : : "PGAIO_RESULT_ERROR_BITS is insufficient for buffer_readv");
6924 : :
6925 : : /*
6926 : : * We only have space to encode one offset - but luckily that's good
6927 : : * enough: if there is an error, the error's offset is the interesting
6928 : : * one; otherwise a zeroed buffer takes precedence over an ignored checksum.
6929 : : */
6930 [ + + ]: 282 : if (error_count > 0)
6931 : 135 : first_off = first_error_off;
6932 [ + + ]: 147 : else if (zeroed_count > 0)
6933 : 120 : first_off = first_zeroed_off;
6934 : : else
6935 : 27 : first_off = first_ignored_off;
6936 : :
6937 [ + + - + ]: 282 : Assert(!zeroed_any || error_count == 0);
6938 : :
6939 : 282 : result->error_data = 0;
6940 : :
6941 : 282 : result->error_data |= zeroed_any << shift;
6942 : 282 : shift += 1;
6943 : :
6944 : 282 : result->error_data |= ignored_any << shift;
6945 : 282 : shift += 1;
6946 : :
6947 : 282 : result->error_data |= ((uint32) zeroed_or_error_count) << shift;
6948 : 282 : shift += READV_COUNT_BITS;
6949 : :
6950 : 282 : result->error_data |= ((uint32) checkfail_count) << shift;
6951 : 282 : shift += READV_COUNT_BITS;
6952 : :
6953 : 282 : result->error_data |= ((uint32) first_off) << shift;
6954 : 282 : shift += READV_COUNT_BITS;
6955 : :
6956 [ + + ]: 282 : result->id = is_temp ? PGAIO_HCB_LOCAL_BUFFER_READV :
6957 : : PGAIO_HCB_SHARED_BUFFER_READV;
6958 : :
6959 [ + + ]: 282 : if (error_count > 0)
6960 : 135 : result->status = PGAIO_RS_ERROR;
6961 : : else
6962 : 147 : result->status = PGAIO_RS_WARNING;
6963 : :
6964 : : /*
6965 : : * The encoding is complicated enough to warrant cross-checking it against
6966 : : * the decode function.
6967 : : */
6968 : : #ifdef USE_ASSERT_CHECKING
6969 : : {
6970 : : bool zeroed_any_2,
6971 : : ignored_any_2;
6972 : : uint8 zeroed_or_error_count_2,
6973 : : checkfail_count_2,
6974 : : first_off_2;
6975 : :
6976 : 282 : buffer_readv_decode_error(*result,
6977 : : &zeroed_any_2, &ignored_any_2,
6978 : : &zeroed_or_error_count_2,
6979 : : &checkfail_count_2,
6980 : : &first_off_2);
6981 [ - + ]: 282 : Assert(zeroed_any == zeroed_any_2);
6982 [ - + ]: 282 : Assert(ignored_any == ignored_any_2);
6983 [ - + ]: 282 : Assert(zeroed_or_error_count == zeroed_or_error_count_2);
6984 [ - + ]: 282 : Assert(checkfail_count == checkfail_count_2);
6985 [ - + ]: 282 : Assert(first_off == first_off_2);
6986 : : }
6987 : : #endif
6988 : :
6989 : : #undef READV_COUNT_BITS
6990 : : #undef READV_COUNT_MASK
6991 : 282 : }
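/*
 * Editorial illustration, not part of bufmgr.c: a worked example of the
 * encoding above, with READV_COUNT_BITS = 7. Encoding zeroed_any = 1,
 * ignored_any = 0, two zeroed pages, no checksum failures, and first_off = 3
 * yields
 *
 *     error_data = 1 | (0 << 1) | (2 << 2) | (0 << 9) | (3 << 16) = 0x30009
 *
 * with result->status set to PGAIO_RS_WARNING (error_count is 0);
 * buffer_readv_decode_error() recovers the same five values by masking and
 * shifting in the same order.
 */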
6992 : :
6993 : : /*
6994 : : * Helper for AIO readv completion callbacks, supporting both shared and temp
6995 : : * buffers. Gets called once for each buffer in a multi-page read.
6996 : : */
6997 : : static pg_attribute_always_inline void
6998 : 1269096 : buffer_readv_complete_one(PgAioTargetData *td, uint8 buf_off, Buffer buffer,
6999 : : uint8 flags, bool failed, bool is_temp,
7000 : : bool *buffer_invalid,
7001 : : bool *failed_checksum,
7002 : : bool *ignored_checksum,
7003 : : bool *zeroed_buffer)
7004 : : {
7005 : 1269096 : BufferDesc *buf_hdr = is_temp ?
7006 : 8450 : GetLocalBufferDescriptor(-buffer - 1)
7007 [ + + ]: 1269096 : : GetBufferDescriptor(buffer - 1);
7008 : 1269096 : BufferTag tag = buf_hdr->tag;
7009 : 1269096 : char *bufdata = BufferGetBlock(buffer);
7010 : : uint32 set_flag_bits;
7011 : : int piv_flags;
7012 : :
7013 : : /* check that the buffer is in the expected state for a read */
7014 : : #ifdef USE_ASSERT_CHECKING
7015 : : {
7016 : 1269096 : uint32 buf_state = pg_atomic_read_u32(&buf_hdr->state);
7017 : :
7018 [ - + ]: 1269096 : Assert(buf_state & BM_TAG_VALID);
7019 [ - + ]: 1269096 : Assert(!(buf_state & BM_VALID));
7020 : : /* temp buffers don't use BM_IO_IN_PROGRESS */
7021 [ + + ]: 1269096 : if (!is_temp)
7022 [ - + ]: 1260646 : Assert(buf_state & BM_IO_IN_PROGRESS);
7023 [ - + ]: 1269096 : Assert(!(buf_state & BM_DIRTY));
7024 : : }
7025 : : #endif
7026 : :
7027 : 1269096 : *buffer_invalid = false;
7028 : 1269096 : *failed_checksum = false;
7029 : 1269096 : *ignored_checksum = false;
7030 : 1269096 : *zeroed_buffer = false;
7031 : :
7032 : : /*
7033 : : * We ask PageIsVerified() to only log the message about checksum errors,
7034 : : * as the completion might be run in any backend (or IO workers). We will
7035 : : * report checksum errors in buffer_readv_report().
7036 : : */
7037 : 1269096 : piv_flags = PIV_LOG_LOG;
7038 : :
7039 : : /* the local zero_damaged_pages may differ from the definer's */
7040 [ + + ]: 1269096 : if (flags & READ_BUFFERS_IGNORE_CHECKSUM_FAILURES)
7041 : 57 : piv_flags |= PIV_IGNORE_CHECKSUM_FAILURE;
7042 : :
7043 : : /* Check for garbage data. */
7044 [ + - ]: 1269096 : if (!failed)
7045 : : {
7046 : : /*
7047 : : * If the buffer is not currently pinned by this backend, e.g. because
7048 : : * we're completing this IO after an error, the buffer data will have
7049 : : * been marked as inaccessible when the buffer was unpinned. The AIO
7050 : : * subsystem holds a pin, but that doesn't prevent the buffer from
7051 : : * having been marked as inaccessible. The completion might also be
7052 : : * executed in a different process.
7053 : : */
7054 : : #ifdef USE_VALGRIND
7055 : : if (!BufferIsPinned(buffer))
7056 : : VALGRIND_MAKE_MEM_DEFINED(bufdata, BLCKSZ);
7057 : : #endif
7058 : :
7059 [ + + ]: 1269096 : if (!PageIsVerified((Page) bufdata, tag.blockNum, piv_flags,
7060 : : failed_checksum))
7061 : : {
7062 [ + + ]: 141 : if (flags & READ_BUFFERS_ZERO_ON_ERROR)
7063 : : {
7064 : 69 : memset(bufdata, 0, BLCKSZ);
7065 : 69 : *zeroed_buffer = true;
7066 : : }
7067 : : else
7068 : : {
7069 : 72 : *buffer_invalid = true;
7070 : : /* mark buffer as having failed */
7071 : 72 : failed = true;
7072 : : }
7073 : : }
7074 [ + + ]: 1268955 : else if (*failed_checksum)
7075 : 18 : *ignored_checksum = true;
7076 : :
7077 : : /* undo what we did above */
7078 : : #ifdef USE_VALGRIND
7079 : : if (!BufferIsPinned(buffer))
7080 : : VALGRIND_MAKE_MEM_NOACCESS(bufdata, BLCKSZ);
7081 : : #endif
7082 : :
7083 : : /*
7084 : : * Immediately log a message about the invalid page, but only to the
7085 : : * server log. The reason to do so immediately is that this may
7086 : : * be executed in a different backend than the one that
7087 : : * originated the request, and that the originator might not
7088 : : * process the query result immediately (because it is busy
7089 : : * doing another part of query processing) or at all (e.g. if it
7090 : : * was cancelled or errored out due to another IO also failing).
7091 : : * The definer of the IO will emit an ERROR or WARNING when
7092 : : * processing the IO's results.
7093 : : *
7094 : : * To avoid duplicating the code to emit these log messages, we reuse
7095 : : * buffer_readv_report().
7096 : : */
7097 [ + + + + : 1269096 : if (*buffer_invalid || *failed_checksum || *zeroed_buffer)
+ + ]
7098 : : {
155 7099 : 159 : PgAioResult result_one = {0};
7100 : :
160 7101 : 159 : buffer_readv_encode_error(&result_one, is_temp,
7102 : 159 : *zeroed_buffer,
7103 : 159 : *ignored_checksum,
7104 : 159 : *buffer_invalid,
7105 : 159 : *zeroed_buffer ? 1 : 0,
7106 : 159 : *failed_checksum ? 1 : 0,
7107 : : buf_off, buf_off, buf_off);
7108 : 159 : pgaio_result_report(result_one, td, LOG_SERVER_ONLY);
7109 : : }
7110 : : }
7111 : :
7112 : : /* Terminate I/O and set BM_VALID. */
7113 [ + + ]: 1269096 : set_flag_bits = failed ? BM_IO_ERROR : BM_VALID;
7114 [ + + ]: 1269096 : if (is_temp)
7115 : 8450 : TerminateLocalBufferIO(buf_hdr, false, set_flag_bits, true);
7116 : : else
7117 : 1260646 : TerminateBufferIO(buf_hdr, false, set_flag_bits, false, true);
7118 : :
7119 : : /*
7120 : : * Call the BUFFER_READ_DONE tracepoint in the callback, even though the
7121 : : * callback may not be executed in the same backend that called
7122 : : * BUFFER_READ_START. The alternative would be to defer calling the
7123 : : * tracepoint to a later point (e.g. the local completion callback for
7124 : : * shared buffer reads), which seems even less helpful.
7125 : : */
7126 : : TRACE_POSTGRESQL_BUFFER_READ_DONE(tag.forkNum,
7127 : : tag.blockNum,
7128 : : tag.spcOid,
7129 : : tag.dbOid,
7130 : : tag.relNumber,
7131 : : is_temp ? MyProcNumber : INVALID_PROC_NUMBER,
7132 : : false);
7133 : 1269096 : }
7134 : :
7135 : : /*
7136 : : * Perform completion handling of a single AIO read. This read may cover
7137 : : * multiple blocks / buffers.
7138 : : *
7139 : : * Shared between shared and local buffers, to reduce code duplication.
7140 : : */
7141 : : static pg_attribute_always_inline PgAioResult
7142 : 1142147 : buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
7143 : : uint8 cb_data, bool is_temp)
7144 : : {
7145 : 1142147 : PgAioResult result = prior_result;
7146 : 1142147 : PgAioTargetData *td = pgaio_io_get_target_data(ioh);
7147 : 1142147 : uint8 first_error_off = 0;
7148 : 1142147 : uint8 first_zeroed_off = 0;
7149 : 1142147 : uint8 first_ignored_off = 0;
7150 : 1142147 : uint8 error_count = 0;
7151 : 1142147 : uint8 zeroed_count = 0;
7152 : 1142147 : uint8 ignored_count = 0;
7153 : 1142147 : uint8 checkfail_count = 0;
7154 : : uint64 *io_data;
7155 : : uint8 handle_data_len;
7156 : :
7157 [ + + ]: 1142147 : if (is_temp)
7158 : : {
7159 [ - + ]: 1826 : Assert(td->smgr.is_temp);
7160 [ - + ]: 1826 : Assert(pgaio_io_get_owner(ioh) == MyProcNumber);
7161 : : }
7162 : : else
7163 [ - + ]: 1140321 : Assert(!td->smgr.is_temp);
7164 : :
7165 : : /*
7166 : : * Iterate over all the buffers affected by this IO and call the
7167 : : * per-buffer completion function for each buffer.
7168 : : */
7169 : 1142147 : io_data = pgaio_io_get_handle_data(ioh, &handle_data_len);
7170 [ + + ]: 2411243 : for (uint8 buf_off = 0; buf_off < handle_data_len; buf_off++)
7171 : : {
7172 : 1269096 : Buffer buf = io_data[buf_off];
7173 : : bool failed;
7174 : 1269096 : bool failed_verification = false;
7175 : 1269096 : bool failed_checksum = false;
7176 : 1269096 : bool zeroed_buffer = false;
7177 : 1269096 : bool ignored_checksum = false;
7178 : :
7179 [ - + ]: 1269096 : Assert(BufferIsValid(buf));
7180 : :
7181 : : /*
7182 : : * If the entire I/O failed at a lower level, each buffer needs to be
7183 : : * marked as failed. In case of a partial read, the first few buffers
7184 : : * may be ok.
7185 : : */
7186 : 1269096 : failed =
7187 : 1269096 : prior_result.status == PGAIO_RS_ERROR
7188 [ + - - + ]: 1269096 : || prior_result.result <= buf_off;
7189 : :
7190 : 1269096 : buffer_readv_complete_one(td, buf_off, buf, cb_data, failed, is_temp,
7191 : : &failed_verification,
7192 : : &failed_checksum,
7193 : : &ignored_checksum,
7194 : : &zeroed_buffer);
7195 : :
7196 : : /*
7197 : : * Track information about the number of different kinds of error
7198 : : * conditions across all pages, as there can be multiple pages failing
7199 : : * verification as part of one IO.
7200 : : */
7201 [ + + + - : 1269096 : if (failed_verification && !zeroed_buffer && error_count++ == 0)
+ + ]
7202 : 63 : first_error_off = buf_off;
7203 [ + + + + ]: 1269096 : if (zeroed_buffer && zeroed_count++ == 0)
7204 : 51 : first_zeroed_off = buf_off;
7205 [ + + + + ]: 1269096 : if (ignored_checksum && ignored_count++ == 0)
7206 : 15 : first_ignored_off = buf_off;
7207 [ + + ]: 1269096 : if (failed_checksum)
7208 : 48 : checkfail_count++;
7209 : : }
7210 : :
7211 : : /*
7212 : : * If the smgr read succeeded [partially] and page verification failed for
7213 : : * some of the pages, adjust the IO's result state appropriately.
7214 : : */
7215 [ + - + + ]: 1142147 : if (prior_result.status != PGAIO_RS_ERROR &&
7216 [ + + + + ]: 1142084 : (error_count > 0 || ignored_count > 0 || zeroed_count > 0))
7217 : : {
7218 : 123 : buffer_readv_encode_error(&result, is_temp,
7219 : : zeroed_count > 0, ignored_count > 0,
7220 : : error_count, zeroed_count, checkfail_count,
7221 : : first_error_off, first_zeroed_off,
7222 : : first_ignored_off);
7223 : 123 : pgaio_result_report(result, td, DEBUG1);
7224 : : }
7225 : :
7226 : : /*
7227 : : * For relations in shared buffers, this reporting is done in
7228 : : * shared_buffer_readv_complete_local().
7229 : : */
7230 [ + + + + ]: 1142147 : if (is_temp && checkfail_count > 0)
7231 : 3 : pgstat_report_checksum_failures_in_db(td->smgr.rlocator.dbOid,
7232 : : checkfail_count);
7233 : :
7234 : 1142147 : return result;
7235 : : }
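/*
 * Editorial illustration, not part of bufmgr.c: for a 4-block readv where the
 * lower layers report prior_result.result == 2 (a partial read) and
 * prior_result.status != PGAIO_RS_ERROR, the loop above passes failed = false
 * to buffer_readv_complete_one() for buf_off 0 and 1, and failed = true for
 * buf_off 2 and 3, matching the "first few buffers may be ok" case.
 */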
7236 : :
7237 : : /*
7238 : : * AIO error reporting callback for aio_shared_buffer_readv_cb and
7239 : : * aio_local_buffer_readv_cb.
7240 : : *
7241 : : * The error is encoded / decoded in buffer_readv_encode_error() /
7242 : : * buffer_readv_decode_error().
7243 : : */
7244 : : static void
7245 : 399 : buffer_readv_report(PgAioResult result, const PgAioTargetData *td,
7246 : : int elevel)
7247 : : {
7248 : 399 : int nblocks = td->smgr.nblocks;
7249 : 399 : BlockNumber first = td->smgr.blockNum;
7250 : 399 : BlockNumber last = first + nblocks - 1;
7251 : 399 : ProcNumber errProc =
7252 [ + + ]: 399 : td->smgr.is_temp ? MyProcNumber : INVALID_PROC_NUMBER;
7253 : : RelPathStr rpath =
7254 : 399 : relpathbackend(td->smgr.rlocator, errProc, td->smgr.forkNum);
7255 : : bool zeroed_any,
7256 : : ignored_any;
7257 : : uint8 zeroed_or_error_count,
7258 : : checkfail_count,
7259 : : first_off;
7260 : : uint8 affected_count;
7261 : : const char *msg_one,
7262 : : *msg_mult,
7263 : : *det_mult,
7264 : : *hint_mult;
7265 : :
7266 : 399 : buffer_readv_decode_error(result, &zeroed_any, &ignored_any,
7267 : : &zeroed_or_error_count,
7268 : : &checkfail_count,
7269 : : &first_off);
7270 : :
7271 : : /*
7272 : : * Treat a read that had both zeroed buffers *and* ignored checksums as a
7273 : : * special case; it's too irregular to be emitted the same way as the
7274 : : * other cases.
7275 : : */
7276 [ + + + + ]: 399 : if (zeroed_any && ignored_any)
7277 : : {
7278 [ + - - + ]: 6 : Assert(zeroed_any && ignored_any);
7279 [ - + ]: 6 : Assert(nblocks > 1); /* same block can't be both zeroed and ignored */
7280 [ - + ]: 6 : Assert(result.status != PGAIO_RS_ERROR);
7281 : 6 : affected_count = zeroed_or_error_count;
7282 : :
7283 [ + - + - ]: 6 : ereport(elevel,
7284 : : errcode(ERRCODE_DATA_CORRUPTED),
7285 : : errmsg("zeroing %u page(s) and ignoring %u checksum failure(s) among blocks %u..%u of relation \"%s\"",
7286 : : affected_count, checkfail_count, first, last, rpath.str),
7287 : : affected_count > 1 ?
7288 : : errdetail("Block %u held the first zeroed page.",
7289 : : first + first_off) : 0,
7290 : : errhint_plural("See server log for details about the other %d invalid block.",
7291 : : "See server log for details about the other %d invalid blocks.",
7292 : : affected_count + checkfail_count - 1,
7293 : : affected_count + checkfail_count - 1));
7294 : 6 : return;
7295 : : }
7296 : :
7297 : : /*
7298 : : * The other messages are highly repetitive. To avoid duplicating a long
7299 : : * and complicated ereport(), gather the translated format strings
7300 : : * separately and then do one common ereport.
7301 : : */
7302 [ + + ]: 393 : if (result.status == PGAIO_RS_ERROR)
7303 : : {
7304 [ - + ]: 195 : Assert(!zeroed_any); /* can't have invalid pages when zeroing them */
7305 : 195 : affected_count = zeroed_or_error_count;
11 peter@eisentraut.org 7306 : 195 : msg_one = _("invalid page in block %u of relation \"%s\"");
7307 : 195 : msg_mult = _("%u invalid pages among blocks %u..%u of relation \"%s\"");
7308 : 195 : det_mult = _("Block %u held the first invalid page.");
160 andres@anarazel.de 7309 : 195 : hint_mult = _("See server log for the other %u invalid block(s).");
7310 : : }
7311 [ + + + - ]: 198 : else if (zeroed_any && !ignored_any)
7312 : : {
7313 : 162 : affected_count = zeroed_or_error_count;
11 peter@eisentraut.org 7314 : 162 : msg_one = _("invalid page in block %u of relation \"%s\"; zeroing out page");
7315 : 162 : msg_mult = _("zeroing out %u invalid pages among blocks %u..%u of relation \"%s\"");
7316 : 162 : det_mult = _("Block %u held the first zeroed page.");
160 andres@anarazel.de 7317 : 162 : hint_mult = _("See server log for the other %u zeroed block(s).");
7318 : : }
7319 [ + - + - ]: 36 : else if (!zeroed_any && ignored_any)
7320 : : {
7321 : 36 : affected_count = checkfail_count;
11 peter@eisentraut.org 7322 : 36 : msg_one = _("ignoring checksum failure in block %u of relation \"%s\"");
7323 : 36 : msg_mult = _("ignoring %u checksum failures among blocks %u..%u of relation \"%s\"");
7324 : 36 : det_mult = _("Block %u held the first ignored page.");
160 andres@anarazel.de 7325 : 36 : hint_mult = _("See server log for the other %u ignored block(s).");
7326 : : }
7327 : : else
160 andres@anarazel.de 7328 :UBC 0 : pg_unreachable();
7329 : :
160 andres@anarazel.de 7330 [ + - + + :CBC 393 : ereport(elevel,
+ + + + ]
7331 : : errcode(ERRCODE_DATA_CORRUPTED),
7332 : : affected_count == 1 ?
7333 : : errmsg_internal(msg_one, first + first_off, rpath.str) :
7334 : : errmsg_internal(msg_mult, affected_count, first, last, rpath.str),
7335 : : affected_count > 1 ? errdetail_internal(det_mult, first + first_off) : 0,
7336 : : affected_count > 1 ? errhint_internal(hint_mult, affected_count - 1) : 0);
7337 : : }
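/*
 * Editorial illustration, not part of bufmgr.c: with the selection above, a
 * report about two zeroed pages (zeroed_any set, no ignored checksums) in a
 * four-block read starting at block 10 comes out roughly as
 *
 *     LOG:  zeroing out 2 invalid pages among blocks 10..13 of relation "base/5/16384"
 *     DETAIL:  Block 11 held the first zeroed page.
 *     HINT:  See server log for the other 1 zeroed block(s).
 *
 * The relation path and block numbers are made-up example values.
 */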
7338 : :
7339 : : static void
7340 : 1243804 : shared_buffer_readv_stage(PgAioHandle *ioh, uint8 cb_data)
7341 : : {
7342 : 1243804 : buffer_stage_common(ioh, false, false);
7343 : 1243804 : }
7344 : :
7345 : : static PgAioResult
7346 : 1140321 : shared_buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
7347 : : uint8 cb_data)
7348 : : {
7349 : 1140321 : return buffer_readv_complete(ioh, prior_result, cb_data, false);
7350 : : }
7351 : :
7352 : : /*
7353 : : * We need a backend-local completion callback for shared buffers, to be able
7354 : : * to report checksum errors correctly. Unfortunately that can only safely
7355 : : * happen if the reporting backend has previously called
7356 : : * pgstat_prepare_report_checksum_failure(), which we can only guarantee in
7357 : : * the backend that started the IO. Hence this callback.
7358 : : */
7359 : : static PgAioResult
7360 : 1243804 : shared_buffer_readv_complete_local(PgAioHandle *ioh, PgAioResult prior_result,
7361 : : uint8 cb_data)
7362 : : {
7363 : : bool zeroed_any,
7364 : : ignored_any;
7365 : : uint8 zeroed_or_error_count,
7366 : : checkfail_count,
7367 : : first_off;
7368 : :
7369 [ + + ]: 1243804 : if (prior_result.status == PGAIO_RS_OK)
7370 : 1243720 : return prior_result;
7371 : :
7372 : 84 : buffer_readv_decode_error(prior_result,
7373 : : &zeroed_any,
7374 : : &ignored_any,
7375 : : &zeroed_or_error_count,
7376 : : &checkfail_count,
7377 : : &first_off);
7378 : :
7379 [ + + ]: 84 : if (checkfail_count)
7380 : : {
7381 : 36 : PgAioTargetData *td = pgaio_io_get_target_data(ioh);
7382 : :
7383 : 36 : pgstat_report_checksum_failures_in_db(td->smgr.rlocator.dbOid,
7384 : : checkfail_count);
7385 : : }
7386 : :
7387 : 84 : return prior_result;
7388 : : }
7389 : :
7390 : : static void
7391 : 1826 : local_buffer_readv_stage(PgAioHandle *ioh, uint8 cb_data)
7392 : : {
7393 : 1826 : buffer_stage_common(ioh, false, true);
7394 : 1826 : }
7395 : :
7396 : : static PgAioResult
7397 : 1826 : local_buffer_readv_complete(PgAioHandle *ioh, PgAioResult prior_result,
7398 : : uint8 cb_data)
7399 : : {
7400 : 1826 : return buffer_readv_complete(ioh, prior_result, cb_data, true);
7401 : : }
7402 : :
7403 : : /* readv callback is passed READ_BUFFERS_* flags as callback data */
7404 : : const PgAioHandleCallbacks aio_shared_buffer_readv_cb = {
7405 : : .stage = shared_buffer_readv_stage,
7406 : : .complete_shared = shared_buffer_readv_complete,
7407 : : /* need a local callback to report checksum failures */
7408 : : .complete_local = shared_buffer_readv_complete_local,
7409 : : .report = buffer_readv_report,
7410 : : };
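/*
 * Editorial note, not part of bufmgr.c: these callback structs are looked up
 * by ID rather than referenced directly, so the readv paths in this file
 * attach them to an IO handle with something like
 *
 *     pgaio_io_register_callbacks(ioh, PGAIO_HCB_SHARED_BUFFER_READV, flags);
 *
 * passing the READ_BUFFERS_* flags as the callback data mentioned above.
 */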
7411 : :
7412 : : /* readv callback is passed READ_BUFFERS_* flags as callback data */
7413 : : const PgAioHandleCallbacks aio_local_buffer_readv_cb = {
7414 : : .stage = local_buffer_readv_stage,
7415 : :
7416 : : /*
7417 : : * Note that this, in contrast to the shared_buffers case, uses
7418 : : * complete_local, as only the issuing backend has access to the required
7419 : : * data structures. This matters because the IO completion may be
7420 : : * consumed incidentally by another backend.
7421 : : */
7422 : : .complete_local = local_buffer_readv_complete,
7423 : : .report = buffer_readv_report,
7424 : : };