/*-------------------------------------------------------------------------
 *
 * heapam.c
 *    heap access method code
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/heap/heapam.c
 *
 *
 * INTERFACE ROUTINES
 *      heap_beginscan    - begin relation scan
 *      heap_rescan       - restart a relation scan
 *      heap_endscan      - end relation scan
 *      heap_getnext      - retrieve next tuple in scan
 *      heap_fetch        - retrieve tuple with given tid
 *      heap_insert       - insert tuple into a relation
 *      heap_multi_insert - insert multiple tuples into a relation
 *      heap_delete       - delete a tuple from a relation
 *      heap_update       - replace a tuple in a relation with another tuple
 *
 * NOTES
 *    This file contains the heap_ routines which implement
 *    the POSTGRES heap access method used for all POSTGRES
 *    relations.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/heaptoast.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/subtrans.h"
#include "access/syncscan.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "catalog/pg_database.h"
#include "catalog/pg_database_d.h"
#include "commands/vacuum.h"
#include "pgstat.h"
#include "port/pg_bitutils.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/datum.h"
#include "utils/injection_point.h"
#include "utils/inval.h"
#include "utils/spccache.h"
#include "utils/syscache.h"


static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup,
                                     TransactionId xid, CommandId cid, int options);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
                                  Buffer newbuf, HeapTuple oldtup,
                                  HeapTuple newtup, HeapTuple old_key_tuple,
                                  bool all_visible_cleared, bool new_all_visible_cleared);
#ifdef USE_ASSERT_CHECKING
static void check_lock_if_inplace_updateable_rel(Relation relation,
                                                 const ItemPointerData *otid,
                                                 HeapTuple newtup);
static void check_inplace_rel_lock(HeapTuple oldtup);
#endif
static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
                                           Bitmapset *interesting_cols,
                                           Bitmapset *external_cols,
                                           HeapTuple oldtup, HeapTuple newtup,
                                           bool *has_external);
static bool heap_acquire_tuplock(Relation relation, const ItemPointerData *tid,
                                 LockTupleMode mode, LockWaitPolicy wait_policy,
                                 bool *have_tuple_lock);
static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
                                                   BlockNumber block,
                                                   ScanDirection dir);
static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
                                                        ScanDirection dir);
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
                                      uint16 old_infomask2, TransactionId add_to_xmax,
                                      LockTupleMode mode, bool is_update,
                                      TransactionId *result_xmax, uint16 *result_infomask,
                                      uint16 *result_infomask2);
static TM_Result heap_lock_updated_tuple(Relation rel,
                                         uint16 prior_infomask,
                                         TransactionId prior_raw_xmax,
                                         const ItemPointerData *prior_ctid,
                                         TransactionId xid,
                                         LockTupleMode mode);
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
                                   uint16 *new_infomask2);
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
                                             uint16 t_infomask);
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
                                    LockTupleMode lockmode, bool *current_is_member);
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
                            Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
                            int *remaining);
static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
                                       uint16 infomask, Relation rel, int *remaining,
                                       bool logLockFailure);
static void index_delete_sort(TM_IndexDeleteOp *delstate);
static int  bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate);
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup);
static HeapTuple ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
                                        bool *copy);

/*
 * This table lists the heavyweight lock mode that corresponds to each tuple
 * lock mode, as well as one or two corresponding MultiXactStatus values:
 * .lockstatus to merely lock tuples, and .updstatus to update them. The
 * latter is set to -1 if the corresponding tuple lock mode does not allow
 * updating tuples -- see get_mxact_status_for_lock().
 *
 * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
 *
 * Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
 * instead.
 */
static const struct
{
    LOCKMODE    hwlock;
    int         lockstatus;
    int         updstatus;
} tupleLockExtraInfo[] =
{
    [LockTupleKeyShare] = {
        .hwlock = AccessShareLock,
        .lockstatus = MultiXactStatusForKeyShare,
        /* KeyShare does not allow updating tuples */
        .updstatus = -1
    },
    [LockTupleShare] = {
        .hwlock = RowShareLock,
        .lockstatus = MultiXactStatusForShare,
        /* Share does not allow updating tuples */
        .updstatus = -1
    },
    [LockTupleNoKeyExclusive] = {
        .hwlock = ExclusiveLock,
        .lockstatus = MultiXactStatusForNoKeyUpdate,
        .updstatus = MultiXactStatusNoKeyUpdate
    },
    [LockTupleExclusive] = {
        .hwlock = AccessExclusiveLock,
        .lockstatus = MultiXactStatusForUpdate,
        .updstatus = MultiXactStatusUpdate
    }
};

/* Get the LOCKMODE for a given MultiXactStatus */
#define LOCKMODE_from_mxstatus(status) \
    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

/*
 * Acquire heavyweight locks on tuples, using a LockTupleMode strength value.
 * This is more readable than having every caller translate it to lock.h's
 * LOCKMODE.
 */
#define LockTupleTuplock(rel, tup, mode) \
    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
#define UnlockTupleTuplock(rel, tup, mode) \
    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
#define ConditionalLockTupleTuplock(rel, tup, mode, log) \
    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock, (log))

#ifdef USE_PREFETCH
/*
 * heap_index_delete_tuples and index_delete_prefetch_buffer use this
 * structure to coordinate prefetching activity
 */
typedef struct
{
    BlockNumber cur_hblkno;
    int         next_item;
    int         ndeltids;
    TM_IndexDelete *deltids;
} IndexDeletePrefetchState;
#endif

/* heap_index_delete_tuples bottom-up index deletion costing constants */
#define BOTTOMUP_MAX_NBLOCKS        6
#define BOTTOMUP_TOLERANCE_NBLOCKS  3

/*
 * heap_index_delete_tuples uses this when determining which heap blocks it
 * must visit to help its bottom-up index deletion caller
 */
typedef struct IndexDeleteCounts
{
    int16       npromisingtids; /* Number of "promising" TIDs in group */
    int16       ntids;          /* Number of TIDs in group */
    int16       ifirsttid;      /* Offset to group's first deltid */
} IndexDeleteCounts;

/*
 * This table maps each MultiXactStatus value to the corresponding tuple
 * lock strength.
 */
static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
{
    LockTupleKeyShare,          /* ForKeyShare */
    LockTupleShare,             /* ForShare */
    LockTupleNoKeyExclusive,    /* ForNoKeyUpdate */
    LockTupleExclusive,         /* ForUpdate */
    LockTupleNoKeyExclusive,    /* NoKeyUpdate */
    LockTupleExclusive          /* Update */
};

/* Get the LockTupleMode for a given MultiXactStatus */
#define TUPLOCK_from_mxstatus(status) \
    (MultiXactStatusLock[(status)])
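
/*
 * For example, chaining the two lookup tables above gives
 *
 *     TUPLOCK_from_mxstatus(MultiXactStatusForShare) == LockTupleShare
 *     LOCKMODE_from_mxstatus(MultiXactStatusForShare) == RowShareLock
 *
 * i.e. a tuple locked FOR SHARE is represented in a multixact by
 * MultiXactStatusForShare, and RowShareLock is the heavyweight lock mode
 * used while acquiring that tuple lock.
 */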

/*
 * Check that we have a valid snapshot if we might need TOAST access.
 */
static inline void
AssertHasSnapshotForToast(Relation rel)
{
#ifdef USE_ASSERT_CHECKING

    /* bootstrap mode in particular breaks this rule */
    if (!IsNormalProcessingMode())
        return;

    /* if the relation doesn't have a TOAST table, we are good */
    if (!OidIsValid(rel->rd_rel->reltoastrelid))
        return;

    Assert(HaveRegisteredOrActiveSnapshot());

#endif                          /* USE_ASSERT_CHECKING */
}
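
/*
 * In practice this means that a caller which might need TOAST access is
 * expected to have set up a snapshot first, e.g. (sketch only):
 *
 *     PushActiveSnapshot(GetTransactionSnapshot());
 *     ... heap DML that may detoast or toast values ...
 *     PopActiveSnapshot();
 *
 * which makes the HaveRegisteredOrActiveSnapshot() assertion above hold.
 */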

/* ----------------------------------------------------------------
 *                       heap support routines
 * ----------------------------------------------------------------
 */

/*
 * Streaming read API callback for parallel sequential scans. Returns the next
 * block the caller wants from the read stream or InvalidBlockNumber when done.
 */
static BlockNumber
heap_scan_stream_read_next_parallel(ReadStream *stream,
                                    void *callback_private_data,
                                    void *per_buffer_data)
{
    HeapScanDesc scan = (HeapScanDesc) callback_private_data;

    Assert(ScanDirectionIsForward(scan->rs_dir));
    Assert(scan->rs_base.rs_parallel);

    if (unlikely(!scan->rs_inited))
    {
        /* parallel scan */
        table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
                                                 scan->rs_parallelworkerdata,
                                                 (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel,
                                                 scan->rs_startblock,
                                                 scan->rs_numblocks);

        /* may return InvalidBlockNumber if there are no more blocks */
        scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
                                                                    scan->rs_parallelworkerdata,
                                                                    (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
        scan->rs_inited = true;
    }
    else
    {
        scan->rs_prefetch_block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
                                                                    scan->rs_parallelworkerdata, (ParallelBlockTableScanDesc)
                                                                    scan->rs_base.rs_parallel);
    }

    return scan->rs_prefetch_block;
}

/*
 * Streaming read API callback for serial sequential and TID range scans.
 * Returns the next block the caller wants from the read stream or
 * InvalidBlockNumber when done.
 */
static BlockNumber
heap_scan_stream_read_next_serial(ReadStream *stream,
                                  void *callback_private_data,
                                  void *per_buffer_data)
{
    HeapScanDesc scan = (HeapScanDesc) callback_private_data;

    if (unlikely(!scan->rs_inited))
    {
        scan->rs_prefetch_block = heapgettup_initial_block(scan, scan->rs_dir);
        scan->rs_inited = true;
    }
    else
        scan->rs_prefetch_block = heapgettup_advance_block(scan,
                                                           scan->rs_prefetch_block,
                                                           scan->rs_dir);

    return scan->rs_prefetch_block;
}
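
/*
 * Neither callback is called directly: heap_beginscan() below registers one
 * of them with read_stream_begin_relation(), and the read stream invokes it
 * whenever it wants to know which block to read ahead next. Roughly (sketch
 * of the serial case):
 *
 *     stream = read_stream_begin_relation(..., heap_scan_stream_read_next_serial,
 *                                         scan, 0);
 *     buf = read_stream_next_buffer(stream, NULL);
 *
 * where the stream keeps pulling block numbers from the callback until it
 * returns InvalidBlockNumber.
 */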

/*
 * Read stream API callback for bitmap heap scans.
 * Returns the next block the caller wants from the read stream or
 * InvalidBlockNumber when done.
 */
static BlockNumber
bitmapheap_stream_read_next(ReadStream *pgsr, void *private_data,
                            void *per_buffer_data)
{
    TBMIterateResult *tbmres = per_buffer_data;
    BitmapHeapScanDesc bscan = (BitmapHeapScanDesc) private_data;
    HeapScanDesc hscan = (HeapScanDesc) bscan;
    TableScanDesc sscan = &hscan->rs_base;

    for (;;)
    {
        CHECK_FOR_INTERRUPTS();

        /* no more entries in the bitmap */
        if (!tbm_iterate(&sscan->st.rs_tbmiterator, tbmres))
            return InvalidBlockNumber;

        /*
         * Ignore any claimed entries past what we think is the end of the
         * relation. It may have been extended after the start of our scan (we
         * only hold an AccessShareLock, and it could be inserts from this
         * backend). We don't take this optimization in SERIALIZABLE
         * isolation though, as we need to examine all invisible tuples
         * reachable by the index.
         */
        if (!IsolationIsSerializable() &&
            tbmres->blockno >= hscan->rs_nblocks)
            continue;

        return tbmres->blockno;
    }

    /* not reachable */
    Assert(false);
}

/* ----------------
 *      initscan - scan code common to heap_beginscan and heap_rescan
 * ----------------
 */
static void
initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
{
    ParallelBlockTableScanDesc bpscan = NULL;
    bool        allow_strat;
    bool        allow_sync;

    /*
     * Determine the number of blocks we have to scan.
     *
     * It is sufficient to do this once at scan start, since any tuples added
     * while the scan is in progress will be invisible to my snapshot anyway.
     * (That is not true when using a non-MVCC snapshot. However, we couldn't
     * guarantee to return tuples added after scan start anyway, since they
     * might go into pages we already scanned. To guarantee consistent
     * results for a non-MVCC snapshot, the caller must hold some higher-level
     * lock that ensures the interesting tuple(s) won't change.)
     */
    if (scan->rs_base.rs_parallel != NULL)
    {
        bpscan = (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
        scan->rs_nblocks = bpscan->phs_nblocks;
    }
    else
        scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_base.rs_rd);

    /*
     * If the table is large relative to NBuffers, use a bulk-read access
     * strategy and enable synchronized scanning (see syncscan.c). Although
     * the thresholds for these features could be different, we make them the
     * same so that there are only two behaviors to tune rather than four.
     * (However, some callers need to be able to disable one or both of these
     * behaviors, independently of the size of the table; also there is a GUC
     * variable that can disable synchronized scanning.)
     *
     * Note that table_block_parallelscan_initialize has a very similar test;
     * if you change this, consider changing that one, too.
     */
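    /*
     * For instance, with the default 8kB block size and NBuffers = 16384
     * (shared_buffers = 128MB), only a table larger than 16384 / 4 = 4096
     * blocks, i.e. 32MB, gets the bulk-read strategy and becomes eligible
     * for synchronized scanning.
     */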
    if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
        scan->rs_nblocks > NBuffers / 4)
    {
        allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
        allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
    }
    else
        allow_strat = allow_sync = false;

    if (allow_strat)
    {
        /* During a rescan, keep the previous strategy object. */
        if (scan->rs_strategy == NULL)
            scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
    }
    else
    {
        if (scan->rs_strategy != NULL)
            FreeAccessStrategy(scan->rs_strategy);
        scan->rs_strategy = NULL;
    }

    if (scan->rs_base.rs_parallel != NULL)
    {
        /* For parallel scan, believe whatever ParallelTableScanDesc says. */
        if (scan->rs_base.rs_parallel->phs_syncscan)
            scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
        else
            scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;

        /*
         * If not rescanning, initialize the startblock. The actual start
         * location is found in table_block_parallelscan_startblock_init,
         * based on whether an alternative start location has been set with
         * heap_setscanlimits, or on the syncscan location when syncscan is
         * enabled.
         */
        if (!keep_startblock)
            scan->rs_startblock = InvalidBlockNumber;
    }
    else
    {
        if (keep_startblock)
        {
            /*
             * When rescanning, we want to keep the previous startblock
             * setting, so that rewinding a cursor doesn't generate surprising
             * results. Reset the active syncscan setting, though.
             */
            if (allow_sync && synchronize_seqscans)
                scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
            else
                scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
        }
        else if (allow_sync && synchronize_seqscans)
        {
            scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
            scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
        }
        else
        {
            scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
            scan->rs_startblock = 0;
        }
    }

    scan->rs_numblocks = InvalidBlockNumber;
    scan->rs_inited = false;
    scan->rs_ctup.t_data = NULL;
    ItemPointerSetInvalid(&scan->rs_ctup.t_self);
    scan->rs_cbuf = InvalidBuffer;
    scan->rs_cblock = InvalidBlockNumber;
    scan->rs_ntuples = 0;
    scan->rs_cindex = 0;

    /*
     * Initialize to ForwardScanDirection because it is most common and
     * because heap scans go forward before going backward (e.g. CURSORs).
     */
    scan->rs_dir = ForwardScanDirection;
    scan->rs_prefetch_block = InvalidBlockNumber;

    /* page-at-a-time fields are always invalid when not rs_inited */

    /*
     * copy the scan key, if appropriate
     */
    if (key != NULL && scan->rs_base.rs_nkeys > 0)
        memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));

    /*
     * Currently, we only have a stats counter for sequential heap scans (but
     * e.g. for bitmap scans the underlying bitmap index scans will be
     * counted, and for sample scans we update stats for tuple fetches).
     */
    if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
        pgstat_count_heap_scan(scan->rs_base.rs_rd);
}

/*
 * heap_setscanlimits - restrict range of a heapscan
 *
 * startBlk is the page to start at
 * numBlks is number of pages to scan (InvalidBlockNumber means "all")
 */
void
heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
{
    HeapScanDesc scan = (HeapScanDesc) sscan;

    Assert(!scan->rs_inited);   /* else too late to change */
    /* else rs_startblock is significant */
    Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));

    /* Check startBlk is valid (but allow case of zero blocks...) */
    Assert(startBlk == 0 || startBlk < scan->rs_nblocks);

    scan->rs_startblock = startBlk;
    scan->rs_numblocks = numBlks;
}
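
/*
 * For example, a caller that wants to scan only blocks 10..14 would call
 * heap_setscanlimits(sscan, 10, 5) before fetching the first tuple;
 * heap_set_tidrange() uses this mechanism to bound TID range scans.
 */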

/*
 * Per-tuple loop for heap_prepare_pagescan(). Pulled out so it can be called
 * multiple times, with constant arguments for all_visible,
 * check_serializable.
 */
pg_attribute_always_inline
static int
page_collect_tuples(HeapScanDesc scan, Snapshot snapshot,
                    Page page, Buffer buffer,
                    BlockNumber block, int lines,
                    bool all_visible, bool check_serializable)
{
    Oid         relid = RelationGetRelid(scan->rs_base.rs_rd);
    int         ntup = 0;
    int         nvis = 0;
    BatchMVCCState batchmvcc;

    /* page at a time should have been disabled otherwise */
    Assert(IsMVCCSnapshot(snapshot));

    /* first find all tuples on the page */
    for (OffsetNumber lineoff = FirstOffsetNumber; lineoff <= lines; lineoff++)
    {
        ItemId      lpp = PageGetItemId(page, lineoff);
        HeapTuple   tup;

        if (unlikely(!ItemIdIsNormal(lpp)))
            continue;

        /*
         * If the page is not all-visible or we need to check serializability,
         * maintain enough state to be able to refind the tuple efficiently,
         * without having to fetch the line pointer again and then the tuple
         * through it.
         */
        if (!all_visible || check_serializable)
        {
            tup = &batchmvcc.tuples[ntup];

            tup->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
            tup->t_len = ItemIdGetLength(lpp);
            tup->t_tableOid = relid;
            ItemPointerSet(&(tup->t_self), block, lineoff);
        }

        /*
         * If the page is all visible, these fields won't otherwise be
         * populated by the loop below, so fill them here.
         */
        if (all_visible)
        {
            if (check_serializable)
            {
                batchmvcc.visible[ntup] = true;
            }
            scan->rs_vistuples[ntup] = lineoff;
        }

        ntup++;
    }

    Assert(ntup <= MaxHeapTuplesPerPage);

    /*
     * Unless the page is all visible, test visibility for all tuples in one
     * go. That is considerably more efficient than calling
     * HeapTupleSatisfiesMVCC() one-by-one.
     */
    if (all_visible)
        nvis = ntup;
    else
        nvis = HeapTupleSatisfiesMVCCBatch(snapshot, buffer,
                                           ntup,
                                           &batchmvcc,
                                           scan->rs_vistuples);

    /*
     * So far we don't have a batch API for testing serializability, so do it
     * one-by-one.
     */
    if (check_serializable)
    {
        for (int i = 0; i < ntup; i++)
        {
            HeapCheckForSerializableConflictOut(batchmvcc.visible[i],
                                                scan->rs_base.rs_rd,
                                                &batchmvcc.tuples[i],
                                                buffer, snapshot);
        }
    }

    return nvis;
}

/*
 * heap_prepare_pagescan - Prepare the current scan page to be scanned in
 * pagemode
 *
 * Preparation currently consists of 1. pruning the scan's rs_cbuf page and
 * 2. filling the rs_vistuples[] array with the OffsetNumbers of visible
 * tuples.
 */
void
heap_prepare_pagescan(TableScanDesc sscan)
{
    HeapScanDesc scan = (HeapScanDesc) sscan;
    Buffer      buffer = scan->rs_cbuf;
    BlockNumber block = scan->rs_cblock;
    Snapshot    snapshot;
    Page        page;
    int         lines;
    bool        all_visible;
    bool        check_serializable;

    Assert(BufferGetBlockNumber(buffer) == block);

    /* ensure we're not accidentally being used when not in pagemode */
    Assert(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE);
    snapshot = scan->rs_base.rs_snapshot;

    /*
     * Prune and repair fragmentation for the whole page, if possible.
     */
    heap_page_prune_opt(scan->rs_base.rs_rd, buffer);

    /*
     * We must hold share lock on the buffer content while examining tuple
     * visibility. Afterwards, however, the tuples we have found to be
     * visible are guaranteed good as long as we hold the buffer pin.
     */
    LockBuffer(buffer, BUFFER_LOCK_SHARE);

    page = BufferGetPage(buffer);
    lines = PageGetMaxOffsetNumber(page);

    /*
     * If the all-visible flag indicates that all tuples on the page are
     * visible to everyone, we can skip the per-tuple visibility tests.
     *
     * Note: In hot standby, a tuple that's already visible to all
     * transactions on the primary might still be invisible to a read-only
     * transaction in the standby. We partly handle this problem by tracking
     * the minimum xmin of visible tuples as the cut-off XID while marking a
     * page all-visible on the primary and WAL log that along with the
     * visibility map SET operation. In hot standby, we wait for (or abort)
     * all transactions that potentially cannot see one or more tuples on the
     * page. That's how index-only scans work fine in hot standby. A crucial
     * difference between index-only scans and heap scans is that the
     * index-only scan completely relies on the visibility map, whereas a
     * heap scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure
     * if the page-level flag can be trusted in the same way, because it
     * might get propagated somehow without being explicitly WAL-logged, e.g.
     * via a full page write. Until we can prove that beyond doubt, let's
     * check each tuple for visibility the hard way.
     */
    all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery;
    check_serializable =
        CheckForSerializableConflictOutNeeded(scan->rs_base.rs_rd, snapshot);

    /*
     * We call page_collect_tuples() with constant arguments, to get the
     * compiler to constant fold the constant arguments. Separate calls with
     * constant arguments, rather than variables, are needed on several
     * compilers to actually perform constant folding.
     */
    if (likely(all_visible))
    {
        if (likely(!check_serializable))
            scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
                                                   block, lines, true, false);
        else
            scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
                                                   block, lines, true, true);
    }
    else
    {
        if (likely(!check_serializable))
            scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
                                                   block, lines, false, false);
        else
            scan->rs_ntuples = page_collect_tuples(scan, snapshot, page, buffer,
                                                   block, lines, false, true);
    }

    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
}
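
/*
 * Once this returns, the visible tuples on the page can be walked while
 * holding only the buffer pin; heapgettup_pagemode() below does essentially
 * the following (sketch):
 *
 *     heap_prepare_pagescan(sscan);
 *     for (uint32 i = 0; i < scan->rs_ntuples; i++)
 *     {
 *         OffsetNumber lineoff = scan->rs_vistuples[i];
 *
 *         ... fetch and return the tuple at lineoff ...
 *     }
 */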

/*
 * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
 *
 * Read the next block of the scan relation from the read stream and save it
 * in the scan descriptor. It is already pinned.
 */
static inline void
heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
{
    Assert(scan->rs_read_stream);

    /* release previous scan buffer, if any */
    if (BufferIsValid(scan->rs_cbuf))
    {
        ReleaseBuffer(scan->rs_cbuf);
        scan->rs_cbuf = InvalidBuffer;
    }

    /*
     * Be sure to check for interrupts at least once per page. Checks at
     * higher code levels won't be able to stop a seqscan that encounters many
     * pages' worth of consecutive dead tuples.
     */
    CHECK_FOR_INTERRUPTS();

    /*
     * If the scan direction is changing, reset the prefetch block to the
     * current block. Otherwise, we will incorrectly prefetch the blocks
     * between the prefetch block and the current block again before
     * prefetching blocks in the new, correct scan direction.
     */
    if (unlikely(scan->rs_dir != dir))
    {
        scan->rs_prefetch_block = scan->rs_cblock;
        read_stream_reset(scan->rs_read_stream);
    }

    scan->rs_dir = dir;

    scan->rs_cbuf = read_stream_next_buffer(scan->rs_read_stream, NULL);
    if (BufferIsValid(scan->rs_cbuf))
        scan->rs_cblock = BufferGetBlockNumber(scan->rs_cbuf);
}

/*
 * heapgettup_initial_block - return the first BlockNumber to scan
 *
 * Returns InvalidBlockNumber when there are no blocks to scan. This can
 * occur with empty tables and in parallel scans when parallel workers get
 * all of the pages before we get a chance to read our first page.
 */
static pg_noinline BlockNumber
heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
{
    Assert(!scan->rs_inited);
    Assert(scan->rs_base.rs_parallel == NULL);

    /* When there are no pages to scan, return InvalidBlockNumber */
    if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
        return InvalidBlockNumber;

    if (ScanDirectionIsForward(dir))
    {
        return scan->rs_startblock;
    }
    else
    {
        /*
         * Disable reporting to syncscan logic in a backwards scan; it's not
         * very likely anyone else is doing the same thing at the same time,
         * and much more likely that we'll just bollix things for forward
         * scanners.
         */
        scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;

        /*
         * Start from the last page of the scan. Ensure we take into account
         * rs_numblocks if it's been adjusted by heap_setscanlimits().
         */
        if (scan->rs_numblocks != InvalidBlockNumber)
            return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;

        if (scan->rs_startblock > 0)
            return scan->rs_startblock - 1;

        return scan->rs_nblocks - 1;
    }
}
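
/*
 * Worked example for the backward-scan arithmetic above (sketch): with
 * rs_startblock = 5, rs_numblocks = 6 and rs_nblocks = 8, the limited range
 * wraps around the end of the relation (blocks 5, 6, 7, 0, 1, 2), so the
 * last block to scan is (5 + 6 - 1) % 8 = 2.
 */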

/*
 * heapgettup_start_page - helper function for heapgettup()
 *
 * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
 * to the number of tuples on this page. Also set *lineoff to the first
 * offset to scan with forward scans getting the first offset and backward
 * getting the final offset on the page.
 */
static Page
heapgettup_start_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
                      OffsetNumber *lineoff)
{
    Page        page;

    Assert(scan->rs_inited);
    Assert(BufferIsValid(scan->rs_cbuf));

    /* Caller is responsible for ensuring buffer is locked if needed */
    page = BufferGetPage(scan->rs_cbuf);

    *linesleft = PageGetMaxOffsetNumber(page) - FirstOffsetNumber + 1;

    if (ScanDirectionIsForward(dir))
        *lineoff = FirstOffsetNumber;
    else
        *lineoff = (OffsetNumber) (*linesleft);

    /* lineoff now references the physically previous or next tid */
    return page;
}


/*
 * heapgettup_continue_page - helper function for heapgettup()
 *
 * Return the next page to scan based on the scan->rs_cbuf and set *linesleft
 * to the number of tuples left to scan on this page. Also set *lineoff to
 * the next offset to scan according to the ScanDirection in 'dir'.
 */
static inline Page
heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
                         OffsetNumber *lineoff)
{
    Page        page;

    Assert(scan->rs_inited);
    Assert(BufferIsValid(scan->rs_cbuf));

    /* Caller is responsible for ensuring buffer is locked if needed */
    page = BufferGetPage(scan->rs_cbuf);

    if (ScanDirectionIsForward(dir))
    {
        *lineoff = OffsetNumberNext(scan->rs_coffset);
        *linesleft = PageGetMaxOffsetNumber(page) - (*lineoff) + 1;
    }
    else
    {
        /*
         * The previous returned tuple may have been vacuumed since the
         * previous scan when we use a non-MVCC snapshot, so we must
         * re-establish the lineoff <= PageGetMaxOffsetNumber(page) invariant
         */
        *lineoff = Min(PageGetMaxOffsetNumber(page), OffsetNumberPrev(scan->rs_coffset));
        *linesleft = *lineoff;
    }

    /* lineoff now references the physically previous or next tid */
    return page;
}

/*
 * heapgettup_advance_block - helper for heap_fetch_next_buffer()
 *
 * Given the current block number, the scan direction, and various information
 * contained in the scan descriptor, calculate the BlockNumber to scan next
 * and return it. If there are no further blocks to scan, return
 * InvalidBlockNumber to indicate this fact to the caller.
 *
 * This should not be called to determine the initial block number -- only for
 * subsequent blocks.
 *
 * This also adjusts rs_numblocks when a limit has been imposed by
 * heap_setscanlimits().
 */
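/*
 * Worked example (sketch): with rs_nblocks = 100 and rs_startblock = 30, a
 * forward scan proceeds 31, 32, ..., 99, wraps to 0, continues through 29,
 * and returns InvalidBlockNumber once the incremented block equals
 * rs_startblock (30) again.
 */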
static inline BlockNumber
heapgettup_advance_block(HeapScanDesc scan, BlockNumber block, ScanDirection dir)
{
    Assert(scan->rs_base.rs_parallel == NULL);

    if (likely(ScanDirectionIsForward(dir)))
    {
        block++;

        /* wrap back to the start of the heap */
        if (block >= scan->rs_nblocks)
            block = 0;

        /*
         * Report our new scan position for synchronization purposes. We don't
         * do that when moving backwards, however. That would just mess up any
         * other forward-moving scanners.
         *
         * Note: we do this before checking for end of scan so that the final
         * state of the position hint is back at the start of the rel. That's
         * not strictly necessary, but otherwise when you run the same query
         * multiple times the starting position would shift a little bit
         * backwards on every invocation, which is confusing. We don't
         * guarantee any specific ordering in general, though.
         */
        if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
            ss_report_location(scan->rs_base.rs_rd, block);

        /* we're done if we're back at where we started */
        if (block == scan->rs_startblock)
            return InvalidBlockNumber;

        /* check if the limit imposed by heap_setscanlimits() is met */
        if (scan->rs_numblocks != InvalidBlockNumber)
        {
            if (--scan->rs_numblocks == 0)
                return InvalidBlockNumber;
        }

        return block;
    }
    else
    {
        /* we're done if the last block is the start position */
        if (block == scan->rs_startblock)
            return InvalidBlockNumber;

        /* check if the limit imposed by heap_setscanlimits() is met */
        if (scan->rs_numblocks != InvalidBlockNumber)
        {
            if (--scan->rs_numblocks == 0)
                return InvalidBlockNumber;
        }

        /* wrap to the end of the heap when the last page was page 0 */
        if (block == 0)
            block = scan->rs_nblocks;

        block--;

        return block;
    }
}

/* ----------------
 *      heapgettup - fetch next heap tuple
 *
 *      Initialize the scan if not already done; then advance to the next
 *      tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
 *      or set scan->rs_ctup.t_data = NULL if no more tuples.
 *
 * Note: the reason nkeys/key are passed separately, even though they are
 * kept in the scan descriptor, is that the caller may not want us to check
 * the scankeys.
 *
 * Note: when we fall off the end of the scan in either direction, we
 * reset rs_inited. This means that a further request with the same
 * scan direction will restart the scan, which is a bit odd, but a
 * request with the opposite scan direction will start a fresh scan
 * in the proper direction. The latter is required behavior for cursors,
 * while the former case is generally undefined behavior in Postgres
 * so we don't care too much.
 * ----------------
 */
static void
heapgettup(HeapScanDesc scan,
           ScanDirection dir,
           int nkeys,
           ScanKey key)
{
    HeapTuple   tuple = &(scan->rs_ctup);
    Page        page;
    OffsetNumber lineoff;
    int         linesleft;

    if (likely(scan->rs_inited))
    {
        /* continue from previously returned page/tuple */
        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
        page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
        goto continue_page;
    }

    /*
     * advance the scan until we find a qualifying tuple or run out of stuff
     * to scan
     */
    while (true)
    {
        heap_fetch_next_buffer(scan, dir);

        /* did we run out of blocks to scan? */
        if (!BufferIsValid(scan->rs_cbuf))
            break;

        Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);

        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
        page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
continue_page:

        /*
         * Only continue scanning the page while we have lines left.
         *
         * Note that this protects us from accessing line pointers past
         * PageGetMaxOffsetNumber(); both for forward scans when we resume the
         * table scan, and for when we start scanning a new page.
         */
        for (; linesleft > 0; linesleft--, lineoff += dir)
        {
            bool        visible;
            ItemId      lpp = PageGetItemId(page, lineoff);

            if (!ItemIdIsNormal(lpp))
                continue;

            tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
            tuple->t_len = ItemIdGetLength(lpp);
            ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);

            visible = HeapTupleSatisfiesVisibility(tuple,
                                                   scan->rs_base.rs_snapshot,
                                                   scan->rs_cbuf);

            HeapCheckForSerializableConflictOut(visible, scan->rs_base.rs_rd,
                                                tuple, scan->rs_cbuf,
                                                scan->rs_base.rs_snapshot);

            /* skip tuples not visible to this snapshot */
            if (!visible)
                continue;

            /* skip any tuples that don't match the scan key */
            if (key != NULL &&
                !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
                             nkeys, key))
                continue;

            LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
            scan->rs_coffset = lineoff;
            return;
        }

        /*
         * if we get here, it means we've exhausted the items on this page and
         * it's time to move to the next.
         */
        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
    }

    /* end of scan */
    if (BufferIsValid(scan->rs_cbuf))
        ReleaseBuffer(scan->rs_cbuf);

    scan->rs_cbuf = InvalidBuffer;
    scan->rs_cblock = InvalidBlockNumber;
    scan->rs_prefetch_block = InvalidBlockNumber;
    tuple->t_data = NULL;
    scan->rs_inited = false;
}

/* ----------------
 *      heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
 *
 *      Same API as heapgettup, but used in page-at-a-time mode
 *
 * The internal logic is much the same as heapgettup's too, but there are some
 * differences: we do not take the buffer content lock (that only needs to
 * happen inside heap_prepare_pagescan), and we iterate through just the
 * tuples listed in rs_vistuples[] rather than all tuples on the page. Notice
 * that lineindex is 0-based, where the corresponding loop variable lineoff in
 * heapgettup is 1-based.
 * ----------------
 */
static void
heapgettup_pagemode(HeapScanDesc scan,
                    ScanDirection dir,
                    int nkeys,
                    ScanKey key)
{
    HeapTuple   tuple = &(scan->rs_ctup);
    Page        page;
    uint32      lineindex;
    uint32      linesleft;

    if (likely(scan->rs_inited))
    {
        /* continue from previously returned page/tuple */
        page = BufferGetPage(scan->rs_cbuf);

        lineindex = scan->rs_cindex + dir;
        if (ScanDirectionIsForward(dir))
            linesleft = scan->rs_ntuples - lineindex;
        else
            linesleft = scan->rs_cindex;
        /* lineindex now references the next or previous visible tid */

        goto continue_page;
    }

    /*
     * advance the scan until we find a qualifying tuple or run out of stuff
     * to scan
     */
    while (true)
    {
        heap_fetch_next_buffer(scan, dir);

        /* did we run out of blocks to scan? */
        if (!BufferIsValid(scan->rs_cbuf))
            break;

        Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);

        /* prune the page and determine visible tuple offsets */
        heap_prepare_pagescan((TableScanDesc) scan);
        page = BufferGetPage(scan->rs_cbuf);
        linesleft = scan->rs_ntuples;
        lineindex = ScanDirectionIsForward(dir) ? 0 : linesleft - 1;

        /* block is the same for all tuples, set it once outside the loop */
        ItemPointerSetBlockNumber(&tuple->t_self, scan->rs_cblock);

        /* lineindex now references the next or previous visible tid */
continue_page:

        for (; linesleft > 0; linesleft--, lineindex += dir)
        {
            ItemId      lpp;
            OffsetNumber lineoff;

            Assert(lineindex < scan->rs_ntuples);
            lineoff = scan->rs_vistuples[lineindex];
            lpp = PageGetItemId(page, lineoff);
            Assert(ItemIdIsNormal(lpp));

            tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
            tuple->t_len = ItemIdGetLength(lpp);
            ItemPointerSetOffsetNumber(&tuple->t_self, lineoff);

            /* skip any tuples that don't match the scan key */
            if (key != NULL &&
                !HeapKeyTest(tuple, RelationGetDescr(scan->rs_base.rs_rd),
                             nkeys, key))
                continue;

            scan->rs_cindex = lineindex;
            return;
        }
    }

    /* end of scan */
    if (BufferIsValid(scan->rs_cbuf))
        ReleaseBuffer(scan->rs_cbuf);
    scan->rs_cbuf = InvalidBuffer;
    scan->rs_cblock = InvalidBlockNumber;
    scan->rs_prefetch_block = InvalidBlockNumber;
    tuple->t_data = NULL;
    scan->rs_inited = false;
}


/* ----------------------------------------------------------------
 *                   heap access method interface
 * ----------------------------------------------------------------
 */


TableScanDesc
heap_beginscan(Relation relation, Snapshot snapshot,
               int nkeys, ScanKey key,
               ParallelTableScanDesc parallel_scan,
               uint32 flags)
{
    HeapScanDesc scan;

    /*
     * increment relation ref count while scanning relation
     *
     * This is just to make really sure the relcache entry won't go away while
     * the scan has a pointer to it. Caller should be holding the rel open
     * anyway, so this is redundant in all normal scenarios...
     */
    RelationIncrementReferenceCount(relation);

    /*
     * allocate and initialize scan descriptor
     */
    if (flags & SO_TYPE_BITMAPSCAN)
    {
        BitmapHeapScanDesc bscan = palloc_object(BitmapHeapScanDescData);

        /*
         * Bitmap heap scans do not have any fields that a normal heap scan
         * does not have, so no special initialization is required here.
         */
        scan = (HeapScanDesc) bscan;
    }
    else
        scan = (HeapScanDesc) palloc_object(HeapScanDescData);

    scan->rs_base.rs_rd = relation;
    scan->rs_base.rs_snapshot = snapshot;
    scan->rs_base.rs_nkeys = nkeys;
    scan->rs_base.rs_flags = flags;
    scan->rs_base.rs_parallel = parallel_scan;
    scan->rs_strategy = NULL;   /* set in initscan */
    scan->rs_cbuf = InvalidBuffer;

    /*
     * Disable page-at-a-time mode if it's not an MVCC-safe snapshot.
     */
    if (!(snapshot && IsMVCCSnapshot(snapshot)))
        scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;

    /* Check that a historic snapshot is not used for non-catalog tables */
    if (snapshot &&
        IsHistoricMVCCSnapshot(snapshot) &&
        !RelationIsAccessibleInLogicalDecoding(relation))
    {
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
                 errmsg("cannot query non-catalog table \"%s\" during logical decoding",
                        RelationGetRelationName(relation))));
    }

    /*
     * For seqscan and sample scans in a serializable transaction, acquire a
     * predicate lock on the entire relation. This is required not only to
     * lock all the matching tuples, but also to conflict with new insertions
     * into the table. In an indexscan, we take page locks on the index pages
     * covering the range specified in the scan qual, but in a heap scan there
     * is nothing more fine-grained to lock. A bitmap scan is a different
     * story, there we have already scanned the index and locked the index
     * pages covering the predicate. But in that case we still have to lock
     * any matching heap tuples. For sample scan we could optimize the locking
     * to be at least page-level granularity, but we'd need to add per-tuple
     * locking for that.
     */
    if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
    {
        /*
         * Ensure a missing snapshot is noticed reliably, even if the
         * isolation mode means predicate locking isn't performed (and
         * therefore the snapshot isn't used here).
         */
        Assert(snapshot);
        PredicateLockRelation(relation, snapshot);
    }

    /* we only need to set this up once */
    scan->rs_ctup.t_tableOid = RelationGetRelid(relation);

    /*
     * Allocate memory to keep track of page allocation for parallel workers
     * when doing a parallel scan.
     */
    if (parallel_scan != NULL)
        scan->rs_parallelworkerdata = palloc_object(ParallelBlockTableScanWorkerData);
    else
        scan->rs_parallelworkerdata = NULL;

    /*
     * we do this here instead of in initscan() because heap_rescan also calls
     * initscan() and we don't want to allocate memory again
     */
    if (nkeys > 0)
        scan->rs_base.rs_key = palloc_array(ScanKeyData, nkeys);
    else
        scan->rs_base.rs_key = NULL;

    initscan(scan, key, false);

    scan->rs_read_stream = NULL;

    /*
     * Set up a read stream for sequential scans and TID range scans. This
     * should be done after initscan() because initscan() allocates the
     * BufferAccessStrategy object passed to the read stream API.
     */
    if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN ||
        scan->rs_base.rs_flags & SO_TYPE_TIDRANGESCAN)
    {
        ReadStreamBlockNumberCB cb;

        if (scan->rs_base.rs_parallel)
            cb = heap_scan_stream_read_next_parallel;
        else
            cb = heap_scan_stream_read_next_serial;

        /* ---
         * It is safe to use batchmode as the only locks taken by `cb`
         * are never taken while waiting for IO:
         * - SyncScanLock is used in the non-parallel case
         * - in the parallel case, only spinlocks and atomics are used
         * ---
         */
        scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_SEQUENTIAL |
                                                          READ_STREAM_USE_BATCHING,
                                                          scan->rs_strategy,
                                                          scan->rs_base.rs_rd,
                                                          MAIN_FORKNUM,
                                                          cb,
                                                          scan,
                                                          0);
    }
    else if (scan->rs_base.rs_flags & SO_TYPE_BITMAPSCAN)
    {
        scan->rs_read_stream = read_stream_begin_relation(READ_STREAM_DEFAULT |
                                                          READ_STREAM_USE_BATCHING,
                                                          scan->rs_strategy,
                                                          scan->rs_base.rs_rd,
                                                          MAIN_FORKNUM,
                                                          bitmapheap_stream_read_next,
                                                          scan,
                                                          sizeof(TBMIterateResult));
    }


    return (TableScanDesc) scan;
}
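
/*
 * Putting the pieces together, a minimal serial scan over these entry
 * points might look like this (sketch only; most callers go through the
 * table AM wrappers in tableam.h instead):
 *
 *     TableScanDesc sscan;
 *     HeapTuple     tuple;
 *
 *     sscan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
 *                            SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
 *                            SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);
 *     while ((tuple = heap_getnext(sscan, ForwardScanDirection)) != NULL)
 *     {
 *         ... process tuple ...
 *     }
 *     heap_endscan(sscan);
 */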
1316 : :
1317 : : void
1318 : 863043 : heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
1319 : : bool allow_strat, bool allow_sync, bool allow_pagemode)
1320 : : {
1321 : 863043 : HeapScanDesc scan = (HeapScanDesc) sscan;
1322 : :
1323 [ + + ]: 863043 : if (set_params)
1324 : : {
2492 1325 [ + - ]: 15 : if (allow_strat)
1326 : 15 : scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
1327 : : else
2492 andres@anarazel.de 1328 :UBC 0 : scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
1329 : :
2492 andres@anarazel.de 1330 [ + + ]:CBC 15 : if (allow_sync)
1331 : 6 : scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
1332 : : else
1333 : 9 : scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
1334 : :
1335 [ + - + - ]: 15 : if (allow_pagemode && scan->rs_base.rs_snapshot &&
1336 [ + - - - ]: 15 : IsMVCCSnapshot(scan->rs_base.rs_snapshot))
1337 : 15 : scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
1338 : : else
2492 andres@anarazel.de 1339 :UBC 0 : scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
1340 : : }
1341 : :
1342 : : /*
1343 : : * unpin scan buffers
1344 : : */
9045 tgl@sss.pgh.pa.us 1345 [ + + ]:CBC 863043 : if (BufferIsValid(scan->rs_cbuf))
1346 : : {
1347 : 1782 : ReleaseBuffer(scan->rs_cbuf);
365 melanieplageman@gmai 1348 : 1782 : scan->rs_cbuf = InvalidBuffer;
1349 : : }
1350 : :
1351 : : /*
1352 : : * SO_TYPE_BITMAPSCAN would be cleaned up here, but it does not hold any
1353 : : * additional data vs a normal HeapScan
1354 : : */
1355 : :
1356 : : /*
1357 : : * The read stream is reset on rescan. This must be done before
1358 : : * initscan(), as some state referred to by read_stream_reset() is reset
1359 : : * in initscan().
1360 : : */
706 tmunro@postgresql.or 1361 [ + + ]: 863043 : if (scan->rs_read_stream)
1362 : 863025 : read_stream_reset(scan->rs_read_stream);
1363 : :
1364 : : /*
1365 : : * reinitialize scan descriptor
1366 : : */
6122 tgl@sss.pgh.pa.us 1367 : 863043 : initscan(scan, key, true);
10841 scrappy@hub.org 1368 : 863043 : }
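/*
 * Illustrative sketch, not part of heapam.c: restarting an existing scan.
 * Passing key = NULL keeps whatever scan keys are already stored in the
 * descriptor, and set_params = false leaves the SO_ALLOW_* flags untouched,
 * so the last three arguments are ignored.  "scan" is assumed to come from
 * an earlier heap_beginscan() call.
 */
heap_rescan(scan, NULL, false, false, false, false);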
1369 : :
1370 : : void
2561 andres@anarazel.de 1371 : 690830 : heap_endscan(TableScanDesc sscan)
1372 : : {
1373 : 690830 : HeapScanDesc scan = (HeapScanDesc) sscan;
1374 : :
1375 : : /* Note: no locking manipulations needed */
1376 : :
1377 : : /*
1378 : : * unpin scan buffers
1379 : : */
9045 tgl@sss.pgh.pa.us 1380 [ + + ]: 690830 : if (BufferIsValid(scan->rs_cbuf))
1381 : 459579 : ReleaseBuffer(scan->rs_cbuf);
1382 : :
1383 : : /*
1384 : : * Must free the read stream before freeing the BufferAccessStrategy.
1385 : : */
706 tmunro@postgresql.or 1386 [ + + ]: 690830 : if (scan->rs_read_stream)
1387 : 681354 : read_stream_end(scan->rs_read_stream);
1388 : :
1389 : : /*
1390 : : * decrement relation reference count and free scan descriptor storage
1391 : : */
2561 andres@anarazel.de 1392 : 690830 : RelationDecrementReferenceCount(scan->rs_base.rs_rd);
1393 : :
1394 [ + + ]: 690830 : if (scan->rs_base.rs_key)
1395 : 220441 : pfree(scan->rs_base.rs_key);
1396 : :
6864 tgl@sss.pgh.pa.us 1397 [ + + ]: 690830 : if (scan->rs_strategy != NULL)
1398 : 13155 : FreeAccessStrategy(scan->rs_strategy);
1399 : :
1811 drowley@postgresql.o 1400 [ + + ]: 690830 : if (scan->rs_parallelworkerdata != NULL)
1401 : 2128 : pfree(scan->rs_parallelworkerdata);
1402 : :
2492 andres@anarazel.de 1403 [ + + ]: 690830 : if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
2561 1404 : 34893 : UnregisterSnapshot(scan->rs_base.rs_snapshot);
1405 : :
9675 tgl@sss.pgh.pa.us 1406 : 690830 : pfree(scan);
10841 scrappy@hub.org 1407 : 690830 : }
1408 : :
1409 : : HeapTuple
2561 andres@anarazel.de 1410 : 10094967 : heap_getnext(TableScanDesc sscan, ScanDirection direction)
1411 : : {
1412 : 10094967 : HeapScanDesc scan = (HeapScanDesc) sscan;
1413 : :
1414 : : /*
1415 : : * This is still widely used directly, without going through table AM, so
1416 : : * add a safety check. It's possible we should, at a later point,
1417 : : * downgrade this to an assert. The reason for checking the AM routine,
1418 : : * rather than the AM oid, is that this allows writing regression tests
1419 : : * that create another AM reusing the heap handler.
1420 : : */
1421 [ - + ]: 10094967 : if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
2561 andres@anarazel.de 1422 [ # # ]:UBC 0 : ereport(ERROR,
1423 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1424 : : errmsg_internal("only heap AM is supported")));
1425 : :
1426 : : /* Note: no locking manipulations needed */
1427 : :
2492 andres@anarazel.de 1428 [ + + ]:CBC 10094967 : if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
7327 neilc@samurai.com 1429 : 2705807 : heapgettup_pagemode(scan, direction,
2561 andres@anarazel.de 1430 : 2705807 : scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1431 : : else
1432 : 7389160 : heapgettup(scan, direction,
1433 : 7389160 : scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1434 : :
7414 tgl@sss.pgh.pa.us 1435 [ + + ]: 10094966 : if (scan->rs_ctup.t_data == NULL)
8700 1436 : 61939 : return NULL;
1437 : :
1438 : : /*
1439 : : * if we get here it means we have a new current scan tuple, so point to
1440 : : * the proper return buffer and return the tuple.
1441 : : */
1442 : :
2561 andres@anarazel.de 1443 [ - + - - : 10033027 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
+ - ]
1444 : :
1445 : 10033027 : return &scan->rs_ctup;
1446 : : }
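/*
 * Illustrative sketch, not part of heapam.c: the begin/getnext/end
 * lifecycle from a caller's perspective.  "rel" and the use of the active
 * snapshot are assumptions for the example; the flag combination mirrors
 * what the table AM wrapper table_beginscan() passes for a plain sequential
 * scan.  The returned tuple points into a pinned buffer, so it must not be
 * used after the scan advances or ends.
 */
#include "access/heapam.h"
#include "access/tableam.h"
#include "utils/snapmgr.h"

static void
scan_all_tuples(Relation rel)
{
	uint32		flags = SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
		SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
	TableScanDesc scan = heap_beginscan(rel, GetActiveSnapshot(),
										0, NULL, NULL, flags);
	HeapTuple	tuple;

	while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* inspect tuple->t_data here, while its buffer is still pinned */
	}

	heap_endscan(scan);
}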
1447 : :
1448 : : bool
1449 : 55325715 : heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
1450 : : {
1451 : 55325715 : HeapScanDesc scan = (HeapScanDesc) sscan;
1452 : :
1453 : : /* Note: no locking manipulations needed */
1454 : :
2492 1455 [ + + ]: 55325715 : if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1456 : 54841521 : heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1457 : : else
1458 : 484194 : heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1459 : :
2561 1460 [ + + ]: 55325679 : if (scan->rs_ctup.t_data == NULL)
1461 : : {
1462 : 1008213 : ExecClearTuple(slot);
1463 : 1008213 : return false;
1464 : : }
1465 : :
1466 : : /*
1467 : : * if we get here it means we have a new current scan tuple, so point to
1468 : : * the proper return buffer and return the tuple.
1469 : : */
1470 : :
1471 [ + + - + : 54317466 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
+ + ]
1472 : :
1473 : 54317466 : ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1474 : : scan->rs_cbuf);
1475 : 54317466 : return true;
1476 : : }
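/*
 * Illustrative sketch, not part of heapam.c: the slot-based variant used by
 * the executor.  One TupleTableSlot is created up front and reused for
 * every returned tuple; "scan" is assumed to be a scan opened as in the
 * sketch above.
 */
#include "access/tableam.h"
#include "executor/tuptable.h"

static void
scan_all_slots(Relation rel, TableScanDesc scan)
{
	TupleTableSlot *slot = table_slot_create(rel, NULL);

	while (heap_getnextslot(scan, ForwardScanDirection, slot))
	{
		bool		isnull;
		Datum		value = slot_getattr(slot, 1, &isnull);

		(void) value;			/* ... process the first column ... */
	}

	ExecDropSingleTupleTableSlot(slot);
}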
1477 : :
1478 : : void
1842 drowley@postgresql.o 1479 : 1036 : heap_set_tidrange(TableScanDesc sscan, ItemPointer mintid,
1480 : : ItemPointer maxtid)
1481 : : {
1482 : 1036 : HeapScanDesc scan = (HeapScanDesc) sscan;
1483 : : BlockNumber startBlk;
1484 : : BlockNumber numBlks;
1485 : : ItemPointerData highestItem;
1486 : : ItemPointerData lowestItem;
1487 : :
1488 : : /*
1489 : : * For relations without any pages, we can simply leave the TID range
1490 : : * unset. There will be no tuples to scan, therefore no tuples outside
1491 : : * the given TID range.
1492 : : */
1493 [ + + ]: 1036 : if (scan->rs_nblocks == 0)
1494 : 24 : return;
1495 : :
1496 : : /*
1497 : : * Set up some ItemPointers which point to the first and last possible
1498 : : * tuples in the heap.
1499 : : */
1500 : 1030 : ItemPointerSet(&highestItem, scan->rs_nblocks - 1, MaxOffsetNumber);
1501 : 1030 : ItemPointerSet(&lowestItem, 0, FirstOffsetNumber);
1502 : :
1503 : : /*
1504 : : * If the given maximum TID is below the highest possible TID in the
1505 : : * relation, then restrict the range to that, otherwise we scan to the end
1506 : : * of the relation.
1507 : : */
1508 [ + + ]: 1030 : if (ItemPointerCompare(maxtid, &highestItem) < 0)
1509 : 131 : ItemPointerCopy(maxtid, &highestItem);
1510 : :
1511 : : /*
1512 : : * If the given minimum TID is above the lowest possible TID in the
1513 : : * relation, then restrict the range to only scan for TIDs above that.
1514 : : */
1515 [ + + ]: 1030 : if (ItemPointerCompare(mintid, &lowestItem) > 0)
1516 : 911 : ItemPointerCopy(mintid, &lowestItem);
1517 : :
1518 : : /*
1519 : : * Check for an empty range and protect against would-be negative results
1520 : : * from the numBlks calculation below.
1521 : : */
1522 [ + + ]: 1030 : if (ItemPointerCompare(&highestItem, &lowestItem) < 0)
1523 : : {
1524 : : /* Set an empty range of blocks to scan */
1525 : 18 : heap_setscanlimits(sscan, 0, 0);
1526 : 18 : return;
1527 : : }
1528 : :
1529 : : /*
1530 : : * Calculate the first block and the number of blocks we must scan. We
1531 : : * could be more aggressive here and perform some more validation to try
1532 : : * and further narrow the scope of blocks to scan by checking if the
1533 : : * to further narrow the scope of blocks to scan by checking if the
1534 : : * advance startBlk by one. Likewise, if highestItem has an offset of 0
1535 : : * we could scan one fewer block. However, such an optimization does not
1536 : : * seem worth troubling over, currently.
1537 : : */
1538 : 1012 : startBlk = ItemPointerGetBlockNumberNoCheck(&lowestItem);
1539 : :
1540 : 1012 : numBlks = ItemPointerGetBlockNumberNoCheck(&highestItem) -
1541 : 1012 : ItemPointerGetBlockNumberNoCheck(&lowestItem) + 1;
1542 : :
1543 : : /* Set the start block and number of blocks to scan */
1544 : 1012 : heap_setscanlimits(sscan, startBlk, numBlks);
1545 : :
1546 : : /* Finally, set the TID range in sscan */
506 melanieplageman@gmai 1547 : 1012 : ItemPointerCopy(&lowestItem, &sscan->st.tidrange.rs_mintid);
1548 : 1012 : ItemPointerCopy(&highestItem, &sscan->st.tidrange.rs_maxtid);
1549 : : }
1550 : :
1551 : : bool
1842 drowley@postgresql.o 1552 : 5863 : heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
1553 : : TupleTableSlot *slot)
1554 : : {
1555 : 5863 : HeapScanDesc scan = (HeapScanDesc) sscan;
506 melanieplageman@gmai 1556 : 5863 : ItemPointer mintid = &sscan->st.tidrange.rs_mintid;
1557 : 5863 : ItemPointer maxtid = &sscan->st.tidrange.rs_maxtid;
1558 : :
1559 : : /* Note: no locking manipulations needed */
1560 : : for (;;)
1561 : : {
1842 drowley@postgresql.o 1562 [ + - ]: 5956 : if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1563 : 5956 : heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1564 : : else
1842 drowley@postgresql.o 1565 :UBC 0 : heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1566 : :
1842 drowley@postgresql.o 1567 [ + + ]:CBC 5947 : if (scan->rs_ctup.t_data == NULL)
1568 : : {
1569 : 104 : ExecClearTuple(slot);
1570 : 104 : return false;
1571 : : }
1572 : :
1573 : : /*
1574 : : * heap_set_tidrange will have used heap_setscanlimits to limit the
1575 : : * range of pages we scan to only ones that can contain the TID range
1576 : : * we're scanning for. Here we must filter out any tuples from these
1577 : : * pages that are outside of that range.
1578 : : */
1579 [ + + ]: 5843 : if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1580 : : {
1581 : 93 : ExecClearTuple(slot);
1582 : :
1583 : : /*
1584 : : * When scanning backwards, the TIDs will be in descending order.
1585 : : * Future tuples in this direction will be lower still, so we can
1586 : : * just return false to indicate there will be no more tuples.
1587 : : */
1588 [ - + ]: 93 : if (ScanDirectionIsBackward(direction))
1842 drowley@postgresql.o 1589 :UBC 0 : return false;
1590 : :
1842 drowley@postgresql.o 1591 :CBC 93 : continue;
1592 : : }
1593 : :
1594 : : /*
1595 : : * Likewise for the final page, we must filter out TIDs greater than
1596 : : * maxtid.
1597 : : */
1598 [ + + ]: 5750 : if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1599 : : {
1600 : 56 : ExecClearTuple(slot);
1601 : :
1602 : : /*
1603 : : * When scanning forward, the TIDs will be in ascending order.
1604 : : * Future tuples in this direction will be higher still, so we can
1605 : : * just return false to indicate there will be no more tuples.
1606 : : */
1607 [ + - ]: 56 : if (ScanDirectionIsForward(direction))
1608 : 56 : return false;
1842 drowley@postgresql.o 1609 :UBC 0 : continue;
1610 : : }
1611 : :
1842 drowley@postgresql.o 1612 :CBC 5694 : break;
1613 : : }
1614 : :
1615 : : /*
1616 : : * if we get here it means we have a new current scan tuple, so point to
1617 : : * the proper return buffer and return the tuple.
1618 : : */
1619 [ - + - - : 5694 : pgstat_count_heap_getnext(scan->rs_base.rs_rd);
+ - ]
1620 : :
1621 : 5694 : ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1622 : 5694 : return true;
1623 : : }
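/*
 * Illustrative sketch, not part of heapam.c: a TID range scan over blocks
 * 0-9 of "rel".  heap_set_tidrange() clamps the requested range to the
 * relation and narrows the block range via heap_setscanlimits();
 * heap_getnextslot_tidrange() then filters out tuples on the boundary pages
 * that still fall outside the range.  The flags mirror
 * table_beginscan_tidrange().
 */
#include "access/heapam.h"
#include "access/tableam.h"
#include "storage/off.h"

static void
scan_first_ten_blocks(Relation rel, Snapshot snapshot, TupleTableSlot *slot)
{
	ItemPointerData mintid;
	ItemPointerData maxtid;
	TableScanDesc scan;

	ItemPointerSet(&mintid, 0, FirstOffsetNumber);
	ItemPointerSet(&maxtid, 9, MaxOffsetNumber);

	scan = heap_beginscan(rel, snapshot, 0, NULL, NULL,
						  SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE);
	heap_set_tidrange(scan, &mintid, &maxtid);

	while (heap_getnextslot_tidrange(scan, ForwardScanDirection, slot))
	{
		/* only tuples with mintid <= t_self <= maxtid arrive here */
	}

	heap_endscan(scan);
}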
1624 : :
1625 : : /*
1626 : : * heap_fetch - retrieve tuple with given tid
1627 : : *
1628 : : * On entry, tuple->t_self is the TID to fetch. We pin the buffer holding
1629 : : * the tuple, fill in the remaining fields of *tuple, and check the tuple
1630 : : * against the specified snapshot.
1631 : : *
1632 : : * If successful (tuple found and passes snapshot time qual), then *userbuf
1633 : : * is set to the buffer holding the tuple and true is returned. The caller
1634 : : * must unpin the buffer when done with the tuple.
1635 : : *
1636 : : * If the tuple is not found (ie, item number references a deleted slot),
1637 : : * then tuple->t_data is set to NULL, *userbuf is set to InvalidBuffer,
1638 : : * and false is returned.
1639 : : *
1640 : : * If the tuple is found but fails the time qual check, then the behavior
1641 : : * depends on the keep_buf parameter. If keep_buf is false, the results
1642 : : * are the same as for the tuple-not-found case. If keep_buf is true,
1643 : : * then tuple->t_data and *userbuf are returned as for the success case,
1644 : : * and again the caller must unpin the buffer; but false is returned.
1645 : : *
1646 : : * heap_fetch does not follow HOT chains: only the exact TID requested will
1647 : : * be fetched.
1648 : : *
1649 : : * It is somewhat inconsistent that we ereport() on invalid block number but
1650 : : * return false on invalid item number. There are a couple of reasons though.
1651 : : * One is that the caller can relatively easily check the block number for
1652 : : * validity, but cannot check the item number without reading the page
1653 : : * itself. Another is that when we are following a t_ctid link, we can be
1654 : : * reasonably confident that the page number is valid (since VACUUM shouldn't
1655 : : * truncate off the destination page without having killed the referencing
1656 : : * tuple first), but the item number might well not be good.
1657 : : */
1658 : : bool
10841 scrappy@hub.org 1659 : 581768 : heap_fetch(Relation relation,
1660 : : Snapshot snapshot,
1661 : : HeapTuple tuple,
1662 : : Buffer *userbuf,
1663 : : bool keep_buf)
1664 : : {
8696 tgl@sss.pgh.pa.us 1665 : 581768 : ItemPointer tid = &(tuple->t_self);
1666 : : ItemId lp;
1667 : : Buffer buffer;
1668 : : Page page;
1669 : : OffsetNumber offnum;
1670 : : bool valid;
1671 : :
1672 : : /*
1673 : : * Fetch and pin the appropriate page of the relation.
1674 : : */
6555 1675 : 581768 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1676 : :
1677 : : /*
1678 : : * Need share lock on buffer to examine tuple commit status.
1679 : : */
9952 vadim4o@yahoo.com 1680 : 581756 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
3616 kgrittn@postgresql.o 1681 : 581756 : page = BufferGetPage(buffer);
1682 : :
1683 : : /*
1684 : : * We'd better check for out-of-range offnum in case of VACUUM since the
1685 : : * TID was obtained.
1686 : : */
10416 bruce@momjian.us 1687 : 581756 : offnum = ItemPointerGetOffsetNumber(tid);
6454 tgl@sss.pgh.pa.us 1688 [ + - + + ]: 581756 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1689 : : {
7658 1690 : 3 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2549 andres@anarazel.de 1691 : 3 : ReleaseBuffer(buffer);
1692 : 3 : *userbuf = InvalidBuffer;
7658 tgl@sss.pgh.pa.us 1693 : 3 : tuple->t_data = NULL;
1694 : 3 : return false;
1695 : : }
1696 : :
1697 : : /*
1698 : : * get the item line pointer corresponding to the requested tid
1699 : : */
6454 1700 : 581753 : lp = PageGetItemId(page, offnum);
1701 : :
1702 : : /*
1703 : : * Must check for deleted tuple.
1704 : : */
6759 1705 [ + + ]: 581753 : if (!ItemIdIsNormal(lp))
1706 : : {
9385 vadim4o@yahoo.com 1707 : 312 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2549 andres@anarazel.de 1708 : 312 : ReleaseBuffer(buffer);
1709 : 312 : *userbuf = InvalidBuffer;
8696 tgl@sss.pgh.pa.us 1710 : 312 : tuple->t_data = NULL;
1711 : 312 : return false;
1712 : : }
1713 : :
1714 : : /*
1715 : : * fill in *tuple fields
1716 : : */
6454 1717 : 581441 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
9970 vadim4o@yahoo.com 1718 : 581441 : tuple->t_len = ItemIdGetLength(lp);
7512 tgl@sss.pgh.pa.us 1719 : 581441 : tuple->t_tableOid = RelationGetRelid(relation);
1720 : :
1721 : : /*
1722 : : * check tuple visibility, then release lock
1723 : : */
7414 1724 : 581441 : valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1725 : :
5515 heikki.linnakangas@i 1726 [ + + ]: 581441 : if (valid)
2238 tmunro@postgresql.or 1727 : 581388 : PredicateLockTID(relation, &(tuple->t_self), snapshot,
1728 : 581388 : HeapTupleHeaderGetXmin(tuple->t_data));
1729 : :
1730 : 581441 : HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1731 : :
5490 heikki.linnakangas@i 1732 : 581441 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1733 : :
8696 tgl@sss.pgh.pa.us 1734 [ + + ]: 581441 : if (valid)
1735 : : {
1736 : : /*
1737 : : * All checks passed, so return the tuple as valid. Caller is now
1738 : : * responsible for releasing the buffer.
1739 : : */
9669 1740 : 581388 : *userbuf = buffer;
1741 : :
8696 1742 : 581388 : return true;
1743 : : }
1744 : :
1745 : : /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1432 1746 [ + + ]: 53 : if (keep_buf)
1747 : 34 : *userbuf = buffer;
1748 : : else
1749 : : {
1750 : 19 : ReleaseBuffer(buffer);
1751 : 19 : *userbuf = InvalidBuffer;
1752 : 19 : tuple->t_data = NULL;
1753 : : }
1754 : :
8696 1755 : 53 : return false;
1756 : : }
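/*
 * Illustrative sketch, not part of heapam.c: fetching a single tuple by
 * TID.  Only t_self needs to be set on entry; on success the caller owns a
 * pin on the returned buffer and must release it.  With keep_buf = false, a
 * tuple that exists but fails the snapshot check behaves like "not found".
 */
#include "access/heapam.h"
#include "storage/bufmgr.h"

static bool
fetch_one(Relation rel, Snapshot snapshot,
		  BlockNumber blkno, OffsetNumber offnum)
{
	HeapTupleData tuple;
	Buffer		buffer;

	ItemPointerSet(&tuple.t_self, blkno, offnum);
	if (heap_fetch(rel, snapshot, &tuple, &buffer, false))
	{
		/* use tuple.t_data / tuple.t_len while the pin is held */
		ReleaseBuffer(buffer);
		return true;
	}
	return false;
}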
1757 : :
1758 : : /*
1759 : : * heap_hot_search_buffer - search HOT chain for tuple satisfying snapshot
1760 : : *
1761 : : * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
1762 : : * of a HOT chain), and buffer is the buffer holding this tuple. We search
1763 : : * for the first chain member satisfying the given snapshot. If one is
1764 : : * found, we update *tid to reference that tuple's offset number, and
1765 : : * return true. If no match, return false without modifying *tid.
1766 : : *
1767 : : * heapTuple is a caller-supplied buffer. When a match is found, we return
1768 : : * the tuple here, in addition to updating *tid. If no match is found, the
1769 : : * contents of this buffer on return are undefined.
1770 : : *
1771 : : * If all_dead is not NULL, we check non-visible tuples to see if they are
1772 : : * globally dead; *all_dead is set true if all members of the HOT chain
1773 : : * are vacuumable, false if not.
1774 : : *
1775 : : * Unlike heap_fetch, the caller must already have pin and (at least) share
1776 : : * lock on the buffer; it is still pinned/locked at exit.
1777 : : */
1778 : : bool
5515 heikki.linnakangas@i 1779 : 22232350 : heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
1780 : : Snapshot snapshot, HeapTuple heapTuple,
1781 : : bool *all_dead, bool first_call)
1782 : : {
1215 peter@eisentraut.org 1783 : 22232350 : Page page = BufferGetPage(buffer);
6751 tgl@sss.pgh.pa.us 1784 : 22232350 : TransactionId prev_xmax = InvalidTransactionId;
1785 : : BlockNumber blkno;
1786 : : OffsetNumber offnum;
1787 : : bool at_chain_start;
1788 : : bool valid;
1789 : : bool skip;
2041 andres@anarazel.de 1790 : 22232350 : GlobalVisState *vistest = NULL;
1791 : :
1792 : : /* If this is not the first call, previous call returned a (live!) tuple */
6751 tgl@sss.pgh.pa.us 1793 [ + + ]: 22232350 : if (all_dead)
5375 rhaas@postgresql.org 1794 : 18857995 : *all_dead = first_call;
1795 : :
2412 heikki.linnakangas@i 1796 : 22232350 : blkno = ItemPointerGetBlockNumber(tid);
6751 tgl@sss.pgh.pa.us 1797 : 22232350 : offnum = ItemPointerGetOffsetNumber(tid);
5375 rhaas@postgresql.org 1798 : 22232350 : at_chain_start = first_call;
1799 : 22232350 : skip = !first_call;
1800 : :
1801 : : /* XXX: we should assert that a snapshot is pushed or registered */
2041 andres@anarazel.de 1802 [ - + ]: 22232350 : Assert(TransactionIdIsValid(RecentXmin));
2412 heikki.linnakangas@i 1803 [ + - ]: 22232350 : Assert(BufferGetBlockNumber(buffer) == blkno);
1804 : :
1805 : : /* Scan through possible multiple members of HOT-chain */
1806 : : for (;;)
6751 tgl@sss.pgh.pa.us 1807 : 1638627 : {
1808 : : ItemId lp;
1809 : :
1810 : : /* check for bogus TID */
1215 peter@eisentraut.org 1811 [ + - + - ]: 23870977 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1812 : : break;
1813 : :
1814 : 23870977 : lp = PageGetItemId(page, offnum);
1815 : :
1816 : : /* check for unused, dead, or redirected items */
6751 tgl@sss.pgh.pa.us 1817 [ + + ]: 23870977 : if (!ItemIdIsNormal(lp))
1818 : : {
1819 : : /* We should only see a redirect at start of chain */
1820 [ + + + - ]: 791178 : if (ItemIdIsRedirected(lp) && at_chain_start)
1821 : : {
1822 : : /* Follow the redirect */
1823 : 433268 : offnum = ItemIdGetRedirect(lp);
1824 : 433268 : at_chain_start = false;
1825 : 433268 : continue;
1826 : : }
1827 : : /* else must be end of chain */
1828 : 357910 : break;
1829 : : }
1830 : :
1831 : : /*
1832 : : * Update heapTuple to point to the element of the HOT chain we're
1833 : : * currently investigating. Having t_self set correctly is important
1834 : : * because the SSI checks and the *Satisfies routine for historical
1835 : : * MVCC snapshots need the correct tid to decide about the visibility.
1836 : : */
1215 peter@eisentraut.org 1837 : 23079799 : heapTuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
5375 rhaas@postgresql.org 1838 : 23079799 : heapTuple->t_len = ItemIdGetLength(lp);
4619 1839 : 23079799 : heapTuple->t_tableOid = RelationGetRelid(relation);
2412 heikki.linnakangas@i 1840 : 23079799 : ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1841 : :
1842 : : /*
1843 : : * Shouldn't see a HEAP_ONLY tuple at chain start.
1844 : : */
5375 rhaas@postgresql.org 1845 [ + + - + ]: 23079799 : if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
6751 tgl@sss.pgh.pa.us 1846 :UBC 0 : break;
1847 : :
1848 : : /*
1849 : : * The xmin should match the previous xmax value, else chain is
1850 : : * broken.
1851 : : */
6751 tgl@sss.pgh.pa.us 1852 [ + + - + ]:CBC 24285158 : if (TransactionIdIsValid(prev_xmax) &&
3055 alvherre@alvh.no-ip. 1853 : 1205359 : !TransactionIdEquals(prev_xmax,
1854 : : HeapTupleHeaderGetXmin(heapTuple->t_data)))
6751 tgl@sss.pgh.pa.us 1855 :UBC 0 : break;
1856 : :
1857 : : /*
1858 : : * When first_call is true (and thus, skip is initially false) we'll
1859 : : * return the first tuple we find. But on later passes, heapTuple
1860 : : * will initially be pointing to the tuple we returned last time.
1861 : : * Returning it again would be incorrect (and would loop forever), so
1862 : : * we skip it and return the next match we find.
1863 : : */
5375 rhaas@postgresql.org 1864 [ + + ]:CBC 23079799 : if (!skip)
1865 : : {
1866 : : /* If it's visible per the snapshot, we must return it */
1867 : 22992646 : valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2238 tmunro@postgresql.or 1868 : 22992646 : HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1869 : : buffer, snapshot);
1870 : :
5375 rhaas@postgresql.org 1871 [ + + ]: 22992641 : if (valid)
1872 : : {
1873 : 15634054 : ItemPointerSetOffsetNumber(tid, offnum);
2238 tmunro@postgresql.or 1874 : 15634054 : PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1875 : 15634054 : HeapTupleHeaderGetXmin(heapTuple->t_data));
5375 rhaas@postgresql.org 1876 [ + + ]: 15634054 : if (all_dead)
1877 : 12558301 : *all_dead = false;
1878 : 15634054 : return true;
1879 : : }
1880 : : }
1881 : 7445740 : skip = false;
1882 : :
1883 : : /*
1884 : : * If we can't see it, maybe no one else can either. At caller
1885 : : * request, check whether all chain members are dead to all
1886 : : * transactions.
1887 : : *
1888 : : * Note: if you change the criterion here for what is "dead", fix the
1889 : : * planner's get_actual_variable_range() function to match.
1890 : : */
2041 andres@anarazel.de 1891 [ + + + + ]: 7445740 : if (all_dead && *all_dead)
1892 : : {
1893 [ + + ]: 6500358 : if (!vistest)
1894 : 6368312 : vistest = GlobalVisTestFor(relation);
1895 : :
1896 [ + + ]: 6500358 : if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1897 : 6130129 : *all_dead = false;
1898 : : }
1899 : :
1900 : : /*
1901 : : * Check to see if HOT chain continues past this tuple; if so fetch
1902 : : * the next offnum and loop around.
1903 : : */
5375 rhaas@postgresql.org 1904 [ + + ]: 7445740 : if (HeapTupleIsHotUpdated(heapTuple))
1905 : : {
1906 [ - + ]: 1205359 : Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1907 : : blkno);
1908 : 1205359 : offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
6751 tgl@sss.pgh.pa.us 1909 : 1205359 : at_chain_start = false;
4799 alvherre@alvh.no-ip. 1910 : 1205359 : prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1911 : : }
1912 : : else
6695 bruce@momjian.us 1913 : 6240381 : break; /* end of chain */
1914 : : }
1915 : :
5403 heikki.linnakangas@i 1916 : 6598291 : return false;
1917 : : }
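/*
 * Illustrative sketch, not part of heapam.c: resolving a root TID (for
 * example, one obtained from an index) to the HOT chain member visible
 * under "snapshot".  Unlike heap_fetch(), the caller must already hold a
 * pin and at least share lock on the page; both are still held on return.
 */
#include "access/heapam.h"
#include "storage/bufmgr.h"

static bool
probe_hot_chain(Relation rel, Snapshot snapshot, ItemPointerData tid)
{
	Buffer		buffer = ReadBuffer(rel, ItemPointerGetBlockNumber(&tid));
	HeapTupleData heapTuple;
	bool		all_dead;
	bool		found;

	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	found = heap_hot_search_buffer(&tid, rel, buffer, snapshot,
								   &heapTuple, &all_dead, true);
	/* on success, tid now identifies the visible chain member */
	UnlockReleaseBuffer(buffer);
	return found;
}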
1918 : :
1919 : : /*
1920 : : * heap_get_latest_tid - get the latest tid of a specified tuple
1921 : : *
1922 : : * Actually, this gets the latest version that is visible according to the
1923 : : * scan's snapshot. Create a scan using SnapshotDirty to get the very latest,
1924 : : * possibly uncommitted version.
1925 : : *
1926 : : * *tid is both an input and an output parameter: it is updated to
1927 : : * show the latest version of the row. Note that it will not be changed
1928 : : * if no version of the row passes the snapshot test.
1929 : : */
1930 : : void
2494 andres@anarazel.de 1931 : 150 : heap_get_latest_tid(TableScanDesc sscan,
1932 : : ItemPointer tid)
1933 : : {
2489 tgl@sss.pgh.pa.us 1934 : 150 : Relation relation = sscan->rs_rd;
1935 : 150 : Snapshot snapshot = sscan->rs_snapshot;
1936 : : ItemPointerData ctid;
1937 : : TransactionId priorXmax;
1938 : :
1939 : : /*
1940 : : * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1941 : : * Assume that t_ctid links are valid however - there shouldn't be invalid
1942 : : * ones in the table.
1943 : : */
2494 andres@anarazel.de 1944 [ - + ]: 150 : Assert(ItemPointerIsValid(tid));
1945 : :
1946 : : /*
1947 : : * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1948 : : * need to examine, and *tid is the TID we will return if ctid turns out
1949 : : * to be bogus.
1950 : : *
1951 : : * Note that we will loop until we reach the end of the t_ctid chain.
1952 : : * Depending on the snapshot passed, there might be at most one visible
1953 : : * version of the row, but we don't try to optimize for that.
1954 : : */
7512 tgl@sss.pgh.pa.us 1955 : 150 : ctid = *tid;
1956 : 150 : priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1957 : : for (;;)
1958 : 45 : {
1959 : : Buffer buffer;
1960 : : Page page;
1961 : : OffsetNumber offnum;
1962 : : ItemId lp;
1963 : : HeapTupleData tp;
1964 : : bool valid;
1965 : :
1966 : : /*
1967 : : * Read, pin, and lock the page.
1968 : : */
1969 : 195 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1970 : 195 : LockBuffer(buffer, BUFFER_LOCK_SHARE);
3616 kgrittn@postgresql.o 1971 : 195 : page = BufferGetPage(buffer);
1972 : :
1973 : : /*
1974 : : * Check for bogus item number. This is not treated as an error
1975 : : * condition because it can happen while following a t_ctid link. We
1976 : : * just assume that the prior tid is OK and return it unchanged.
1977 : : */
7512 tgl@sss.pgh.pa.us 1978 : 195 : offnum = ItemPointerGetOffsetNumber(&ctid);
6454 1979 [ + - - + ]: 195 : if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1980 : : {
7289 tgl@sss.pgh.pa.us 1981 :UBC 0 : UnlockReleaseBuffer(buffer);
7512 1982 : 0 : break;
1983 : : }
6454 tgl@sss.pgh.pa.us 1984 :CBC 195 : lp = PageGetItemId(page, offnum);
6759 1985 [ - + ]: 195 : if (!ItemIdIsNormal(lp))
1986 : : {
7289 tgl@sss.pgh.pa.us 1987 :UBC 0 : UnlockReleaseBuffer(buffer);
7512 1988 : 0 : break;
1989 : : }
1990 : :
1991 : : /* OK to access the tuple */
7512 tgl@sss.pgh.pa.us 1992 :CBC 195 : tp.t_self = ctid;
6454 1993 : 195 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
7512 1994 : 195 : tp.t_len = ItemIdGetLength(lp);
4619 rhaas@postgresql.org 1995 : 195 : tp.t_tableOid = RelationGetRelid(relation);
1996 : :
1997 : : /*
1998 : : * After following a t_ctid link, we might arrive at an unrelated
1999 : : * tuple. Check for XMIN match.
2000 : : */
7512 tgl@sss.pgh.pa.us 2001 [ + + - + ]: 240 : if (TransactionIdIsValid(priorXmax) &&
3055 alvherre@alvh.no-ip. 2002 : 45 : !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
2003 : : {
7289 tgl@sss.pgh.pa.us 2004 :UBC 0 : UnlockReleaseBuffer(buffer);
7512 2005 : 0 : break;
2006 : : }
2007 : :
2008 : : /*
2009 : : * Check tuple visibility; if visible, set it as the new result
2010 : : * candidate.
2011 : : */
7414 tgl@sss.pgh.pa.us 2012 :CBC 195 : valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2238 tmunro@postgresql.or 2013 : 195 : HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
7512 tgl@sss.pgh.pa.us 2014 [ + + ]: 195 : if (valid)
2015 : 138 : *tid = ctid;
2016 : :
2017 : : /*
2018 : : * If there's a valid t_ctid link, follow it, else we're done.
2019 : : */
4799 alvherre@alvh.no-ip. 2020 [ + + + + ]: 276 : if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2021 [ + - ]: 138 : HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
2899 andres@anarazel.de 2022 [ + + ]: 114 : HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
7512 tgl@sss.pgh.pa.us 2023 : 57 : ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2024 : : {
7289 2025 : 150 : UnlockReleaseBuffer(buffer);
7512 2026 : 150 : break;
2027 : : }
2028 : :
2029 : 45 : ctid = tp.t_data->t_ctid;
4799 alvherre@alvh.no-ip. 2030 : 45 : priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
7289 tgl@sss.pgh.pa.us 2031 : 45 : UnlockReleaseBuffer(buffer);
2032 : : } /* end of loop */
9652 inoue@tpf.co.jp 2033 : 150 : }
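/*
 * Illustrative sketch, not part of heapam.c: chasing a row to its latest
 * visible version.  *tid is updated in place only if a newer version passes
 * the scan snapshot's test; callers normally reach this through
 * table_tuple_get_latest_tid().
 */
#include "access/tableam.h"

static void
latest_version(Relation rel, Snapshot snapshot, ItemPointer tid)
{
	TableScanDesc scan = table_beginscan_tid(rel, snapshot);

	heap_get_latest_tid(scan, tid);
	table_endscan(scan);
}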
2034 : :
2035 : :
2036 : : /*
2037 : : * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
2038 : : *
2039 : : * This is called after we have waited for the XMAX transaction to terminate.
2040 : : * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
2041 : : * be set on exit. If the transaction committed, we set the XMAX_COMMITTED
2042 : : * hint bit if possible --- but beware that that may not yet be possible,
2043 : : * if the transaction committed asynchronously.
2044 : : *
2045 : : * Note that if the transaction was a locker only, we set HEAP_XMAX_INVALID
2046 : : * even if it commits.
2047 : : *
2048 : : * Hence callers should look only at XMAX_INVALID.
2049 : : *
2050 : : * Note this is not allowed for tuples whose xmax is a multixact.
2051 : : */
2052 : : static void
6788 tgl@sss.pgh.pa.us 2053 : 231 : UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
2054 : : {
4799 alvherre@alvh.no-ip. 2055 [ - + ]: 231 : Assert(TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple), xid));
2056 [ - + ]: 231 : Assert(!(tuple->t_infomask & HEAP_XMAX_IS_MULTI));
2057 : :
6788 tgl@sss.pgh.pa.us 2058 [ + + ]: 231 : if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
2059 : : {
4799 alvherre@alvh.no-ip. 2060 [ + + + + ]: 410 : if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask) &&
2061 : 180 : TransactionIdDidCommit(xid))
6788 tgl@sss.pgh.pa.us 2062 : 153 : HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
2063 : : xid);
2064 : : else
2065 : 77 : HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
2066 : : InvalidTransactionId);
2067 : : }
2068 : 231 : }
2069 : :
2070 : :
2071 : : /*
2072 : : * GetBulkInsertState - prepare status object for a bulk insert
2073 : : */
2074 : : BulkInsertState
6338 2075 : 2782 : GetBulkInsertState(void)
2076 : : {
2077 : : BulkInsertState bistate;
2078 : :
95 michael@paquier.xyz 2079 :GNC 2782 : bistate = (BulkInsertState) palloc_object(BulkInsertStateData);
6338 tgl@sss.pgh.pa.us 2080 :CBC 2782 : bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2081 : 2782 : bistate->current_buf = InvalidBuffer;
1074 andres@anarazel.de 2082 : 2782 : bistate->next_free = InvalidBlockNumber;
2083 : 2782 : bistate->last_free = InvalidBlockNumber;
944 2084 : 2782 : bistate->already_extended_by = 0;
6338 tgl@sss.pgh.pa.us 2085 : 2782 : return bistate;
2086 : : }
2087 : :
2088 : : /*
2089 : : * FreeBulkInsertState - clean up after finishing a bulk insert
2090 : : */
2091 : : void
2092 : 2612 : FreeBulkInsertState(BulkInsertState bistate)
2093 : : {
2094 [ + + ]: 2612 : if (bistate->current_buf != InvalidBuffer)
6121 bruce@momjian.us 2095 : 2052 : ReleaseBuffer(bistate->current_buf);
6338 tgl@sss.pgh.pa.us 2096 : 2612 : FreeAccessStrategy(bistate->strategy);
2097 : 2612 : pfree(bistate);
2098 : 2612 : }
2099 : :
2100 : : /*
2101 : : * ReleaseBulkInsertStatePin - release a buffer currently held in bistate
2102 : : */
2103 : : void
3337 rhaas@postgresql.org 2104 : 80758 : ReleaseBulkInsertStatePin(BulkInsertState bistate)
2105 : : {
2106 [ + + ]: 80758 : if (bistate->current_buf != InvalidBuffer)
2107 : 30021 : ReleaseBuffer(bistate->current_buf);
2108 : 80758 : bistate->current_buf = InvalidBuffer;
2109 : :
2110 : : /*
2111 : : * Despite the name, we also reset bulk relation extension state.
2112 : : * Otherwise we can end up erroring out due to looking for free space in
2113 : : * ->next_free of one partition, even though ->next_free was set when
2114 : : * extending another partition. It could obviously also be bad for
2115 : : * efficiency to look at existing blocks at offsets from another
2116 : : * partition, even if we don't error out.
2117 : : */
884 andres@anarazel.de 2118 : 80758 : bistate->next_free = InvalidBlockNumber;
2119 : 80758 : bistate->last_free = InvalidBlockNumber;
3337 rhaas@postgresql.org 2120 : 80758 : }
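/*
 * Illustrative sketch, not part of heapam.c: the intended lifecycle of a
 * BulkInsertState around a stream of heap_insert() calls (below), as COPY
 * FROM uses it.  The bistate keeps the current target buffer pinned across
 * calls and uses a BAS_BULKWRITE strategy, so a large load does not evict
 * the entire shared buffer pool.
 */
#include "access/heapam.h"

static void
bulk_load(Relation rel, HeapTuple *tuples, int ntuples, CommandId cid)
{
	BulkInsertState bistate = GetBulkInsertState();

	for (int i = 0; i < ntuples; i++)
		heap_insert(rel, tuples[i], cid, 0, bistate);

	FreeBulkInsertState(bistate);	/* also drops the kept buffer pin */
}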
2121 : :
2122 : :
2123 : : /*
2124 : : * heap_insert - insert tuple into a heap
2125 : : *
2126 : : * The new tuple is stamped with current transaction ID and the specified
2127 : : * command ID.
2128 : : *
2129 : : * See table_tuple_insert for comments about most of the input flags, except
2130 : : * that this routine directly takes a tuple rather than a slot.
2131 : : *
2132 : : * There's corresponding HEAP_INSERT_ options to all the TABLE_INSERT_
2133 : : * options, and there additionally is HEAP_INSERT_SPECULATIVE which is used to
2134 : : * implement table_tuple_insert_speculative().
2135 : : *
2136 : : * On return the header fields of *tup are updated to match the stored tuple;
2137 : : * in particular tup->t_self receives the actual TID where the tuple was
2138 : : * stored. But note that any toasting of fields within the tuple data is NOT
2139 : : * reflected into *tup.
2140 : : */
2141 : : void
7573 tgl@sss.pgh.pa.us 2142 : 9188770 : heap_insert(Relation relation, HeapTuple tup, CommandId cid,
2143 : : int options, BulkInsertState bistate)
2144 : : {
7850 2145 : 9188770 : TransactionId xid = GetCurrentTransactionId();
2146 : : HeapTuple heaptup;
2147 : : Buffer buffer;
5381 rhaas@postgresql.org 2148 : 9188770 : Buffer vmbuffer = InvalidBuffer;
6311 heikki.linnakangas@i 2149 : 9188770 : bool all_visible_cleared = false;
2150 : :
2151 : : /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
1770 tgl@sss.pgh.pa.us 2152 [ - + ]: 9188770 : Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
2153 : : RelationGetNumberOfAttributes(relation));
2154 : :
289 nathan@postgresql.or 2155 : 9188770 : AssertHasSnapshotForToast(relation);
2156 : :
2157 : : /*
2158 : : * Fill in tuple header fields and toast the tuple if necessary.
2159 : : *
2160 : : * Note: below this point, heaptup is the data we actually intend to store
2161 : : * into the relation; tup is the caller's original untoasted data.
2162 : : */
5240 heikki.linnakangas@i 2163 : 9188770 : heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2164 : :
2165 : : /*
2166 : : * Find buffer to insert this tuple into. If the page is all visible,
2167 : : * this will also pin the requisite visibility map page.
2168 : : */
3788 kgrittn@postgresql.o 2169 : 9188770 : buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2170 : : InvalidBuffer, options, bistate,
2171 : : &vmbuffer, NULL,
2172 : : 0);
2173 : :
2174 : : /*
2175 : : * We're about to do the actual insert -- but check for conflict first, to
2176 : : * avoid possibly having to roll back work we've just done.
2177 : : *
2178 : : * This is safe without a recheck as long as there is no possibility of
2179 : : * another process scanning the page between this check and the insert
2180 : : * being visible to the scan (i.e., an exclusive buffer content lock is
2181 : : * continuously held from this point until the tuple insert is visible).
2182 : : *
2183 : : * For a heap insert, we only need to check for table-level SSI locks. Our
2184 : : * new tuple can't possibly conflict with existing tuple locks, and heap
2185 : : * page locks are only consolidated versions of tuple locks; they do not
2186 : : * lock "gaps" as index page locks do. So we don't need to specify a
2187 : : * buffer when making the call, which makes for a faster check.
2188 : : */
2238 tmunro@postgresql.or 2189 : 9188770 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2190 : :
2191 : : /* NO EREPORT(ERROR) from here till changes are logged */
9193 tgl@sss.pgh.pa.us 2192 : 9188758 : START_CRIT_SECTION();
2193 : :
3964 andres@anarazel.de 2194 : 9188758 : RelationPutHeapTuple(relation, buffer, heaptup,
2195 : 9188758 : (options & HEAP_INSERT_SPECULATIVE) != 0);
2196 : :
1746 tomas.vondra@postgre 2197 [ + + ]: 9188758 : if (PageIsAllVisible(BufferGetPage(buffer)))
2198 : : {
6311 heikki.linnakangas@i 2199 : 7533 : all_visible_cleared = true;
3616 kgrittn@postgresql.o 2200 : 7533 : PageClearAllVisible(BufferGetPage(buffer));
5381 rhaas@postgresql.org 2201 : 7533 : visibilitymap_clear(relation,
2202 : 7533 : ItemPointerGetBlockNumber(&(heaptup->t_self)),
2203 : : vmbuffer, VISIBILITYMAP_VALID_BITS);
2204 : : }
2205 : :
2206 : : /*
2207 : : * XXX Should we set PageSetPrunable on this page ?
2208 : : *
2209 : : * The inserting transaction may eventually abort, thus making this tuple
2210 : : * DEAD and hence available for pruning. Though we don't want to optimize
2211 : : * for aborts, if no other tuple on this page is UPDATEd/DELETEd, the
2212 : : * aborted tuple will never be pruned until the next vacuum is triggered.
2213 : : *
2214 : : * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2215 : : */
2216 : :
7289 tgl@sss.pgh.pa.us 2217 : 9188758 : MarkBufferDirty(buffer);
2218 : :
2219 : : /* XLOG stuff */
2171 noah@leadboat.com 2220 [ + + + + : 9188758 : if (RelationNeedsWAL(relation))
+ + + + ]
2221 : : {
2222 : : xl_heap_insert xlrec;
2223 : : xl_heap_header xlhdr;
2224 : : XLogRecPtr recptr;
3616 kgrittn@postgresql.o 2225 : 8200827 : Page page = BufferGetPage(buffer);
9124 bruce@momjian.us 2226 : 8200827 : uint8 info = XLOG_HEAP_INSERT;
4133 heikki.linnakangas@i 2227 : 8200827 : int bufflags = 0;
2228 : :
2229 : : /*
2230 : : * If this is a catalog, we need to transmit combo CIDs to properly
2231 : : * decode, so log that as well.
2232 : : */
4478 rhaas@postgresql.org 2233 [ + + + + : 8200827 : if (RelationIsAccessibleInLogicalDecoding(relation))
+ - - + -
- - - + +
+ + - + -
- + + ]
2234 : 3496 : log_heap_new_cid(relation, heaptup);
2235 : :
2236 : : /*
2237 : : * If this is the first and only tuple on the page, we can reinit the
2238 : : * page instead of restoring the whole thing. Set flag, and hide
2239 : : * buffer references from XLogInsert.
2240 : : */
4133 heikki.linnakangas@i 2241 [ + + + + ]: 8299387 : if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2242 : 98560 : PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2243 : : {
2244 : 97540 : info |= XLOG_HEAP_INIT_PAGE;
2245 : 97540 : bufflags |= REGBUF_WILL_INIT;
2246 : : }
2247 : :
2248 : 8200827 : xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
3964 andres@anarazel.de 2249 : 8200827 : xlrec.flags = 0;
2250 [ + + ]: 8200827 : if (all_visible_cleared)
2251 : 7530 : xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2252 [ + + ]: 8200827 : if (options & HEAP_INSERT_SPECULATIVE)
2253 : 2112 : xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
4133 heikki.linnakangas@i 2254 [ - + ]: 8200827 : Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2255 : :
2256 : : /*
2257 : : * For logical decoding, we need the tuple even if we're doing a full
2258 : : * page write, so make sure it's included even if we take a full-page
2259 : : * image. (XXX We could alternatively store a pointer into the FPW).
2260 : : */
2713 andres@anarazel.de 2261 [ + + + + : 8200827 : if (RelationIsLogicallyLogged(relation) &&
+ - - + -
- - - + -
+ + ]
2262 [ + + ]: 251632 : !(options & HEAP_INSERT_NO_LOGICAL))
2263 : : {
3964 2264 : 251593 : xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
4133 heikki.linnakangas@i 2265 : 251593 : bufflags |= REGBUF_KEEP_DATA;
2266 : :
2045 akapila@postgresql.o 2267 [ + + ]: 251593 : if (IsToastRelation(relation))
2268 : 1781 : xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
2269 : : }
2270 : :
4133 heikki.linnakangas@i 2271 : 8200827 : XLogBeginInsert();
397 peter@eisentraut.org 2272 : 8200827 : XLogRegisterData(&xlrec, SizeOfHeapInsert);
2273 : :
4133 heikki.linnakangas@i 2274 : 8200827 : xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2275 : 8200827 : xlhdr.t_infomask = heaptup->t_data->t_infomask;
2276 : 8200827 : xlhdr.t_hoff = heaptup->t_data->t_hoff;
2277 : :
2278 : : /*
2279 : : * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2280 : : * write the whole page to the xlog, we don't need to store
2281 : : * xl_heap_header in the xlog.
2282 : : */
2283 : 8200827 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
397 peter@eisentraut.org 2284 : 8200827 : XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
2285 : : /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
4133 heikki.linnakangas@i 2286 : 8200827 : XLogRegisterBufData(0,
4040 tgl@sss.pgh.pa.us 2287 : 8200827 : (char *) heaptup->t_data + SizeofHeapTupleHeader,
2288 : 8200827 : heaptup->t_len - SizeofHeapTupleHeader);
2289 : :
2290 : : /* filtering by origin on a row level is much more efficient */
3370 andres@anarazel.de 2291 : 8200827 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2292 : :
4133 heikki.linnakangas@i 2293 : 8200827 : recptr = XLogInsert(RM_HEAP_ID, info);
2294 : :
9208 vadim4o@yahoo.com 2295 : 8200827 : PageSetLSN(page, recptr);
2296 : : }
2297 : :
9193 tgl@sss.pgh.pa.us 2298 [ - + ]: 9188758 : END_CRIT_SECTION();
2299 : :
7289 2300 : 9188758 : UnlockReleaseBuffer(buffer);
5381 rhaas@postgresql.org 2301 [ + + ]: 9188758 : if (vmbuffer != InvalidBuffer)
2302 : 7812 : ReleaseBuffer(vmbuffer);
2303 : :
2304 : : /*
2305 : : * If tuple is cacheable, mark it for invalidation from the caches in case
2306 : : * we abort. Note it is OK to do this after releasing the buffer, because
2307 : : * the heaptup data structure is all in local memory, not in the shared
2308 : : * buffer.
2309 : : */
5325 tgl@sss.pgh.pa.us 2310 : 9188758 : CacheInvalidateHeapTuple(relation, heaptup, NULL);
2311 : :
2312 : : /* Note: speculative insertions are counted too, even if aborted later */
5240 heikki.linnakangas@i 2313 : 9188758 : pgstat_count_heap_insert(relation, 1);
2314 : :
2315 : : /*
2316 : : * If heaptup is a private copy, release it. Don't forget to copy t_self
2317 : : * back to the caller's image, too.
2318 : : */
7420 tgl@sss.pgh.pa.us 2319 [ + + ]: 9188758 : if (heaptup != tup)
2320 : : {
2321 : 18745 : tup->t_self = heaptup->t_self;
2322 : 18745 : heap_freetuple(heaptup);
2323 : : }
10841 scrappy@hub.org 2324 : 9188758 : }
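/*
 * Illustrative sketch, not part of heapam.c: forming and inserting one
 * tuple into a hypothetical single-int4-column relation.  On return,
 * tup->t_self holds the TID the tuple was stored at, even though any
 * toasting of the data is not reflected back into tup.
 */
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"

static ItemPointerData
insert_one_int(Relation rel, int32 val)
{
	Datum		values[1] = {Int32GetDatum(val)};
	bool		isnull[1] = {false};
	HeapTuple	tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);
	ItemPointerData tid;

	heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);
	tid = tup->t_self;			/* assigned by heap_insert() */
	heap_freetuple(tup);
	return tid;
}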
2325 : :
2326 : : /*
2327 : : * Subroutine for heap_insert(). Prepares a tuple for insertion. This sets the
2328 : : * tuple header fields and toasts the tuple if necessary. Returns a toasted
2329 : : * version of the tuple if it was toasted, or the original tuple if not. Note
2330 : : * that in any case, the header fields are also set in the original tuple.
2331 : : */
2332 : : static HeapTuple
5240 heikki.linnakangas@i 2333 : 10803502 : heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
2334 : : CommandId cid, int options)
2335 : : {
2336 : : /*
2337 : : * To allow parallel inserts, we need to ensure that they are safe to be
2338 : : * performed in workers. We have the infrastructure to allow parallel
2339 : : * inserts in general except for the cases where inserts generate a new
2340 : : * CommandId (e.g., inserts into a table having a foreign key column).
2341 : : */
3083 rhaas@postgresql.org 2342 [ - + ]: 10803502 : if (IsParallelWorker())
3972 rhaas@postgresql.org 2343 [ # # ]:UBC 0 : ereport(ERROR,
2344 : : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2345 : : errmsg("cannot insert tuples in a parallel worker")));
2346 : :
5240 heikki.linnakangas@i 2347 :CBC 10803502 : tup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
2348 : 10803502 : tup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
2349 : 10803502 : tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
4466 rhaas@postgresql.org 2350 : 10803502 : HeapTupleHeaderSetXmin(tup->t_data, xid);
4851 simon@2ndQuadrant.co 2351 [ + + ]: 10803502 : if (options & HEAP_INSERT_FROZEN)
4466 rhaas@postgresql.org 2352 : 102088 : HeapTupleHeaderSetXminFrozen(tup->t_data);
2353 : :
5240 heikki.linnakangas@i 2354 : 10803502 : HeapTupleHeaderSetCmin(tup->t_data, cid);
3189 tgl@sss.pgh.pa.us 2355 : 10803502 : HeapTupleHeaderSetXmax(tup->t_data, 0); /* for cleanliness */
5240 heikki.linnakangas@i 2356 : 10803502 : tup->t_tableOid = RelationGetRelid(relation);
2357 : :
2358 : : /*
2359 : : * If the new tuple is too big for storage or contains already toasted
2360 : : * out-of-line attributes from some other relation, invoke the toaster.
2361 : : */
4760 kgrittn@postgresql.o 2362 [ + + ]: 10803502 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
2363 [ + + ]: 31016 : relation->rd_rel->relkind != RELKIND_MATVIEW)
2364 : : {
2365 : : /* toast table entries should never be recursively toasted */
5240 heikki.linnakangas@i 2366 [ - + ]: 30968 : Assert(!HeapTupleHasExternal(tup));
2367 : 30968 : return tup;
2368 : : }
2369 [ + + + + ]: 10772534 : else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2354 rhaas@postgresql.org 2370 : 18797 : return heap_toast_insert_or_update(relation, tup, NULL, options);
2371 : : else
5240 heikki.linnakangas@i 2372 : 10753737 : return tup;
2373 : : }
2374 : :
2375 : : /*
2376 : : * Helper for heap_multi_insert() that computes the number of entire pages
2377 : : * that inserting the remaining heaptuples requires. Used to determine how
2378 : : * much the relation needs to be extended by.
2379 : : */
2380 : : static int
1074 andres@anarazel.de 2381 : 384427 : heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
2382 : : {
2383 : 384427 : size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2384 : 384427 : int npages = 1;
2385 : :
2386 [ + + ]: 2685609 : for (int i = done; i < ntuples; i++)
2387 : : {
2388 : 2301182 : size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
2389 : :
2390 [ + + ]: 2301182 : if (page_avail < tup_sz)
2391 : : {
2392 : 16386 : npages++;
2393 : 16386 : page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
2394 : : }
2395 : 2301182 : page_avail -= tup_sz;
2396 : : }
2397 : :
2398 : 384427 : return npages;
2399 : : }
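/*
 * Worked example for heap_multi_insert_pages(), with illustrative numbers
 * (assuming the default 8192-byte BLCKSZ, a 24-byte page header, and the
 * heap default fillfactor, i.e. saveFreeSpace = 0): for 100 remaining
 * tuples whose MAXALIGN'd length is 104 bytes, each costs
 * 104 + sizeof(ItemIdData) = 108 bytes.  A fresh page offers
 * 8192 - 24 = 8168 usable bytes, enough for 75 such tuples (8100 bytes, 68
 * left over); tuple 76 therefore starts a second page, and the function
 * returns npages = 2.
 */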
2400 : :
2401 : : /*
2402 : : * heap_multi_insert - insert multiple tuples into a heap
2403 : : *
2404 : : * This is like heap_insert(), but inserts multiple tuples in one operation.
2405 : : * That's faster than calling heap_insert() in a loop, because when multiple
2406 : : * tuples can be inserted on a single page, we can write just a single WAL
2407 : : * record covering all of them, and only need to lock/unlock the page once.
2408 : : *
2409 : : * Note: this leaks memory into the current memory context. You can create a
2410 : : * temporary context before calling this, if that's a problem.
2411 : : */
2412 : : void
2537 2413 : 377338 : heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
2414 : : CommandId cid, int options, BulkInsertState bistate)
2415 : : {
5240 heikki.linnakangas@i 2416 : 377338 : TransactionId xid = GetCurrentTransactionId();
2417 : : HeapTuple *heaptuples;
2418 : : int i;
2419 : : int ndone;
2420 : : PGAlignedBlock scratch;
2421 : : Page page;
1883 tomas.vondra@postgre 2422 : 377338 : Buffer vmbuffer = InvalidBuffer;
2423 : : bool needwal;
2424 : : Size saveFreeSpace;
4478 rhaas@postgresql.org 2425 [ + + + + : 377338 : bool need_tuple_data = RelationIsLogicallyLogged(relation);
+ - - + -
- - - + -
+ + ]
2426 [ + + + + : 377338 : bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
+ - - + -
- - - + +
- + - - -
- - - ]
1074 andres@anarazel.de 2427 : 377338 : bool starting_with_empty_page = false;
2428 : 377338 : int npages = 0;
2429 : 377338 : int npages_used = 0;
2430 : :
2431 : : /* currently not needed (thus unsupported) for heap_multi_insert() */
1234 peter@eisentraut.org 2432 [ - + ]: 377338 : Assert(!(options & HEAP_INSERT_NO_LOGICAL));
2433 : :
289 nathan@postgresql.or 2434 : 377338 : AssertHasSnapshotForToast(relation);
2435 : :
2171 noah@leadboat.com 2436 [ + + + + : 377338 : needwal = RelationNeedsWAL(relation);
+ - + + ]
703 akorotkov@postgresql 2437 [ + + ]: 377338 : saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
2438 : : HEAP_DEFAULT_FILLFACTOR);
2439 : :
2440 : : /* Toast and set header data in all the slots */
5240 heikki.linnakangas@i 2441 : 377338 : heaptuples = palloc(ntuples * sizeof(HeapTuple));
2442 [ + + ]: 1992070 : for (i = 0; i < ntuples; i++)
2443 : : {
2444 : : HeapTuple tuple;
2445 : :
2537 andres@anarazel.de 2446 : 1614732 : tuple = ExecFetchSlotHeapTuple(slots[i], true, NULL);
2447 : 1614732 : slots[i]->tts_tableOid = RelationGetRelid(relation);
2448 : 1614732 : tuple->t_tableOid = slots[i]->tts_tableOid;
2449 : 1614732 : heaptuples[i] = heap_prepare_insert(relation, tuple, xid, cid,
2450 : : options);
2451 : : }
2452 : :
2453 : : /*
2454 : : * We're about to do the actual inserts -- but check for conflict first,
2455 : : * to minimize the possibility of having to roll back work we've just
2456 : : * done.
2457 : : *
2458 : : * A check here does not definitively prevent a serialization anomaly;
2459 : : * that check MUST be done at least past the point of acquiring an
2460 : : * exclusive buffer content lock on every buffer that will be affected,
2461 : : * and MAY be done after all inserts are reflected in the buffers and
2462 : : * those locks are released; otherwise there is a race condition. Since
2463 : : * multiple buffers can be locked and unlocked in the loop below, and it
2464 : : * would not be feasible to identify and lock all of those buffers before
2465 : : * the loop, we must do a final check at the end.
2466 : : *
2467 : : * The check here could be omitted with no loss of correctness; it is
2468 : : * present strictly as an optimization.
2469 : : *
2470 : : * For heap inserts, we only need to check for table-level SSI locks. Our
2471 : : * new tuples can't possibly conflict with existing tuple locks, and heap
2472 : : * page locks are only consolidated versions of tuple locks; they do not
2473 : : * lock "gaps" as index page locks do. So we don't need to specify a
2474 : : * buffer when making the call, which makes for a faster check.
2475 : : */
2238 tmunro@postgresql.or 2476 : 377338 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2477 : :
5240 heikki.linnakangas@i 2478 : 377338 : ndone = 0;
2479 [ + + ]: 770271 : while (ndone < ntuples)
2480 : : {
2481 : : Buffer buffer;
2482 : 392933 : bool all_visible_cleared = false;
1883 tomas.vondra@postgre 2483 : 392933 : bool all_frozen_set = false;
2484 : : int nthispage;
2485 : :
4283 rhaas@postgresql.org 2486 [ - + ]: 392933 : CHECK_FOR_INTERRUPTS();
2487 : :
2488 : : /*
2489 : : * Compute number of pages needed to fit the to-be-inserted tuples in
2490 : : * the worst case. This will be used to determine how much to extend
2491 : : * the relation by in RelationGetBufferForTuple(), if needed. If we
2492 : : * filled a prior page from scratch, we can just update our last
2493 : : * computation, but if we started with a partially filled page,
2494 : : * recompute from scratch, the number of potentially required pages
2495 : : * can vary due to tuples needing to fit onto the page, page headers
2496 : : * etc.
2497 : : */
1074 andres@anarazel.de 2498 [ + + + + ]: 392933 : if (ndone == 0 || !starting_with_empty_page)
2499 : : {
2500 : 384427 : npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
2501 : : saveFreeSpace);
2502 : 384427 : npages_used = 0;
2503 : : }
2504 : : else
2505 : 8506 : npages_used++;
2506 : :
2507 : : /*
2508 : : * Find buffer where at least the next tuple will fit. If the page is
2509 : : * all-visible, this will also pin the requisite visibility map page.
2510 : : *
2511 : : * Also pin visibility map page if COPY FREEZE inserts tuples into an
2512 : : * empty page. See all_frozen_set below.
2513 : : */
5240 heikki.linnakangas@i 2514 : 392933 : buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
2515 : : InvalidBuffer, options, bistate,
2516 : : &vmbuffer, NULL,
2517 : : npages - npages_used);
3616 kgrittn@postgresql.o 2518 : 392933 : page = BufferGetPage(buffer);
2519 : :
1883 tomas.vondra@postgre 2520 : 392933 : starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
2521 : :
2522 [ + + + + ]: 392933 : if (starting_with_empty_page && (options & HEAP_INSERT_FROZEN))
2523 : : {
2524 : 1661 : all_frozen_set = true;
2525 : : /* Lock the vmbuffer before entering the critical section */
157 melanieplageman@gmai 2526 :GNC 1661 : LockBuffer(vmbuffer, BUFFER_LOCK_EXCLUSIVE);
2527 : : }
2528 : :
2529 : : /* NO EREPORT(ERROR) from here till changes are logged */
5240 heikki.linnakangas@i 2530 :CBC 392933 : START_CRIT_SECTION();
2531 : :
2532 : : /*
2533 : : * RelationGetBufferForTuple has ensured that the first tuple fits.
2534 : : * Put that on the page, and then as many other tuples as fit.
2535 : : */
3964 andres@anarazel.de 2536 : 392933 : RelationPutHeapTuple(relation, buffer, heaptuples[ndone], false);
2537 : :
2538 : : /*
2539 : : * For logical decoding we need combo CIDs to properly decode the
2540 : : * catalog.
2541 : : */
2210 michael@paquier.xyz 2542 [ + + + + ]: 392933 : if (needwal && need_cids)
2543 : 5145 : log_heap_new_cid(relation, heaptuples[ndone]);
2544 : :
4841 heikki.linnakangas@i 2545 [ + + ]: 1614732 : for (nthispage = 1; ndone + nthispage < ntuples; nthispage++)
2546 : : {
5240 2547 : 1237394 : HeapTuple heaptup = heaptuples[ndone + nthispage];
2548 : :
5051 2549 [ + + ]: 1237394 : if (PageGetHeapFreeSpace(page) < MAXALIGN(heaptup->t_len) + saveFreeSpace)
5240 2550 : 15595 : break;
2551 : :
3964 andres@anarazel.de 2552 : 1221799 : RelationPutHeapTuple(relation, buffer, heaptup, false);
2553 : :
2554 : : /*
2555 : : * For logical decoding we need combo CIDs to properly decode the
2556 : : * catalog.
2557 : : */
4133 heikki.linnakangas@i 2558 [ + + + + ]: 1221799 : if (needwal && need_cids)
2559 : 4793 : log_heap_new_cid(relation, heaptup);
2560 : : }
2561 : :
2562 : : /*
2563 : : * If the page is all visible, need to clear that, unless we're only
2564 : : * going to add further frozen rows to it.
2565 : : *
2566 : : * If we're only adding already frozen rows to a previously empty
2567 : : * page, mark it as all-frozen and update the visibility map. We're
2568 : : * already holding a pin on the vmbuffer.
2569 : : */
1883 tomas.vondra@postgre 2570 [ + + + + ]: 392933 : if (PageIsAllVisible(page) && !(options & HEAP_INSERT_FROZEN))
2571 : : {
5029 rhaas@postgresql.org 2572 : 3761 : all_visible_cleared = true;
2573 : 3761 : PageClearAllVisible(page);
2574 : 3761 : visibilitymap_clear(relation,
2575 : : BufferGetBlockNumber(buffer),
2576 : : vmbuffer, VISIBILITYMAP_VALID_BITS);
2577 : : }
1883 tomas.vondra@postgre 2578 [ + + ]: 389172 : else if (all_frozen_set)
2579 : : {
2580 : 1661 : PageSetAllVisible(page);
13 melanieplageman@gmai 2581 :GNC 1661 : PageClearPrunable(page);
157 2582 : 1661 : visibilitymap_set_vmbits(BufferGetBlockNumber(buffer),
2583 : : vmbuffer,
2584 : : VISIBILITYMAP_ALL_VISIBLE |
2585 : : VISIBILITYMAP_ALL_FROZEN,
2586 : : relation->rd_locator);
2587 : : }
2588 : :
2589 : : /*
2590 : : * XXX Should we set PageSetPrunable on this page ? See heap_insert()
2591 : : */
2592 : :
5240 heikki.linnakangas@i 2593 :CBC 392933 : MarkBufferDirty(buffer);
2594 : :
2595 : : /* XLOG stuff */
2596 [ + + ]: 392933 : if (needwal)
2597 : : {
2598 : : XLogRecPtr recptr;
2599 : : xl_heap_multi_insert *xlrec;
2600 : 388568 : uint8 info = XLOG_HEAP2_MULTI_INSERT;
2601 : : char *tupledata;
2602 : : int totaldatalen;
2752 tgl@sss.pgh.pa.us 2603 : 388568 : char *scratchptr = scratch.data;
2604 : : bool init;
4133 heikki.linnakangas@i 2605 : 388568 : int bufflags = 0;
2606 : :
2607 : : /*
2608 : : * If the page was previously empty, we can reinit the page
2609 : : * instead of restoring the whole thing.
2610 : : */
1883 tomas.vondra@postgre 2611 : 388568 : init = starting_with_empty_page;
2612 : :
2613 : : /* allocate xl_heap_multi_insert struct from the scratch area */
5240 heikki.linnakangas@i 2614 : 388568 : xlrec = (xl_heap_multi_insert *) scratchptr;
2615 : 388568 : scratchptr += SizeOfHeapMultiInsert;
2616 : :
2617 : : /*
2618 : : * Allocate the offsets array, unless we're reinitializing the page;
2619 : : * in that case the tuples are stored in order starting at
2620 : : * FirstOffsetNumber and we don't need to store the offsets
2621 : : * explicitly.
2622 : : */
2623 [ + + ]: 388568 : if (!init)
2624 : 375508 : scratchptr += nthispage * sizeof(OffsetNumber);
2625 : :
2626 : : /* the rest of the scratch space is used for tuple data */
2627 : 388568 : tupledata = scratchptr;
2628 : :
2629 : : /* check that the mutually exclusive flags are not both set */
1768 tgl@sss.pgh.pa.us 2630 [ + + - + ]: 388568 : Assert(!(all_visible_cleared && all_frozen_set));
2631 : :
1883 tomas.vondra@postgre 2632 : 388568 : xlrec->flags = 0;
2633 [ + + ]: 388568 : if (all_visible_cleared)
2634 : 3761 : xlrec->flags = XLH_INSERT_ALL_VISIBLE_CLEARED;
2635 : :
2636 : : /*
2637 : : * We don't have to worry about including a conflict xid in the
2638 : : * WAL record, as HEAP_INSERT_FROZEN intentionally violates
2639 : : * visibility rules.
2640 : : */
2641 [ + + ]: 388568 : if (all_frozen_set)
2642 : 17 : xlrec->flags = XLH_INSERT_ALL_FROZEN_SET;
2643 : :
5240 heikki.linnakangas@i 2644 : 388568 : xlrec->ntuples = nthispage;
2645 : :
2646 : : /*
2647 : : * Write out an xl_multi_insert_tuple and the tuple data itself
2648 : : * for each tuple.
2649 : : */
2650 [ + + ]: 1697850 : for (i = 0; i < nthispage; i++)
2651 : : {
2652 : 1309282 : HeapTuple heaptup = heaptuples[ndone + i];
2653 : : xl_multi_insert_tuple *tuphdr;
2654 : : int datalen;
2655 : :
2656 [ + + ]: 1309282 : if (!init)
2657 : 786107 : xlrec->offsets[i] = ItemPointerGetOffsetNumber(&heaptup->t_self);
2658 : : /* xl_multi_insert_tuple needs two-byte alignment. */
2659 : 1309282 : tuphdr = (xl_multi_insert_tuple *) SHORTALIGN(scratchptr);
2660 : 1309282 : scratchptr = ((char *) tuphdr) + SizeOfMultiInsertTuple;
2661 : :
2662 : 1309282 : tuphdr->t_infomask2 = heaptup->t_data->t_infomask2;
2663 : 1309282 : tuphdr->t_infomask = heaptup->t_data->t_infomask;
2664 : 1309282 : tuphdr->t_hoff = heaptup->t_data->t_hoff;
2665 : :
2666 : : /* write bitmap [+ padding] [+ oid] + data */
4040 tgl@sss.pgh.pa.us 2667 : 1309282 : datalen = heaptup->t_len - SizeofHeapTupleHeader;
5240 heikki.linnakangas@i 2668 : 1309282 : memcpy(scratchptr,
4040 tgl@sss.pgh.pa.us 2669 : 1309282 : (char *) heaptup->t_data + SizeofHeapTupleHeader,
2670 : : datalen);
5240 heikki.linnakangas@i 2671 : 1309282 : tuphdr->datalen = datalen;
2672 : 1309282 : scratchptr += datalen;
2673 : : }
2674 : 388568 : totaldatalen = scratchptr - tupledata;
2752 tgl@sss.pgh.pa.us 2675 [ - + ]: 388568 : Assert((scratchptr - scratch.data) < BLCKSZ);
2676 : :
4478 rhaas@postgresql.org 2677 [ + + ]: 388568 : if (need_tuple_data)
3964 andres@anarazel.de 2678 : 72 : xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2679 : :
2680 : : /*
2681 : : * Signal that this is the last xl_heap_multi_insert record
2682 : : * emitted by this call to heap_multi_insert(). Needed for logical
2683 : : * decoding so it knows when to clean up temporary data.
2684 : : */
4133 heikki.linnakangas@i 2685 [ + + ]: 388568 : if (ndone + nthispage == ntuples)
3964 andres@anarazel.de 2686 : 376824 : xlrec->flags |= XLH_INSERT_LAST_IN_MULTI;
2687 : :
5240 heikki.linnakangas@i 2688 [ + + ]: 388568 : if (init)
2689 : : {
2690 : 13060 : info |= XLOG_HEAP_INIT_PAGE;
4133 2691 : 13060 : bufflags |= REGBUF_WILL_INIT;
2692 : : }
2693 : :
2694 : : /*
2695 : : * If we're doing logical decoding, include the new tuple data
2696 : : * even if we take a full-page image of the page.
2697 : : */
2698 [ + + ]: 388568 : if (need_tuple_data)
2699 : 72 : bufflags |= REGBUF_KEEP_DATA;
2700 : :
2701 : 388568 : XLogBeginInsert();
397 peter@eisentraut.org 2702 : 388568 : XLogRegisterData(xlrec, tupledata - scratch.data);
4133 heikki.linnakangas@i 2703 : 388568 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
157 melanieplageman@gmai 2704 [ + + ]:GNC 388568 : if (all_frozen_set)
2705 : 17 : XLogRegisterBuffer(1, vmbuffer, 0);
2706 : :
4133 heikki.linnakangas@i 2707 :CBC 388568 : XLogRegisterBufData(0, tupledata, totaldatalen);
2708 : :
2709 : : /* filtering by origin at the row level is much more efficient */
3370 andres@anarazel.de 2710 : 388568 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2711 : :
4133 heikki.linnakangas@i 2712 : 388568 : recptr = XLogInsert(RM_HEAP2_ID, info);
2713 : :
5240 2714 : 388568 : PageSetLSN(page, recptr);
157 melanieplageman@gmai 2715 [ + + ]:GNC 388568 : if (all_frozen_set)
2716 : : {
2717 [ - + ]: 17 : Assert(BufferIsDirty(vmbuffer));
2718 : 17 : PageSetLSN(BufferGetPage(vmbuffer), recptr);
2719 : : }
2720 : : }
2721 : :
5240 heikki.linnakangas@i 2722 [ - + ]:CBC 392933 : END_CRIT_SECTION();
2723 : :
1883 tomas.vondra@postgre 2724 [ + + ]: 392933 : if (all_frozen_set)
157 melanieplageman@gmai 2725 :GNC 1661 : LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
2726 : :
1883 tomas.vondra@postgre 2727 :CBC 392933 : UnlockReleaseBuffer(buffer);
5240 heikki.linnakangas@i 2728 : 392933 : ndone += nthispage;
2729 : :
2730 : : /*
2731 : : * NB: Only release vmbuffer after inserting all tuples -- it's fairly
2732 : : * likely that the subsequent heap pages we insert into will need the
2733 : : * same vm page.
2734 : : */
2735 : : }
2736 : :
2737 : : /* We're done with inserting all tuples, so release the last vmbuffer. */
1883 tomas.vondra@postgre 2738 [ + + ]: 377338 : if (vmbuffer != InvalidBuffer)
2739 : 3839 : ReleaseBuffer(vmbuffer);
2740 : :
2741 : : /*
2742 : : * We're done with the actual inserts. Check for conflicts again, to
2743 : : * ensure that all rw-conflicts in to these inserts are detected. Without
2744 : : * this final check, a sequential scan of the heap may have locked the
2745 : : * table after the "before" check, missing one opportunity to detect the
2746 : : * conflict, and then scanned the table before the new tuples were there,
2747 : : * missing the other chance to detect the conflict.
2748 : : *
2749 : : * For heap inserts, we only need to check for table-level SSI locks. Our
2750 : : * new tuples can't possibly conflict with existing tuple locks, and heap
2751 : : * page locks are only consolidated versions of tuple locks; they do not
2752 : : * lock "gaps" as index page locks do. So we don't need to specify a
2753 : : * buffer when making the call.
2754 : : */
2238 tmunro@postgresql.or 2755 : 377338 : CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
2756 : :
2757 : : /*
2758 : : * If tuples are cacheable, mark them for invalidation from the caches in
2759 : : * case we abort. Note it is OK to do this after releasing the buffer,
2760 : : * because the heaptuples data structure is all in local memory, not in
2761 : : * the shared buffer.
2762 : : */
4490 rhaas@postgresql.org 2763 [ + + ]: 377338 : if (IsCatalogRelation(relation))
2764 : : {
5240 heikki.linnakangas@i 2765 [ + + ]: 1287319 : for (i = 0; i < ntuples; i++)
2766 : 911320 : CacheInvalidateHeapTuple(relation, heaptuples[i], NULL);
2767 : : }
2768 : :
2769 : : /* copy t_self fields back to the caller's slots */
5144 2770 [ + + ]: 1992070 : for (i = 0; i < ntuples; i++)
2537 andres@anarazel.de 2771 : 1614732 : slots[i]->tts_tid = heaptuples[i]->t_self;
2772 : :
5240 heikki.linnakangas@i 2773 : 377338 : pgstat_count_heap_insert(relation, ntuples);
2774 : 377338 : }
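: :
: : /*
: :  * For reference, a minimal sketch (reconstructed from the code above,
: :  * not quoted from any header) of the scratch-space layout assembled for
: :  * each page's XLOG_HEAP2_MULTI_INSERT record; the offsets array is
: :  * omitted when the page is being reinitialized (init), because the
: :  * tuples then implicitly occupy FirstOffsetNumber onwards:
: :  *
: :  *     xl_heap_multi_insert              -- flags, ntuples
: :  *     OffsetNumber offsets[nthispage]   -- only when !init
: :  *     per tuple:
: :  *         xl_multi_insert_tuple         -- SHORTALIGN'd; infomasks,
: :  *                                       -- t_hoff, datalen
: :  *         tuple data                    -- t_len - SizeofHeapTupleHeader
: :  *                                       -- bytes, starting at the bitmap
: :  */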
2775 : :
2776 : : /*
2777 : : * simple_heap_insert - insert a tuple
2778 : : *
2779 : : * Currently, this routine differs from heap_insert only in supplying
2780 : : * a default command ID and not allowing access to the speedup options.
2781 : : *
2782 : : * This should be used rather than using heap_insert directly in most places
2783 : : * where we are modifying system catalogs.
2784 : : */
2785 : : void
8699 tgl@sss.pgh.pa.us 2786 : 967389 : simple_heap_insert(Relation relation, HeapTuple tup)
2787 : : {
2672 andres@anarazel.de 2788 : 967389 : heap_insert(relation, tup, GetCurrentCommandId(true), 0, NULL);
8699 tgl@sss.pgh.pa.us 2789 : 967389 : }
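: :
: : /*
: :  * A minimal usage sketch, assuming "rel" is an opened, suitably locked
: :  * relation and "tupdesc"/"values"/"isnull" describe one row (all
: :  * hypothetical names, for illustration only):
: :  *
: :  *     HeapTuple tup = heap_form_tuple(tupdesc, values, isnull);
: :  *
: :  *     simple_heap_insert(rel, tup);    -- does not touch indexes
: :  *     heap_freetuple(tup);
: :  *
: :  * Catalog code normally goes through CatalogTupleInsert(), which also
: :  * maintains the catalog's indexes.
: :  */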
2790 : :
2791 : : /*
2792 : : * Given infomask/infomask2, compute the bits that must be saved in the
2793 : : * "infobits" field of xl_heap_delete, xl_heap_update, xl_heap_lock,
2794 : : * xl_heap_lock_updated WAL records.
2795 : : *
2796 : : * See fix_infomask_from_infobits.
2797 : : */
2798 : : static uint8
4799 alvherre@alvh.no-ip. 2799 : 2026948 : compute_infobits(uint16 infomask, uint16 infomask2)
2800 : : {
2801 : : return
2802 : 2026948 : ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2803 : 2026948 : ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2804 : 2026948 : ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2805 : : /* note we ignore HEAP_XMAX_SHR_LOCK here */
2806 : 4053896 : ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2807 : : ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2808 : 2026948 : XLHL_KEYS_UPDATED : 0);
2809 : : }
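: :
: : /*
: :  * Worked example of the mapping above: a tuple key-share locked by a
: :  * multixact has HEAP_XMAX_IS_MULTI, HEAP_XMAX_LOCK_ONLY and
: :  * HEAP_XMAX_KEYSHR_LOCK set (and no HEAP_KEYS_UPDATED), so
: :  * compute_infobits() returns
: :  * XLHL_XMAX_IS_MULTI | XLHL_XMAX_LOCK_ONLY | XLHL_XMAX_KEYSHR_LOCK;
: :  * redo then re-derives the infomask via fix_infomask_from_infobits.
: :  */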
2810 : :
2811 : : /*
2812 : : * Given two versions of the same t_infomask for a tuple, compare them and
2813 : : * return whether the relevant status for a tuple Xmax has changed. This is
2814 : : * used after a buffer lock has been released and reacquired: we want to ensure
2815 : : * that the tuple state continues to be the same as it was when we previously
2816 : : * examined it.
2817 : : *
2818 : : * Note the Xmax field itself must be compared separately.
2819 : : */
2820 : : static inline bool
4343 2821 : 5390 : xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
2822 : : {
4331 bruce@momjian.us 2823 : 5390 : const uint16 interesting =
2824 : : HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
2825 : :
4343 alvherre@alvh.no-ip. 2826 [ + + ]: 5390 : if ((new_infomask & interesting) != (old_infomask & interesting))
2827 : 15 : return true;
2828 : :
2829 : 5375 : return false;
2830 : : }
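: :
: : /*
: :  * For example, if the tuple we examined was merely key-share locked
: :  * (HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_KEYSHR_LOCK) and, by the time we
: :  * relock the buffer, an updater has replaced those bits, the masked
: :  * comparison above differs and we return true; callers such as
: :  * heap_delete then "goto l1" and re-examine the tuple from scratch.
: :  */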
2831 : :
2832 : : /*
2833 : : * heap_delete - delete a tuple
2834 : : *
2835 : : * See table_tuple_delete() for an explanation of the parameters, except that
2836 : : * this routine directly takes a tuple rather than a slot.
2837 : : *
2838 : : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
2839 : : * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the
2840 : : * last of these only for TM_SelfModified, since we cannot obtain cmax
2841 : : * from a combo CID generated by another transaction).
2842 : : */
2843 : : TM_Result
136 peter@eisentraut.org 2844 :GNC 1494776 : heap_delete(Relation relation, const ItemPointerData *tid,
2845 : : CommandId cid, Snapshot crosscheck, bool wait,
2846 : : TM_FailureData *tmfd, bool changingPart)
2847 : : {
2848 : : TM_Result result;
7850 tgl@sss.pgh.pa.us 2849 :CBC 1494776 : TransactionId xid = GetCurrentTransactionId();
2850 : : ItemId lp;
2851 : : HeapTupleData tp;
2852 : : Page page;
2853 : : BlockNumber block;
2854 : : Buffer buffer;
5381 rhaas@postgresql.org 2855 : 1494776 : Buffer vmbuffer = InvalidBuffer;
2856 : : TransactionId new_xmax;
2857 : : uint16 new_infomask,
2858 : : new_infomask2;
7624 tgl@sss.pgh.pa.us 2859 : 1494776 : bool have_tuple_lock = false;
2860 : : bool iscombo;
6311 heikki.linnakangas@i 2861 : 1494776 : bool all_visible_cleared = false;
4331 bruce@momjian.us 2862 : 1494776 : HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
4478 rhaas@postgresql.org 2863 : 1494776 : bool old_key_copied = false;
2864 : :
10416 bruce@momjian.us 2865 [ - + ]: 1494776 : Assert(ItemPointerIsValid(tid));
2866 : :
289 nathan@postgresql.or 2867 : 1494776 : AssertHasSnapshotForToast(relation);
2868 : :
2869 : : /*
2870 : : * Forbid this during a parallel operation, lest it allocate a combo CID.
2871 : : * Other workers might need that combo CID for visibility checks, and we
2872 : : * have no provision for broadcasting it to them.
2873 : : */
3972 rhaas@postgresql.org 2874 [ - + ]: 1494776 : if (IsInParallelMode())
3972 rhaas@postgresql.org 2875 [ # # ]:UBC 0 : ereport(ERROR,
2876 : : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2877 : : errmsg("cannot delete tuples during a parallel operation")));
2878 : :
5381 rhaas@postgresql.org 2879 :CBC 1494776 : block = ItemPointerGetBlockNumber(tid);
2880 : 1494776 : buffer = ReadBuffer(relation, block);
3616 kgrittn@postgresql.o 2881 : 1494776 : page = BufferGetPage(buffer);
2882 : :
2883 : : /*
2884 : : * Before locking the buffer, pin the visibility map page if it appears to
2885 : : * be necessary. Since we haven't got the lock yet, someone else might be
2886 : : * in the middle of changing this, so we'll need to recheck after we have
2887 : : * the lock.
2888 : : */
5381 rhaas@postgresql.org 2889 [ + + ]: 1494776 : if (PageIsAllVisible(page))
2890 : 284 : visibilitymap_pin(relation, block, &vmbuffer);
2891 : :
9952 vadim4o@yahoo.com 2892 : 1494776 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2893 : :
1270 jdavis@postgresql.or 2894 : 1494776 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2895 [ - + ]: 1494776 : Assert(ItemIdIsNormal(lp));
2896 : :
2897 : 1494776 : tp.t_tableOid = RelationGetRelid(relation);
2898 : 1494776 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2899 : 1494776 : tp.t_len = ItemIdGetLength(lp);
2900 : 1494776 : tp.t_self = *tid;
2901 : :
2902 : 1 : l1:
2903 : :
2904 : : /*
2905 : : * If we didn't pin the visibility map page and the page has become all
2906 : : * visible while we were busy locking the buffer, we'll have to unlock and
2907 : : * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2908 : : * unfortunate, but hopefully shouldn't happen often.
2909 : : */
5381 rhaas@postgresql.org 2910 [ + + - + ]: 1494777 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2911 : : {
5381 rhaas@postgresql.org 2912 :UBC 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2913 : 0 : visibilitymap_pin(relation, block, &vmbuffer);
2914 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2915 : : }
2916 : :
4619 rhaas@postgresql.org 2917 :CBC 1494777 : result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2918 : :
2549 andres@anarazel.de 2919 [ - + ]: 1494777 : if (result == TM_Invisible)
2920 : : {
7289 tgl@sss.pgh.pa.us 2921 :UBC 0 : UnlockReleaseBuffer(buffer);
3878 2922 [ # # ]: 0 : ereport(ERROR,
2923 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2924 : : errmsg("attempted to delete invisible tuple")));
2925 : : }
703 akorotkov@postgresql 2926 [ + + + - ]:CBC 1494777 : else if (result == TM_BeingModified && wait)
2927 : : {
2928 : : TransactionId xwait;
2929 : : uint16 infomask;
2930 : :
2931 : : /* must copy state data before unlocking buffer */
4799 alvherre@alvh.no-ip. 2932 : 40562 : xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
7624 tgl@sss.pgh.pa.us 2933 : 40562 : infomask = tp.t_data->t_infomask;
2934 : :
2935 : : /*
2936 : : * Sleep until concurrent transaction ends -- except when there's a
2937 : : * single locker and it's our own transaction. Note we don't care
2938 : : * which lock mode the locker has, because we need the strongest one.
2939 : : *
2940 : : * Before sleeping, we need to acquire tuple lock to establish our
2941 : : * priority for the tuple (see heap_lock_tuple). LockTuple will
2942 : : * release us when we are next-in-line for the tuple.
2943 : : *
2944 : : * If we are forced to "start over" below, we keep the tuple lock;
2945 : : * this arranges that we stay at the head of the line while rechecking
2946 : : * tuple state.
2947 : : */
7626 2948 [ + + ]: 40562 : if (infomask & HEAP_XMAX_IS_MULTI)
2949 : : {
2462 alvherre@alvh.no-ip. 2950 : 8 : bool current_is_member = false;
2951 : :
3992 2952 [ + - ]: 8 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2953 : : LockTupleExclusive, &current_is_member))
2954 : : {
2955 : 8 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2956 : :
2957 : : /*
2958 : : * Acquire the lock, if necessary (but skip it when we're
2959 : : * requesting a lock and already have one; avoids deadlock).
2960 : : */
2462 2961 [ + + ]: 8 : if (!current_is_member)
2962 : 6 : heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
2963 : : LockWaitBlock, &have_tuple_lock);
2964 : :
2965 : : /* wait for multixact */
3992 2966 : 8 : MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
2967 : : relation, &(tp.t_self), XLTW_Delete,
2968 : : NULL);
2969 : 8 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
2970 : :
2971 : : /*
2972 : : * If xwait had just locked the tuple then some other xact
2973 : : * could update this tuple before we get to this point. Check
2974 : : * for xmax change, and start over if so.
2975 : : *
2976 : : * We also must start over if we didn't pin the VM page, and
2977 : : * the page has become all visible.
2978 : : */
1270 jdavis@postgresql.or 2979 [ + - + - : 16 : if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
+ - ]
2980 [ - + ]: 16 : xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3992 alvherre@alvh.no-ip. 2981 : 8 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
2982 : : xwait))
3992 alvherre@alvh.no-ip. 2983 :UBC 0 : goto l1;
2984 : : }
2985 : :
2986 : : /*
2987 : : * You might think the multixact is necessarily done here, but not
2988 : : * so: it could have surviving members, namely our own xact or
2989 : : * other subxacts of this backend. It is legal for us to delete
2990 : : * the tuple in either case, however (the latter case is
2991 : : * essentially a situation of upgrading our former shared lock to
2992 : : * exclusive). We don't bother changing the on-disk hint bits
2993 : : * since we are about to overwrite the xmax altogether.
2994 : : */
2995 : : }
3992 alvherre@alvh.no-ip. 2996 [ + + ]:CBC 40554 : else if (!TransactionIdIsCurrentTransactionId(xwait))
2997 : : {
2998 : : /*
2999 : : * Wait for regular transaction to end; but first, acquire tuple
3000 : : * lock.
3001 : : */
3002 : 52 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3003 : 52 : heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3004 : : LockWaitBlock, &have_tuple_lock);
4057 heikki.linnakangas@i 3005 : 52 : XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
7626 tgl@sss.pgh.pa.us 3006 : 48 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3007 : :
3008 : : /*
3009 : : * xwait is done, but if xwait had just locked the tuple then some
3010 : : * other xact could update this tuple before we get to this point.
3011 : : * Check for xmax change, and start over if so.
3012 : : *
3013 : : * We also must start over if we didn't pin the VM page, and the
3014 : : * page has become all visible.
3015 : : */
1270 jdavis@postgresql.or 3016 [ + - + - : 96 : if ((vmbuffer == InvalidBuffer && PageIsAllVisible(page)) ||
+ + ]
3017 [ - + ]: 95 : xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
4799 alvherre@alvh.no-ip. 3018 : 47 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3019 : : xwait))
7626 tgl@sss.pgh.pa.us 3020 : 1 : goto l1;
3021 : :
3022 : : /* Otherwise check if it committed or aborted */
6788 3023 : 47 : UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3024 : : }
3025 : :
3026 : : /*
3027 : : * We may overwrite if previous xmax aborted, or if it committed but
3028 : : * only locked the tuple without updating it.
3029 : : */
4799 alvherre@alvh.no-ip. 3030 [ + + + + ]: 81094 : if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3031 [ + + ]: 40568 : HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
3032 : 31 : HeapTupleHeaderIsOnlyLocked(tp.t_data))
2549 andres@anarazel.de 3033 : 40530 : result = TM_Ok;
1847 alvherre@alvh.no-ip. 3034 [ + + ]: 27 : else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2549 andres@anarazel.de 3035 : 23 : result = TM_Updated;
3036 : : else
3037 : 4 : result = TM_Deleted;
3038 : : }
3039 : :
3040 : : /* sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
3041 [ + + ]: 1494772 : if (result != TM_Ok)
3042 : : {
3043 [ + + + + : 61 : Assert(result == TM_SelfModified ||
- + - - ]
3044 : : result == TM_Updated ||
3045 : : result == TM_Deleted ||
3046 : : result == TM_BeingModified);
7512 tgl@sss.pgh.pa.us 3047 [ - + ]: 61 : Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
2549 andres@anarazel.de 3048 [ + + - + ]: 61 : Assert(result != TM_Updated ||
3049 : : !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
3050 : : }
3051 : :
838 heikki.linnakangas@i 3052 [ + + + - ]: 1494772 : if (crosscheck != InvalidSnapshot && result == TM_Ok)
3053 : : {
3054 : : /* Perform additional check for transaction-snapshot mode RI updates */
3055 [ + - ]: 1 : if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3056 : 1 : result = TM_Updated;
3057 : : }
3058 : :
3059 [ + + ]: 1494772 : if (result != TM_Ok)
3060 : : {
2549 andres@anarazel.de 3061 : 62 : tmfd->ctid = tp.t_data->t_ctid;
3062 : 62 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3063 [ + + ]: 62 : if (result == TM_SelfModified)
3064 : 21 : tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3065 : : else
3066 : 41 : tmfd->cmax = InvalidCommandId;
703 akorotkov@postgresql 3067 : 62 : UnlockReleaseBuffer(buffer);
7624 tgl@sss.pgh.pa.us 3068 [ + + ]: 62 : if (have_tuple_lock)
4799 alvherre@alvh.no-ip. 3069 : 27 : UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
5381 rhaas@postgresql.org 3070 [ - + ]: 62 : if (vmbuffer != InvalidBuffer)
5381 rhaas@postgresql.org 3071 :UBC 0 : ReleaseBuffer(vmbuffer);
9952 vadim4o@yahoo.com 3072 :CBC 62 : return result;
3073 : : }
3074 : :
3075 : : /*
3076 : : * We're about to do the actual delete -- check for conflict first, to
3077 : : * avoid possibly having to roll back work we've just done.
3078 : : *
3079 : : * This is safe without a recheck as long as there is no possibility of
3080 : : * another process scanning the page between this check and the delete
3081 : : * being visible to the scan (i.e., an exclusive buffer content lock is
3082 : : * continuously held from this point until the tuple delete is visible).
3083 : : */
2238 tmunro@postgresql.or 3084 : 1494710 : CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
3085 : :
3086 : : /* replace cid with a combo CID if necessary */
6974 tgl@sss.pgh.pa.us 3087 : 1494696 : HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3088 : :
3089 : : /*
3090 : : * Compute replica identity tuple before entering the critical section so
3091 : : * we don't PANIC upon a memory allocation failure.
3092 : : */
4478 rhaas@postgresql.org 3093 : 1494696 : old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3094 : :
3095 : : /*
3096 : : * If this is the first possibly-multixact-able operation in the current
3097 : : * transaction, set my per-backend OldestMemberMXactId setting. We can be
3098 : : * certain that the transaction will never become a member of any older
3099 : : * MultiXactIds than that. (We have to do this even if we end up just
3100 : : * using our own TransactionId below, since some other backend could
3101 : : * incorporate our XID into a MultiXact immediately afterwards.)
3102 : : */
4363 heikki.linnakangas@i 3103 : 1494696 : MultiXactIdSetOldestMember();
3104 : :
3105 : 1494696 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3106 : 1494696 : tp.t_data->t_infomask, tp.t_data->t_infomask2,
3107 : : xid, LockTupleExclusive, true,
3108 : : &new_xmax, &new_infomask, &new_infomask2);
3109 : :
9193 tgl@sss.pgh.pa.us 3110 : 1494696 : START_CRIT_SECTION();
3111 : :
3112 : : /*
3113 : : * If this transaction commits, the tuple will become DEAD sooner or
3114 : : * later. Set flag that this page is a candidate for pruning once our xid
3115 : : * falls below the OldestXmin horizon. If the transaction finally aborts,
3116 : : * the subsequent page pruning will be a no-op and the hint will be
3117 : : * cleared.
3118 : : */
6454 3119 [ - + + + : 1494696 : PageSetPrunable(page, xid);
+ + ]
3120 : :
6311 heikki.linnakangas@i 3121 [ + + ]: 1494696 : if (PageIsAllVisible(page))
3122 : : {
3123 : 284 : all_visible_cleared = true;
3124 : 284 : PageClearAllVisible(page);
5381 rhaas@postgresql.org 3125 : 284 : visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3126 : : vmbuffer, VISIBILITYMAP_VALID_BITS);
3127 : : }
3128 : :
3129 : : /* store transaction information of xact deleting the tuple */
4799 alvherre@alvh.no-ip. 3130 : 1494696 : tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3131 : 1494696 : tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3132 : 1494696 : tp.t_data->t_infomask |= new_infomask;
3133 : 1494696 : tp.t_data->t_infomask2 |= new_infomask2;
6751 tgl@sss.pgh.pa.us 3134 : 1494696 : HeapTupleHeaderClearHotUpdated(tp.t_data);
4799 alvherre@alvh.no-ip. 3135 : 1494696 : HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
6974 tgl@sss.pgh.pa.us 3136 : 1494696 : HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3137 : : /* Make sure there is no forward chain link in t_ctid */
8615 3138 : 1494696 : tp.t_data->t_ctid = tp.t_self;
3139 : :
3140 : : /* Signal that this is actually a move into another partition */
2899 andres@anarazel.de 3141 [ + + ]: 1494696 : if (changingPart)
3142 : 493 : HeapTupleHeaderSetMovedPartitions(tp.t_data);
3143 : :
7289 tgl@sss.pgh.pa.us 3144 : 1494696 : MarkBufferDirty(buffer);
3145 : :
3146 : : /*
3147 : : * XLOG stuff
3148 : : *
3149 : : * NB: heap_abort_speculative() uses the same xlog record and replay
3150 : : * routines.
3151 : : */
5571 rhaas@postgresql.org 3152 [ + + + + : 1494696 : if (RelationNeedsWAL(relation))
+ - + + ]
3153 : : {
3154 : : xl_heap_delete xlrec;
3155 : : xl_heap_header xlhdr;
3156 : : XLogRecPtr recptr;
3157 : :
3158 : : /*
3159 : : * For logical decoding we need combo CIDs to properly decode the
3160 : : * catalog.
3161 : : */
4478 3162 [ + + - + : 1432060 : if (RelationIsAccessibleInLogicalDecoding(relation))
+ - - + -
- - - + +
- + - - -
- - - ]
3163 : 6383 : log_heap_new_cid(relation, &tp);
3164 : :
2899 andres@anarazel.de 3165 : 1432060 : xlrec.flags = 0;
3166 [ + + ]: 1432060 : if (all_visible_cleared)
3167 : 284 : xlrec.flags |= XLH_DELETE_ALL_VISIBLE_CLEARED;
3168 [ + + ]: 1432060 : if (changingPart)
3169 : 493 : xlrec.flags |= XLH_DELETE_IS_PARTITION_MOVE;
4799 alvherre@alvh.no-ip. 3170 : 2864120 : xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3171 : 1432060 : tp.t_data->t_infomask2);
4133 heikki.linnakangas@i 3172 : 1432060 : xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
4799 alvherre@alvh.no-ip. 3173 : 1432060 : xlrec.xmax = new_xmax;
3174 : :
4133 heikki.linnakangas@i 3175 [ + + ]: 1432060 : if (old_key_tuple != NULL)
3176 : : {
3177 [ + + ]: 47019 : if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3964 andres@anarazel.de 3178 : 132 : xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3179 : : else
3180 : 46887 : xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3181 : : }
3182 : :
4133 heikki.linnakangas@i 3183 : 1432060 : XLogBeginInsert();
397 peter@eisentraut.org 3184 : 1432060 : XLogRegisterData(&xlrec, SizeOfHeapDelete);
3185 : :
4133 heikki.linnakangas@i 3186 : 1432060 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3187 : :
3188 : : /*
3189 : : * Log replica identity of the deleted tuple if there is one
3190 : : */
4478 rhaas@postgresql.org 3191 [ + + ]: 1432060 : if (old_key_tuple != NULL)
3192 : : {
3193 : 47019 : xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3194 : 47019 : xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3195 : 47019 : xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3196 : :
397 peter@eisentraut.org 3197 : 47019 : XLogRegisterData(&xlhdr, SizeOfHeapHeader);
4133 heikki.linnakangas@i 3198 : 47019 : XLogRegisterData((char *) old_key_tuple->t_data
3199 : : + SizeofHeapTupleHeader,
3200 : 47019 : old_key_tuple->t_len
3201 : : - SizeofHeapTupleHeader);
3202 : : }
3203 : :
3204 : : /* filtering by origin at the row level is much more efficient */
3370 andres@anarazel.de 3205 : 1432060 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3206 : :
4133 heikki.linnakangas@i 3207 : 1432060 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3208 : :
6454 tgl@sss.pgh.pa.us 3209 : 1432060 : PageSetLSN(page, recptr);
3210 : : }
3211 : :
9193 3212 [ - + ]: 1494696 : END_CRIT_SECTION();
3213 : :
9190 3214 : 1494696 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3215 : :
5381 rhaas@postgresql.org 3216 [ + + ]: 1494696 : if (vmbuffer != InvalidBuffer)
3217 : 284 : ReleaseBuffer(vmbuffer);
3218 : :
3219 : : /*
3220 : : * If the tuple has toasted out-of-line attributes, we need to delete
3221 : : * those items too. We have to do this before releasing the buffer
3222 : : * because we need to look at the contents of the tuple, but it's OK to
3223 : : * release the content lock on the buffer first.
3224 : : */
4760 kgrittn@postgresql.o 3225 [ + + ]: 1494696 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
3226 [ + + ]: 3071 : relation->rd_rel->relkind != RELKIND_MATVIEW)
3227 : : {
3228 : : /* toast table entries should never be recursively toasted */
6921 tgl@sss.pgh.pa.us 3229 [ - + ]: 3061 : Assert(!HeapTupleHasExternal(&tp));
3230 : : }
3231 [ + + ]: 1491635 : else if (HeapTupleHasExternal(&tp))
2354 rhaas@postgresql.org 3232 : 392 : heap_toast_delete(relation, &tp, false);
3233 : :
3234 : : /*
3235 : : * Mark tuple for invalidation from system caches at next command
3236 : : * boundary. We have to do this before releasing the buffer because we
3237 : : * need to look at the contents of the tuple.
3238 : : */
5325 tgl@sss.pgh.pa.us 3239 : 1494696 : CacheInvalidateHeapTuple(relation, &tp, NULL);
3240 : :
3241 : : /* Now we can release the buffer */
703 akorotkov@postgresql 3242 : 1494696 : ReleaseBuffer(buffer);
3243 : :
3244 : : /*
3245 : : * Release the lmgr tuple lock, if we had it.
3246 : : */
7624 tgl@sss.pgh.pa.us 3247 [ + + ]: 1494696 : if (have_tuple_lock)
4799 alvherre@alvh.no-ip. 3248 : 26 : UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3249 : :
6867 tgl@sss.pgh.pa.us 3250 : 1494696 : pgstat_count_heap_delete(relation);
3251 : :
4478 rhaas@postgresql.org 3252 [ + + + + ]: 1494696 : if (old_key_tuple != NULL && old_key_copied)
3253 : 46888 : heap_freetuple(old_key_tuple);
3254 : :
2549 andres@anarazel.de 3255 : 1494696 : return TM_Ok;
3256 : : }
3257 : :
3258 : : /*
3259 : : * simple_heap_delete - delete a tuple
3260 : : *
3261 : : * This routine may be used to delete a tuple when concurrent updates of
3262 : : * the target tuple are not expected (for example, because we have a lock
3263 : : * on the relation associated with the tuple). Any failure is reported
3264 : : * via ereport().
3265 : : */
3266 : : void
136 peter@eisentraut.org 3267 :GNC 665281 : simple_heap_delete(Relation relation, const ItemPointerData *tid)
3268 : : {
3269 : : TM_Result result;
3270 : : TM_FailureData tmfd;
3271 : :
8217 tgl@sss.pgh.pa.us 3272 :CBC 665281 : result = heap_delete(relation, tid,
3273 : : GetCurrentCommandId(true), InvalidSnapshot,
3274 : : true /* wait for commit */ ,
3275 : : &tmfd, false /* changingPart */ );
9182 3276 [ - + - - : 665281 : switch (result)
- ]
3277 : : {
2549 andres@anarazel.de 3278 :UBC 0 : case TM_SelfModified:
3279 : : /* Tuple was already updated in current command? */
8273 tgl@sss.pgh.pa.us 3280 [ # # ]: 0 : elog(ERROR, "tuple already updated by self");
3281 : : break;
3282 : :
2549 andres@anarazel.de 3283 :CBC 665281 : case TM_Ok:
3284 : : /* done successfully */
9182 tgl@sss.pgh.pa.us 3285 : 665281 : break;
3286 : :
2549 andres@anarazel.de 3287 :UBC 0 : case TM_Updated:
8273 tgl@sss.pgh.pa.us 3288 [ # # ]: 0 : elog(ERROR, "tuple concurrently updated");
3289 : : break;
3290 : :
2549 andres@anarazel.de 3291 : 0 : case TM_Deleted:
3292 [ # # ]: 0 : elog(ERROR, "tuple concurrently deleted");
3293 : : break;
3294 : :
9182 tgl@sss.pgh.pa.us 3295 : 0 : default:
8273 3296 [ # # ]: 0 : elog(ERROR, "unrecognized heap_delete status: %u", result);
3297 : : break;
3298 : : }
9182 tgl@sss.pgh.pa.us 3299 :CBC 665281 : }
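: :
: : /*
: :  * A minimal usage sketch (hypothetical "rel", "blkno" and "offnum", for
: :  * illustration only):
: :  *
: :  *     ItemPointerData tid;
: :  *
: :  *     ItemPointerSet(&tid, blkno, offnum);
: :  *     simple_heap_delete(rel, &tid);   -- ereports on any failure
: :  */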
3300 : :
3301 : : /*
3302 : : * heap_update - replace a tuple
3303 : : *
3304 : : * See table_tuple_update() for an explanation of the parameters, except that
3305 : : * this routine directly takes a tuple rather than a slot.
3306 : : *
3307 : : * In the failure cases, the routine fills *tmfd with the tuple's t_ctid,
3308 : : * t_xmax (resolving a possible MultiXact, if necessary), and t_cmax (the
3309 : : * last of these only for TM_SelfModified, since we cannot obtain cmax
3310 : : * from a combo CID generated by another transaction).
3311 : : */
3312 : : TM_Result
136 peter@eisentraut.org 3313 :GNC 307875 : heap_update(Relation relation, const ItemPointerData *otid, HeapTuple newtup,
3314 : : CommandId cid, Snapshot crosscheck, bool wait,
3315 : : TM_FailureData *tmfd, LockTupleMode *lockmode,
3316 : : TU_UpdateIndexes *update_indexes)
3317 : : {
3318 : : TM_Result result;
7850 tgl@sss.pgh.pa.us 3319 :CBC 307875 : TransactionId xid = GetCurrentTransactionId();
3320 : : Bitmapset *hot_attrs;
3321 : : Bitmapset *sum_attrs;
3322 : : Bitmapset *key_attrs;
3323 : : Bitmapset *id_attrs;
3324 : : Bitmapset *interesting_attrs;
3325 : : Bitmapset *modified_attrs;
3326 : : ItemId lp;
3327 : : HeapTupleData oldtup;
3328 : : HeapTuple heaptup;
4478 rhaas@postgresql.org 3329 : 307875 : HeapTuple old_key_tuple = NULL;
3330 : 307875 : bool old_key_copied = false;
3331 : : Page page;
3332 : : BlockNumber block;
3333 : : MultiXactStatus mxact_status;
3334 : : Buffer buffer,
3335 : : newbuf,
5381 3336 : 307875 : vmbuffer = InvalidBuffer,
3337 : 307875 : vmbuffer_new = InvalidBuffer;
3338 : : bool need_toast;
3339 : : Size newtupsize,
3340 : : pagefree;
7624 tgl@sss.pgh.pa.us 3341 : 307875 : bool have_tuple_lock = false;
3342 : : bool iscombo;
6751 3343 : 307875 : bool use_hot_update = false;
1091 tomas.vondra@postgre 3344 : 307875 : bool summarized_update = false;
3345 : : bool key_intact;
6311 heikki.linnakangas@i 3346 : 307875 : bool all_visible_cleared = false;
3347 : 307875 : bool all_visible_cleared_new = false;
3348 : : bool checked_lockers;
3349 : : bool locker_remains;
1490 akapila@postgresql.o 3350 : 307875 : bool id_has_external = false;
3351 : : TransactionId xmax_new_tuple,
3352 : : xmax_old_tuple;
3353 : : uint16 infomask_old_tuple,
3354 : : infomask2_old_tuple,
3355 : : infomask_new_tuple,
3356 : : infomask2_new_tuple;
3357 : :
10416 bruce@momjian.us 3358 [ - + ]: 307875 : Assert(ItemPointerIsValid(otid));
3359 : :
3360 : : /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
1770 tgl@sss.pgh.pa.us 3361 [ - + ]: 307875 : Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
3362 : : RelationGetNumberOfAttributes(relation));
3363 : :
289 nathan@postgresql.or 3364 : 307875 : AssertHasSnapshotForToast(relation);
3365 : :
3366 : : /*
3367 : : * Forbid this during a parallel operation, lest it allocate a combo CID.
3368 : : * Other workers might need that combo CID for visibility checks, and we
3369 : : * have no provision for broadcasting it to them.
3370 : : */
3972 rhaas@postgresql.org 3371 [ - + ]: 307875 : if (IsInParallelMode())
3972 rhaas@postgresql.org 3372 [ # # ]:UBC 0 : ereport(ERROR,
3373 : : (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3374 : : errmsg("cannot update tuples during a parallel operation")));
3375 : :
3376 : : #ifdef USE_ASSERT_CHECKING
537 noah@leadboat.com 3377 :CBC 307875 : check_lock_if_inplace_updateable_rel(relation, otid, newtup);
3378 : : #endif
3379 : :
3380 : : /*
3381 : : * Fetch the list of attributes to be checked for various operations.
3382 : : *
3383 : : * For HOT considerations, this is wasted effort if we fail to update or
3384 : : * have to put the new tuple on a different page. But we must compute the
3385 : : * list before obtaining buffer lock --- in the worst case, if we are
3386 : : * doing an update on one of the relevant system catalogs, we could
3387 : : * deadlock if we try to fetch the list later. In any case, the relcache
3388 : : * caches the data so this is usually pretty cheap.
3389 : : *
3390 : : * We also need columns used by the replica identity and columns that are
3391 : : * considered the "key" of rows in the table.
3392 : : *
3393 : : * Note that we get copies of each bitmap, so we need not worry about
3394 : : * relcache flush happening midway through.
3395 : : */
1091 tomas.vondra@postgre 3396 : 307875 : hot_attrs = RelationGetIndexAttrBitmap(relation,
3397 : : INDEX_ATTR_BITMAP_HOT_BLOCKING);
3398 : 307875 : sum_attrs = RelationGetIndexAttrBitmap(relation,
3399 : : INDEX_ATTR_BITMAP_SUMMARIZED);
4478 rhaas@postgresql.org 3400 : 307875 : key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
3401 : 307875 : id_attrs = RelationGetIndexAttrBitmap(relation,
3402 : : INDEX_ATTR_BITMAP_IDENTITY_KEY);
1570 pg@bowt.ie 3403 : 307875 : interesting_attrs = NULL;
3404 : 307875 : interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
1091 tomas.vondra@postgre 3405 : 307875 : interesting_attrs = bms_add_members(interesting_attrs, sum_attrs);
1570 pg@bowt.ie 3406 : 307875 : interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
3407 : 307875 : interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
3408 : :
5381 rhaas@postgresql.org 3409 : 307875 : block = ItemPointerGetBlockNumber(otid);
309 michael@paquier.xyz 3410 : 307875 : INJECTION_POINT("heap_update-before-pin", NULL);
5381 rhaas@postgresql.org 3411 : 307875 : buffer = ReadBuffer(relation, block);
3616 kgrittn@postgresql.o 3412 : 307875 : page = BufferGetPage(buffer);
3413 : :
3414 : : /*
3415 : : * Before locking the buffer, pin the visibility map page if it appears to
3416 : : * be necessary. Since we haven't got the lock yet, someone else might be
3417 : : * in the middle of changing this, so we'll need to recheck after we have
3418 : : * the lock.
3419 : : */
5381 rhaas@postgresql.org 3420 [ + + ]: 307875 : if (PageIsAllVisible(page))
3421 : 1652 : visibilitymap_pin(relation, block, &vmbuffer);
3422 : :
9952 vadim4o@yahoo.com 3423 : 307875 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3424 : :
6454 tgl@sss.pgh.pa.us 3425 : 307875 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(otid));
3426 : :
3427 : : /*
3428 : : * Usually, a buffer pin and/or snapshot blocks pruning of otid, ensuring
3429 : : * we see LP_NORMAL here. When the otid origin is a syscache, we may have
3430 : : * neither a pin nor a snapshot. Hence, we may see other LP_ states, each
3431 : : * of which indicates concurrent pruning.
3432 : : *
3433 : : * Failing with TM_Updated would be most accurate. However, unlike other
3434 : : * TM_Updated scenarios, we don't know the successor ctid in LP_UNUSED and
3435 : : * LP_DEAD cases. While the distinction between TM_Updated and TM_Deleted
3436 : : * does matter to SQL statements UPDATE and MERGE, those SQL statements
3437 : : * hold a snapshot that ensures LP_NORMAL. Hence, the choice between
3438 : : * TM_Updated and TM_Deleted affects only the wording of error messages.
3439 : : * Settle on TM_Deleted, for two reasons. First, it avoids complicating
3440 : : * the specification of when tmfd->ctid is valid. Second, it creates
3441 : : * error log evidence that we took this branch.
3442 : : *
3443 : : * Since it's possible to see LP_UNUSED at otid, it's also possible to see
3444 : : * LP_NORMAL for a tuple that replaced LP_UNUSED. If it's a tuple for an
3445 : : * unrelated row, we'll fail with "duplicate key value violates unique".
3446 : : * XXX if otid is the live, newer version of the newtup row, we'll discard
3447 : : * changes originating in versions of this catalog row after the version
3448 : : * the caller got from syscache. See syscache-update-pruned.spec.
3449 : : */
414 noah@leadboat.com 3450 [ + + ]: 307875 : if (!ItemIdIsNormal(lp))
3451 : : {
3452 [ - + ]: 1 : Assert(RelationSupportsSysCache(RelationGetRelid(relation)));
3453 : :
3454 : 1 : UnlockReleaseBuffer(buffer);
3455 [ - + ]: 1 : Assert(!have_tuple_lock);
3456 [ + - ]: 1 : if (vmbuffer != InvalidBuffer)
3457 : 1 : ReleaseBuffer(vmbuffer);
3458 : 1 : tmfd->ctid = *otid;
3459 : 1 : tmfd->xmax = InvalidTransactionId;
3460 : 1 : tmfd->cmax = InvalidCommandId;
3461 : 1 : *update_indexes = TU_None;
3462 : :
3463 : 1 : bms_free(hot_attrs);
3464 : 1 : bms_free(sum_attrs);
3465 : 1 : bms_free(key_attrs);
3466 : 1 : bms_free(id_attrs);
3467 : : /* modified_attrs not yet initialized */
3468 : 1 : bms_free(interesting_attrs);
3469 : 1 : return TM_Deleted;
3470 : : }
3471 : :
3472 : : /*
3473 : : * Fill in enough data in oldtup for HeapDetermineColumnsInfo to work
3474 : : * properly.
3475 : : */
4790 alvherre@alvh.no-ip. 3476 : 307874 : oldtup.t_tableOid = RelationGetRelid(relation);
6454 tgl@sss.pgh.pa.us 3477 : 307874 : oldtup.t_data = (HeapTupleHeader) PageGetItem(page, lp);
9970 vadim4o@yahoo.com 3478 : 307874 : oldtup.t_len = ItemIdGetLength(lp);
3479 : 307874 : oldtup.t_self = *otid;
3480 : :
3481 : : /* the new tuple is ready, except for this: */
4790 alvherre@alvh.no-ip. 3482 : 307874 : newtup->t_tableOid = RelationGetRelid(relation);
3483 : :
3484 : : /*
3485 : : * Determine columns modified by the update. Additionally, identify
3486 : : * whether any of the unmodified replica identity key attributes in the
3487 : : * old tuple is externally stored or not. This is required because for
3488 : : * such attributes the flattened value won't be WAL logged as part of the
3489 : : * new tuple so we must include it as part of the old_key_tuple. See
3490 : : * ExtractReplicaIdentity.
3491 : : */
1490 akapila@postgresql.o 3492 : 307874 : modified_attrs = HeapDetermineColumnsInfo(relation, interesting_attrs,
3493 : : id_attrs, &oldtup,
3494 : : newtup, &id_has_external);
3495 : :
3496 : : /*
3497 : : * If we're not updating any "key" column, we can grab a weaker lock type.
3498 : : * This allows for more concurrency when we are running simultaneously
3499 : : * with foreign key checks.
3500 : : *
3501 : : * Note that if a column gets detoasted while executing the update, but
3502 : : * the value ends up being the same, this test will fail and we will use
3503 : : * the stronger lock. This is acceptable; the important case to optimize
3504 : : * is updates that don't manipulate key columns, not those that
3505 : : * serendipitously arrive at the same key values.
3506 : : */
3273 alvherre@alvh.no-ip. 3507 [ + + ]: 307874 : if (!bms_overlap(modified_attrs, key_attrs))
3508 : : {
2894 simon@2ndQuadrant.co 3509 : 303518 : *lockmode = LockTupleNoKeyExclusive;
4799 alvherre@alvh.no-ip. 3510 : 303518 : mxact_status = MultiXactStatusNoKeyUpdate;
3511 : 303518 : key_intact = true;
3512 : :
3513 : : /*
3514 : : * If this is the first possibly-multixact-able operation in the
3515 : : * current transaction, set my per-backend OldestMemberMXactId
3516 : : * setting. We can be certain that the transaction will never become a
3517 : : * member of any older MultiXactIds than that. (We have to do this
3518 : : * even if we end up just using our own TransactionId below, since
3519 : : * some other backend could incorporate our XID into a MultiXact
3520 : : * immediately afterwards.)
3521 : : */
3522 : 303518 : MultiXactIdSetOldestMember();
3523 : : }
3524 : : else
3525 : : {
2894 simon@2ndQuadrant.co 3526 : 4356 : *lockmode = LockTupleExclusive;
4799 alvherre@alvh.no-ip. 3527 : 4356 : mxact_status = MultiXactStatusUpdate;
3528 : 4356 : key_intact = false;
3529 : : }
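: :
: : /*
: :  * Example of the effect: an UPDATE touching only non-key columns takes
: :  * LockTupleNoKeyExclusive, which does not conflict with a concurrent
: :  * foreign-key check holding FOR KEY SHARE on the same row; an UPDATE
: :  * that modifies a key column takes LockTupleExclusive and must wait
: :  * for such lockers.
: :  */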
3530 : :
3531 : : /*
3532 : : * Note: beyond this point, use oldtup not otid to refer to old tuple.
3533 : : * otid may very well point at newtup->t_self, which we will overwrite
3534 : : * with the new tuple's location, so there's great risk of confusion if
3535 : : * we keep using otid.
3536 : : */
3537 : :
9952 vadim4o@yahoo.com 3538 : 1 : l2:
4799 alvherre@alvh.no-ip. 3539 : 307875 : checked_lockers = false;
3540 : 307875 : locker_remains = false;
4619 rhaas@postgresql.org 3541 : 307875 : result = HeapTupleSatisfiesUpdate(&oldtup, cid, buffer);
3542 : :
3543 : : /* see below about the "no wait" case */
703 akorotkov@postgresql 3544 [ + + - + ]: 307875 : Assert(result != TM_BeingModified || wait);
3545 : :
2549 andres@anarazel.de 3546 [ - + ]: 307875 : if (result == TM_Invisible)
3547 : : {
7289 tgl@sss.pgh.pa.us 3548 :UBC 0 : UnlockReleaseBuffer(buffer);
3878 3549 [ # # ]: 0 : ereport(ERROR,
3550 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3551 : : errmsg("attempted to update invisible tuple")));
3552 : : }
703 akorotkov@postgresql 3553 [ + + + - ]:CBC 307875 : else if (result == TM_BeingModified && wait)
3554 : : {
3555 : : TransactionId xwait;
3556 : : uint16 infomask;
4799 alvherre@alvh.no-ip. 3557 : 36119 : bool can_continue = false;
3558 : :
3559 : : /*
3560 : : * XXX note that we don't consider the "no wait" case here. This
3561 : : * isn't a problem currently because no caller uses that case, but it
3562 : : * should be fixed if such a caller is introduced. It wasn't a
3563 : : * problem previously because this code would always wait, but now
3564 : : * that some tuple locks do not conflict with one of the lock modes we
3565 : : * use, it is possible that this case is interesting to handle
3566 : : * specially.
3567 : : *
3568 : : * This may cause failures with third-party code that calls
3569 : : * heap_update directly.
3570 : : */
3571 : :
3572 : : /* must copy state data before unlocking buffer */
3573 : 36119 : xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
7624 tgl@sss.pgh.pa.us 3574 : 36119 : infomask = oldtup.t_data->t_infomask;
3575 : :
3576 : : /*
3577 : : * Now we have to do something about the existing locker. If it's a
3578 : : * multi, sleep on it; we might be awakened before it is completely
3579 : : * gone (or even not sleep at all in some cases); we need to preserve
3580 : : * it as locker, unless it is gone completely.
3581 : : *
3582 : : * If it's not a multi, we need to check for sleeping conditions
3583 : : * before actually going to sleep. If the update doesn't conflict
3584 : : * with the locks, we just continue without sleeping (but making sure
3585 : : * it is preserved).
3586 : : *
3587 : : * Before sleeping, we need to acquire tuple lock to establish our
3588 : : * priority for the tuple (see heap_lock_tuple). LockTuple will
3589 : : * release us when we are next-in-line for the tuple. Note we must
3590 : : * not acquire the tuple lock until we're sure we're going to sleep;
3591 : : * otherwise we're open for race conditions with other transactions
3592 : : * holding the tuple lock which sleep on us.
3593 : : *
3594 : : * If we are forced to "start over" below, we keep the tuple lock;
3595 : : * this arranges that we stay at the head of the line while rechecking
3596 : : * tuple state.
3597 : : */
7626 3598 [ + + ]: 36119 : if (infomask & HEAP_XMAX_IS_MULTI)
3599 : : {
3600 : : TransactionId update_xact;
3601 : : int remain;
2462 alvherre@alvh.no-ip. 3602 : 179 : bool current_is_member = false;
3603 : :
3992 3604 [ + + ]: 179 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3605 : : *lockmode, &current_is_member))
3606 : : {
3607 : 8 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3608 : :
3609 : : /*
3610 : : * Acquire the lock, if necessary (but skip it when we're
3611 : : * requesting a lock and already have one; avoids deadlock).
3612 : : */
2462 3613 [ - + ]: 8 : if (!current_is_member)
2462 alvherre@alvh.no-ip. 3614 :UBC 0 : heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3615 : : LockWaitBlock, &have_tuple_lock);
3616 : :
3617 : : /* wait for multixact */
3992 alvherre@alvh.no-ip. 3618 :CBC 8 : MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
3619 : : relation, &oldtup.t_self, XLTW_Update,
3620 : : &remain);
3621 : 8 : checked_lockers = true;
3622 : 8 : locker_remains = remain != 0;
3623 : 8 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3624 : :
3625 : : /*
3626 : : * If xwait had just locked the tuple then some other xact
3627 : : * could update this tuple before we get to this point. Check
3628 : : * for xmax change, and start over if so.
3629 : : */
3630 [ + - ]: 8 : if (xmax_infomask_changed(oldtup.t_data->t_infomask,
3631 [ - + ]: 8 : infomask) ||
3189 tgl@sss.pgh.pa.us 3632 : 8 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3633 : : xwait))
3992 alvherre@alvh.no-ip. 3634 :UBC 0 : goto l2;
3635 : : }
3636 : :
3637 : : /*
3638 : : * Note that the multixact may not be done by now. It could have
3639 : : * surviving members; our own xact or other subxacts of this
3640 : : * backend, and also any other concurrent transaction that locked
3641 : : * the tuple with LockTupleKeyShare if we only got
3642 : : * LockTupleNoKeyExclusive. If this is the case, we have to be
3643 : : * careful to mark the updated tuple with the surviving members in
3644 : : * Xmax.
3645 : : *
3646 : : * Note that there could have been another update in the
3647 : : * MultiXact. In that case, we need to check whether it committed
3648 : : * or aborted. If it aborted we are safe to update it again;
3649 : : * otherwise there is an update conflict, and we have to return
3650 : : * TableTuple{Deleted, Updated} below.
3651 : : *
3652 : : * In the LockTupleExclusive case, we still need to preserve the
3653 : : * surviving members: those would include the tuple locks we had
3654 : : * before this one, which are important to keep in case this
3655 : : * subxact aborts.
3656 : : */
4799 alvherre@alvh.no-ip. 3657 [ + + ]:CBC 179 : if (!HEAP_XMAX_IS_LOCKED_ONLY(oldtup.t_data->t_infomask))
3658 : 8 : update_xact = HeapTupleGetUpdateXid(oldtup.t_data);
3659 : : else
3992 3660 : 171 : update_xact = InvalidTransactionId;
3661 : :
3662 : : /*
3663 : : * There was no UPDATE in the MultiXact; or it aborted. No
3664 : : * TransactionIdIsInProgress() call needed here, since we called
3665 : : * MultiXactIdWait() above.
3666 : : */
4799 3667 [ + + + + ]: 187 : if (!TransactionIdIsValid(update_xact) ||
3668 : 8 : TransactionIdDidAbort(update_xact))
3669 : 172 : can_continue = true;
3670 : : }
3992 3671 [ + + ]: 35940 : else if (TransactionIdIsCurrentTransactionId(xwait))
3672 : : {
3673 : : /*
3674 : : * The only locker is ourselves; we can avoid grabbing the tuple
3675 : : * lock here, but must preserve our locking information.
3676 : : */
3677 : 35827 : checked_lockers = true;
3678 : 35827 : locker_remains = true;
3679 : 35827 : can_continue = true;
3680 : : }
3681 [ + + + + ]: 113 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
3682 : : {
3683 : : /*
3684 : : * If it's just a key-share locker, and we're not changing the key
3685 : : * columns, we don't need to wait for it to end; but we need to
3686 : : * preserve it as locker.
3687 : : */
3688 : 29 : checked_lockers = true;
3689 : 29 : locker_remains = true;
3690 : 29 : can_continue = true;
3691 : : }
3692 : : else
3693 : : {
3694 : : /*
3695 : : * Wait for regular transaction to end; but first, acquire tuple
3696 : : * lock.
3697 : : */
3698 : 84 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2894 simon@2ndQuadrant.co 3699 : 84 : heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode,
3700 : : LockWaitBlock, &have_tuple_lock);
3992 alvherre@alvh.no-ip. 3701 : 84 : XactLockTableWait(xwait, relation, &oldtup.t_self,
3702 : : XLTW_Update);
3703 : 84 : checked_lockers = true;
3704 : 84 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3705 : :
3706 : : /*
3707 : : * xwait is done, but if xwait had just locked the tuple then some
3708 : : * other xact could update this tuple before we get to this point.
3709 : : * Check for xmax change, and start over if so.
3710 : : */
3711 [ + + - + ]: 167 : if (xmax_infomask_changed(oldtup.t_data->t_infomask, infomask) ||
3712 : 83 : !TransactionIdEquals(xwait,
3713 : : HeapTupleHeaderGetRawXmax(oldtup.t_data)))
3714 : 1 : goto l2;
3715 : :
3716 : : /* Otherwise check if it committed or aborted */
3717 : 83 : UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
3718 [ + + ]: 83 : if (oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)
4799 3719 : 22 : can_continue = true;
3720 : : }
3721 : :
2549 andres@anarazel.de 3722 [ + + ]: 36118 : if (can_continue)
3723 : 36050 : result = TM_Ok;
1847 alvherre@alvh.no-ip. 3724 [ + + ]: 68 : else if (!ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid))
2549 andres@anarazel.de 3725 : 63 : result = TM_Updated;
3726 : : else
3727 : 5 : result = TM_Deleted;
3728 : : }
3729 : :
3730 : : /* Sanity check the result of HeapTupleSatisfiesUpdate() and the logic above */
3731 [ + + ]: 307874 : if (result != TM_Ok)
3732 : : {
3733 [ + + + + : 166 : Assert(result == TM_SelfModified ||
- + - - ]
3734 : : result == TM_Updated ||
3735 : : result == TM_Deleted ||
3736 : : result == TM_BeingModified);
7512 tgl@sss.pgh.pa.us 3737 [ - + ]: 166 : Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
2549 andres@anarazel.de 3738 [ + + - + ]: 166 : Assert(result != TM_Updated ||
3739 : : !ItemPointerEquals(&oldtup.t_self, &oldtup.t_data->t_ctid));
3740 : : }
3741 : :
838 heikki.linnakangas@i 3742 [ + + + - ]: 307874 : if (crosscheck != InvalidSnapshot && result == TM_Ok)
3743 : : {
3744 : : /* Perform additional check for transaction-snapshot mode RI updates */
3745 [ + - ]: 1 : if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
3746 : 1 : result = TM_Updated;
3747 : : }
3748 : :
3749 [ + + ]: 307874 : if (result != TM_Ok)
3750 : : {
2549 andres@anarazel.de 3751 : 167 : tmfd->ctid = oldtup.t_data->t_ctid;
3752 : 167 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(oldtup.t_data);
3753 [ + + ]: 167 : if (result == TM_SelfModified)
3754 : 52 : tmfd->cmax = HeapTupleHeaderGetCmax(oldtup.t_data);
3755 : : else
3756 : 115 : tmfd->cmax = InvalidCommandId;
703 akorotkov@postgresql 3757 : 167 : UnlockReleaseBuffer(buffer);
7624 tgl@sss.pgh.pa.us 3758 [ + + ]: 167 : if (have_tuple_lock)
2894 simon@2ndQuadrant.co 3759 : 61 : UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
5381 rhaas@postgresql.org 3760 [ - + ]: 167 : if (vmbuffer != InvalidBuffer)
5381 rhaas@postgresql.org 3761 :UBC 0 : ReleaseBuffer(vmbuffer);
1091 tomas.vondra@postgre 3762 :CBC 167 : *update_indexes = TU_None;
3763 : :
6751 tgl@sss.pgh.pa.us 3764 : 167 : bms_free(hot_attrs);
1091 tomas.vondra@postgre 3765 : 167 : bms_free(sum_attrs);
4799 alvherre@alvh.no-ip. 3766 : 167 : bms_free(key_attrs);
3490 tgl@sss.pgh.pa.us 3767 : 167 : bms_free(id_attrs);
3273 alvherre@alvh.no-ip. 3768 : 167 : bms_free(modified_attrs);
3769 : 167 : bms_free(interesting_attrs);
9952 vadim4o@yahoo.com 3770 : 167 : return result;
3771 : : }
3772 : :
3773 : : /*
3774 : : * If we didn't pin the visibility map page and the page has become all
3775 : : * visible while we were busy locking the buffer, or during some
3776 : : * subsequent window during which we had it unlocked, we'll have to unlock
3777 : : * and re-lock, to avoid holding the buffer lock across an I/O. That's a
3778 : : * bit unfortunate, especially since we'll now have to recheck whether the
3779 : : * tuple has been locked or updated under us, but hopefully it won't
3780 : : * happen very often.
3781 : : */
5375 rhaas@postgresql.org 3782 [ + + - + ]: 307707 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3783 : : {
5375 rhaas@postgresql.org 3784 :UBC 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3785 : 0 : visibilitymap_pin(relation, block, &vmbuffer);
3786 : 0 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5283 3787 : 0 : goto l2;
3788 : : }
3789 : :
3790 : : /* Fill in transaction status data */
3791 : :
3792 : : /*
3793 : : * If the tuple we're updating is locked, we need to preserve the locking
3794 : : * info in the old tuple's Xmax. Prepare a new Xmax value for this.
3795 : : */
4799 alvherre@alvh.no-ip. 3796 :CBC 307707 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3797 : 307707 : oldtup.t_data->t_infomask,
3798 : 307707 : oldtup.t_data->t_infomask2,
3799 : : xid, *lockmode, true,
3800 : : &xmax_old_tuple, &infomask_old_tuple,
3801 : : &infomask2_old_tuple);
3802 : :
3803 : : /*
3804 : : * And also prepare an Xmax value for the new copy of the tuple. If there
3805 : : * was no xmax previously, or there was one but all lockers are now gone,
3806 : : * then use InvalidTransactionId; otherwise, get the xmax from the old
3807 : : * tuple. (In rare cases that might also be InvalidTransactionId and yet
3808 : : * not have the HEAP_XMAX_INVALID bit set; that's fine.)
3809 : : */
3810 [ + + + - ]: 343735 : if ((oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3551 3811 [ + + ]: 72056 : HEAP_LOCKED_UPGRADED(oldtup.t_data->t_infomask) ||
4799 3812 [ - + ]: 35857 : (checked_lockers && !locker_remains))
3813 : 271679 : xmax_new_tuple = InvalidTransactionId;
3814 : : else
3815 : 36028 : xmax_new_tuple = HeapTupleHeaderGetRawXmax(oldtup.t_data);
3816 : :
3817 [ + + ]: 307707 : if (!TransactionIdIsValid(xmax_new_tuple))
3818 : : {
3819 : 271679 : infomask_new_tuple = HEAP_XMAX_INVALID;
3820 : 271679 : infomask2_new_tuple = 0;
3821 : : }
3822 : : else
3823 : : {
3824 : : /*
3825 : : * If we found a valid Xmax for the new tuple, then the infomask bits
3826 : : * to use on the new tuple depend on what was there on the old one.
3827 : : * Note that since we're doing an update, the only possibility is that
3828 : : * the lockers had FOR KEY SHARE lock.
3829 : : */
3830 [ + + ]: 36028 : if (oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI)
3831 : : {
3832 : 172 : GetMultiXactIdHintBits(xmax_new_tuple, &infomask_new_tuple,
3833 : : &infomask2_new_tuple);
3834 : : }
3835 : : else
3836 : : {
3837 : 35856 : infomask_new_tuple = HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_LOCK_ONLY;
3838 : 35856 : infomask2_new_tuple = 0;
3839 : : }
3840 : : }
3841 : :
3842 : : /*
3843 : : * Prepare the new tuple with the appropriate initial values of Xmin and
3844 : : * Xmax, as well as initial infomask bits as computed above.
3845 : : */
9970 vadim4o@yahoo.com 3846 : 307707 : newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
6751 tgl@sss.pgh.pa.us 3847 : 307707 : newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
7850 3848 : 307707 : HeapTupleHeaderSetXmin(newtup->t_data, xid);
8674 bruce@momjian.us 3849 : 307707 : HeapTupleHeaderSetCmin(newtup->t_data, cid);
4799 alvherre@alvh.no-ip. 3850 : 307707 : newtup->t_data->t_infomask |= HEAP_UPDATED | infomask_new_tuple;
3851 : 307707 : newtup->t_data->t_infomask2 |= infomask2_new_tuple;
3852 : 307707 : HeapTupleHeaderSetXmax(newtup->t_data, xmax_new_tuple);
3853 : :
3854 : : /*
3855 : : * Replace cid with a combo CID if necessary. Note that we already put
3856 : : * the plain cid into the new tuple.
3857 : : */
6974 tgl@sss.pgh.pa.us 3858 : 307707 : HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);
3859 : :
3860 : : /*
3861 : : * If the toaster needs to be activated, OR if the new tuple will not fit
3862 : : * on the same page as the old, then we need to release the content lock
3863 : : * (but not the pin!) on the old tuple's buffer while we are off doing
3864 : : * TOAST and/or table-file-extension work. We must mark the old tuple to
3865 : : * show that it's locked, else other processes may try to update it
3866 : : * themselves.
3867 : : *
3868 : : * We need to invoke the toaster if there are already any out-of-line
3869 : : * toasted values present, or if the new tuple is over-threshold.
3870 : : */
4760 kgrittn@postgresql.o 3871 [ - + ]: 307707 : if (relation->rd_rel->relkind != RELKIND_RELATION &&
4760 kgrittn@postgresql.o 3872 [ # # ]:UBC 0 : relation->rd_rel->relkind != RELKIND_MATVIEW)
3873 : : {
3874 : : /* toast table entries should never be recursively toasted */
6921 tgl@sss.pgh.pa.us 3875 [ # # ]: 0 : Assert(!HeapTupleHasExternal(&oldtup));
3876 [ # # ]: 0 : Assert(!HeapTupleHasExternal(newtup));
3877 : 0 : need_toast = false;
3878 : : }
3879 : : else
6921 tgl@sss.pgh.pa.us 3880 [ + + ]:CBC 922663 : need_toast = (HeapTupleHasExternal(&oldtup) ||
3881 [ + + ]: 614956 : HeapTupleHasExternal(newtup) ||
3882 [ + + ]: 307225 : newtup->t_len > TOAST_TUPLE_THRESHOLD);
3883 : :
6454 3884 : 307707 : pagefree = PageGetHeapFreeSpace(page);
3885 : :
6979 3886 : 307707 : newtupsize = MAXALIGN(newtup->t_len);
3887 : :
9069 3888 [ + + + + ]: 307707 : if (need_toast || newtupsize > pagefree)
9320 vadim4o@yahoo.com 3889 : 149966 : {
3890 : : TransactionId xmax_lock_old_tuple;
3891 : : uint16 infomask_lock_old_tuple,
3892 : : infomask2_lock_old_tuple;
3527 andres@anarazel.de 3893 : 149966 : bool cleared_all_frozen = false;
3894 : :
3895 : : /*
3896 : : * To prevent concurrent sessions from updating the tuple, we have to
3897 : : * temporarily mark it locked, while we release the page-level lock.
3898 : : *
3899 : : * To satisfy the rule that any xid potentially appearing in a buffer
3900 : : * written out to disk must first be recorded in WAL, we have to
3901 : : * WAL-log this temporary modification. We can reuse xl_heap_lock for this
3902 : : * purpose. If we crash/error before following through with the
3903 : : * actual update, xmax will be of an aborted transaction, allowing
3904 : : * other sessions to proceed.
3905 : : */
3906 : :
3907 : : /*
3908 : : * Compute xmax / infomask appropriate for locking the tuple. This has
3909 : : * to be done separately from the xmax/infomask pair that's going to
3910 : : * be used for the update itself, because the multixact we might
3911 : : * create for it would otherwise be wrong.
3912 : : */
3530 3913 : 149966 : compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(oldtup.t_data),
3914 : 149966 : oldtup.t_data->t_infomask,
3915 : 149966 : oldtup.t_data->t_infomask2,
3916 : : xid, *lockmode, false,
3917 : : &xmax_lock_old_tuple, &infomask_lock_old_tuple,
3918 : : &infomask2_lock_old_tuple);
3919 : :
3920 [ - + ]: 149966 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(infomask_lock_old_tuple));
3921 : :
3922 : 149966 : START_CRIT_SECTION();
3923 : :
3924 : : /* Clear obsolete visibility flags ... */
4799 alvherre@alvh.no-ip. 3925 : 149966 : oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3926 : 149966 : oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6751 tgl@sss.pgh.pa.us 3927 : 149966 : HeapTupleClearHotUpdated(&oldtup);
3928 : : /* ... and store info about transaction updating this tuple */
3530 andres@anarazel.de 3929 [ - + ]: 149966 : Assert(TransactionIdIsValid(xmax_lock_old_tuple));
3930 : 149966 : HeapTupleHeaderSetXmax(oldtup.t_data, xmax_lock_old_tuple);
3931 : 149966 : oldtup.t_data->t_infomask |= infomask_lock_old_tuple;
3932 : 149966 : oldtup.t_data->t_infomask2 |= infomask2_lock_old_tuple;
6974 tgl@sss.pgh.pa.us 3933 : 149966 : HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
3934 : :
3935 : : /* temporarily make it look not-updated, but locked */
7420 3936 : 149966 : oldtup.t_data->t_ctid = oldtup.t_self;
3937 : :
3938 : : /*
3939 : : * Clear all-frozen bit on visibility map if needed. We could
3940 : : * immediately reset ALL_VISIBLE, but given that the WAL logging
3941 : : * overhead would be unchanged, that doesn't seem
3942 : : * worthwhile.
3943 : : */
1797 3944 [ + + + + ]: 150911 : if (PageIsAllVisible(page) &&
3527 andres@anarazel.de 3945 : 945 : visibilitymap_clear(relation, block, vmbuffer,
3946 : : VISIBILITYMAP_ALL_FROZEN))
3947 : 720 : cleared_all_frozen = true;
3948 : :
3530 3949 : 149966 : MarkBufferDirty(buffer);
3950 : :
3951 [ + + + + : 149966 : if (RelationNeedsWAL(relation))
+ - + + ]
3952 : : {
3953 : : xl_heap_lock xlrec;
3954 : : XLogRecPtr recptr;
3955 : :
3956 : 139835 : XLogBeginInsert();
3957 : 139835 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3958 : :
3959 : 139835 : xlrec.offnum = ItemPointerGetOffsetNumber(&oldtup.t_self);
1069 pg@bowt.ie 3960 : 139835 : xlrec.xmax = xmax_lock_old_tuple;
3530 andres@anarazel.de 3961 : 279670 : xlrec.infobits_set = compute_infobits(oldtup.t_data->t_infomask,
3962 : 139835 : oldtup.t_data->t_infomask2);
3527 3963 : 139835 : xlrec.flags =
3964 : 139835 : cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
397 peter@eisentraut.org 3965 : 139835 : XLogRegisterData(&xlrec, SizeOfHeapLock);
3530 andres@anarazel.de 3966 : 139835 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
3967 : 139835 : PageSetLSN(page, recptr);
3968 : : }
3969 : :
3970 [ - + ]: 149966 : END_CRIT_SECTION();
3971 : :
9320 vadim4o@yahoo.com 3972 : 149966 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3973 : :
3974 : : /*
3975 : : * Let the toaster do its thing, if needed.
3976 : : *
3977 : : * Note: below this point, heaptup is the data we actually intend to
3978 : : * store into the relation; newtup is the caller's original untoasted
3979 : : * data.
3980 : : */
9190 tgl@sss.pgh.pa.us 3981 [ + + ]: 149966 : if (need_toast)
3982 : : {
3983 : : /* Note we always use WAL and FSM during updates */
2354 rhaas@postgresql.org 3984 : 1791 : heaptup = heap_toast_insert_or_update(relation, newtup, &oldtup, 0);
7420 tgl@sss.pgh.pa.us 3985 : 1791 : newtupsize = MAXALIGN(heaptup->t_len);
3986 : : }
3987 : : else
3988 : 148175 : heaptup = newtup;
3989 : :
3990 : : /*
3991 : : * Now, do we need a new page for the tuple, or not? This is a bit
3992 : : * tricky since someone else could have added tuples to the page while
3993 : : * we weren't looking. We have to recheck the available space after
3994 : : * reacquiring the buffer lock. But don't bother to do that if the
3995 : : * former amount of free space is still not enough; it's unlikely
3996 : : * there's more free now than before.
3997 : : *
3998 : : * What's more, if we need to get a new page, we will need to acquire
3999 : : * buffer locks on both old and new pages. To avoid deadlock against
4000 : : * some other backend trying to get the same two locks in the other
4001 : : * order, we must be consistent about the order we get the locks in.
4002 : : * We use the rule "lock the lower-numbered page of the relation
4003 : : * first". To implement this, we must do RelationGetBufferForTuple
4004 : : * while not holding the lock on the old page, and we must rely on it
4005 : : * to get the locks on both pages in the correct order.
4006 : : *
4007 : : * Another consideration is that we need visibility map page pin(s) if
4008 : : * we will have to clear the all-visible flag on either page. If we
4009 : : * call RelationGetBufferForTuple, we rely on it to acquire any such
4010 : : * pins; but if we don't, we have to handle that here. Hence we need
4011 : : * a loop.
4012 : : */
4013 : : for (;;)
4014 : : {
1797 4015 [ + + ]: 149966 : if (newtupsize > pagefree)
4016 : : {
4017 : : /* It doesn't fit, must use RelationGetBufferForTuple. */
4018 : 149392 : newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
4019 : : buffer, 0, NULL,
4020 : : &vmbuffer_new, &vmbuffer,
4021 : : 0);
4022 : : /* We're all done. */
4023 : 149392 : break;
4024 : : }
4025 : : /* Acquire VM page pin if needed and we don't have it. */
4026 [ + + - + ]: 574 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
1797 tgl@sss.pgh.pa.us 4027 :UBC 0 : visibilitymap_pin(relation, block, &vmbuffer);
4028 : : /* Re-acquire the lock on the old tuple's page. */
9069 tgl@sss.pgh.pa.us 4029 :CBC 574 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
4030 : : /* Re-check using the up-to-date free space */
6454 4031 : 574 : pagefree = PageGetHeapFreeSpace(page);
1797 4032 [ + - ]: 574 : if (newtupsize > pagefree ||
4033 [ + + - + ]: 574 : (vmbuffer == InvalidBuffer && PageIsAllVisible(page)))
4034 : : {
4035 : : /*
4036 : : * Rats, it doesn't fit anymore, or somebody just now set the
4037 : : * all-visible flag. We must now unlock and loop to avoid
4038 : : * deadlock. Fortunately, this path should seldom be taken.
4039 : : */
9069 tgl@sss.pgh.pa.us 4040 :UBC 0 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4041 : : }
4042 : : else
4043 : : {
4044 : : /* We're all done. */
9069 tgl@sss.pgh.pa.us 4045 :CBC 574 : newbuf = buffer;
1797 4046 : 574 : break;
4047 : : }
4048 : : }
4049 : : }
4050 : : else
4051 : : {
4052 : : /* No TOAST work needed, and it'll fit on same page */
9190 4053 : 157741 : newbuf = buffer;
7420 4054 : 157741 : heaptup = newtup;
4055 : : }
4056 : :
4057 : : /*
4058 : : * We're about to do the actual update -- check for conflict first, to
4059 : : * avoid possibly having to roll back work we've just done.
4060 : : *
4061 : : * This is safe without a recheck as long as there is no possibility of
4062 : : * another process scanning the pages between this check and the update
4063 : : * being visible to the scan (i.e., exclusive buffer content lock(s) are
4064 : : * continuously held from this point until the tuple update is visible).
4065 : : *
4066 : : * For the new tuple the only check needed is at the relation level, but
4067 : : * since both tuples are in the same relation and the check for oldtup
4068 : : * will include checking the relation level, there is no benefit to a
4069 : : * separate check for the new tuple.
4070 : : */
1797 tmunro@postgresql.or 4071 : 307707 : CheckForSerializableConflictIn(relation, &oldtup.t_self,
4072 : : BufferGetBlockNumber(buffer));
4073 : :
4074 : : /*
4075 : : * At this point newbuf and buffer are both pinned and locked, and newbuf
4076 : : * has enough space for the new tuple. If they are the same buffer, only
4077 : : * one pin is held.
4078 : : */
4079 : :
6751 tgl@sss.pgh.pa.us 4080 [ + + ]: 307695 : if (newbuf == buffer)
4081 : : {
4082 : : /*
4083 : : * Since the new tuple is going into the same page, we might be able
4084 : : * to do a HOT update. Check if any of the index columns have been
4085 : : * changed.
4086 : : */
1570 pg@bowt.ie 4087 [ + + ]: 158303 : if (!bms_overlap(modified_attrs, hot_attrs))
4088 : : {
6751 tgl@sss.pgh.pa.us 4089 : 145679 : use_hot_update = true;
4090 : :
4091 : : /*
4092 : : * If none of the columns used in HOT-blocking indexes were
4093 : : * updated, we can apply HOT, but we still need to check whether
4094 : : * any summarizing indexes require updating, and update them if
4095 : : * their columns were modified; otherwise we may fail to detect
4096 : : * e.g. value bound changes in BRIN minmax indexes.
4097 : : */
1091 tomas.vondra@postgre 4098 [ + + ]: 145679 : if (bms_overlap(modified_attrs, sum_attrs))
4099 : 1641 : summarized_update = true;
4100 : : }
4101 : : }
4102 : : else
4103 : : {
4104 : : /* Set a hint that the old page could use prune/defrag */
6454 tgl@sss.pgh.pa.us 4105 : 149392 : PageSetFull(page);
4106 : : }
4107 : :
4108 : : /*
4109 : : * Compute replica identity tuple before entering the critical section so
4110 : : * we don't PANIC upon a memory allocation failure.
4111 : : * ExtractReplicaIdentity() will return NULL if nothing needs to be
4112 : : * logged. Pass key_required as true only if the replica identity key
4113 : : * columns are modified or the old tuple has externally stored data.
4114 : : */
3273 alvherre@alvh.no-ip. 4115 : 307695 : old_key_tuple = ExtractReplicaIdentity(relation, &oldtup,
1490 akapila@postgresql.o 4116 [ + + + + ]: 307695 : bms_overlap(modified_attrs, id_attrs) ||
4117 : : id_has_external,
4118 : : &old_key_copied);
4119 : :
4120 : : /* NO EREPORT(ERROR) from here till changes are logged */
9193 tgl@sss.pgh.pa.us 4121 : 307695 : START_CRIT_SECTION();
4122 : :
4123 : : /*
4124 : : * If this transaction commits, the old tuple will become DEAD sooner or
4125 : : * later. Set flag that this page is a candidate for pruning once our xid
4126 : : * falls below the OldestXmin horizon. If the transaction finally aborts,
4127 : : * the subsequent page pruning will be a no-op and the hint will be
4128 : : * cleared.
4129 : : *
4130 : : * XXX Should we set hint on newbuf as well? If the transaction aborts,
4131 : : * there would be a prunable tuple in the newbuf; but for now we choose
4132 : : * not to optimize for aborts. Note that heap_xlog_update must be kept in
4133 : : * sync if this decision changes.
4134 : : */
6454 4135 [ - + + + : 307695 : PageSetPrunable(page, xid);
+ + ]
4136 : :
6751 4137 [ + + ]: 307695 : if (use_hot_update)
4138 : : {
4139 : : /* Mark the old tuple as HOT-updated */
4140 : 145679 : HeapTupleSetHotUpdated(&oldtup);
4141 : : /* And mark the new tuple as heap-only */
4142 : 145679 : HeapTupleSetHeapOnly(heaptup);
4143 : : /* Mark the caller's copy too, in case different from heaptup */
4144 : 145679 : HeapTupleSetHeapOnly(newtup);
4145 : : }
4146 : : else
4147 : : {
4148 : : /* Make sure tuples are correctly marked as not-HOT */
4149 : 162016 : HeapTupleClearHotUpdated(&oldtup);
4150 : 162016 : HeapTupleClearHeapOnly(heaptup);
4151 : 162016 : HeapTupleClearHeapOnly(newtup);
4152 : : }
4153 : :
3189 4154 : 307695 : RelationPutHeapTuple(relation, newbuf, heaptup, false); /* insert new tuple */
4155 : :
4156 : :
4157 : : /* Clear obsolete visibility flags, possibly set by ourselves above... */
3530 andres@anarazel.de 4158 : 307695 : oldtup.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
4159 : 307695 : oldtup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
4160 : : /* ... and store info about transaction updating this tuple */
4161 [ - + ]: 307695 : Assert(TransactionIdIsValid(xmax_old_tuple));
4162 : 307695 : HeapTupleHeaderSetXmax(oldtup.t_data, xmax_old_tuple);
4163 : 307695 : oldtup.t_data->t_infomask |= infomask_old_tuple;
4164 : 307695 : oldtup.t_data->t_infomask2 |= infomask2_old_tuple;
4165 : 307695 : HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
4166 : :
4167 : : /* record address of new tuple in t_ctid of old one */
7420 tgl@sss.pgh.pa.us 4168 : 307695 : oldtup.t_data->t_ctid = heaptup->t_self;
4169 : :
4170 : : /* clear PD_ALL_VISIBLE flags, reset all visibilitymap bits */
3616 kgrittn@postgresql.o 4171 [ + + ]: 307695 : if (PageIsAllVisible(BufferGetPage(buffer)))
4172 : : {
6047 tgl@sss.pgh.pa.us 4173 : 1651 : all_visible_cleared = true;
3616 kgrittn@postgresql.o 4174 : 1651 : PageClearAllVisible(BufferGetPage(buffer));
5375 rhaas@postgresql.org 4175 : 1651 : visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
4176 : : vmbuffer, VISIBILITYMAP_VALID_BITS);
4177 : : }
3616 kgrittn@postgresql.o 4178 [ + + + + ]: 307695 : if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
4179 : : {
6047 tgl@sss.pgh.pa.us 4180 : 964 : all_visible_cleared_new = true;
3616 kgrittn@postgresql.o 4181 : 964 : PageClearAllVisible(BufferGetPage(newbuf));
5375 rhaas@postgresql.org 4182 : 964 : visibilitymap_clear(relation, BufferGetBlockNumber(newbuf),
4183 : : vmbuffer_new, VISIBILITYMAP_VALID_BITS);
4184 : : }
4185 : :
7289 tgl@sss.pgh.pa.us 4186 [ + + ]: 307695 : if (newbuf != buffer)
4187 : 149392 : MarkBufferDirty(newbuf);
4188 : 307695 : MarkBufferDirty(buffer);
4189 : :
4190 : : /* XLOG stuff */
5571 rhaas@postgresql.org 4191 [ + + + + : 307695 : if (RelationNeedsWAL(relation))
+ - + + ]
4192 : : {
4193 : : XLogRecPtr recptr;
4194 : :
4195 : : /*
4196 : : * For logical decoding we need combo CIDs to properly decode the
4197 : : * catalog.
4198 : : */
4478 4199 [ + + - + : 296331 : if (RelationIsAccessibleInLogicalDecoding(relation))
+ - - + -
- - - + +
+ + - + -
- + - ]
4200 : : {
4201 : 2576 : log_heap_new_cid(relation, &oldtup);
4202 : 2576 : log_heap_new_cid(relation, heaptup);
4203 : : }
4204 : :
4205 : 296331 : recptr = log_heap_update(relation, buffer,
4206 : : newbuf, &oldtup, heaptup,
4207 : : old_key_tuple,
4208 : : all_visible_cleared,
4209 : : all_visible_cleared_new);
9386 vadim4o@yahoo.com 4210 [ + + ]: 296331 : if (newbuf != buffer)
4211 : : {
3616 kgrittn@postgresql.o 4212 : 139267 : PageSetLSN(BufferGetPage(newbuf), recptr);
4213 : : }
4214 : 296331 : PageSetLSN(BufferGetPage(buffer), recptr);
4215 : : }
4216 : :
9193 tgl@sss.pgh.pa.us 4217 [ - + ]: 307695 : END_CRIT_SECTION();
4218 : :
9386 vadim4o@yahoo.com 4219 [ + + ]: 307695 : if (newbuf != buffer)
4220 : 149392 : LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
9952 4221 : 307695 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4222 : :
4223 : : /*
4224 : : * Mark old tuple for invalidation from system caches at next command
4225 : : * boundary, and mark the new tuple for invalidation in case we abort. We
4226 : : * have to do this before releasing the buffer because oldtup is in the
4227 : : * buffer. (heaptup is all in local memory, but it's necessary to process
4228 : : * both tuple versions in one call to inval.c so we can avoid redundant
4229 : : * sinval messages.)
4230 : : */
5325 tgl@sss.pgh.pa.us 4231 : 307695 : CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
4232 : :
4233 : : /* Now we can release the buffer(s) */
9198 4234 [ + + ]: 307695 : if (newbuf != buffer)
7289 4235 : 149392 : ReleaseBuffer(newbuf);
703 akorotkov@postgresql 4236 : 307695 : ReleaseBuffer(buffer);
5381 rhaas@postgresql.org 4237 [ + + ]: 307695 : if (BufferIsValid(vmbuffer_new))
4238 : 964 : ReleaseBuffer(vmbuffer_new);
4239 [ + + ]: 307695 : if (BufferIsValid(vmbuffer))
4240 : 1651 : ReleaseBuffer(vmbuffer);
4241 : :
4242 : : /*
4243 : : * Release the lmgr tuple lock, if we had it.
4244 : : */
7624 tgl@sss.pgh.pa.us 4245 [ + + ]: 307695 : if (have_tuple_lock)
2894 simon@2ndQuadrant.co 4246 : 22 : UnlockTupleTuplock(relation, &(oldtup.t_self), *lockmode);
4247 : :
1088 pg@bowt.ie 4248 : 307695 : pgstat_count_heap_update(relation, use_hot_update, newbuf != buffer);
4249 : :
4250 : : /*
4251 : : * If heaptup is a private copy, release it. Don't forget to copy t_self
4252 : : * back to the caller's image, too.
4253 : : */
7420 tgl@sss.pgh.pa.us 4254 [ + + ]: 307695 : if (heaptup != newtup)
4255 : : {
4256 : 1743 : newtup->t_self = heaptup->t_self;
4257 : 1743 : heap_freetuple(heaptup);
4258 : : }
4259 : :
4260 : : /*
4261 : : * Even a HOT update may still require updating summarizing
4262 : : * indexes, lest we fail to update those summaries and get incorrect
4263 : : * results (for example, minmax bounds of the block may change with this
4264 : : * update).
4265 : : */
1091 tomas.vondra@postgre 4266 [ + + ]: 307695 : if (use_hot_update)
4267 : : {
4268 [ + + ]: 145679 : if (summarized_update)
4269 : 1641 : *update_indexes = TU_Summarizing;
4270 : : else
4271 : 144038 : *update_indexes = TU_None;
4272 : : }
4273 : : else
4274 : 162016 : *update_indexes = TU_All;
4275 : :
4478 rhaas@postgresql.org 4276 [ + + + + ]: 307695 : if (old_key_tuple != NULL && old_key_copied)
4277 : 84 : heap_freetuple(old_key_tuple);
4278 : :
6751 tgl@sss.pgh.pa.us 4279 : 307695 : bms_free(hot_attrs);
1091 tomas.vondra@postgre 4280 : 307695 : bms_free(sum_attrs);
4799 alvherre@alvh.no-ip. 4281 : 307695 : bms_free(key_attrs);
3490 tgl@sss.pgh.pa.us 4282 : 307695 : bms_free(id_attrs);
3273 alvherre@alvh.no-ip. 4283 : 307695 : bms_free(modified_attrs);
4284 : 307695 : bms_free(interesting_attrs);
4285 : :
2549 andres@anarazel.de 4286 : 307695 : return TM_Ok;
4287 : : }
4288 : :
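A minimal caller sketch, not from heapam.c itself: how the TU_UpdateIndexes value reported by heap_update() above is meant to be consumed. The maintain_indexes() helper is hypothetical; real callers route index maintenance through the executor.

#include "postgres.h"

#include "access/heapam.h"
#include "access/tableam.h"
#include "access/xact.h"

/* hypothetical helper, standing in for executor index maintenance */
extern void maintain_indexes(Relation rel, HeapTuple tup, bool only_summarizing);

static void
update_one_tuple(Relation rel, const ItemPointerData *otid, HeapTuple newtup)
{
	TM_Result	result;
	TM_FailureData tmfd;
	LockTupleMode lockmode;
	TU_UpdateIndexes update_indexes;

	result = heap_update(rel, otid, newtup,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 true /* wait for commit */ ,
						 &tmfd, &lockmode, &update_indexes);
	if (result != TM_Ok)
		elog(ERROR, "unexpected heap_update status: %u", result);

	if (update_indexes != TU_None)
	{
		/*
		 * TU_Summarizing: HOT update, but summarizing (e.g. BRIN) indexes
		 * still need entries; TU_All: non-HOT, insert into every index.
		 */
		maintain_indexes(rel, newtup, update_indexes == TU_Summarizing);
	}
}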
4289 : : #ifdef USE_ASSERT_CHECKING
4290 : : /*
4291 : : * Confirm adequate lock held during heap_update(), per rules from
4292 : : * README.tuplock section "Locking to write inplace-updated tables".
4293 : : */
4294 : : static void
537 noah@leadboat.com 4295 : 307875 : check_lock_if_inplace_updateable_rel(Relation relation,
4296 : : const ItemPointerData *otid,
4297 : : HeapTuple newtup)
4298 : : {
4299 : : /* LOCKTAG_TUPLE acceptable for any catalog */
4300 [ + + ]: 307875 : switch (RelationGetRelid(relation))
4301 : : {
4302 : 71151 : case RelationRelationId:
4303 : : case DatabaseRelationId:
4304 : : {
4305 : : LOCKTAG tuptag;
4306 : :
4307 : 71151 : SET_LOCKTAG_TUPLE(tuptag,
4308 : : relation->rd_lockInfo.lockRelId.dbId,
4309 : : relation->rd_lockInfo.lockRelId.relId,
4310 : : ItemPointerGetBlockNumber(otid),
4311 : : ItemPointerGetOffsetNumber(otid));
4312 [ + + ]: 71151 : if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock, false))
4313 : 31432 : return;
4314 : : }
4315 : 39719 : break;
4316 : 236724 : default:
4317 [ - + ]: 236724 : Assert(!IsInplaceUpdateRelation(relation));
4318 : 236724 : return;
4319 : : }
4320 : :
4321 [ + - - ]: 39719 : switch (RelationGetRelid(relation))
4322 : : {
4323 : 39719 : case RelationRelationId:
4324 : : {
4325 : : /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
4326 : 39719 : Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
4327 : 39719 : Oid relid = classForm->oid;
4328 : : Oid dbid;
4329 : : LOCKTAG tag;
4330 : :
4331 [ + + ]: 39719 : if (IsSharedRelation(relid))
4332 : 44 : dbid = InvalidOid;
4333 : : else
4334 : 39675 : dbid = MyDatabaseId;
4335 : :
4336 [ + + ]: 39719 : if (classForm->relkind == RELKIND_INDEX)
4337 : : {
4338 : 1078 : Relation irel = index_open(relid, AccessShareLock);
4339 : :
4340 : 1078 : SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4341 : 1078 : index_close(irel, AccessShareLock);
4342 : : }
4343 : : else
4344 : 38641 : SET_LOCKTAG_RELATION(tag, dbid, relid);
4345 : :
4346 [ + + ]: 39719 : if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, false) &&
4347 [ - + ]: 36092 : !LockHeldByMe(&tag, ShareRowExclusiveLock, true))
537 noah@leadboat.com 4348 [ # # ]:UBC 0 : elog(WARNING,
4349 : : "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4350 : : NameStr(classForm->relname),
4351 : : relid,
4352 : : classForm->relkind,
4353 : : ItemPointerGetBlockNumber(otid),
4354 : : ItemPointerGetOffsetNumber(otid));
4355 : : }
537 noah@leadboat.com 4356 :CBC 39719 : break;
537 noah@leadboat.com 4357 :UBC 0 : case DatabaseRelationId:
4358 : : {
4359 : : /* LOCKTAG_TUPLE required */
4360 : 0 : Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
4361 : :
4362 [ # # ]: 0 : elog(WARNING,
4363 : : "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
4364 : : NameStr(dbForm->datname),
4365 : : dbForm->oid,
4366 : : ItemPointerGetBlockNumber(otid),
4367 : : ItemPointerGetOffsetNumber(otid));
4368 : : }
4369 : 0 : break;
4370 : : }
4371 : : }
4372 : :
4373 : : /*
4374 : : * Confirm adequate relation lock held, per rules from README.tuplock section
4375 : : * "Locking to write inplace-updated tables".
4376 : : */
4377 : : static void
537 noah@leadboat.com 4378 :CBC 94163 : check_inplace_rel_lock(HeapTuple oldtup)
4379 : : {
4380 : 94163 : Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
4381 : 94163 : Oid relid = classForm->oid;
4382 : : Oid dbid;
4383 : : LOCKTAG tag;
4384 : :
4385 [ + + ]: 94163 : if (IsSharedRelation(relid))
4386 : 8833 : dbid = InvalidOid;
4387 : : else
4388 : 85330 : dbid = MyDatabaseId;
4389 : :
4390 [ + + ]: 94163 : if (classForm->relkind == RELKIND_INDEX)
4391 : : {
4392 : 40950 : Relation irel = index_open(relid, AccessShareLock);
4393 : :
4394 : 40950 : SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
4395 : 40950 : index_close(irel, AccessShareLock);
4396 : : }
4397 : : else
4398 : 53213 : SET_LOCKTAG_RELATION(tag, dbid, relid);
4399 : :
4400 [ - + ]: 94163 : if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock, true))
537 noah@leadboat.com 4401 [ # # ]:UBC 0 : elog(WARNING,
4402 : : "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
4403 : : NameStr(classForm->relname),
4404 : : relid,
4405 : : classForm->relkind,
4406 : : ItemPointerGetBlockNumber(&oldtup->t_self),
4407 : : ItemPointerGetOffsetNumber(&oldtup->t_self));
537 noah@leadboat.com 4408 :CBC 94163 : }
4409 : : #endif
4410 : :
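A hedged sketch, not from heapam.c, of the discipline the assertions above enforce: per README.tuplock, a writer of a pg_class row holds at least ShareUpdateExclusiveLock on the table the row describes. Tuple lookup and error handling are omitted; CatalogTupleUpdate() here stands for whatever update path the caller actually uses.

#include "postgres.h"

#include "access/table.h"
#include "catalog/indexing.h"
#include "catalog/pg_class_d.h"
#include "storage/lmgr.h"

static void
update_pg_class_row(Oid tableoid, HeapTuple newtup)
{
	Relation	pg_class;

	/* relation-level lock satisfying check_lock_if_inplace_updateable_rel() */
	LockRelationOid(tableoid, ShareUpdateExclusiveLock);

	pg_class = table_open(RelationRelationId, RowExclusiveLock);
	CatalogTupleUpdate(pg_class, &newtup->t_self, newtup);
	table_close(pg_class, RowExclusiveLock);
}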
4411 : : /*
4412 : : * Check if the specified attribute's values are the same. Subroutine for
4413 : : * HeapDetermineColumnsInfo.
4414 : : */
4415 : : static bool
1490 akapila@postgresql.o 4416 : 749086 : heap_attr_equals(TupleDesc tupdesc, int attrnum, Datum value1, Datum value2,
4417 : : bool isnull1, bool isnull2)
4418 : : {
4419 : : /*
4420 : : * If one value is NULL and other is not, then they are certainly not
4421 : : * equal
4422 : : */
6751 tgl@sss.pgh.pa.us 4423 [ + + ]: 749086 : if (isnull1 != isnull2)
4424 : 45 : return false;
4425 : :
4426 : : /*
4427 : : * If both are NULL, they can be considered equal.
4428 : : */
4429 [ + + ]: 749041 : if (isnull1)
4430 : 4991 : return true;
4431 : :
4432 : : /*
4433 : : * We do simple binary comparison of the two datums. This may be overly
4434 : : * strict because there can be multiple binary representations for the
4435 : : * same logical value. But we should be OK as long as there are no false
4436 : : * positives. Using a type-specific equality operator is messy because
4437 : : * there could be multiple notions of equality in different operator
4438 : : * classes; furthermore, we cannot safely invoke user-defined functions
4439 : : * while holding exclusive buffer lock.
4440 : : */
4441 [ - + ]: 744050 : if (attrnum <= 0)
4442 : : {
4443 : : /* The only allowed system columns are OIDs, so do this */
6751 tgl@sss.pgh.pa.us 4444 :UBC 0 : return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
4445 : : }
4446 : : else
4447 : : {
4448 : : CompactAttribute *att;
4449 : :
6751 tgl@sss.pgh.pa.us 4450 [ - + ]:CBC 744050 : Assert(attrnum <= tupdesc->natts);
450 drowley@postgresql.o 4451 : 744050 : att = TupleDescCompactAttr(tupdesc, attrnum - 1);
6751 tgl@sss.pgh.pa.us 4452 : 744050 : return datumIsEqual(value1, value2, att->attbyval, att->attlen);
4453 : : }
4454 : : }
4455 : :
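A small illustration, not from heapam.c, of the binary-comparison caveat in heap_attr_equals() above: a false "not equal" from datumIsEqual() merely disables an optimization, whereas a false "equal" would be a correctness bug.

#include "postgres.h"

#include "utils/datum.h"

static void
datum_equality_example(void)
{
	Datum		a = Int32GetDatum(42);
	Datum		b = Int32GetDatum(42);

	/* pass-by-value 4-byte attribute: a plain bit comparison */
	Assert(datumIsEqual(a, b, true, sizeof(int32)));

	/*
	 * For varlena attributes (attbyval = false, attlen = -1), logically
	 * equal values can differ in binary form (short vs. full header,
	 * inline vs. TOAST pointer), so datumIsEqual() may say "not equal".
	 */
}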
4456 : : /*
4457 : : * Check which columns are being updated.
4458 : : *
4459 : : * Given an updated tuple, determine (and return into the output bitmapset),
4460 : : * from those listed as interesting, the set of columns that changed.
4461 : : *
4462 : : * has_external indicates whether any of the unmodified attributes (from those
4463 : : * listed as interesting) of the old tuple is a member of external_cols and is
4464 : : * stored externally.
4465 : : */
4466 : : static Bitmapset *
1490 akapila@postgresql.o 4467 : 307874 : HeapDetermineColumnsInfo(Relation relation,
4468 : : Bitmapset *interesting_cols,
4469 : : Bitmapset *external_cols,
4470 : : HeapTuple oldtup, HeapTuple newtup,
4471 : : bool *has_external)
4472 : : {
4473 : : int attidx;
3224 bruce@momjian.us 4474 : 307874 : Bitmapset *modified = NULL;
1490 akapila@postgresql.o 4475 : 307874 : TupleDesc tupdesc = RelationGetDescr(relation);
4476 : :
1109 tgl@sss.pgh.pa.us 4477 : 307874 : attidx = -1;
4478 [ + + ]: 1056960 : while ((attidx = bms_next_member(interesting_cols, attidx)) >= 0)
4479 : : {
4480 : : /* attidx is zero-based, attrnum is the normal attribute number */
4481 : 749086 : AttrNumber attrnum = attidx + FirstLowInvalidHeapAttributeNumber;
4482 : : Datum value1,
4483 : : value2;
4484 : : bool isnull1,
4485 : : isnull2;
4486 : :
4487 : : /*
4488 : : * If it's a whole-tuple reference, say "not equal". It's not really
4489 : : * worth supporting this case, since it could only succeed after a
4490 : : * no-op update, which is hardly a case worth optimizing for.
4491 : : */
1490 akapila@postgresql.o 4492 [ - + ]: 749086 : if (attrnum == 0)
4493 : : {
1109 tgl@sss.pgh.pa.us 4494 :UBC 0 : modified = bms_add_member(modified, attidx);
1490 akapila@postgresql.o 4495 :CBC 723131 : continue;
4496 : : }
4497 : :
4498 : : /*
4499 : : * Likewise, automatically say "not equal" for any system attribute
4500 : : * other than tableOID; we cannot expect these to be consistent in a
4501 : : * HOT chain, or even to be set correctly yet in the new tuple.
4502 : : */
4503 [ - + ]: 749086 : if (attrnum < 0)
4504 : : {
1490 akapila@postgresql.o 4505 [ # # ]:UBC 0 : if (attrnum != TableOidAttributeNumber)
4506 : : {
1109 tgl@sss.pgh.pa.us 4507 : 0 : modified = bms_add_member(modified, attidx);
1490 akapila@postgresql.o 4508 : 0 : continue;
4509 : : }
4510 : : }
4511 : :
4512 : : /*
4513 : : * Extract the corresponding values. XXX this is pretty inefficient
4514 : : * if there are many indexed columns. Should we do a single
4515 : : * heap_deform_tuple call on each tuple, instead? But that doesn't
4516 : : * work for system columns ...
4517 : : */
1490 akapila@postgresql.o 4518 :CBC 749086 : value1 = heap_getattr(oldtup, attrnum, tupdesc, &isnull1);
4519 : 749086 : value2 = heap_getattr(newtup, attrnum, tupdesc, &isnull2);
4520 : :
4521 [ + + ]: 749086 : if (!heap_attr_equals(tupdesc, attrnum, value1,
4522 : : value2, isnull1, isnull2))
4523 : : {
1109 tgl@sss.pgh.pa.us 4524 : 27557 : modified = bms_add_member(modified, attidx);
1490 akapila@postgresql.o 4525 : 27557 : continue;
4526 : : }
4527 : :
4528 : : /*
4529 : : * No need to check attributes that can't be stored externally. Note
4530 : : * that system attributes can't be stored externally.
4531 : : */
4532 [ + - + + ]: 721529 : if (attrnum < 0 || isnull1 ||
450 drowley@postgresql.o 4533 [ + + ]: 716538 : TupleDescCompactAttr(tupdesc, attrnum - 1)->attlen != -1)
1490 akapila@postgresql.o 4534 : 695574 : continue;
4535 : :
4536 : : /*
4537 : : * Check if the old tuple's attribute is stored externally and is a
4538 : : * member of external_cols.
4539 : : */
32 michael@paquier.xyz 4540 [ + + + + ]:GNC 25960 : if (VARATT_IS_EXTERNAL((varlena *) DatumGetPointer(value1)) &&
1109 tgl@sss.pgh.pa.us 4541 :CBC 5 : bms_is_member(attidx, external_cols))
1490 akapila@postgresql.o 4542 : 2 : *has_external = true;
4543 : : }
4544 : :
3273 alvherre@alvh.no-ip. 4545 : 307874 : return modified;
4546 : : }
4547 : :
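One convention above is easy to miss: members of the returned bitmapset are attribute numbers offset by FirstLowInvalidHeapAttributeNumber, so that negative system attribute numbers fit in a bitmapset. A sketch, not from heapam.c, of how a caller tests a single column:

#include "postgres.h"

#include "access/attnum.h"
#include "access/sysattr.h"
#include "nodes/bitmapset.h"

static bool
column_was_modified(Bitmapset *modified, AttrNumber attno)
{
	/* mirror the attidx computation in the loop above */
	return bms_is_member(attno - FirstLowInvalidHeapAttributeNumber,
						 modified);
}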
4548 : : /*
4549 : : * simple_heap_update - replace a tuple
4550 : : *
4551 : : * This routine may be used to update a tuple when concurrent updates of
4552 : : * the target tuple are not expected (for example, because we have a lock
4553 : : * on the relation associated with the tuple). Any failure is reported
4554 : : * via ereport().
4555 : : */
4556 : : void
136 peter@eisentraut.org 4557 :GNC 113021 : simple_heap_update(Relation relation, const ItemPointerData *otid, HeapTuple tup,
4558 : : TU_UpdateIndexes *update_indexes)
4559 : : {
4560 : : TM_Result result;
4561 : : TM_FailureData tmfd;
4562 : : LockTupleMode lockmode;
4563 : :
8217 tgl@sss.pgh.pa.us 4564 :CBC 113021 : result = heap_update(relation, otid, tup,
4565 : : GetCurrentCommandId(true), InvalidSnapshot,
4566 : : true /* wait for commit */ ,
4567 : : &tmfd, &lockmode, update_indexes);
9182 4568 [ - + - + : 113021 : switch (result)
- ]
4569 : : {
2549 andres@anarazel.de 4570 :UBC 0 : case TM_SelfModified:
4571 : : /* Tuple was already updated in current command? */
8273 tgl@sss.pgh.pa.us 4572 [ # # ]: 0 : elog(ERROR, "tuple already updated by self");
4573 : : break;
4574 : :
2549 andres@anarazel.de 4575 :CBC 113020 : case TM_Ok:
4576 : : /* done successfully */
9182 tgl@sss.pgh.pa.us 4577 : 113020 : break;
4578 : :
2549 andres@anarazel.de 4579 :UBC 0 : case TM_Updated:
8273 tgl@sss.pgh.pa.us 4580 [ # # ]: 0 : elog(ERROR, "tuple concurrently updated");
4581 : : break;
4582 : :
2549 andres@anarazel.de 4583 :CBC 1 : case TM_Deleted:
4584 [ + - ]: 1 : elog(ERROR, "tuple concurrently deleted");
4585 : : break;
4586 : :
9182 tgl@sss.pgh.pa.us 4587 :UBC 0 : default:
8273 4588 [ # # ]: 0 : elog(ERROR, "unrecognized heap_update status: %u", result);
4589 : : break;
4590 : : }
9182 tgl@sss.pgh.pa.us 4591 :CBC 113020 : }
4592 : :
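A hedged sketch, not from heapam.c, of the copy-modify-update pattern simple_heap_update() serves. It assumes the caller's relation lock rules out concurrent updates of the row and leaves index maintenance to the caller; for system catalogs, CatalogTupleUpdate() packages both steps.

#include "postgres.h"

#include "access/heapam.h"
#include "access/htup_details.h"

static void
replace_tuple(Relation rel, HeapTuple oldtup)
{
	HeapTuple	newtup = heap_copytuple(oldtup);
	TU_UpdateIndexes update_indexes;

	/* ... modify newtup's user data here ... */

	simple_heap_update(rel, &oldtup->t_self, newtup, &update_indexes);

	/* the caller must now add index entries as update_indexes dictates */
	heap_freetuple(newtup);
}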
4593 : :
4594 : : /*
4595 : : * Return the MultiXactStatus corresponding to the given tuple lock mode.
4596 : : */
4597 : : static MultiXactStatus
4799 alvherre@alvh.no-ip. 4598 : 115453 : get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
4599 : : {
4600 : : int retval;
4601 : :
4602 [ + + ]: 115453 : if (is_update)
4603 : 215 : retval = tupleLockExtraInfo[mode].updstatus;
4604 : : else
4605 : 115238 : retval = tupleLockExtraInfo[mode].lockstatus;
4606 : :
4607 [ - + ]: 115453 : if (retval == -1)
4799 alvherre@alvh.no-ip. 4608 [ # # # # ]:UBC 0 : elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4609 : : is_update ? "true" : "false");
4610 : :
4646 alvherre@alvh.no-ip. 4611 :CBC 115453 : return (MultiXactStatus) retval;
4612 : : }
4613 : :
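Before the implementation, a minimal usage sketch, not from heapam.c, of heap_lock_tuple(), whose contract is documented just below. It assumes tuple->t_self already points at the target row and simply errors out on failure instead of retrying.

#include "postgres.h"

#include "access/heapam.h"
#include "access/xact.h"
#include "storage/bufmgr.h"

static void
lock_row_for_update(Relation rel, HeapTuple tuple)
{
	Buffer		buffer;
	TM_FailureData tmfd;
	TM_Result	res;

	res = heap_lock_tuple(rel, tuple,
						  GetCurrentCommandId(true),
						  LockTupleExclusive, LockWaitBlock,
						  true /* follow_updates */ ,
						  &buffer, &tmfd);

	/* heap_lock_tuple returns with the buffer pinned but not locked */
	ReleaseBuffer(buffer);

	if (res != TM_Ok)
		elog(ERROR, "could not lock row: status %u", res);
}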
4614 : : /*
4615 : : * heap_lock_tuple - lock a tuple in shared or exclusive mode
4616 : : *
4617 : : * Note that this acquires a buffer pin, which the caller must release.
4618 : : *
4619 : : * Input parameters:
4620 : : * relation: relation containing tuple (caller must hold suitable lock)
4621 : : * cid: current command ID (used for visibility test, and stored into
4622 : : * tuple's cmax if lock is successful)
4623 : : * mode: indicates if shared or exclusive tuple lock is desired
4624 : : * wait_policy: what to do if tuple lock is not available
4625 : : * follow_updates: if true, follow the update chain to also lock descendant
4626 : : * tuples.
4627 : : *
4628 : : * Output parameters:
4629 : : * *tuple: all fields filled in
4630 : : * *buffer: set to buffer holding tuple (pinned but not locked at exit)
4631 : : * *tmfd: filled in failure cases (see below)
4632 : : *
4633 : : * Function results are the same as the ones for table_tuple_lock().
4634 : : *
4635 : : * In the failure cases other than TM_Invisible, the routine fills
4636 : : * *tmfd with the tuple's t_ctid, t_xmax (resolving a possible MultiXact,
4637 : : * if necessary), and t_cmax (the last only for TM_SelfModified,
4638 : : * since we cannot obtain cmax from a combo CID generated by another
4639 : : * transaction).
4640 : : * See comments for struct TM_FailureData for additional info.
4641 : : *
4642 : : * See README.tuplock for a thorough explanation of this mechanism.
4643 : : */
4644 : : TM_Result
703 akorotkov@postgresql 4645 : 558510 : heap_lock_tuple(Relation relation, HeapTuple tuple,
4646 : : CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy,
4647 : : bool follow_updates,
4648 : : Buffer *buffer, TM_FailureData *tmfd)
4649 : : {
4650 : : TM_Result result;
4651 : 558510 : ItemPointer tid = &(tuple->t_self);
4652 : : ItemId lp;
4653 : : Page page;
3527 andres@anarazel.de 4654 : 558510 : Buffer vmbuffer = InvalidBuffer;
4655 : : BlockNumber block;
4656 : : TransactionId xid,
4657 : : xmax;
4658 : : uint16 old_infomask,
4659 : : new_infomask,
4660 : : new_infomask2;
3992 alvherre@alvh.no-ip. 4661 : 558510 : bool first_time = true;
2462 4662 : 558510 : bool skip_tuple_lock = false;
7624 tgl@sss.pgh.pa.us 4663 : 558510 : bool have_tuple_lock = false;
3527 andres@anarazel.de 4664 : 558510 : bool cleared_all_frozen = false;
4665 : :
703 akorotkov@postgresql 4666 : 558510 : *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3527 andres@anarazel.de 4667 : 558510 : block = ItemPointerGetBlockNumber(tid);
4668 : :
4669 : : /*
4670 : : * Before locking the buffer, pin the visibility map page if it appears to
4671 : : * be necessary. Since we haven't got the lock yet, someone else might be
4672 : : * in the middle of changing this, so we'll need to recheck after we have
4673 : : * the lock.
4674 : : */
703 akorotkov@postgresql 4675 [ + + ]: 558510 : if (PageIsAllVisible(BufferGetPage(*buffer)))
3527 andres@anarazel.de 4676 : 401674 : visibilitymap_pin(relation, block, &vmbuffer);
4677 : :
703 akorotkov@postgresql 4678 : 558510 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4679 : :
4680 : 558510 : page = BufferGetPage(*buffer);
6454 tgl@sss.pgh.pa.us 4681 : 558510 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6759 4682 [ - + ]: 558510 : Assert(ItemIdIsNormal(lp));
4683 : :
6454 4684 : 558510 : tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
9952 vadim4o@yahoo.com 4685 : 558510 : tuple->t_len = ItemIdGetLength(lp);
7512 tgl@sss.pgh.pa.us 4686 : 558510 : tuple->t_tableOid = RelationGetRelid(relation);
4687 : :
9952 vadim4o@yahoo.com 4688 : 14 : l3:
703 akorotkov@postgresql 4689 : 558524 : result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4690 : :
2549 andres@anarazel.de 4691 [ + + ]: 558524 : if (result == TM_Invisible)
4692 : : {
4693 : : /*
4694 : : * This is possible, but only when locking a tuple for ON CONFLICT DO
4695 : : * SELECT/UPDATE. We return this value here rather than throwing an
4696 : : * error in order to give that case the opportunity to throw a more
4697 : : * specific error.
4698 : : */
4699 : 21 : result = TM_Invisible;
3527 4700 : 21 : goto out_locked;
4701 : : }
2549 4702 [ + + + + ]: 558503 : else if (result == TM_BeingModified ||
4703 [ + + ]: 77475 : result == TM_Updated ||
4704 : : result == TM_Deleted)
4705 : : {
4706 : : TransactionId xwait;
4707 : : uint16 infomask;
4708 : : uint16 infomask2;
4709 : : bool require_sleep;
4710 : : ItemPointerData t_ctid;
4711 : :
4712 : : /* must copy state data before unlocking buffer */
4799 alvherre@alvh.no-ip. 4713 : 481029 : xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
7624 tgl@sss.pgh.pa.us 4714 : 481029 : infomask = tuple->t_data->t_infomask;
4799 alvherre@alvh.no-ip. 4715 : 481029 : infomask2 = tuple->t_data->t_infomask2;
4716 : 481029 : ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4717 : :
703 akorotkov@postgresql 4718 : 481029 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4719 : :
4720 : : /*
4721 : : * If any subtransaction of the current top transaction already holds
4722 : : * a lock as strong as or stronger than what we're requesting, we
4723 : : * effectively hold the desired lock already. We *must* succeed
4724 : : * without trying to take the tuple lock, else we will deadlock
4725 : : * against anyone wanting to acquire a stronger lock.
4726 : : *
4727 : : * Note we only do this the first time we loop on the HTSU result;
4728 : : * there is no point in testing in subsequent passes, because
4729 : : * evidently our own transaction cannot have acquired a new lock after
4730 : : * the first time we checked.
4731 : : */
3992 alvherre@alvh.no-ip. 4732 [ + + ]: 481029 : if (first_time)
4733 : : {
4734 : 481018 : first_time = false;
4735 : :
4736 [ + + ]: 481018 : if (infomask & HEAP_XMAX_IS_MULTI)
4737 : : {
4738 : : int i;
4739 : : int nmembers;
4740 : : MultiXactMember *members;
4741 : :
4742 : : /*
4743 : : * We don't need to allow old multixacts here; if that had
4744 : : * been the case, HeapTupleSatisfiesUpdate would have returned
4745 : : * MayBeUpdated and we wouldn't be here.
4746 : : */
4747 : : nmembers =
4748 : 73304 : GetMultiXactIdMembers(xwait, &members, false,
4749 : 73304 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4750 : :
4751 [ + + ]: 1422687 : for (i = 0; i < nmembers; i++)
4752 : : {
4753 : : /* only consider members of our own transaction */
4754 [ + + ]: 1349397 : if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4755 : 1349348 : continue;
4756 : :
4757 [ + + ]: 49 : if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4758 : : {
4799 4759 : 14 : pfree(members);
2549 andres@anarazel.de 4760 : 14 : result = TM_Ok;
3527 4761 : 14 : goto out_unlocked;
4762 : : }
4763 : : else
4764 : : {
4765 : : /*
4766 : : * Disable acquisition of the heavyweight tuple lock.
4767 : : * Otherwise, when promoting a weaker lock, we might
4768 : : * deadlock with another locker that has acquired the
4769 : : * heavyweight tuple lock and is waiting for our
4770 : : * transaction to finish.
4771 : : *
4772 : : * Note that in this case we still need to wait for
4773 : : * the multixact if required, to avoid acquiring
4774 : : * conflicting locks.
4775 : : */
2462 alvherre@alvh.no-ip. 4776 : 35 : skip_tuple_lock = true;
4777 : : }
4778 : : }
4779 : :
3992 4780 [ + - ]: 73290 : if (members)
4781 : 73290 : pfree(members);
4782 : : }
4783 [ + + ]: 407714 : else if (TransactionIdIsCurrentTransactionId(xwait))
4784 : : {
4785 [ + + + + : 406377 : switch (mode)
- ]
4786 : : {
4787 : 399972 : case LockTupleKeyShare:
4788 [ - + - - : 399972 : Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
- - ]
4789 : : HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4790 : : HEAP_XMAX_IS_EXCL_LOCKED(infomask));
2549 andres@anarazel.de 4791 : 399972 : result = TM_Ok;
3527 4792 : 399972 : goto out_unlocked;
3992 alvherre@alvh.no-ip. 4793 : 28 : case LockTupleShare:
4794 [ + + - + ]: 34 : if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4795 : 6 : HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4796 : : {
2549 andres@anarazel.de 4797 : 22 : result = TM_Ok;
3527 4798 : 22 : goto out_unlocked;
4799 : : }
3992 alvherre@alvh.no-ip. 4800 : 6 : break;
4801 : 72 : case LockTupleNoKeyExclusive:
4802 [ + + ]: 72 : if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4803 : : {
2549 andres@anarazel.de 4804 : 60 : result = TM_Ok;
3527 4805 : 60 : goto out_unlocked;
4806 : : }
3992 alvherre@alvh.no-ip. 4807 : 12 : break;
4808 : 6305 : case LockTupleExclusive:
4809 [ + + ]: 6305 : if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4810 [ + + ]: 1265 : infomask2 & HEAP_KEYS_UPDATED)
4811 : : {
2549 andres@anarazel.de 4812 : 1244 : result = TM_Ok;
3527 4813 : 1244 : goto out_unlocked;
4814 : : }
3992 alvherre@alvh.no-ip. 4815 : 5061 : break;
4816 : : }
4817 : : }
4818 : : }
4819 : :
4820 : : /*
4821 : : * Initially assume that we will have to wait for the locking
4822 : : * transaction(s) to finish. We check various cases below in which
4823 : : * this can be turned off.
4824 : : */
4799 4825 : 79717 : require_sleep = true;
4826 [ + + ]: 79717 : if (mode == LockTupleKeyShare)
4827 : : {
4828 : : /*
4829 : : * If we're requesting KeyShare, and there's no update present, we
4830 : : * don't need to wait. Even if there is an update, we can still
4831 : : * continue if the key hasn't been modified.
4832 : : *
4833 : : * However, if there are updates, we need to walk the update chain
4834 : : * to mark future versions of the row as locked, too. That way,
4835 : : * if somebody deletes that future version, we're protected
4836 : : * against the key going away. This locking of future versions
4837 : : * could block momentarily, if a concurrent transaction is
4838 : : * deleting a key; or it could report that the transaction
4839 : : * deleting the key has already committed. So we
4840 : : * do this before re-locking the buffer; otherwise this would be
4841 : : * prone to deadlocks.
4842 : : *
4843 : : * Note that the TID we're locking was grabbed before we unlocked
4844 : : * the buffer. For it to change while we're not looking, the
4845 : : * other properties we're testing for below after re-locking the
4846 : : * buffer would also change, in which case we would restart this
4847 : : * loop above.
4848 : : */
4849 [ + + ]: 73868 : if (!(infomask2 & HEAP_KEYS_UPDATED))
4850 : : {
4851 : : bool updated;
4852 : :
4853 : 73825 : updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4854 : :
4855 : : /*
4856 : : * If there are updates, follow the update chain; bail out if
4857 : : * that cannot be done.
4858 : : */
82 heikki.linnakangas@i 4859 [ + - + + ]: 73825 : if (follow_updates && updated &&
4860 [ + - ]: 2169 : !ItemPointerEquals(&tuple->t_self, &t_ctid))
4861 : : {
4862 : : TM_Result res;
4863 : :
4864 : 2169 : res = heap_lock_updated_tuple(relation,
4865 : : infomask, xwait, &t_ctid,
4866 : : GetCurrentTransactionId(),
4867 : : mode);
2549 andres@anarazel.de 4868 [ + + ]: 2169 : if (res != TM_Ok)
4869 : : {
4799 alvherre@alvh.no-ip. 4870 : 6 : result = res;
4871 : : /* recovery code expects to have buffer lock held */
703 akorotkov@postgresql 4872 : 6 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4799 alvherre@alvh.no-ip. 4873 : 202 : goto failed;
4874 : : }
4875 : : }
4876 : :
703 akorotkov@postgresql 4877 : 73819 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4878 : :
4879 : : /*
4880 : : * Make sure it's still an appropriate lock, else start over.
4881 : : * Also, if it wasn't updated before we released the lock, but
4882 : : * is updated now, we start over too; the reason is that we
4883 : : * now need to follow the update chain to lock the new
4884 : : * versions.
4885 : : */
4799 alvherre@alvh.no-ip. 4886 [ + + ]: 73819 : if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4887 [ + - ]: 2151 : ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4888 [ - + ]: 2151 : !updated))
4889 : 14 : goto l3;
4890 : :
4891 : : /* Things look okay, so we can skip sleeping */
4892 : 73819 : require_sleep = false;
4893 : :
4894 : : /*
4895 : : * Note we allow Xmax to change here; other updaters/lockers
4896 : : * could have modified it before we grabbed the buffer lock.
4897 : : * However, this is not a problem, because with the recheck we
4898 : : * just did we ensure that they still don't conflict with the
4899 : : * lock we want.
4900 : : */
4901 : : }
4902 : : }
4903 [ + + ]: 5849 : else if (mode == LockTupleShare)
4904 : : {
4905 : : /*
4906 : : * If we're requesting Share, we can similarly avoid sleeping if
4907 : : * there's no update and no exclusive lock present.
4908 : : */
4909 [ + - ]: 447 : if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4910 [ + + ]: 447 : !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4911 : : {
703 akorotkov@postgresql 4912 : 441 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4913 : :
4914 : : /*
4915 : : * Make sure it's still an appropriate lock, else start over.
4916 : : * See above about allowing xmax to change.
4917 : : */
4799 alvherre@alvh.no-ip. 4918 [ + - - + ]: 882 : if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4919 : 441 : HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4799 alvherre@alvh.no-ip. 4920 :UBC 0 : goto l3;
4799 alvherre@alvh.no-ip. 4921 :CBC 441 : require_sleep = false;
4922 : : }
4923 : : }
4924 [ + + ]: 5402 : else if (mode == LockTupleNoKeyExclusive)
4925 : : {
4926 : : /*
4927 : : * If we're requesting NoKeyExclusive, we might also be able to
4928 : : * avoid sleeping; just ensure that there no conflicting lock
4929 : : * already acquired.
4930 : : */
4931 [ + + ]: 176 : if (infomask & HEAP_XMAX_IS_MULTI)
4932 : : {
4097 4933 [ + + ]: 26 : if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4934 : : mode, NULL))
4935 : : {
4936 : : /*
4937 : : * No conflict, but if the xmax changed under us in the
4938 : : * meantime, start over.
4939 : : */
703 akorotkov@postgresql 4940 : 13 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4097 alvherre@alvh.no-ip. 4941 [ + - - + ]: 26 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4942 : 13 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4943 : : xwait))
4097 alvherre@alvh.no-ip. 4944 :UBC 0 : goto l3;
4945 : :
4946 : : /* otherwise, we're good */
4097 alvherre@alvh.no-ip. 4947 :CBC 13 : require_sleep = false;
4948 : : }
4949 : : }
4799 4950 [ + + ]: 150 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4951 : : {
703 akorotkov@postgresql 4952 : 18 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4953 : :
4954 : : /* if the xmax changed in the meantime, start over */
4343 alvherre@alvh.no-ip. 4955 [ + - - + ]: 36 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
2236 4956 : 18 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4957 : : xwait))
4799 alvherre@alvh.no-ip. 4958 :UBC 0 : goto l3;
4959 : : /* otherwise, we're good */
4799 alvherre@alvh.no-ip. 4960 :CBC 18 : require_sleep = false;
4961 : : }
4962 : : }
4963 : :
4964 : : /*
4965 : : * As a check independent from those above, we can also avoid sleeping
4966 : : * if the current transaction is the sole locker of the tuple. Note
4967 : : * that the strength of the lock already held is irrelevant; this is
4968 : : * not about recording the lock in Xmax (which will be done regardless
4969 : : * of this optimization, below). Also, note that the cases where we
4970 : : * hold a lock stronger than we are requesting are already handled
4971 : : * above by not doing anything.
4972 : : *
4973 : : * Note we only deal with the non-multixact case here; MultiXactIdWait
4974 : : * is well equipped to deal with this situation on its own.
4975 : : */
3992 4976 [ + + + + : 85089 : if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
+ + ]
4977 : 5378 : TransactionIdIsCurrentTransactionId(xwait))
4978 : : {
4979 : : /* ... but if the xmax changed in the meantime, start over */
703 akorotkov@postgresql 4980 : 5061 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3992 alvherre@alvh.no-ip. 4981 [ + - - + ]: 10122 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4982 : 5061 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4983 : : xwait))
3992 alvherre@alvh.no-ip. 4984 :UBC 0 : goto l3;
3992 alvherre@alvh.no-ip. 4985 [ - + ]:CBC 5061 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
4986 : 5061 : require_sleep = false;
4987 : : }
4988 : :
4989 : : /*
4990 : : * Time to sleep on the other transaction/multixact, if necessary.
4991 : : *
4992 : : * If the other transaction is an update/delete that's already
4993 : : * committed, then sleeping cannot possibly do any good: if we're
4994 : : * required to sleep, get out to raise an error instead.
4995 : : *
4996 : : * By here, we either have already acquired the buffer exclusive lock,
4997 : : * or we must wait for the locking transaction or multixact; so below
4998 : : * we ensure that we grab buffer lock after the sleep.
4999 : : */
2549 andres@anarazel.de 5000 [ + + + + : 79711 : if (require_sleep && (result == TM_Updated || result == TM_Deleted))
+ + ]
5001 : : {
703 akorotkov@postgresql 5002 : 158 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3530 alvherre@alvh.no-ip. 5003 : 158 : goto failed;
5004 : : }
5005 [ + + ]: 79553 : else if (require_sleep)
5006 : : {
5007 : : /*
5008 : : * Acquire tuple lock to establish our priority for the tuple, or
5009 : : * die trying. LockTuple will release us when we are next-in-line
5010 : : * for the tuple. We must do this even if we are share-locking,
5011 : : * but not if we already have a weaker lock on the tuple.
5012 : : *
5013 : : * If we are forced to "start over" below, we keep the tuple lock;
5014 : : * this arranges that we stay at the head of the line while
5015 : : * rechecking tuple state.
5016 : : */
2462 5017 [ + + ]: 201 : if (!skip_tuple_lock &&
5018 [ + + ]: 185 : !heap_acquire_tuplock(relation, tid, mode, wait_policy,
5019 : : &have_tuple_lock))
5020 : : {
5021 : : /*
5022 : : * This can only happen if wait_policy is Skip and the lock
5023 : : * couldn't be obtained.
5024 : : */
2549 andres@anarazel.de 5025 : 1 : result = TM_WouldBlock;
5026 : : /* recovery code expects to have buffer lock held */
703 akorotkov@postgresql 5027 : 1 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4097 alvherre@alvh.no-ip. 5028 : 1 : goto failed;
5029 : : }
5030 : :
4799 5031 [ + + ]: 199 : if (infomask & HEAP_XMAX_IS_MULTI)
5032 : : {
5033 : 42 : MultiXactStatus status = get_mxact_status_for_lock(mode, false);
5034 : :
5035 : : /* We only ever lock tuples, never update them */
5036 [ - + ]: 42 : if (status >= MultiXactStatusNoKeyUpdate)
4799 alvherre@alvh.no-ip. 5037 [ # # ]:UBC 0 : elog(ERROR, "invalid lock mode in heap_lock_tuple");
5038 : :
5039 : : /* wait for multixact to end, or die trying */
4177 alvherre@alvh.no-ip. 5040 [ + + + - ]:CBC 42 : switch (wait_policy)
5041 : : {
5042 : 36 : case LockWaitBlock:
5043 : 36 : MultiXactIdWait((MultiXactId) xwait, status, infomask,
3189 tgl@sss.pgh.pa.us 5044 :GIC 36 : relation, &tuple->t_self, XLTW_Lock, NULL);
4177 alvherre@alvh.no-ip. 5045 :CBC 36 : break;
5046 : 2 : case LockWaitSkip:
5047 [ + - ]: 2 : if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
5048 : : status, infomask, relation,
5049 : : NULL, false))
5050 : : {
2549 andres@anarazel.de 5051 : 2 : result = TM_WouldBlock;
5052 : : /* recovery code expects to have buffer lock held */
703 akorotkov@postgresql 5053 : 2 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4177 alvherre@alvh.no-ip. 5054 : 2 : goto failed;
5055 : : }
4177 alvherre@alvh.no-ip. 5056 :UBC 0 : break;
4177 alvherre@alvh.no-ip. 5057 :CBC 4 : case LockWaitError:
5058 [ + - ]: 4 : if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
5059 : : status, infomask, relation,
5060 : : NULL, log_lock_failures))
5061 [ + - ]: 4 : ereport(ERROR,
5062 : : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5063 : : errmsg("could not obtain lock on row in relation \"%s\"",
5064 : : RelationGetRelationName(relation))));
5065 : :
4177 alvherre@alvh.no-ip. 5066 :UBC 0 : break;
5067 : : }
5068 : :
5069 : : /*
5070 : : * Of course, the multixact might not be done here: if we're
5071 : : * requesting a light lock mode, other transactions with light
5072 : : * locks could still be alive, as well as locks owned by our
5073 : : * own xact or other subxacts of this backend. We need to
5074 : : * preserve the surviving MultiXact members. Note that it
5075 : : * isn't absolutely necessary in the latter case, but doing so
5076 : : * is simpler.
5077 : : */
5078 : : }
5079 : : else
5080 : : {
5081 : : /* wait for regular transaction to end, or die trying */
4177 alvherre@alvh.no-ip. 5082 [ + + + - ]:CBC 157 : switch (wait_policy)
5083 : : {
5084 : 116 : case LockWaitBlock:
4057 heikki.linnakangas@i 5085 : 116 : XactLockTableWait(xwait, relation, &tuple->t_self,
5086 : : XLTW_Lock);
4177 alvherre@alvh.no-ip. 5087 : 116 : break;
5088 : 33 : case LockWaitSkip:
366 fujii@postgresql.org 5089 [ + - ]: 33 : if (!ConditionalXactLockTableWait(xwait, false))
5090 : : {
2549 andres@anarazel.de 5091 : 33 : result = TM_WouldBlock;
5092 : : /* recovery code expects to have buffer lock held */
703 akorotkov@postgresql 5093 : 33 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4177 alvherre@alvh.no-ip. 5094 : 33 : goto failed;
5095 : : }
4177 alvherre@alvh.no-ip. 5096 :UBC 0 : break;
4177 alvherre@alvh.no-ip. 5097 :CBC 8 : case LockWaitError:
285 fujii@postgresql.org 5098 [ + - ]: 8 : if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
4177 alvherre@alvh.no-ip. 5099 [ + - ]: 8 : ereport(ERROR,
5100 : : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5101 : : errmsg("could not obtain lock on row in relation \"%s\"",
5102 : : RelationGetRelationName(relation))));
4177 alvherre@alvh.no-ip. 5103 :UBC 0 : break;
5104 : : }
5105 : : }
5106 : :
5107 : : /* if there are updates, follow the update chain */
82 heikki.linnakangas@i 5108 [ + + + + ]:CBC 152 : if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
5109 [ + + ]: 60 : !ItemPointerEquals(&tuple->t_self, &t_ctid))
5110 : : {
5111 : : TM_Result res;
5112 : :
5113 : 45 : res = heap_lock_updated_tuple(relation,
5114 : : infomask, xwait, &t_ctid,
5115 : : GetCurrentTransactionId(),
5116 : : mode);
2549 andres@anarazel.de 5117 [ + + ]: 45 : if (res != TM_Ok)
5118 : : {
3992 alvherre@alvh.no-ip. 5119 : 2 : result = res;
5120 : : /* recovery code expects to have buffer lock held */
703 akorotkov@postgresql 5121 : 2 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3992 alvherre@alvh.no-ip. 5122 : 2 : goto failed;
5123 : : }
5124 : : }
5125 : :
703 akorotkov@postgresql 5126 : 150 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5127 : :
5128 : : /*
5129 : : * xwait is done, but if xwait had just locked the tuple then some
5130 : : * other xact could update this tuple before we get to this point.
5131 : : * Check for xmax change, and start over if so.
5132 : : */
3992 alvherre@alvh.no-ip. 5133 [ + + + + ]: 287 : if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5134 : 137 : !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5135 : : xwait))
5136 : 14 : goto l3;
5137 : :
5138 [ + + ]: 136 : if (!(infomask & HEAP_XMAX_IS_MULTI))
5139 : : {
5140 : : /*
5141 : : * Otherwise check if it committed or aborted. Note we cannot
5142 : : * be here if the tuple was only locked by somebody who didn't
5143 : : * conflict with us; that would have been handled above. So
5144 : : * that transaction must necessarily be gone by now. But
5145 : : * don't check for this in the multixact case, because some
5146 : : * locker transactions might still be running.
5147 : : */
703 akorotkov@postgresql 5148 : 101 : UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5149 : : }
5150 : : }
5151 : :
5152 : : /* By here, we're certain that we hold buffer exclusive lock again */
5153 : :
5154 : : /*
5155 : : * We may lock if the previous xmax aborted, or if it committed but only
5156 : : * locked the tuple without updating it; or if we didn't have to wait
5157 : : * at all for whatever reason.
5158 : : */
4799 alvherre@alvh.no-ip. 5159 [ + + ]: 79488 : if (!require_sleep ||
5160 [ + + + + ]: 236 : (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5161 [ + + ]: 184 : HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5162 : 84 : HeapTupleHeaderIsOnlyLocked(tuple->t_data))
2549 andres@anarazel.de 5163 : 79412 : result = TM_Ok;
1847 alvherre@alvh.no-ip. 5164 [ + + ]: 76 : else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid))
2549 andres@anarazel.de 5165 : 57 : result = TM_Updated;
5166 : : else
5167 : 19 : result = TM_Deleted;
5168 : : }
5169 : :
4799 alvherre@alvh.no-ip. 5170 : 77474 : failed:
2549 andres@anarazel.de 5171 [ + + ]: 157164 : if (result != TM_Ok)
5172 : : {
5173 [ + + + + : 284 : Assert(result == TM_SelfModified || result == TM_Updated ||
+ + - + ]
5174 : : result == TM_Deleted || result == TM_WouldBlock);
5175 : :
5176 : : /*
5177 : : * When locking a tuple under LockWaitSkip semantics, if we fail with
5178 : : * TM_WouldBlock above, it's possible for concurrent transactions to
5179 : : * release the lock and set HEAP_XMAX_INVALID in the meantime. So
5180 : : * this assert is slightly different from the equivalent one in
5181 : : * heap_delete and heap_update.
5182 : : */
1531 alvherre@alvh.no-ip. 5183 [ + + - + ]: 284 : Assert((result == TM_WouldBlock) ||
5184 : : !(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
2549 andres@anarazel.de 5185 [ + + - + ]: 284 : Assert(result != TM_Updated ||
5186 : : !ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid));
5187 : 284 : tmfd->ctid = tuple->t_data->t_ctid;
5188 : 284 : tmfd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5189 [ + + ]: 284 : if (result == TM_SelfModified)
5190 : 6 : tmfd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5191 : : else
5192 : 278 : tmfd->cmax = InvalidCommandId;
3527 5193 : 284 : goto out_locked;
5194 : : }
5195 : :
5196 : : /*
5197 : : * If we didn't pin the visibility map page and the page has become all
5198 : : * visible while we were busy locking the buffer, or during some
5199 : : * subsequent window during which we had it unlocked, we'll have to unlock
5200 : : * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5201 : : * unfortunate, especially since we'll now have to recheck whether the
5202 : : * tuple has been locked or updated under us, but hopefully it won't
5203 : : * happen very often.
5204 : : */
3510 5205 [ + + - + ]: 156880 : if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5206 : : {
703 akorotkov@postgresql 5207 :UBC 0 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
3510 andres@anarazel.de 5208 : 0 : visibilitymap_pin(relation, block, &vmbuffer);
703 akorotkov@postgresql 5209 : 0 : LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
3510 andres@anarazel.de 5210 : 0 : goto l3;
5211 : : }
5212 : :
4799 alvherre@alvh.no-ip. 5213 :CBC 156880 : xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5214 : 156880 : old_infomask = tuple->t_data->t_infomask;
5215 : :
5216 : : /*
5217 : : * If this is the first possibly-multixact-able operation in the current
5218 : : * transaction, set my per-backend OldestMemberMXactId setting. We can be
5219 : : * certain that the transaction will never become a member of any older
5220 : : * MultiXactIds than that. (We have to do this even if we end up just
5221 : : * using our own TransactionId below, since some other backend could
5222 : : * incorporate our XID into a MultiXact immediately afterwards.)
5223 : : */
5224 : 156880 : MultiXactIdSetOldestMember();
5225 : :
5226 : : /*
5227 : : * Compute the new xmax and infomask to store into the tuple. Note we do
5228 : : * not modify the tuple just yet, because that would leave it in the wrong
5229 : : * state if multixact.c elogs.
5230 : : */
5231 : 156880 : compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5232 : : GetCurrentTransactionId(), mode, false,
5233 : : &xid, &new_infomask, &new_infomask2);
5234 : :
7626 tgl@sss.pgh.pa.us 5235 : 156880 : START_CRIT_SECTION();
5236 : :
5237 : : /*
5238 : : * Store transaction information of xact locking the tuple.
5239 : : *
5240 : : * Note: Cmax is meaningless in this context, so don't set it; this avoids
5241 : : * possibly generating a useless combo CID. Moreover, if we're locking a
5242 : : * previously updated tuple, it's important to preserve the Cmax.
5243 : : *
5244 : : * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5245 : : * we would break the HOT chain.
5246 : : */
4799 alvherre@alvh.no-ip. 5247 : 156880 : tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5248 : 156880 : tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5249 : 156880 : tuple->t_data->t_infomask |= new_infomask;
5250 : 156880 : tuple->t_data->t_infomask2 |= new_infomask2;
5251 [ + + ]: 156880 : if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5252 : 154733 : HeapTupleHeaderClearHotUpdated(tuple->t_data);
7850 tgl@sss.pgh.pa.us 5253 : 156880 : HeapTupleHeaderSetXmax(tuple->t_data, xid);
5254 : :
5255 : : /*
5256 : : * Make sure there is no forward chain link in t_ctid. Note that in the
5257 : : * cases where the tuple has been updated, we must not overwrite t_ctid,
5258 : : * because it was set by the updater. Moreover, if the tuple has been
5259 : : * updated, we need to follow the update chain to lock the new versions of
5260 : : * the tuple as well.
5261 : : */
4799 alvherre@alvh.no-ip. 5262 [ + + ]: 156880 : if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5263 : 154733 : tuple->t_data->t_ctid = *tid;
5264 : :
5265 : : /* Clear only the all-frozen bit on visibility map if needed */
3527 andres@anarazel.de 5266 [ + + + + ]: 158760 : if (PageIsAllVisible(page) &&
5267 : 1880 : visibilitymap_clear(relation, block, vmbuffer,
5268 : : VISIBILITYMAP_ALL_FROZEN))
5269 : 14 : cleared_all_frozen = true;
5270 : :
5271 : :
703 akorotkov@postgresql 5272 : 156880 : MarkBufferDirty(*buffer);
5273 : :
5274 : : /*
5275 : : * XLOG stuff. You might think that we don't need an XLOG record because
5276 : : * there is no state change worth restoring after a crash. You would be
5277 : : * wrong however: we have just written either a TransactionId or a
5278 : : * MultiXactId that may never have been seen on disk before, and we need
5279 : : * to make sure that there are XLOG entries covering those ID numbers.
5280 : : * Else the same IDs might be re-used after a crash, which would be
5281 : : * disastrous if this page made it to disk before the crash. Essentially
5282 : : * we have to enforce the WAL log-before-data rule even in this case.
5283 : : * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5284 : : * entries for everything anyway.)
5285 : : */
5571 rhaas@postgresql.org 5286 [ + + + + : 156880 : if (RelationNeedsWAL(relation))
+ - + - ]
5287 : : {
5288 : : xl_heap_lock xlrec;
5289 : : XLogRecPtr recptr;
5290 : :
4133 heikki.linnakangas@i 5291 : 156529 : XLogBeginInsert();
703 akorotkov@postgresql 5292 : 156529 : XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
5293 : :
4133 heikki.linnakangas@i 5294 : 156529 : xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
1069 pg@bowt.ie 5295 : 156529 : xlrec.xmax = xid;
4799 alvherre@alvh.no-ip. 5296 : 313058 : xlrec.infobits_set = compute_infobits(new_infomask,
5297 : 156529 : tuple->t_data->t_infomask2);
3527 andres@anarazel.de 5298 : 156529 : xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
397 peter@eisentraut.org 5299 : 156529 : XLogRegisterData(&xlrec, SizeOfHeapLock);
5300 : :
5301 : : /* we don't decode row locks atm, so no need to log the origin */
5302 : :
4133 heikki.linnakangas@i 5303 : 156529 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK);
5304 : :
6454 tgl@sss.pgh.pa.us 5305 : 156529 : PageSetLSN(page, recptr);
5306 : : }
5307 : :
7626 5308 [ - + ]: 156880 : END_CRIT_SECTION();
5309 : :
2549 andres@anarazel.de 5310 : 156880 : result = TM_Ok;
5311 : :
3527 5312 : 157185 : out_locked:
703 akorotkov@postgresql 5313 : 157185 : LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5314 : :
3527 andres@anarazel.de 5315 : 558497 : out_unlocked:
5316 [ + + ]: 558497 : if (BufferIsValid(vmbuffer))
5317 : 401674 : ReleaseBuffer(vmbuffer);
5318 : :
5319 : : /*
5320 : : * Don't update the visibility map here. Locking a tuple doesn't change
5321 : : * visibility info.
5322 : : */
5323 : :
5324 : : /*
5325 : : * Now that we have successfully marked the tuple as locked, we can
5326 : : * release the lmgr tuple lock, if we had it.
5327 : : */
7624 tgl@sss.pgh.pa.us 5328 [ + + ]: 558497 : if (have_tuple_lock)
4799 alvherre@alvh.no-ip. 5329 : 166 : UnlockTupleTuplock(relation, tid, mode);
5330 : :
3527 andres@anarazel.de 5331 : 558497 : return result;
5332 : : }
5333 : :
5334 : : /*
5335 : : * Acquire heavyweight lock on the given tuple, in preparation for acquiring
5336 : : * its normal, Xmax-based tuple lock.
5337 : : *
5338 : : * have_tuple_lock is an input and output parameter: on input, it indicates
5339 : : * whether the lock has previously been acquired (and this function does
5340 : : * nothing in that case). If this function returns success, have_tuple_lock
5341 : : * has been flipped to true.
5342 : : *
5343 : : * Returns false if it was unable to obtain the lock; this can only happen if
5344 : : * wait_policy is Skip.
5345 : : */
5346 : : static bool
136 peter@eisentraut.org 5347 :GNC 327 : heap_acquire_tuplock(Relation relation, const ItemPointerData *tid, LockTupleMode mode,
5348 : : LockWaitPolicy wait_policy, bool *have_tuple_lock)
5349 : : {
4097 alvherre@alvh.no-ip. 5350 [ + + ]:CBC 327 : if (*have_tuple_lock)
5351 : 9 : return true;
5352 : :
5353 [ + + + - ]: 318 : switch (wait_policy)
5354 : : {
5355 : 273 : case LockWaitBlock:
5356 : 273 : LockTupleTuplock(relation, tid, mode);
5357 : 273 : break;
5358 : :
5359 : 34 : case LockWaitSkip:
366 fujii@postgresql.org 5360 [ + + ]: 34 : if (!ConditionalLockTupleTuplock(relation, tid, mode, false))
4097 alvherre@alvh.no-ip. 5361 : 1 : return false;
5362 : 33 : break;
5363 : :
5364 : 11 : case LockWaitError:
285 fujii@postgresql.org 5365 [ + + ]: 11 : if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
4097 alvherre@alvh.no-ip. 5366 [ + - ]: 1 : ereport(ERROR,
5367 : : (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5368 : : errmsg("could not obtain lock on row in relation \"%s\"",
5369 : : RelationGetRelationName(relation))));
5370 : 10 : break;
5371 : : }
5372 : 316 : *have_tuple_lock = true;
5373 : :
5374 : 316 : return true;
5375 : : }
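/*
 * Editorial sketch (not part of heapam.c): the caller pattern this routine
 * assumes, condensed from heap_lock_tuple() above.  The helper name and the
 * elided bodies are hypothetical; only the have_tuple_lock protocol is the
 * point here.
 */
#ifdef NOT_USED
static void
example_tuplock_protocol(Relation relation, ItemPointerData *tid,
						 LockTupleMode mode, LockWaitPolicy wait_policy)
{
	bool		have_tuple_lock = false;

	/* Safe to call repeatedly while chasing xmax; it locks at most once. */
	if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
							  &have_tuple_lock))
		return;					/* only reachable under LockWaitSkip */

	/* ... examine and mark the tuple while holding the buffer lock ... */

	/* Release only if this call stack actually took the heavyweight lock. */
	if (have_tuple_lock)
		UnlockTupleTuplock(relation, tid, mode);
}
#endif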
5376 : :
5377 : : /*
5378 : : * Given an original set of Xmax and infomask, and a transaction (identified by
5379 : : * add_to_xmax) acquiring a new lock of some mode, compute the new Xmax and
5380 : : * corresponding infomasks to use on the tuple.
5381 : : *
5382 : : * Note that this might have side effects such as creating a new MultiXactId.
5383 : : *
5384 : : * Most callers will have called HeapTupleSatisfiesUpdate before this function;
5385 : : * that will have set the HEAP_XMAX_INVALID bit if the xmax was a MultiXactId
5386 : : * but it was not running anymore. There is a race condition, which is that the
5387 : : * MultiXactId may have finished since then, but that uncommon case is handled
5388 : : * either here, or within MultiXactIdExpand.
5389 : : *
5390 : : * There is a similar race condition possible when the old xmax was a regular
5391 : : * TransactionId. We test TransactionIdIsInProgress again just to narrow the
5392 : : * window, but it's still possible to end up creating an unnecessary
5393 : : * MultiXactId. Fortunately this is harmless.
5394 : : */
5395 : : static void
4799 5396 : 2111430 : compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
5397 : : uint16 old_infomask2, TransactionId add_to_xmax,
5398 : : LockTupleMode mode, bool is_update,
5399 : : TransactionId *result_xmax, uint16 *result_infomask,
5400 : : uint16 *result_infomask2)
5401 : : {
5402 : : TransactionId new_xmax;
5403 : : uint16 new_infomask,
5404 : : new_infomask2;
5405 : :
4469 5406 [ + - ]: 2111430 : Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5407 : :
4799 5408 : 2215479 : l5:
5409 : 2215479 : new_infomask = 0;
5410 : 2215479 : new_infomask2 = 0;
5411 [ + + ]: 2215479 : if (old_infomask & HEAP_XMAX_INVALID)
5412 : : {
5413 : : /*
5414 : : * No previous locker; we just insert our own TransactionId.
5415 : : *
5416 : : * Note that it's critical that this case be the first one checked,
5417 : : * because there are several blocks below that come back to this one
5418 : : * to implement certain optimizations; old_infomask might contain
5419 : : * other dirty bits in those cases, but we don't really care.
5420 : : */
5421 [ + + ]: 2034793 : if (is_update)
5422 : : {
5423 : 1802188 : new_xmax = add_to_xmax;
5424 [ + + ]: 1802188 : if (mode == LockTupleExclusive)
5425 : 1531090 : new_infomask2 |= HEAP_KEYS_UPDATED;
5426 : : }
5427 : : else
5428 : : {
5429 : 232605 : new_infomask |= HEAP_XMAX_LOCK_ONLY;
5430 [ + + + + : 232605 : switch (mode)
- ]
5431 : : {
5432 : 2842 : case LockTupleKeyShare:
5433 : 2842 : new_xmax = add_to_xmax;
5434 : 2842 : new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5435 : 2842 : break;
5436 : 756 : case LockTupleShare:
5437 : 756 : new_xmax = add_to_xmax;
5438 : 756 : new_infomask |= HEAP_XMAX_SHR_LOCK;
5439 : 756 : break;
5440 : 133255 : case LockTupleNoKeyExclusive:
5441 : 133255 : new_xmax = add_to_xmax;
5442 : 133255 : new_infomask |= HEAP_XMAX_EXCL_LOCK;
5443 : 133255 : break;
5444 : 95752 : case LockTupleExclusive:
5445 : 95752 : new_xmax = add_to_xmax;
5446 : 95752 : new_infomask |= HEAP_XMAX_EXCL_LOCK;
5447 : 95752 : new_infomask2 |= HEAP_KEYS_UPDATED;
5448 : 95752 : break;
4799 alvherre@alvh.no-ip. 5449 :UBC 0 : default:
5450 : 0 : new_xmax = InvalidTransactionId; /* silence compiler */
5451 [ # # ]: 0 : elog(ERROR, "invalid lock mode");
5452 : : }
5453 : : }
5454 : : }
4799 alvherre@alvh.no-ip. 5455 [ + + ]:CBC 180686 : else if (old_infomask & HEAP_XMAX_IS_MULTI)
5456 : : {
5457 : : MultiXactStatus new_status;
5458 : :
5459 : : /*
5460 : : * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5461 : : * cross-check.
5462 : : */
5463 [ - + ]: 75568 : Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5464 : :
5465 : : /*
5466 : : * A multixact together with LOCK_ONLY set but neither lock bit set
5467 : : * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5468 : : * anymore. This check is critical for databases upgraded by
5469 : : * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5470 : : * that such multis are never passed.
5471 : : */
3551 5472 [ - + ]: 75568 : if (HEAP_LOCKED_UPGRADED(old_infomask))
5473 : : {
4799 alvherre@alvh.no-ip. 5474 :UBC 0 : old_infomask &= ~HEAP_XMAX_IS_MULTI;
5475 : 0 : old_infomask |= HEAP_XMAX_INVALID;
5476 : 0 : goto l5;
5477 : : }
5478 : :
5479 : : /*
5480 : : * If the XMAX is already a MultiXactId, then we need to expand it to
5481 : : * include add_to_xmax; but if all the members were lockers and are
5482 : : * all gone, we can do away with the IS_MULTI bit and just set
5483 : : * add_to_xmax as the only locker/updater. If all lockers are gone
5484 : : * and we have an updater that aborted, we can also do without a
5485 : : * multi.
5486 : : *
5487 : : * The cost of doing GetMultiXactIdMembers would be paid by
5488 : : * MultiXactIdExpand if we didn't do this, so this check does not
5489 : : * incur extra work anyhow.
5490 : : */
4247 alvherre@alvh.no-ip. 5491 [ + + ]:CBC 75568 : if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5492 : : {
4799 5493 [ + + ]: 26 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
3992 5494 [ + - ]: 10 : !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5495 : : old_infomask)))
5496 : : {
5497 : : /*
5498 : : * Reset these bits and restart; otherwise fall through to
5499 : : * create a new multi below.
5500 : : */
4799 5501 : 26 : old_infomask &= ~HEAP_XMAX_IS_MULTI;
5502 : 26 : old_infomask |= HEAP_XMAX_INVALID;
5503 : 26 : goto l5;
5504 : : }
5505 : : }
5506 : :
5507 : 75542 : new_status = get_mxact_status_for_lock(mode, is_update);
5508 : :
5509 : 75542 : new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5510 : : new_status);
5511 : 75542 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5512 : : }
5513 [ + + ]: 105118 : else if (old_infomask & HEAP_XMAX_COMMITTED)
5514 : : {
5515 : : /*
5516 : : * It's a committed update, so we need to preserve it as the updater
5517 : : * of the tuple.
5518 : : */
5519 : : MultiXactStatus status;
5520 : : MultiXactStatus new_status;
5521 : :
5522 [ - + ]: 13 : if (old_infomask2 & HEAP_KEYS_UPDATED)
4799 alvherre@alvh.no-ip. 5523 :UBC 0 : status = MultiXactStatusUpdate;
5524 : : else
4799 alvherre@alvh.no-ip. 5525 :CBC 13 : status = MultiXactStatusNoKeyUpdate;
5526 : :
5527 : 13 : new_status = get_mxact_status_for_lock(mode, is_update);
5528 : :
5529 : : /*
5530 : : * Since it's not running, it's obviously impossible for the old
5531 : : * updater to be identical to the current one, so we need not check
5532 : : * for that case as we do in the block above.
5533 : : */
5534 : 13 : new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5535 : 13 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5536 : : }
5537 [ + + ]: 105105 : else if (TransactionIdIsInProgress(xmax))
5538 : : {
5539 : : /*
5540 : : * If the XMAX is a valid, in-progress TransactionId, then we need to
5541 : : * create a new MultiXactId that includes both the old locker or
5542 : : * updater and our own TransactionId.
5543 : : */
5544 : : MultiXactStatus new_status;
5545 : : MultiXactStatus old_status;
5546 : : LockTupleMode old_mode;
5547 : :
5548 [ + + ]: 105096 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5549 : : {
5550 [ + + ]: 105070 : if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
4469 5551 : 5678 : old_status = MultiXactStatusForKeyShare;
4799 5552 [ + + ]: 99392 : else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
4469 5553 : 437 : old_status = MultiXactStatusForShare;
4799 5554 [ + - ]: 98955 : else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5555 : : {
5556 [ + + ]: 98955 : if (old_infomask2 & HEAP_KEYS_UPDATED)
4469 5557 : 92775 : old_status = MultiXactStatusForUpdate;
5558 : : else
5559 : 6180 : old_status = MultiXactStatusForNoKeyUpdate;
5560 : : }
5561 : : else
5562 : : {
5563 : : /*
5564 : : * LOCK_ONLY can be present alone only when a page has been
5565 : : * upgraded by pg_upgrade. But in that case,
5566 : : * TransactionIdIsInProgress() should have returned false. We
5567 : : * assume it's no longer locked in this case.
5568 : : */
4799 alvherre@alvh.no-ip. 5569 [ # # ]:UBC 0 : elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5570 : 0 : old_infomask |= HEAP_XMAX_INVALID;
5571 : 0 : old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5572 : 0 : goto l5;
5573 : : }
5574 : : }
5575 : : else
5576 : : {
5577 : : /* it's an update, but which kind? */
4799 alvherre@alvh.no-ip. 5578 [ - + ]:CBC 26 : if (old_infomask2 & HEAP_KEYS_UPDATED)
4469 alvherre@alvh.no-ip. 5579 :UBC 0 : old_status = MultiXactStatusUpdate;
5580 : : else
4469 alvherre@alvh.no-ip. 5581 :CBC 26 : old_status = MultiXactStatusNoKeyUpdate;
5582 : : }
5583 : :
5584 : 105096 : old_mode = TUPLOCK_from_mxstatus(old_status);
5585 : :
5586 : : /*
5587 : : * If the lock to be acquired is for the same TransactionId as the
5588 : : * existing lock, there's an optimization possible: consider only the
5589 : : * strongest of both locks as the only one present, and restart.
5590 : : */
4799 5591 [ + + ]: 105096 : if (xmax == add_to_xmax)
5592 : : {
5593 : : /*
5594 : : * Note that it's not possible for the original tuple to be
5595 : : * updated: we wouldn't be here because the tuple would have been
5596 : : * invisible and we wouldn't try to update it. As a subtlety,
5597 : : * this code can also run when traversing an update chain to lock
5598 : : * future versions of a tuple. But we wouldn't be here either,
5599 : : * because the add_to_xmax would be different from the original
5600 : : * updater.
5601 : : */
4469 5602 [ - + ]: 104015 : Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5603 : :
5604 : : /* acquire the strongest of both */
5605 [ + + ]: 104015 : if (mode < old_mode)
5606 : 52208 : mode = old_mode;
5607 : : /* mustn't touch is_update */
5608 : :
5609 : 104015 : old_infomask |= HEAP_XMAX_INVALID;
5610 : 104015 : goto l5;
5611 : : }
5612 : :
5613 : : /* otherwise, just fall back to creating a new multixact */
5614 : 1081 : new_status = get_mxact_status_for_lock(mode, is_update);
5615 : 1081 : new_xmax = MultiXactIdCreate(xmax, old_status,
5616 : : add_to_xmax, new_status);
4799 5617 : 1081 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5618 : : }
5619 [ + + + + ]: 14 : else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5620 : 5 : TransactionIdDidCommit(xmax))
5621 : 1 : {
5622 : : /*
5623 : : * It's a committed update, so we must preserve it as the updater of
5624 : : * the tuple.
5625 : : */
5626 : : MultiXactStatus status;
5627 : : MultiXactStatus new_status;
5628 : :
5629 [ - + ]: 1 : if (old_infomask2 & HEAP_KEYS_UPDATED)
4799 alvherre@alvh.no-ip. 5630 :UBC 0 : status = MultiXactStatusUpdate;
5631 : : else
4799 alvherre@alvh.no-ip. 5632 :CBC 1 : status = MultiXactStatusNoKeyUpdate;
5633 : :
5634 : 1 : new_status = get_mxact_status_for_lock(mode, is_update);
5635 : :
5636 : : /*
5637 : : * Since it's not running, it's obviously impossible for the old
5638 : : * updater to be identical to the current one, so we need not check
5639 : : * for that case as we do in the block above.
5640 : : */
5641 : 1 : new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5642 : 1 : GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5643 : : }
5644 : : else
5645 : : {
5646 : : /*
5647 : : * Can get here iff the locking/updating transaction was running when
5648 : : * the infomask was extracted from the tuple, but finished before
5649 : : * TransactionIdIsInProgress got to run. Deal with it as if there was
5650 : : * no locker at all in the first place.
5651 : : */
5652 : 8 : old_infomask |= HEAP_XMAX_INVALID;
5653 : 8 : goto l5;
5654 : : }
5655 : :
5656 : 2111430 : *result_infomask = new_infomask;
5657 : 2111430 : *result_infomask2 = new_infomask2;
5658 : 2111430 : *result_xmax = new_xmax;
5659 : 2111430 : }
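/*
 * Editorial worked example (not in the original): for the common case of a
 * tuple with no live locker (HEAP_XMAX_INVALID set in old_infomask) being
 * locked FOR UPDATE -- mode == LockTupleExclusive, is_update == false --
 * the first branch above yields
 *
 *		*result_xmax      = add_to_xmax (the caller's own XID)
 *		*result_infomask  = HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_EXCL_LOCK
 *		*result_infomask2 = HEAP_KEYS_UPDATED
 *
 * so no MultiXactId is created; multis are needed only when an existing
 * locker or updater must be preserved alongside add_to_xmax.
 */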
5660 : :
5661 : : /*
5662 : : * Subroutine for heap_lock_updated_tuple_rec.
5663 : : *
5664 : : * Given a hypothetical multixact status held by the transaction identified
5665 : : * with the given xid, does the current transaction need to wait, fail, or can
5666 : : * it continue if it wanted to acquire a lock of the given mode? "needwait"
5667 : : * is set to true if waiting is necessary; if it can continue, then TM_Ok is
5668 : : * returned. If the lock is already held by the current transaction, return
5669 : : * TM_SelfModified. In case of a conflict with another transaction, a
5670 : : * different HeapTupleSatisfiesUpdate return code is returned.
5671 : : *
5672 : : * The held status is said to be hypothetical because it might correspond to a
5673 : : * lock held by a single Xid, i.e. not a real MultiXactId; we express it this
5674 : : * way for simplicity of API.
5675 : : */
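/*
 * Editorial worked example (not in the original), using the standard
 * heavyweight-lock mapping from tupleLockExtraInfo: a holder with status
 * MultiXactStatusForKeyShare maps to AccessShareLock, which conflicts only
 * with AccessExclusiveLock.  Hence, against an in-progress holder, wanting
 * LockTupleNoKeyExclusive (ExclusiveLock) proceeds without waiting, while
 * wanting LockTupleExclusive (AccessExclusiveLock) sets *needwait.  If the
 * holder has already committed and was only a locker, its lock is gone and
 * TM_Ok is returned either way.
 */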
5676 : : static TM_Result
4491 5677 : 38774 : test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid,
5678 : : LockTupleMode mode, HeapTuple tup,
5679 : : bool *needwait)
5680 : : {
5681 : : MultiXactStatus wantedstatus;
5682 : :
5683 : 38774 : *needwait = false;
5684 : 38774 : wantedstatus = get_mxact_status_for_lock(mode, false);
5685 : :
5686 : : /*
5687 : : * Note: we *must* check TransactionIdIsInProgress before
5688 : : * TransactionIdDidAbort/Commit; see comment at top of heapam_visibility.c
5689 : : * for an explanation.
5690 : : */
5691 [ - + ]: 38774 : if (TransactionIdIsCurrentTransactionId(xid))
5692 : : {
5693 : : /*
5694 : : * The tuple has already been locked by our own transaction. This is
5695 : : * very rare but can happen if multiple transactions are trying to
5696 : : * lock an ancient version of the same tuple.
5697 : : */
2549 andres@anarazel.de 5698 :UBC 0 : return TM_SelfModified;
5699 : : }
4491 alvherre@alvh.no-ip. 5700 [ + + ]:CBC 38774 : else if (TransactionIdIsInProgress(xid))
5701 : : {
5702 : : /*
5703 : : * If the locking transaction is running, what we do depends on
5704 : : * whether the lock modes conflict: if they do, then we must wait for
5705 : : * it to finish; otherwise we can fall through to lock this tuple
5706 : : * version without waiting.
5707 : : */
5708 [ + + ]: 36539 : if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5709 : 36539 : LOCKMODE_from_mxstatus(wantedstatus)))
5710 : : {
5711 : 8 : *needwait = true;
5712 : : }
5713 : :
5714 : : /*
5715 : : * If we set needwait above, then this value doesn't matter;
5716 : : * otherwise, this value signals to caller that it's okay to proceed.
5717 : : */
2549 andres@anarazel.de 5718 : 36539 : return TM_Ok;
5719 : : }
4491 alvherre@alvh.no-ip. 5720 [ + + ]: 2235 : else if (TransactionIdDidAbort(xid))
2549 andres@anarazel.de 5721 : 206 : return TM_Ok;
4491 alvherre@alvh.no-ip. 5722 [ + - ]: 2029 : else if (TransactionIdDidCommit(xid))
5723 : : {
5724 : : /*
5725 : : * The other transaction committed. If it was only a locker, then the
5726 : : * lock is completely gone now and we can return success; but if it
5727 : : * was an update, then what we do depends on whether the two lock
5728 : : * modes conflict. If they conflict, then we must report error to
5729 : : * caller. But if they don't, we can fall through to allow the current
5730 : : * transaction to lock the tuple.
5731 : : *
5732 : : * Note: the reason we worry about ISUPDATE here is that as soon as
5733 : : * a transaction ends, all its locks are gone and meaningless, and
5734 : : * thus we can ignore them; whereas its updates persist. In the
5735 : : * TransactionIdIsInProgress case, above, we don't need to check
5736 : : * because we know the lock is still "alive" and thus a conflict must
5737 : : * always be checked.
5738 : : */
4483 5739 [ + + ]: 2029 : if (!ISUPDATE_from_mxstatus(status))
2549 andres@anarazel.de 5740 : 2020 : return TM_Ok;
5741 : :
4491 alvherre@alvh.no-ip. 5742 [ + + ]: 9 : if (DoLockModesConflict(LOCKMODE_from_mxstatus(status),
5743 : 9 : LOCKMODE_from_mxstatus(wantedstatus)))
5744 : : {
5745 : : /* bummer */
1847 5746 [ + + ]: 8 : if (!ItemPointerEquals(&tup->t_self, &tup->t_data->t_ctid))
2549 andres@anarazel.de 5747 : 6 : return TM_Updated;
5748 : : else
5749 : 2 : return TM_Deleted;
5750 : : }
5751 : :
5752 : 1 : return TM_Ok;
5753 : : }
5754 : :
5755 : : /* Not in progress, not aborted, not committed -- must have crashed */
2549 andres@anarazel.de 5756 :UBC 0 : return TM_Ok;
5757 : : }
5758 : :
5759 : :
5760 : : /*
5761 : : * Recursive part of heap_lock_updated_tuple
5762 : : *
5763 : : * Fetch the tuple pointed to by tid in rel, and mark it as locked by the given
5764 : : * xid with the given mode; if this tuple is updated, recurse to lock the new
5765 : : * version as well.
5766 : : */
5767 : : static TM_Result
82 heikki.linnakangas@i 5768 :CBC 2212 : heap_lock_updated_tuple_rec(Relation rel, TransactionId priorXmax,
5769 : : const ItemPointerData *tid, TransactionId xid,
5770 : : LockTupleMode mode)
5771 : : {
5772 : : TM_Result result;
5773 : : ItemPointerData tupid;
5774 : : HeapTupleData mytup;
5775 : : Buffer buf;
5776 : : uint16 new_infomask,
5777 : : new_infomask2,
5778 : : old_infomask,
5779 : : old_infomask2;
5780 : : TransactionId xmax,
5781 : : new_xmax;
3527 andres@anarazel.de 5782 : 2212 : bool cleared_all_frozen = false;
5783 : : bool pinned_desired_page;
5784 : 2212 : Buffer vmbuffer = InvalidBuffer;
5785 : : BlockNumber block;
5786 : :
4799 alvherre@alvh.no-ip. 5787 : 2212 : ItemPointerCopy(tid, &tupid);
5788 : :
5789 : : for (;;)
5790 : : {
5791 : 2215 : new_infomask = 0;
5792 : 2215 : new_xmax = InvalidTransactionId;
3527 andres@anarazel.de 5793 : 2215 : block = ItemPointerGetBlockNumber(&tupid);
4799 alvherre@alvh.no-ip. 5794 : 2215 : ItemPointerCopy(&tupid, &(mytup.t_self));
5795 : :
1432 tgl@sss.pgh.pa.us 5796 [ + - ]: 2215 : if (!heap_fetch(rel, SnapshotAny, &mytup, &buf, false))
5797 : : {
5798 : : /*
5799 : : * if we fail to find the updated version of the tuple, it's
5800 : : * because it was vacuumed/pruned away after its creator
5801 : : * transaction aborted. So behave as if we got to the end of the
5802 : : * chain, and there's no further tuple to lock: return success to
5803 : : * caller.
5804 : : */
2549 andres@anarazel.de 5805 :UBC 0 : result = TM_Ok;
2935 tgl@sss.pgh.pa.us 5806 : 0 : goto out_unlocked;
5807 : : }
5808 : :
4799 alvherre@alvh.no-ip. 5809 :CBC 2215 : l4:
5810 [ - + ]: 2223 : CHECK_FOR_INTERRUPTS();
5811 : :
5812 : : /*
5813 : : * Before locking the buffer, pin the visibility map page if it
5814 : : * appears to be necessary. Since we haven't got the lock yet,
5815 : : * someone else might be in the middle of changing this, so we'll need
5816 : : * to recheck after we have the lock.
5817 : : */
3527 andres@anarazel.de 5818 [ - + ]: 2223 : if (PageIsAllVisible(BufferGetPage(buf)))
5819 : : {
3527 andres@anarazel.de 5820 :UBC 0 : visibilitymap_pin(rel, block, &vmbuffer);
2935 tgl@sss.pgh.pa.us 5821 : 0 : pinned_desired_page = true;
5822 : : }
5823 : : else
2935 tgl@sss.pgh.pa.us 5824 :CBC 2223 : pinned_desired_page = false;
5825 : :
4799 alvherre@alvh.no-ip. 5826 : 2223 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5827 : :
5828 : : /*
5829 : : * If we didn't pin the visibility map page and the page has become
5830 : : * all visible while we were busy locking the buffer, we'll have to
5831 : : * unlock and re-lock, to avoid holding the buffer lock across I/O.
5832 : : * That's a bit unfortunate, but hopefully shouldn't happen often.
5833 : : *
5834 : : * Note: in some paths through this function, we will reach here
5835 : : * holding a pin on a vm page that may or may not be the one matching
5836 : : * this page. If this page isn't all-visible, we won't use the vm
5837 : : * page, but we hold onto such a pin till the end of the function.
5838 : : */
2935 tgl@sss.pgh.pa.us 5839 [ + - - + ]: 2223 : if (!pinned_desired_page && PageIsAllVisible(BufferGetPage(buf)))
5840 : : {
3510 andres@anarazel.de 5841 :UBC 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
5842 : 0 : visibilitymap_pin(rel, block, &vmbuffer);
5843 : 0 : LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
5844 : : }
5845 : :
5846 : : /*
5847 : : * Check the tuple XMIN against prior XMAX, if any. If we reached the
5848 : : * end of the chain, we're done, so return success.
5849 : : */
4491 alvherre@alvh.no-ip. 5850 [ + - + + ]:CBC 4446 : if (TransactionIdIsValid(priorXmax) &&
3055 5851 : 2223 : !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data),
5852 : : priorXmax))
5853 : : {
2549 andres@anarazel.de 5854 : 2 : result = TM_Ok;
3527 5855 : 2 : goto out_locked;
5856 : : }
5857 : :
5858 : : /*
5859 : : * Also check Xmin: if this tuple was created by an aborted
5860 : : * (sub)transaction, then we already locked the last live one in the
5861 : : * chain, thus we're done, so return success.
5862 : : */
3474 alvherre@alvh.no-ip. 5863 [ + + ]: 2221 : if (TransactionIdDidAbort(HeapTupleHeaderGetXmin(mytup.t_data)))
5864 : : {
2549 andres@anarazel.de 5865 : 24 : result = TM_Ok;
2935 tgl@sss.pgh.pa.us 5866 : 24 : goto out_locked;
5867 : : }
5868 : :
4799 alvherre@alvh.no-ip. 5869 : 2197 : old_infomask = mytup.t_data->t_infomask;
4491 5870 : 2197 : old_infomask2 = mytup.t_data->t_infomask2;
4799 5871 : 2197 : xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5872 : :
5873 : : /*
5874 : : * If this tuple version has been updated or locked by some concurrent
5875 : : * transaction(s), what we do depends on whether our lock mode
5876 : : * conflicts with what those other transactions hold, and also on the
5877 : : * status of them.
5878 : : */
4491 5879 [ + + ]: 2197 : if (!(old_infomask & HEAP_XMAX_INVALID))
5880 : : {
5881 : : TransactionId rawxmax;
5882 : : bool needwait;
5883 : :
5884 : 2138 : rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
5885 [ + + ]: 2138 : if (old_infomask & HEAP_XMAX_IS_MULTI)
5886 : : {
5887 : : int nmembers;
5888 : : int i;
5889 : : MultiXactMember *members;
5890 : :
5891 : : /*
5892 : : * We don't need a test for pg_upgrade'd tuples: this is only
5893 : : * applied to tuples after the first in an update chain. Said
5894 : : * first tuple in the chain may well be locked-in-9.2-and-
5895 : : * pg_upgraded, but that one was already locked by our caller,
5896 : : * not us; and any subsequent ones cannot be because our
5897 : : * caller must necessarily have obtained a snapshot later than
5898 : : * the pg_upgrade itself.
5899 : : */
3551 5900 [ - + ]: 2109 : Assert(!HEAP_LOCKED_UPGRADED(mytup.t_data->t_infomask));
5901 : :
4247 5902 : 2109 : nmembers = GetMultiXactIdMembers(rawxmax, &members, false,
3189 tgl@sss.pgh.pa.us 5903 : 2109 : HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
4491 alvherre@alvh.no-ip. 5904 [ + + ]: 40854 : for (i = 0; i < nmembers; i++)
5905 : : {
3527 andres@anarazel.de 5906 : 38745 : result = test_lockmode_for_conflict(members[i].status,
5907 : 38745 : members[i].xid,
5908 : : mode,
5909 : : &mytup,
5910 : : &needwait);
5911 : :
5912 : : /*
5913 : : * If the tuple was already locked by ourselves in a
5914 : : * previous iteration of this (say heap_lock_tuple was
5915 : : * forced to restart the locking loop because of a change
5916 : : * in xmax), then we hold the lock already on this tuple
5917 : : * version and we don't need to do anything; and this is
5918 : : * not an error condition either. We just need to skip
5919 : : * this tuple and continue locking the next version in the
5920 : : * update chain.
5921 : : */
2549 5922 [ - + ]: 38745 : if (result == TM_SelfModified)
5923 : : {
3154 alvherre@alvh.no-ip. 5924 :UBC 0 : pfree(members);
5925 : 0 : goto next;
5926 : : }
5927 : :
4491 alvherre@alvh.no-ip. 5928 [ - + ]:CBC 38745 : if (needwait)
5929 : : {
4491 alvherre@alvh.no-ip. 5930 :UBC 0 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
4379 5931 : 0 : XactLockTableWait(members[i].xid, rel,
5932 : : &mytup.t_self,
5933 : : XLTW_LockUpdated);
4491 5934 : 0 : pfree(members);
5935 : 0 : goto l4;
5936 : : }
2549 andres@anarazel.de 5937 [ - + ]:CBC 38745 : if (result != TM_Ok)
5938 : : {
4491 alvherre@alvh.no-ip. 5939 :UBC 0 : pfree(members);
3527 andres@anarazel.de 5940 : 0 : goto out_locked;
5941 : : }
5942 : : }
4491 alvherre@alvh.no-ip. 5943 [ + - ]:CBC 2109 : if (members)
5944 : 2109 : pfree(members);
5945 : : }
5946 : : else
5947 : : {
5948 : : MultiXactStatus status;
5949 : :
5950 : : /*
5951 : : * For a non-multi Xmax, we first need to compute the
5952 : : * corresponding MultiXactStatus by using the infomask bits.
5953 : : */
5954 [ + + ]: 29 : if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5955 : : {
5956 [ + - ]: 10 : if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5957 : 10 : status = MultiXactStatusForKeyShare;
4491 alvherre@alvh.no-ip. 5958 [ # # ]:UBC 0 : else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5959 : 0 : status = MultiXactStatusForShare;
5960 [ # # ]: 0 : else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5961 : : {
5962 [ # # ]: 0 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5963 : 0 : status = MultiXactStatusForUpdate;
5964 : : else
5965 : 0 : status = MultiXactStatusForNoKeyUpdate;
5966 : : }
5967 : : else
5968 : : {
5969 : : /*
5970 : : * LOCK_ONLY present alone (a pg_upgraded tuple marked
5971 : : * as share-locked in the old cluster) shouldn't be
5972 : : * seen in the middle of an update chain.
5973 : : */
5974 [ # # ]: 0 : elog(ERROR, "invalid lock status in tuple");
5975 : : }
5976 : : }
5977 : : else
5978 : : {
5979 : : /* it's an update, but which kind? */
4491 alvherre@alvh.no-ip. 5980 [ + + ]:CBC 19 : if (old_infomask2 & HEAP_KEYS_UPDATED)
5981 : 14 : status = MultiXactStatusUpdate;
5982 : : else
5983 : 5 : status = MultiXactStatusNoKeyUpdate;
5984 : : }
5985 : :
3527 andres@anarazel.de 5986 : 29 : result = test_lockmode_for_conflict(status, rawxmax, mode,
5987 : : &mytup, &needwait);
5988 : :
5989 : : /*
5990 : : * If the tuple was already locked by ourselves in a previous
5991 : : * iteration of this (say heap_lock_tuple was forced to
5992 : : * restart the locking loop because of a change in xmax), then
5993 : : * we hold the lock already on this tuple version and we don't
5994 : : * need to do anything; and this is not an error condition
5995 : : * either. We just need to skip this tuple and continue
5996 : : * locking the next version in the update chain.
5997 : : */
2549 5998 [ - + ]: 29 : if (result == TM_SelfModified)
3154 alvherre@alvh.no-ip. 5999 :UBC 0 : goto next;
6000 : :
4491 alvherre@alvh.no-ip. 6001 [ + + ]:CBC 29 : if (needwait)
6002 : : {
6003 : 8 : LockBuffer(buf, BUFFER_LOCK_UNLOCK);
4057 heikki.linnakangas@i 6004 : 8 : XactLockTableWait(rawxmax, rel, &mytup.t_self,
6005 : : XLTW_LockUpdated);
4491 alvherre@alvh.no-ip. 6006 : 8 : goto l4;
6007 : : }
2549 andres@anarazel.de 6008 [ + + ]: 21 : if (result != TM_Ok)
6009 : : {
3527 6010 : 8 : goto out_locked;
6011 : : }
6012 : : }
6013 : : }
6014 : :
6015 : : /* compute the new Xmax and infomask values for the tuple ... */
4799 alvherre@alvh.no-ip. 6016 : 2181 : compute_new_xmax_infomask(xmax, old_infomask, mytup.t_data->t_infomask2,
6017 : : xid, mode, false,
6018 : : &new_xmax, &new_infomask, &new_infomask2);
6019 : :
3527 andres@anarazel.de 6020 [ - + - - ]: 2181 : if (PageIsAllVisible(BufferGetPage(buf)) &&
3527 andres@anarazel.de 6021 :UBC 0 : visibilitymap_clear(rel, block, vmbuffer,
6022 : : VISIBILITYMAP_ALL_FROZEN))
6023 : 0 : cleared_all_frozen = true;
6024 : :
4799 alvherre@alvh.no-ip. 6025 :CBC 2181 : START_CRIT_SECTION();
6026 : :
6027 : : /* ... and set them */
6028 : 2181 : HeapTupleHeaderSetXmax(mytup.t_data, new_xmax);
6029 : 2181 : mytup.t_data->t_infomask &= ~HEAP_XMAX_BITS;
6030 : 2181 : mytup.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6031 : 2181 : mytup.t_data->t_infomask |= new_infomask;
6032 : 2181 : mytup.t_data->t_infomask2 |= new_infomask2;
6033 : :
6034 : 2181 : MarkBufferDirty(buf);
6035 : :
6036 : : /* XLOG stuff */
6037 [ + - + + : 2181 : if (RelationNeedsWAL(rel))
+ - + - ]
6038 : : {
6039 : : xl_heap_lock_updated xlrec;
6040 : : XLogRecPtr recptr;
3616 kgrittn@postgresql.o 6041 : 2181 : Page page = BufferGetPage(buf);
6042 : :
4133 heikki.linnakangas@i 6043 : 2181 : XLogBeginInsert();
6044 : 2181 : XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
6045 : :
6046 : 2181 : xlrec.offnum = ItemPointerGetOffsetNumber(&mytup.t_self);
4799 alvherre@alvh.no-ip. 6047 : 2181 : xlrec.xmax = new_xmax;
6048 : 2181 : xlrec.infobits_set = compute_infobits(new_infomask, new_infomask2);
3527 andres@anarazel.de 6049 : 2181 : xlrec.flags =
6050 : 2181 : cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;
6051 : :
397 peter@eisentraut.org 6052 : 2181 : XLogRegisterData(&xlrec, SizeOfHeapLockUpdated);
6053 : :
4133 heikki.linnakangas@i 6054 : 2181 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_LOCK_UPDATED);
6055 : :
4799 alvherre@alvh.no-ip. 6056 : 2181 : PageSetLSN(page, recptr);
6057 : : }
6058 : :
6059 [ - + ]: 2181 : END_CRIT_SECTION();
6060 : :
3154 6061 : 2181 : next:
6062 : : /* if we find the end of update chain, we're done. */
4799 6063 [ + - + - ]: 4362 : if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
2899 andres@anarazel.de 6064 [ + + ]: 4362 : HeapTupleHeaderIndicatesMovedPartitions(mytup.t_data) ||
4673 bruce@momjian.us 6065 [ + + ]: 2185 : ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
4799 alvherre@alvh.no-ip. 6066 : 4 : HeapTupleHeaderIsOnlyLocked(mytup.t_data))
6067 : : {
2549 andres@anarazel.de 6068 : 2178 : result = TM_Ok;
3527 6069 : 2178 : goto out_locked;
6070 : : }
6071 : :
6072 : : /* tail recursion */
4491 alvherre@alvh.no-ip. 6073 : 3 : priorXmax = HeapTupleHeaderGetUpdateXid(mytup.t_data);
4799 6074 : 3 : ItemPointerCopy(&(mytup.t_data->t_ctid), &tupid);
6075 : 3 : UnlockReleaseBuffer(buf);
6076 : : }
6077 : :
6078 : : result = TM_Ok;
6079 : :
3527 andres@anarazel.de 6080 : 2212 : out_locked:
6081 : 2212 : UnlockReleaseBuffer(buf);
6082 : :
2935 tgl@sss.pgh.pa.us 6083 : 2212 : out_unlocked:
3527 andres@anarazel.de 6084 [ - + ]: 2212 : if (vmbuffer != InvalidBuffer)
3527 andres@anarazel.de 6085 :UBC 0 : ReleaseBuffer(vmbuffer);
6086 : :
3527 andres@anarazel.de 6087 :CBC 2212 : return result;
6088 : : }
6089 : :
6090 : : /*
6091 : : * heap_lock_updated_tuple
6092 : : * Follow update chain when locking an updated tuple, acquiring locks (row
6093 : : * marks) on the updated versions.
6094 : : *
6095 : : * 'prior_infomask', 'prior_raw_xmax' and 'prior_ctid' are the corresponding
6096 : : * fields from the initial tuple. We will lock the tuples starting from the
6097 : : * one that 'prior_ctid' points to. Note: This function does not lock the
6098 : : * initial tuple itself.
6099 : : *
6100 : : * This function doesn't check visibility, it just unconditionally marks the
6101 : : * tuple(s) as locked. If any tuple in the update chain is being deleted
6102 : : * concurrently (or updated with the key being modified), sleep until the
6103 : : * transaction doing it is finished.
6104 : : *
6105 : : * Note that we don't acquire heavyweight tuple locks on the tuples we walk
6106 : : * when we have to wait for other transactions to release them, as opposed to
6107 : : * what heap_lock_tuple does. The reason is that having more than one
6108 : : * transaction walking the chain is probably uncommon enough that the
6109 : : * risk of starvation is low: one of the preconditions for being here is that
6110 : : * the snapshot in use predates the update that created this tuple (because we
6111 : : * started at an earlier version of the tuple), but at the same time such a
6112 : : * transaction cannot be using repeatable read or serializable isolation
6113 : : * levels, because that would lead to a serializability failure.
6114 : : */
6115 : : static TM_Result
82 heikki.linnakangas@i 6116 : 2214 : heap_lock_updated_tuple(Relation rel,
6117 : : uint16 prior_infomask,
6118 : : TransactionId prior_raw_xmax,
6119 : : const ItemPointerData *prior_ctid,
6120 : : TransactionId xid, LockTupleMode mode)
6121 : : {
6122 : 2214 : INJECTION_POINT("heap_lock_updated_tuple", NULL);
6123 : :
6124 : : /*
6125 : : * If the tuple has moved into another partition (effectively a delete)
6126 : : * stop here.
6127 : : */
6128 [ + + ]: 2214 : if (!ItemPointerIndicatesMovedPartitions(prior_ctid))
6129 : : {
6130 : : TransactionId prior_xmax;
6131 : :
6132 : : /*
6133 : : * If this is the first possibly-multixact-able operation in the
6134 : : * current transaction, set my per-backend OldestMemberMXactId
6135 : : * setting. We can be certain that the transaction will never become a
6136 : : * member of any older MultiXactIds than that. (We have to do this
6137 : : * even if we end up just using our own TransactionId below, since
6138 : : * some other backend could incorporate our XID into a MultiXact
6139 : : * immediately afterwards.)
6140 : : */
4799 alvherre@alvh.no-ip. 6141 : 2212 : MultiXactIdSetOldestMember();
6142 : :
82 heikki.linnakangas@i 6143 : 4424 : prior_xmax = (prior_infomask & HEAP_XMAX_IS_MULTI) ?
6144 [ + + ]: 2212 : MultiXactIdGetUpdateXid(prior_raw_xmax, prior_infomask) : prior_raw_xmax;
6145 : 2212 : return heap_lock_updated_tuple_rec(rel, prior_xmax, prior_ctid, xid, mode);
6146 : : }
6147 : :
6148 : : /* nothing to lock */
2549 andres@anarazel.de 6149 : 2 : return TM_Ok;
6150 : : }
6151 : :
6152 : : /*
6153 : : * heap_finish_speculative - mark speculative insertion as successful
6154 : : *
6155 : : * To successfully finish a speculative insertion we have to clear the
6156 : : * speculative token from the tuple. To do so, the t_ctid field, which will contain a
6157 : : * speculative token value, is modified in place to point to the tuple itself,
6158 : : * which is characteristic of a newly inserted ordinary tuple.
6159 : : *
6160 : : * NB: It is not ok to commit without either finishing or aborting a
6161 : : * speculative insertion. We could treat speculative tuples of committed
6162 : : * transactions implicitly as completed, but then we would have to be prepared
6163 : : * to deal with speculative tokens on committed tuples. That wouldn't be
6164 : : * difficult - no-one looks at the ctid field of a tuple with invalid xmax -
6165 : : * but clearing the token at completion isn't very expensive either.
6166 : : * An explicit confirmation WAL record also makes logical decoding simpler.
6167 : : */
6168 : : void
136 peter@eisentraut.org 6169 :GNC 2118 : heap_finish_speculative(Relation relation, const ItemPointerData *tid)
6170 : : {
6171 : : Buffer buffer;
6172 : : Page page;
6173 : : OffsetNumber offnum;
6174 : : ItemId lp;
6175 : : HeapTupleHeader htup;
6176 : :
2549 andres@anarazel.de 6177 :CBC 2118 : buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3964 6178 : 2118 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
198 peter@eisentraut.org 6179 :GNC 2118 : page = BufferGetPage(buffer);
6180 : :
2549 andres@anarazel.de 6181 :CBC 2118 : offnum = ItemPointerGetOffsetNumber(tid);
90 tgl@sss.pgh.pa.us 6182 [ + - - + ]:GNC 2118 : if (offnum < 1 || offnum > PageGetMaxOffsetNumber(page))
90 tgl@sss.pgh.pa.us 6183 [ # # ]:UNC 0 : elog(ERROR, "offnum out of range");
90 tgl@sss.pgh.pa.us 6184 :GNC 2118 : lp = PageGetItemId(page, offnum);
6185 [ - + ]: 2118 : if (!ItemIdIsNormal(lp))
3769 andres@anarazel.de 6186 [ # # ]:UBC 0 : elog(ERROR, "invalid lp");
6187 : :
3964 andres@anarazel.de 6188 :CBC 2118 : htup = (HeapTupleHeader) PageGetItem(page, lp);
6189 : :
6190 : : /* NO EREPORT(ERROR) from here till changes are logged */
6191 : 2118 : START_CRIT_SECTION();
6192 : :
2549 6193 [ - + ]: 2118 : Assert(HeapTupleHeaderIsSpeculative(htup));
6194 : :
3964 6195 : 2118 : MarkBufferDirty(buffer);
6196 : :
6197 : : /*
6198 : : * Replace the speculative insertion token with a real t_ctid, pointing to
6199 : : * itself like it does on regular tuples.
6200 : : */
2549 6201 : 2118 : htup->t_ctid = *tid;
6202 : :
6203 : : /* XLOG stuff */
3964 6204 [ + + + + : 2118 : if (RelationNeedsWAL(relation))
+ - + - ]
6205 : : {
6206 : : xl_heap_confirm xlrec;
6207 : : XLogRecPtr recptr;
6208 : :
2549 6209 : 2102 : xlrec.offnum = ItemPointerGetOffsetNumber(tid);
6210 : :
3964 6211 : 2102 : XLogBeginInsert();
6212 : :
6213 : : /* We want the same filtering on this as on a plain insert */
3370 6214 : 2102 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6215 : :
397 peter@eisentraut.org 6216 : 2102 : XLogRegisterData(&xlrec, SizeOfHeapConfirm);
3964 andres@anarazel.de 6217 : 2102 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6218 : :
6219 : 2102 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6220 : :
6221 : 2102 : PageSetLSN(page, recptr);
6222 : : }
6223 : :
6224 [ - + ]: 2118 : END_CRIT_SECTION();
6225 : :
6226 : 2118 : UnlockReleaseBuffer(buffer);
6227 : 2118 : }
6228 : :
6229 : : /*
6230 : : * heap_abort_speculative - kill a speculatively inserted tuple
6231 : : *
6232 : : * Marks a tuple that was speculatively inserted in the same command as dead,
6233 : : * by setting its xmin to invalid. That makes it immediately appear as dead
6234 : : * to all transactions, including our own. In particular, it makes
6235 : : * HeapTupleSatisfiesDirty() regard the tuple as dead, so that another backend
6236 : : * inserting a duplicate key value won't unnecessarily wait for our whole
6237 : : * transaction to finish (it'll just wait for our speculative insertion to
6238 : : * finish).
6239 : : *
6240 : : * Killing the tuple prevents "unprincipled deadlocks", which are deadlocks
6241 : : * that arise due to a mutual dependency that is not user visible. By
6242 : : * definition, unprincipled deadlocks cannot be prevented by the user
6243 : : * reordering lock acquisition in client code, because the implementation-level
6244 : : * lock acquisitions are not under the user's direct control. If speculative
6245 : : * inserters did not take this precaution, then under high concurrency they
6246 : : * could deadlock with each other, which would not be acceptable.
6247 : : *
6248 : : * This is somewhat redundant with heap_delete, but we prefer to have a
6249 : : * dedicated routine with stripped down requirements. Note that this is also
6250 : : * used to delete the TOAST tuples created during speculative insertion.
6251 : : *
6252 : : * This routine does not affect logical decoding as it only looks at
6253 : : * confirmation records.
6254 : : */
6255 : : void
136 peter@eisentraut.org 6256 :GNC 16 : heap_abort_speculative(Relation relation, const ItemPointerData *tid)
6257 : : {
3964 andres@anarazel.de 6258 :CBC 16 : TransactionId xid = GetCurrentTransactionId();
6259 : : ItemId lp;
6260 : : HeapTupleData tp;
6261 : : Page page;
6262 : : BlockNumber block;
6263 : : Buffer buffer;
6264 : :
6265 [ - + ]: 16 : Assert(ItemPointerIsValid(tid));
6266 : :
6267 : 16 : block = ItemPointerGetBlockNumber(tid);
6268 : 16 : buffer = ReadBuffer(relation, block);
3616 kgrittn@postgresql.o 6269 : 16 : page = BufferGetPage(buffer);
6270 : :
3964 andres@anarazel.de 6271 : 16 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6272 : :
6273 : : /*
6274 : : * Page can't be all visible, we just inserted into it, and are still
6275 : : * running.
6276 : : */
6277 [ - + ]: 16 : Assert(!PageIsAllVisible(page));
6278 : :
6279 : 16 : lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6280 [ - + ]: 16 : Assert(ItemIdIsNormal(lp));
6281 : :
6282 : 16 : tp.t_tableOid = RelationGetRelid(relation);
6283 : 16 : tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6284 : 16 : tp.t_len = ItemIdGetLength(lp);
6285 : 16 : tp.t_self = *tid;
6286 : :
6287 : : /*
6288 : : * Sanity check that the tuple really is a speculatively inserted tuple,
6289 : : * inserted by us.
6290 : : */
6291 [ - + ]: 16 : if (tp.t_data->t_choice.t_heap.t_xmin != xid)
3964 andres@anarazel.de 6292 [ # # ]:UBC 0 : elog(ERROR, "attempted to kill a tuple inserted by another transaction");
3497 andres@anarazel.de 6293 [ + + - + ]:CBC 16 : if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
3964 andres@anarazel.de 6294 [ # # ]:UBC 0 : elog(ERROR, "attempted to kill a non-speculative tuple");
3964 andres@anarazel.de 6295 [ - + ]:CBC 16 : Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6296 : :
6297 : : /*
6298 : : * No need to check for serializable conflicts here. There is never a
6299 : : * need for a combo CID, either. No need to extract replica identity, or
6300 : : * do anything special with infomask bits.
6301 : : */
6302 : :
6303 : 16 : START_CRIT_SECTION();
6304 : :
6305 : : /*
6306 : : * The tuple will become DEAD immediately. Flag that this page is a
6307 : : * candidate for pruning by setting xmin to TransactionXmin. While not
6308 : : * immediately prunable, it is the oldest xid we can cheaply determine
6309 : : * that's safe against wraparound / being older than the table's
6310 : : * relfrozenxid. To defend against the unlikely case of a new relation
6311 : : * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
6312 : : * if so (vacuum can't subsequently move relfrozenxid to beyond
6313 : : * TransactionXmin, so there's no race here).
6314 : : */
2170 6315 [ - + ]: 16 : Assert(TransactionIdIsValid(TransactionXmin));
6316 : : {
685 noah@leadboat.com 6317 : 16 : TransactionId relfrozenxid = relation->rd_rel->relfrozenxid;
6318 : : TransactionId prune_xid;
6319 : :
6320 [ - + ]: 16 : if (TransactionIdPrecedes(TransactionXmin, relfrozenxid))
685 noah@leadboat.com 6321 :UBC 0 : prune_xid = relfrozenxid;
6322 : : else
685 noah@leadboat.com 6323 :CBC 16 : prune_xid = TransactionXmin;
6324 [ - + + + : 16 : PageSetPrunable(page, prune_xid);
- + ]
6325 : : }
6326 : :
6327 : : /* store transaction information of xact deleting the tuple */
3964 andres@anarazel.de 6328 : 16 : tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6329 : 16 : tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6330 : :
6331 : : /*
6332 : : * Set the tuple header xmin to InvalidTransactionId. This makes the
6333 : : * tuple immediately invisible to everyone. (In particular, to any
6334 : : * transactions waiting on the speculative token, woken up later.)
6335 : : */
6336 : 16 : HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6337 : :
6338 : : /* Clear the speculative insertion token too */
6339 : 16 : tp.t_data->t_ctid = tp.t_self;
6340 : :
6341 : 16 : MarkBufferDirty(buffer);
6342 : :
6343 : : /*
6344 : : * XLOG stuff
6345 : : *
6346 : : * The WAL records generated here match heap_delete(). The same recovery
6347 : : * routines are used.
6348 : : */
6349 [ + + + + : 16 : if (RelationNeedsWAL(relation))
+ - + - ]
6350 : : {
6351 : : xl_heap_delete xlrec;
6352 : : XLogRecPtr recptr;
6353 : :
6354 : 12 : xlrec.flags = XLH_DELETE_IS_SUPER;
6355 : 24 : xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6356 : 12 : tp.t_data->t_infomask2);
6357 : 12 : xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
6358 : 12 : xlrec.xmax = xid;
6359 : :
6360 : 12 : XLogBeginInsert();
397 peter@eisentraut.org 6361 : 12 : XLogRegisterData(&xlrec, SizeOfHeapDelete);
3964 andres@anarazel.de 6362 : 12 : XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6363 : :
6364 : : /* No replica identity & replication origin logged */
6365 : :
6366 : 12 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6367 : :
6368 : 12 : PageSetLSN(page, recptr);
6369 : : }
6370 : :
6371 [ - + ]: 16 : END_CRIT_SECTION();
6372 : :
6373 : 16 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6374 : :
6375 [ + + ]: 16 : if (HeapTupleHasExternal(&tp))
6376 : : {
3497 6377 [ - + ]: 1 : Assert(!IsToastRelation(relation));
2354 rhaas@postgresql.org 6378 : 1 : heap_toast_delete(relation, &tp, true);
6379 : : }
6380 : :
6381 : : /*
6382 : : * Never need to mark tuple for invalidation, since catalogs don't support
6383 : : * speculative insertion
6384 : : */
6385 : :
6386 : : /* Now we can release the buffer */
3964 andres@anarazel.de 6387 : 16 : ReleaseBuffer(buffer);
6388 : :
6389 : : /* count deletion, as we counted the insertion too */
6390 : 16 : pgstat_count_heap_delete(relation);
6391 : 16 : }
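/*
 * Editorial sketch (not part of heapam.c): the intended lifecycle of the two
 * routines above, condensed from the table-AM speculative insertion path
 * (cf. heapam_handler.c).  The function name and the conflict flag are
 * hypothetical; the conflict recheck itself is the caller's responsibility.
 */
#ifdef NOT_USED
static void
example_speculative_lifecycle(Relation rel, HeapTuple tuple, bool conflict)
{
	uint32		specToken;

	/* Take the token other backends will wait on, and stamp the tuple. */
	specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
	HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
	heap_insert(rel, tuple, GetCurrentCommandId(true),
				HEAP_INSERT_SPECULATIVE, NULL);

	/* ... the caller re-checks for a conflicting row here ... */

	if (!conflict)
		heap_finish_speculative(rel, &tuple->t_self);	/* token -> real ctid */
	else
		heap_abort_speculative(rel, &tuple->t_self);	/* tuple dies at once */

	SpeculativeInsertionLockRelease(GetCurrentTransactionId());
}
#endif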
6392 : :
6393 : : /*
6394 : : * heap_inplace_lock - protect inplace update from concurrent heap_update()
6395 : : *
6396 : : * Evaluate whether the tuple's state is compatible with a no-key update.
6397 : : * Current transaction rowmarks are fine, as is KEY SHARE from any
6398 : : * transaction. If compatible, return true with the buffer exclusive-locked,
6399 : : * and the caller must release that by calling
6400 : : * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
6401 : : * an error. Otherwise, call release_callback(arg), wait for blocking
6402 : : * transactions to end, and return false.
6403 : : *
6404 : : * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
6405 : : * DDL, this doesn't guarantee any particular predicate locking.
6406 : : *
6407 : : * heap_delete() is a rarer source of blocking transactions (xwait). We'll
6408 : : * wait for such a transaction just like for the normal heap_update() case.
6409 : : * Normal concurrent DROP commands won't cause that, because all inplace
6410 : : * updaters take some lock that conflicts with DROP. An explicit SQL "DELETE
6411 : : * FROM pg_class" can cause it. By waiting, if the concurrent transaction
6412 : : * executed both "DELETE FROM pg_class" and "INSERT INTO pg_class", our caller
6413 : : * can find the successor tuple.
6414 : : *
6415 : : * Readers of inplace-updated fields expect changes to those fields are
6416 : : * durable. For example, vac_truncate_clog() reads datfrozenxid from
6417 : : * pg_database tuples via catalog snapshots. A future snapshot must not
6418 : : * return a lower datfrozenxid for the same database OID (lower in the
6419 : : * FullTransactionIdPrecedes() sense). We achieve that since no update of a
6420 : : * tuple can start while we hold a lock on its buffer. In cases like
6421 : : * BEGIN;GRANT;CREATE INDEX;COMMIT we're inplace-updating a tuple visible only
6422 : : * to this transaction. ROLLBACK then is one case where it's okay to lose
6423 : : * inplace updates. (Restoring relhasindex=false on ROLLBACK is fine, since
6424 : : * any concurrent CREATE INDEX would have blocked, then inplace-updated the
6425 : : * committed tuple.)
6426 : : *
6427 : : * In principle, we could avoid waiting by overwriting every tuple in the
6428 : : * updated tuple chain. Reader expectations permit updating a tuple only if
6429 : : * it's aborted, is the tail of the chain, or we already updated the tuple
6430 : : * referenced in its t_ctid. Hence, we would need to overwrite the tuples in
6431 : : * order from tail to head. That would imply either (a) mutating all tuples
6432 : : * in one critical section or (b) accepting a chance of partial completion.
6433 : : * Partial completion of a relfrozenxid update would have the weird
6434 : : * consequence that the table's next VACUUM could see the table's relfrozenxid
6435 : : * move forward between vacuum_get_cutoffs() and finishing.
6436 : : */
6437 : : bool
537 noah@leadboat.com 6438 : 95178 : heap_inplace_lock(Relation relation,
6439 : : HeapTuple oldtup_ptr, Buffer buffer,
6440 : : void (*release_callback) (void *), void *arg)
6441 : : {
6442 : 95178 : HeapTupleData oldtup = *oldtup_ptr; /* minimize diff vs. heap_update() */
6443 : : TM_Result result;
6444 : : bool ret;
6445 : :
6446 : : #ifdef USE_ASSERT_CHECKING
6447 [ + + ]: 95178 : if (RelationGetRelid(relation) == RelationRelationId)
6448 : 94163 : check_inplace_rel_lock(oldtup_ptr);
6449 : : #endif
6450 : :
6451 [ - + ]: 95178 : Assert(BufferIsValid(buffer));
6452 : :
6453 : : /*
6454 : : * Register shared cache invals if necessary. Other sessions may finish
6455 : : * inplace updates of this tuple between this step and LockTuple(). Since
6456 : : * inplace updates don't change cache keys, that's harmless.
6457 : : *
6458 : : * While it's tempting to register invals only after confirming we can
6459 : : * return true, the following obstacle precludes reordering steps that
6460 : : * way. Registering invals might reach a CatalogCacheInitializeCache()
6461 : : * that locks "buffer". That would hang indefinitely if running after our
6462 : : * own LockBuffer(). Hence, we must register invals before LockBuffer().
6463 : : */
90 6464 : 95178 : CacheInvalidateHeapTupleInplace(relation, oldtup_ptr);
6465 : :
537 6466 : 95178 : LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
6467 : 95178 : LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6468 : :
6469 : : /*----------
6470 : : * Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
6471 : : *
6472 : : * - wait unconditionally
6473 : : * - already locked tuple above, since inplace needs that unconditionally
6474 : : * - don't recheck header after wait: simpler to defer to next iteration
6475 : : * - don't try to continue even if the updater aborts: likewise
6476 : : * - no crosscheck
6477 : : */
6478 : 95178 : result = HeapTupleSatisfiesUpdate(&oldtup, GetCurrentCommandId(false),
6479 : : buffer);
6480 : :
6481 [ - + ]: 95178 : if (result == TM_Invisible)
6482 : : {
6483 : : /* no known way this can happen */
3972 rhaas@postgresql.org 6484 [ # # ]:UBC 0 : ereport(ERROR,
6485 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6486 : : errmsg_internal("attempted to overwrite invisible tuple")));
6487 : : }
537 noah@leadboat.com 6488 [ - + ]:CBC 95178 : else if (result == TM_SelfModified)
6489 : : {
6490 : : /*
6491 : : * CREATE INDEX might reach this if an expression is silly enough to
6492 : : * call e.g. SELECT ... FROM pg_class FOR SHARE. C code of other SQL
6493 : : * statements might get here after a heap_update() of the same row, in
6494 : : * the absence of an intervening CommandCounterIncrement().
6495 : : */
537 noah@leadboat.com 6496 [ # # ]:UBC 0 : ereport(ERROR,
6497 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
6498 : : errmsg("tuple to be updated was already modified by an operation triggered by the current command")));
6499 : : }
537 noah@leadboat.com 6500 [ + + ]:CBC 95178 : else if (result == TM_BeingModified)
6501 : : {
6502 : : TransactionId xwait;
6503 : : uint16 infomask;
6504 : :
6505 : 61 : xwait = HeapTupleHeaderGetRawXmax(oldtup.t_data);
6506 : 61 : infomask = oldtup.t_data->t_infomask;
6507 : :
6508 [ + + ]: 61 : if (infomask & HEAP_XMAX_IS_MULTI)
6509 : : {
6510 : 5 : LockTupleMode lockmode = LockTupleNoKeyExclusive;
6511 : 5 : MultiXactStatus mxact_status = MultiXactStatusNoKeyUpdate;
6512 : : int remain;
6513 : :
6514 [ + + ]: 5 : if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
6515 : : lockmode, NULL))
6516 : : {
6517 : 2 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
502 6518 : 2 : release_callback(arg);
537 6519 : 2 : ret = false;
6520 : 2 : MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
6521 : : relation, &oldtup.t_self, XLTW_Update,
6522 : : &remain);
6523 : : }
6524 : : else
6525 : 3 : ret = true;
6526 : : }
6527 [ + + ]: 56 : else if (TransactionIdIsCurrentTransactionId(xwait))
6528 : 1 : ret = true;
6529 [ + + ]: 55 : else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
6530 : 1 : ret = true;
6531 : : else
6532 : : {
6533 : 54 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
502 6534 : 54 : release_callback(arg);
537 6535 : 54 : ret = false;
6536 : 54 : XactLockTableWait(xwait, relation, &oldtup.t_self,
6537 : : XLTW_Update);
6538 : : }
6539 : : }
6540 : : else
6541 : : {
6542 : 95117 : ret = (result == TM_Ok);
6543 [ + + ]: 95117 : if (!ret)
6544 : : {
6545 : 1 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
502 6546 : 1 : release_callback(arg);
6547 : : }
6548 : : }
6549 : :
6550 : : /*
6551 : : * GetCatalogSnapshot() relies on invalidation messages to know when to
6552 : : * take a new snapshot. COMMIT of xwait is responsible for sending the
6553 : : * invalidation. We're not acquiring heavyweight locks sufficient to
6554 : : * block if not yet sent, so we must take a new snapshot to ensure a later
6555 : : * attempt has a fair chance. While we don't need this if xwait aborted,
6556 : : * don't bother optimizing that.
6557 : : */
537 6558 [ + + ]: 95178 : if (!ret)
6559 : : {
6560 : 57 : UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
498 6561 : 57 : ForgetInplace_Inval();
537 6562 : 57 : InvalidateCatalogSnapshot();
6563 : : }
6564 : 95178 : return ret;
6565 : : }
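Per the contract above, a caller drives heap_inplace_lock() in a retry loop, re-fetching the tuple after every false return. A hedged sketch of that protocol (fetch_tuple, decide_edit, newtup, release_callback, and arg are hypothetical stand-ins for the systable-level machinery, not real symbols):

/* Illustrative sketch only -- not part of heapam.c. */
HeapTupleData oldtup;
Buffer		buffer;

for (;;)
{
	fetch_tuple(relation, &oldtup, &buffer);	/* hypothetical re-fetch */
	if (heap_inplace_lock(relation, &oldtup, buffer,
						  release_callback, arg))
		break;					/* success: buffer is exclusive-locked */

	/* heap_inplace_lock waited out the blocker and released everything */
}

if (decide_edit(&oldtup, newtup))	/* hypothetical: build edited copy */
	heap_inplace_update_and_unlock(relation, &oldtup, newtup, buffer);
else
	heap_inplace_unlock(relation, &oldtup, buffer);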
6566 : :
6567 : : /*
6568 : : * heap_inplace_update_and_unlock - core of systable_inplace_update_finish
6569 : : *
6570 : : * The tuple cannot change size, and therefore its header fields and null
6571 : : * bitmap (if any) don't change either.
6572 : : *
6573 : : * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
6574 : : */
6575 : : void
6576 : 65763 : heap_inplace_update_and_unlock(Relation relation,
6577 : : HeapTuple oldtup, HeapTuple tuple,
6578 : : Buffer buffer)
6579 : : {
6580 : 65763 : HeapTupleHeader htup = oldtup->t_data;
6581 : : uint32 oldlen;
6582 : : uint32 newlen;
6583 : : char *dst;
6584 : : char *src;
506 6585 : 65763 : int nmsgs = 0;
6586 : 65763 : SharedInvalidationMessage *invalMessages = NULL;
6587 : 65763 : bool RelcacheInitFileInval = false;
6588 : :
537 6589 [ - + ]: 65763 : Assert(ItemPointerEquals(&oldtup->t_self, &tuple->t_self));
6590 : 65763 : oldlen = oldtup->t_len - htup->t_hoff;
7249 tgl@sss.pgh.pa.us 6591 : 65763 : newlen = tuple->t_len - tuple->t_data->t_hoff;
6592 [ + - - + ]: 65763 : if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
3769 andres@anarazel.de 6593 [ # # ]:UBC 0 : elog(ERROR, "wrong tuple length");
6594 : :
506 noah@leadboat.com 6595 :CBC 65763 : dst = (char *) htup + htup->t_hoff;
6596 : 65763 : src = (char *) tuple->t_data + tuple->t_data->t_hoff;
6597 : :
6598 : : /* Like RecordTransactionCommit(), log only if needed */
6599 [ + + ]: 65763 : if (XLogStandbyInfoActive())
6600 : 58879 : nmsgs = inplaceGetInvalidationMessages(&invalMessages,
6601 : : &RelcacheInitFileInval);
6602 : :
6603 : : /*
6604 : : * Unlink relcache init files as needed. If unlinking, acquire
6605 : : * RelCacheInitLock until after associated invalidations. By doing this
6606 : : * in advance, if we checkpoint and then crash between inplace
6607 : : * XLogInsert() and inval, we don't rely on StartupXLOG() ->
6608 : : * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
6609 : : * neglect to PANIC on EIO.
6610 : : */
6611 : 65763 : PreInplace_Inval();
6612 : :
6613 : : /*----------
6614 : : * NO EREPORT(ERROR) from here till changes are complete
6615 : : *
6616 : : * Our exclusive buffer lock won't stop a reader having already pinned and
6617 : : * checked visibility for this tuple. With the usual order of changes
6618 : : * (i.e. updating the buffer contents before WAL logging), a reader could
6619 : : * observe our not-yet-persistent update to relfrozenxid and update
6620 : : * datfrozenxid based on that. A crash in that moment could allow
6621 : : * datfrozenxid to overtake relfrozenxid:
6622 : : *
6623 : : * ["D" is a VACUUM (ONLY_DATABASE_STATS)]
6624 : : * ["R" is a VACUUM tbl]
6625 : : * D: vac_update_datfrozenxid() -> systable_beginscan(pg_class)
6626 : : * D: systable_getnext() returns pg_class tuple of tbl
6627 : : * R: memcpy() into pg_class tuple of tbl
6628 : : * D: raise pg_database.datfrozenxid, XLogInsert(), finish
6629 : : * [crash]
6630 : : * [recovery restores datfrozenxid w/o relfrozenxid]
6631 : : *
6632 : : * We avoid that by using a temporary copy of the buffer to hide our
6633 : : * change from other backends until the change has been WAL-logged. We
6634 : : * apply our change to the temporary copy and WAL-log it, before modifying
6635 : : * the real page. That way any action a reader of the in-place-updated
6636 : : * value takes will be WAL logged after this change.
6637 : : */
6638 : 65763 : START_CRIT_SECTION();
6639 : :
5 andres@anarazel.de 6640 :GNC 65763 : MarkBufferDirty(buffer);
6641 : :
6642 : : /* XLOG stuff */
5571 rhaas@postgresql.org 6643 [ + - + + :CBC 65763 : if (RelationNeedsWAL(relation))
+ - + + ]
6644 : : {
6645 : : xl_heap_inplace xlrec;
6646 : : PGAlignedBlock copied_buffer;
506 noah@leadboat.com 6647 : 65759 : char *origdata = (char *) BufferGetBlock(buffer);
6648 : 65759 : Page page = BufferGetPage(buffer);
6649 : 65759 : uint16 lower = ((PageHeader) page)->pd_lower;
6650 : 65759 : uint16 upper = ((PageHeader) page)->pd_upper;
6651 : : uintptr_t dst_offset_in_block;
6652 : : RelFileLocator rlocator;
6653 : : ForkNumber forkno;
6654 : : BlockNumber blkno;
6655 : : XLogRecPtr recptr;
6656 : :
4133 heikki.linnakangas@i 6657 : 65759 : xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
506 noah@leadboat.com 6658 : 65759 : xlrec.dbId = MyDatabaseId;
6659 : 65759 : xlrec.tsId = MyDatabaseTableSpace;
6660 : 65759 : xlrec.relcacheInitFileInval = RelcacheInitFileInval;
6661 : 65759 : xlrec.nmsgs = nmsgs;
6662 : :
4133 heikki.linnakangas@i 6663 : 65759 : XLogBeginInsert();
397 peter@eisentraut.org 6664 : 65759 : XLogRegisterData(&xlrec, MinSizeOfHeapInplace);
506 noah@leadboat.com 6665 [ + + ]: 65759 : if (nmsgs != 0)
397 peter@eisentraut.org 6666 : 43936 : XLogRegisterData(invalMessages,
6667 : : nmsgs * sizeof(SharedInvalidationMessage));
6668 : :
6669 : : /* register block matching what buffer will look like after changes */
506 noah@leadboat.com 6670 : 65759 : memcpy(copied_buffer.data, origdata, lower);
6671 : 65759 : memcpy(copied_buffer.data + upper, origdata + upper, BLCKSZ - upper);
6672 : 65759 : dst_offset_in_block = dst - origdata;
6673 : 65759 : memcpy(copied_buffer.data + dst_offset_in_block, src, newlen);
6674 : 65759 : BufferGetTag(buffer, &rlocator, &forkno, &blkno);
6675 [ - + ]: 65759 : Assert(forkno == MAIN_FORKNUM);
6676 : 65759 : XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data,
6677 : : REGBUF_STANDARD);
6678 : 65759 : XLogRegisterBufData(0, src, newlen);
6679 : :
6680 : : /* inplace updates aren't decoded atm, don't log the origin */
6681 : :
4133 heikki.linnakangas@i 6682 : 65759 : recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6683 : :
506 noah@leadboat.com 6684 : 65759 : PageSetLSN(page, recptr);
6685 : : }
6686 : :
6687 : 65763 : memcpy(dst, src, newlen);
6688 : :
6689 : 65763 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6690 : :
6691 : : /*
6692 : : * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
6693 : : * do this before UnlockTuple().
6694 : : */
6695 : 65763 : AtInplace_Inval();
6696 : :
7249 tgl@sss.pgh.pa.us 6697 [ - + ]: 65763 : END_CRIT_SECTION();
506 noah@leadboat.com 6698 : 65763 : UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
6699 : :
6700 : 65763 : AcceptInvalidationMessages(); /* local processing of just-sent inval */
6701 : :
6702 : : /*
6703 : : * Queue a transactional inval, for logical decoding and for third-party
6704 : : * code that might have been relying on it since long before inplace
6705 : : * update adopted immediate invalidation. See README.tuplock section
6706 : : * "Reading inplace-updated columns" for logical decoding details.
6707 : : */
7249 tgl@sss.pgh.pa.us 6708 [ + + ]: 65763 : if (!IsBootstrapProcessingMode())
5325 6709 : 50820 : CacheInvalidateHeapTuple(relation, tuple, NULL);
7249 6710 : 65763 : }
6711 : :
6712 : : /*
6713 : : * heap_inplace_unlock - reverse of heap_inplace_lock
6714 : : */
6715 : : void
537 noah@leadboat.com 6716 : 29358 : heap_inplace_unlock(Relation relation,
6717 : : HeapTuple oldtup, Buffer buffer)
6718 : : {
6719 : 29358 : LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6720 : 29358 : UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
498 6721 : 29358 : ForgetInplace_Inval();
537 6722 : 29358 : }
6723 : :
6724 : : #define FRM_NOOP 0x0001
6725 : : #define FRM_INVALIDATE_XMAX 0x0002
6726 : : #define FRM_RETURN_IS_XID 0x0004
6727 : : #define FRM_RETURN_IS_MULTI 0x0008
6728 : : #define FRM_MARK_COMMITTED 0x0010
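FreezeMultiXactId() (defined below) reports its decision through these bits. A hedged sketch of how its caller, heap_prepare_freeze_tuple(), dispatches on them; tuple, xid, cutoffs, pagefrz, and frz are that caller's state, and the real dispatch appears in full further down:

/* Illustrative sketch only -- see heap_prepare_freeze_tuple() for the real code. */
uint16		flags;
uint16		newbits,
			newbits2;
TransactionId newxmax;

newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
							&flags, pagefrz);
if (flags & FRM_NOOP)
{
	/* keep xmax as-is; page-level trackers were ratcheted back for us */
}
else if (flags & FRM_RETURN_IS_XID)
{
	/* xmax becomes the surviving updater's plain XID */
	frz->t_infomask &= ~HEAP_XMAX_BITS;
	if (flags & FRM_MARK_COMMITTED)
		frz->t_infomask |= HEAP_XMAX_COMMITTED;
	frz->xmax = newxmax;
}
else if (flags & FRM_RETURN_IS_MULTI)
{
	/* xmax becomes a freshly created multi; get matching hint bits */
	GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
	frz->t_infomask |= newbits;
	frz->t_infomask2 |= newbits2;
	frz->xmax = newxmax;
}
else
{
	Assert(flags & FRM_INVALIDATE_XMAX);
	frz->xmax = InvalidTransactionId;	/* xmax is frozen away entirely */
}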
6729 : :
6730 : : /*
6731 : : * FreezeMultiXactId
6732 : : * Determine what to do during freezing when a tuple is marked by a
6733 : : * MultiXactId.
6734 : : *
6735 : : * "flags" is an output value; it's used to tell caller what to do on return.
6736 : : * "pagefrz" is an input/output value, used to manage page level freezing.
6737 : : *
6738 : : * Possible values that we can set in "flags":
6739 : : * FRM_NOOP
6740 : : * don't do anything -- keep existing Xmax
6741 : : * FRM_INVALIDATE_XMAX
6742 : : * mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
6743 : : * FRM_RETURN_IS_XID
6744 : : * The Xid return value is a single update Xid to set as xmax.
6745 : : * FRM_MARK_COMMITTED
6746 : : * Xmax can be marked as HEAP_XMAX_COMMITTED
6747 : : * FRM_RETURN_IS_MULTI
6748 : : * The return value is a new MultiXactId to set as new Xmax.
6749 : : * (caller must obtain proper infomask bits using GetMultiXactIdHintBits)
6750 : : *
6751 : : * Caller delegates control of page freezing to us. In practice we always
6752 : : * force freezing of caller's page unless FRM_NOOP processing is indicated.
6753 : : * We help caller ensure that XIDs < FreezeLimit and MXIDs < MultiXactCutoff
6754 : : * can never be left behind. We freely choose when and how to process each
6755 : : * Multi, without ever violating the cutoff postconditions for freezing.
6756 : : *
6757 : : * It's useful to remove Multis on a proactive timeline (relative to freezing
6758 : : * XIDs) to keep MultiXact member SLRU buffer misses to a minimum. It can
6759 : : * also be cheaper for us in the short run, since eager processing avoids
6760 : : * those same SLRU buffer misses.
6761 : : *
6762 : : * NB: Creates a _new_ MultiXactId when FRM_RETURN_IS_MULTI is set, though only
6763 : : * when FreezeLimit and/or MultiXactCutoff cutoffs leave us with no choice.
6764 : : * Allocation can usually be deferred, which is often enough to avoid it altogether.
6765 : : * Allocating new multis during VACUUM should be avoided on general principle;
6766 : : * only VACUUM can advance relminmxid, so allocating new Multis here comes with
6767 : : * its own special risks.
6768 : : *
6769 : : * NB: Caller must maintain "no freeze" NewRelfrozenXid/NewRelminMxid trackers
6770 : : * using heap_tuple_should_freeze when we haven't forced page-level freezing.
6771 : : *
6772 : : * NB: Caller should avoid needlessly calling heap_tuple_should_freeze when we
6773 : : * have already forced page-level freezing, since that might incur the same
6774 : : * SLRU buffer misses that we specifically intended to avoid by freezing.
6775 : : */
6776 : : static TransactionId
4472 alvherre@alvh.no-ip. 6777 : 8 : FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
6778 : : const struct VacuumCutoffs *cutoffs, uint16 *flags,
6779 : : HeapPageFreeze *pagefrz)
6780 : : {
6781 : : TransactionId newxmax;
6782 : : MultiXactMember *members;
6783 : : int nmembers;
6784 : : bool need_replace;
6785 : : int nnewmembers;
6786 : : MultiXactMember *newmembers;
6787 : : bool has_lockers;
6788 : : TransactionId update_xid;
6789 : : bool update_committed;
6790 : : TransactionId FreezePageRelfrozenXid;
6791 : :
6792 : 8 : *flags = 0;
6793 : :
6794 : : /* We should only be called for Multis */
6795 [ - + ]: 8 : Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6796 : :
3551 6797 [ + - - + ]: 16 : if (!MultiXactIdIsValid(multi) ||
6798 : 8 : HEAP_LOCKED_UPGRADED(t_infomask))
6799 : : {
4472 alvherre@alvh.no-ip. 6800 :UBC 0 : *flags |= FRM_INVALIDATE_XMAX;
1173 pg@bowt.ie 6801 : 0 : pagefrz->freeze_required = true;
4472 alvherre@alvh.no-ip. 6802 : 0 : return InvalidTransactionId;
6803 : : }
1179 pg@bowt.ie 6804 [ - + ]:CBC 8 : else if (MultiXactIdPrecedes(multi, cutoffs->relminmxid))
3044 andres@anarazel.de 6805 [ # # ]:UBC 0 : ereport(ERROR,
6806 : : (errcode(ERRCODE_DATA_CORRUPTED),
6807 : : errmsg_internal("found multixact %u from before relminmxid %u",
6808 : : multi, cutoffs->relminmxid)));
1173 pg@bowt.ie 6809 [ + + ]:CBC 8 : else if (MultiXactIdPrecedes(multi, cutoffs->OldestMxact))
6810 : : {
6811 : : TransactionId update_xact;
6812 : :
6813 : : /*
6814 : : * This old multi cannot possibly have members still running, but
6815 : : * verify just in case. If it was a locker only, it can be removed
6816 : : * without any further consideration; but if it contained an update,
6817 : : * we might need to preserve it.
6818 : : */
3044 andres@anarazel.de 6819 [ - + ]: 6 : if (MultiXactIdIsRunning(multi,
6820 : 6 : HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
3044 andres@anarazel.de 6821 [ # # ]:UBC 0 : ereport(ERROR,
6822 : : (errcode(ERRCODE_DATA_CORRUPTED),
6823 : : errmsg_internal("multixact %u from before multi freeze cutoff %u found to be still running",
6824 : : multi, cutoffs->OldestMxact)));
6825 : :
4472 alvherre@alvh.no-ip. 6826 [ + - ]:CBC 6 : if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6827 : : {
6828 : 6 : *flags |= FRM_INVALIDATE_XMAX;
1173 pg@bowt.ie 6829 : 6 : pagefrz->freeze_required = true;
6830 : 6 : return InvalidTransactionId;
6831 : : }
6832 : :
6833 : : /* replace multi with single XID for its updater? */
1173 pg@bowt.ie 6834 :UBC 0 : update_xact = MultiXactIdGetUpdateXid(multi, t_infomask);
6835 [ # # ]: 0 : if (TransactionIdPrecedes(update_xact, cutoffs->relfrozenxid))
6836 [ # # ]: 0 : ereport(ERROR,
6837 : : (errcode(ERRCODE_DATA_CORRUPTED),
6838 : : errmsg_internal("multixact %u contains update XID %u from before relfrozenxid %u",
6839 : : multi, update_xact,
6840 : : cutoffs->relfrozenxid)));
6841 [ # # ]: 0 : else if (TransactionIdPrecedes(update_xact, cutoffs->OldestXmin))
6842 : : {
6843 : : /*
6844 : : * Updater XID has to have aborted (otherwise the tuple would have
6845 : : * been pruned away instead, since updater XID is < OldestXmin).
6846 : : * Just remove xmax.
6847 : : */
1167 6848 [ # # ]: 0 : if (TransactionIdDidCommit(update_xact))
1173 6849 [ # # ]: 0 : ereport(ERROR,
6850 : : (errcode(ERRCODE_DATA_CORRUPTED),
6851 : : errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
6852 : : multi, update_xact,
6853 : : cutoffs->OldestXmin)));
6854 : 0 : *flags |= FRM_INVALIDATE_XMAX;
6855 : 0 : pagefrz->freeze_required = true;
6856 : 0 : return InvalidTransactionId;
6857 : : }
6858 : :
6859 : : /* Have to keep updater XID as new xmax */
6860 : 0 : *flags |= FRM_RETURN_IS_XID;
6861 : 0 : pagefrz->freeze_required = true;
6862 : 0 : return update_xact;
6863 : : }
6864 : :
6865 : : /*
6866 : : * Some member(s) of this Multi may be below FreezeLimit xid cutoff, so we
6867 : : * need to walk the whole members array to figure out what to do, if
6868 : : * anything.
6869 : : */
6870 : : nmembers =
3551 alvherre@alvh.no-ip. 6871 :CBC 2 : GetMultiXactIdMembers(multi, &members, false,
4247 6872 : 2 : HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
4472 6873 [ - + ]: 2 : if (nmembers <= 0)
6874 : : {
6875 : : /* Nothing worth keeping */
4472 alvherre@alvh.no-ip. 6876 :UBC 0 : *flags |= FRM_INVALIDATE_XMAX;
1173 pg@bowt.ie 6877 : 0 : pagefrz->freeze_required = true;
4472 alvherre@alvh.no-ip. 6878 : 0 : return InvalidTransactionId;
6879 : : }
6880 : :
6881 : : /*
6882 : : * The FRM_NOOP case is the only case where we might need to ratchet back
6883 : : * FreezePageRelfrozenXid or FreezePageRelminMxid. It is also the only
6884 : : * case where our caller might ratchet back its NoFreezePageRelfrozenXid
6885 : : * or NoFreezePageRelminMxid "no freeze" trackers to deal with a multi.
6886 : : * FRM_NOOP handling should result in the NewRelfrozenXid/NewRelminMxid
6887 : : * trackers managed by VACUUM being ratcheting back by xmax to the degree
6888 : : * required to make it safe to leave xmax undisturbed, independent of
6889 : : * whether or not page freezing is triggered somewhere else.
6890 : : *
6891 : : * Our policy is to force freezing in every case other than FRM_NOOP,
6892 : : * which obviates the need to maintain either set of trackers, anywhere.
6893 : : * Every other case will reliably execute a freeze plan for xmax that
6894 : : * either replaces xmax with an XID/MXID >= OldestXmin/OldestMxact, or
6895 : : * sets xmax to an InvalidTransactionId XID, rendering xmax fully frozen.
6896 : : * (VACUUM's NewRelfrozenXid/NewRelminMxid trackers are initialized with
6897 : : * OldestXmin/OldestMxact, so later values never need to be tracked here.)
6898 : : */
4472 alvherre@alvh.no-ip. 6899 :CBC 2 : need_replace = false;
1173 pg@bowt.ie 6900 : 2 : FreezePageRelfrozenXid = pagefrz->FreezePageRelfrozenXid;
1179 6901 [ + + ]: 4 : for (int i = 0; i < nmembers; i++)
6902 : : {
6903 : 3 : TransactionId xid = members[i].xid;
6904 : :
6905 [ - + ]: 3 : Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6906 : :
6907 [ + + ]: 3 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
6908 : : {
6909 : : /* Can't violate the FreezeLimit postcondition */
4472 alvherre@alvh.no-ip. 6910 : 1 : need_replace = true;
6911 : 1 : break;
6912 : : }
1173 pg@bowt.ie 6913 [ - + ]: 2 : if (TransactionIdPrecedes(xid, FreezePageRelfrozenXid))
1173 pg@bowt.ie 6914 :UBC 0 : FreezePageRelfrozenXid = xid;
6915 : : }
6916 : :
6917 : : /* Can't violate the MultiXactCutoff postcondition, either */
1173 pg@bowt.ie 6918 [ + + ]:CBC 2 : if (!need_replace)
6919 : 1 : need_replace = MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff);
6920 : :
4472 alvherre@alvh.no-ip. 6921 [ + + ]: 2 : if (!need_replace)
6922 : : {
6923 : : /*
6924 : : * vacuumlazy.c might ratchet back NewRelminMxid, NewRelfrozenXid, or
6925 : : * both together to make it safe to retain this particular multi after
6926 : : * freezing its page
6927 : : */
6928 : 1 : *flags |= FRM_NOOP;
1173 pg@bowt.ie 6929 : 1 : pagefrz->FreezePageRelfrozenXid = FreezePageRelfrozenXid;
6930 [ - + ]: 1 : if (MultiXactIdPrecedes(multi, pagefrz->FreezePageRelminMxid))
1173 pg@bowt.ie 6931 :UBC 0 : pagefrz->FreezePageRelminMxid = multi;
4472 alvherre@alvh.no-ip. 6932 :CBC 1 : pfree(members);
1442 pg@bowt.ie 6933 : 1 : return multi;
6934 : : }
6935 : :
6936 : : /*
6937 : : * Do a more thorough second pass over the multi to figure out which
6938 : : * member XIDs actually need to be kept. Checking the precise status of
6939 : : * individual members might even show that we don't need to keep anything.
6940 : : * That is quite possible even though the Multi must be >= OldestMxact,
6941 : : * since our second pass only keeps member XIDs when it's truly necessary;
6942 : : * even member XIDs >= OldestXmin often won't be kept by the second pass.
6943 : : */
4472 alvherre@alvh.no-ip. 6944 : 1 : nnewmembers = 0;
95 michael@paquier.xyz 6945 :GNC 1 : newmembers = palloc_array(MultiXactMember, nmembers);
4472 alvherre@alvh.no-ip. 6946 :CBC 1 : has_lockers = false;
6947 : 1 : update_xid = InvalidTransactionId;
6948 : 1 : update_committed = false;
6949 : :
6950 : : /*
6951 : : * Determine whether to keep each member xid, or to ignore it instead
6952 : : */
1179 pg@bowt.ie 6953 [ + + ]: 3 : for (int i = 0; i < nmembers; i++)
6954 : : {
6955 : 2 : TransactionId xid = members[i].xid;
6956 : 2 : MultiXactStatus mstatus = members[i].status;
6957 : :
6958 [ - + ]: 2 : Assert(!TransactionIdPrecedes(xid, cutoffs->relfrozenxid));
6959 : :
6960 [ + - ]: 2 : if (!ISUPDATE_from_mxstatus(mstatus))
6961 : : {
6962 : : /*
6963 : : * Locker XID (not updater XID). We only keep lockers that are
6964 : : * still running.
6965 : : */
6966 [ + - + + ]: 4 : if (TransactionIdIsCurrentTransactionId(xid) ||
6967 : 2 : TransactionIdIsInProgress(xid))
6968 : : {
1173 6969 [ - + ]: 1 : if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
1173 pg@bowt.ie 6970 [ # # ]:UBC 0 : ereport(ERROR,
6971 : : (errcode(ERRCODE_DATA_CORRUPTED),
6972 : : errmsg_internal("multixact %u contains running locker XID %u from before removable cutoff %u",
6973 : : multi, xid,
6974 : : cutoffs->OldestXmin)));
1179 pg@bowt.ie 6975 :CBC 1 : newmembers[nnewmembers++] = members[i];
6976 : 1 : has_lockers = true;
6977 : : }
6978 : :
6979 : 2 : continue;
6980 : : }
6981 : :
6982 : : /*
6983 : : * Updater XID (not locker XID). Should we keep it?
6984 : : *
6985 : : * Since the tuple wasn't totally removed when vacuum pruned, the
6986 : : * update Xid cannot possibly be older than the OldestXmin cutoff unless
6987 : : * the updater XID aborted. If the updater transaction is known
6988 : : * aborted or crashed then it's okay to ignore it, otherwise not.
6989 : : *
6990 : : * In any case the Multi should never contain two updaters, whatever
6991 : : * their individual commit status. Check for that first, in passing.
6992 : : */
1179 pg@bowt.ie 6993 [ # # ]:UBC 0 : if (TransactionIdIsValid(update_xid))
6994 [ # # ]: 0 : ereport(ERROR,
6995 : : (errcode(ERRCODE_DATA_CORRUPTED),
6996 : : errmsg_internal("multixact %u has two or more updating members",
6997 : : multi),
6998 : : errdetail_internal("First updater XID=%u second updater XID=%u.",
6999 : : update_xid, xid)));
7000 : :
7001 : : /*
7002 : : * As with all tuple visibility routines, it's critical to test
7003 : : * TransactionIdIsInProgress before TransactionIdDidCommit, because of
7004 : : * race conditions explained in detail in heapam_visibility.c.
7005 : : */
7006 [ # # # # ]: 0 : if (TransactionIdIsCurrentTransactionId(xid) ||
7007 : 0 : TransactionIdIsInProgress(xid))
7008 : 0 : update_xid = xid;
7009 [ # # ]: 0 : else if (TransactionIdDidCommit(xid))
7010 : : {
7011 : : /*
7012 : : * The transaction committed, so we can tell caller to set
7013 : : * HEAP_XMAX_COMMITTED. (We can only do this because we know the
7014 : : * transaction is not running.)
7015 : : */
7016 : 0 : update_committed = true;
7017 : 0 : update_xid = xid;
7018 : : }
7019 : : else
7020 : : {
7021 : : /*
7022 : : * Not in progress, not committed -- must be aborted or crashed;
7023 : : * we can ignore it.
7024 : : */
7025 : 0 : continue;
7026 : : }
7027 : :
7028 : : /*
7029 : : * We determined that updater must be kept -- add it to pending new
7030 : : * members list
7031 : : */
1173 7032 [ # # ]: 0 : if (TransactionIdPrecedes(xid, cutoffs->OldestXmin))
7033 [ # # ]: 0 : ereport(ERROR,
7034 : : (errcode(ERRCODE_DATA_CORRUPTED),
7035 : : errmsg_internal("multixact %u contains committed update XID %u from before removable cutoff %u",
7036 : : multi, xid, cutoffs->OldestXmin)));
1179 7037 : 0 : newmembers[nnewmembers++] = members[i];
7038 : : }
7039 : :
4472 alvherre@alvh.no-ip. 7040 :CBC 1 : pfree(members);
7041 : :
7042 : : /*
7043 : : * Determine what to do with caller's multi based on information gathered
7044 : : * during our second pass
7045 : : */
7046 [ - + ]: 1 : if (nnewmembers == 0)
7047 : : {
7048 : : /* Nothing worth keeping */
4472 alvherre@alvh.no-ip. 7049 :UBC 0 : *flags |= FRM_INVALIDATE_XMAX;
1179 pg@bowt.ie 7050 : 0 : newxmax = InvalidTransactionId;
7051 : : }
4472 alvherre@alvh.no-ip. 7052 [ - + - - ]:CBC 1 : else if (TransactionIdIsValid(update_xid) && !has_lockers)
7053 : : {
7054 : : /*
7055 : : * If there's a single member and it's an update, pass it back alone
7056 : : * without creating a new Multi. (XXX we could do this when there's a
7057 : : * single remaining locker, too, but that would complicate the API too
7058 : : * much; moreover, the case with the single updater is more
7059 : : * interesting, because those are longer-lived.)
7060 : : */
4472 alvherre@alvh.no-ip. 7061 [ # # ]:UBC 0 : Assert(nnewmembers == 1);
7062 : 0 : *flags |= FRM_RETURN_IS_XID;
7063 [ # # ]: 0 : if (update_committed)
7064 : 0 : *flags |= FRM_MARK_COMMITTED;
1179 pg@bowt.ie 7065 : 0 : newxmax = update_xid;
7066 : : }
7067 : : else
7068 : : {
7069 : : /*
7070 : : * Create a new multixact with the surviving members of the previous
7071 : : * one, to set as new Xmax in the tuple
7072 : : */
1179 pg@bowt.ie 7073 :CBC 1 : newxmax = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
4472 alvherre@alvh.no-ip. 7074 : 1 : *flags |= FRM_RETURN_IS_MULTI;
7075 : : }
7076 : :
7077 : 1 : pfree(newmembers);
7078 : :
1173 pg@bowt.ie 7079 : 1 : pagefrz->freeze_required = true;
1179 7080 : 1 : return newxmax;
7081 : : }
7082 : :
7083 : : /*
7084 : : * heap_prepare_freeze_tuple
7085 : : *
7086 : : * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7087 : : * are older than the OldestXmin and/or OldestMxact freeze cutoffs. If so,
7088 : : * setup enough state (in the *frz output argument) to enable caller to
7089 : : * process this tuple as part of freezing its page, and return true. Return
7090 : : * false if nothing can be changed about the tuple right now.
7091 : : *
7092 : : * FreezePageConflictXid is advanced only for xmin/xvac freezing, not for xmax
7093 : : * changes. We only remove xmax state here when it is lock-only, or when the
7094 : : * updater XID (including an updater member of a MultiXact) must be aborted;
7095 : : * otherwise, the tuple would already be removable. Neither case affects
7096 : : * visibility on a standby.
7097 : : *
7098 : : * Also sets *totally_frozen to true if the tuple will be totally frozen once
7099 : : * caller executes returned freeze plan (or if the tuple was already totally
7100 : : * frozen by an earlier VACUUM). This indicates that there are no remaining
7101 : : * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
7102 : : *
7103 : : * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
7104 : : * tuple that we returned true for, and then execute freezing. Caller must
7105 : : * initialize the pagefrz fields for the page as a whole before the first
7106 : : * call here for each heap page.
7107 : : *
7108 : : * VACUUM caller decides on whether or not to freeze the page as a whole.
7109 : : * We'll often prepare freeze plans for a page that caller just discards.
7110 : : * However, VACUUM doesn't always get to make a choice; it must freeze when
7111 : : * pagefrz.freeze_required is set, to ensure that any XIDs < FreezeLimit (and
7112 : : * MXIDs < MultiXactCutoff) can never be left behind. We help to make sure
7113 : : * that VACUUM always follows that rule.
7114 : : *
7115 : : * We sometimes force freezing of xmax MultiXactId values long before it is
7116 : : * strictly necessary to do so just to ensure the FreezeLimit postcondition.
7117 : : * It's worth processing MultiXactIds proactively when it is cheap to do so,
7118 : : * and it's convenient to make that happen by piggy-backing it on the "force
7119 : : * freezing" mechanism. Conversely, we sometimes delay freezing MultiXactIds
7120 : : * because it is expensive right now (though only when it's still possible to
7121 : : * do so without violating the FreezeLimit/MultiXactCutoff postcondition).
7122 : : *
7123 : : * It is assumed that the caller has checked the tuple with
7124 : : * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
7125 : : * (else we should be removing the tuple, not freezing it).
7126 : : *
7127 : : * NB: This function has side effects: it might allocate a new MultiXactId.
7128 : : * It will be set as the tuple's new xmax when our *frz output is processed
7129 : : * within heap_execute_freeze_tuple later on. If the tuple is in a shared
7130 : : * buffer then the caller had better have an exclusive lock on it already.
7131 : : */
7132 : : bool
3044 andres@anarazel.de 7133 : 5246767 : heap_prepare_freeze_tuple(HeapTupleHeader tuple,
7134 : : const struct VacuumCutoffs *cutoffs,
7135 : : HeapPageFreeze *pagefrz,
7136 : : HeapTupleFreeze *frz, bool *totally_frozen)
7137 : : {
1179 pg@bowt.ie 7138 : 5246767 : bool xmin_already_frozen = false,
7139 : 5246767 : xmax_already_frozen = false;
7140 : 5246767 : bool freeze_xmin = false,
7141 : 5246767 : replace_xvac = false,
7142 : 5246767 : replace_xmax = false,
7143 : 5246767 : freeze_xmax = false;
7144 : : TransactionId xid;
7145 : :
1167 7146 : 5246767 : frz->xmax = HeapTupleHeaderGetRawXmax(tuple);
4472 alvherre@alvh.no-ip. 7147 : 5246767 : frz->t_infomask2 = tuple->t_infomask2;
7148 : 5246767 : frz->t_infomask = tuple->t_infomask;
1167 pg@bowt.ie 7149 : 5246767 : frz->frzflags = 0;
7150 : 5246767 : frz->checkflags = 0;
7151 : :
7152 : : /*
7153 : : * Process xmin, while keeping track of whether it's already frozen, or
7154 : : * will become frozen iff our freeze plan is executed by caller (could be
7155 : : * neither).
7156 : : */
7070 tgl@sss.pgh.pa.us 7157 : 5246767 : xid = HeapTupleHeaderGetXmin(tuple);
2509 alvherre@alvh.no-ip. 7158 [ + + ]: 5246767 : if (!TransactionIdIsNormal(xid))
1179 pg@bowt.ie 7159 : 1425344 : xmin_already_frozen = true;
7160 : : else
7161 : : {
7162 [ - + ]: 3821423 : if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
3044 andres@anarazel.de 7163 [ # # ]:UBC 0 : ereport(ERROR,
7164 : : (errcode(ERRCODE_DATA_CORRUPTED),
7165 : : errmsg_internal("found xmin %u from before relfrozenxid %u",
7166 : : xid, cutoffs->relfrozenxid)));
7167 : :
7168 : : /* Will set freeze_xmin flags in freeze plan below */
1173 pg@bowt.ie 7169 :CBC 3821423 : freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7170 : :
7171 : : /* Verify that xmin committed if and when freeze plan is executed */
1167 7172 [ + + ]: 3821423 : if (freeze_xmin)
7173 : : {
7174 : 3206462 : frz->checkflags |= HEAP_FREEZE_CHECK_XMIN_COMMITTED;
5 melanieplageman@gmai 7175 [ + + ]:GNC 3206462 : if (TransactionIdFollows(xid, pagefrz->FreezePageConflictXid))
7176 : 378481 : pagefrz->FreezePageConflictXid = xid;
7177 : : }
7178 : : }
7179 : :
7180 : : /*
7181 : : * Old-style VACUUM FULL is gone, but we have to process xvac for as long
7182 : : * as we support having MOVED_OFF/MOVED_IN tuples in the database
7183 : : */
1179 pg@bowt.ie 7184 :CBC 5246767 : xid = HeapTupleHeaderGetXvac(tuple);
7185 [ - + ]: 5246767 : if (TransactionIdIsNormal(xid))
7186 : : {
1179 pg@bowt.ie 7187 [ # # ]:UBC 0 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7188 [ # # ]: 0 : Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
7189 : :
7190 : : /*
7191 : : * For Xvac, we always freeze proactively. This allows totally_frozen
7192 : : * tracking to ignore xvac.
7193 : : */
1173 7194 : 0 : replace_xvac = pagefrz->freeze_required = true;
7195 : :
5 melanieplageman@gmai 7196 [ # # ]:UNC 0 : if (TransactionIdFollows(xid, pagefrz->FreezePageConflictXid))
7197 : 0 : pagefrz->FreezePageConflictXid = xid;
7198 : :
7199 : : /* Will set replace_xvac flags in freeze plan below */
7200 : : }
7201 : :
7202 : : /* Now process xmax */
1167 pg@bowt.ie 7203 :CBC 5246767 : xid = frz->xmax;
4490 alvherre@alvh.no-ip. 7204 [ + + ]: 5246767 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7205 : : {
7206 : : /* Raw xmax is a MultiXactId */
7207 : : TransactionId newxmax;
7208 : : uint16 flags;
7209 : :
7210 : : /*
7211 : : * We will either remove xmax completely (in the "freeze_xmax" path),
7212 : : * process xmax by replacing it (in the "replace_xmax" path), or
7213 : : * perform no-op xmax processing. The only constraint is that the
7214 : : * FreezeLimit/MultiXactCutoff postcondition must never be violated.
7215 : : */
1179 pg@bowt.ie 7216 : 8 : newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
7217 : : &flags, pagefrz);
7218 : :
1173 7219 [ + + ]: 8 : if (flags & FRM_NOOP)
7220 : : {
7221 : : /*
7222 : : * xmax is a MultiXactId, and nothing about it changes for now.
7223 : : * This is the only case where 'freeze_required' won't have been
7224 : : * set for us by FreezeMultiXactId, as well as the only case where
7225 : : * neither freeze_xmax nor replace_xmax are set (given a multi).
7226 : : *
7227 : : * This is a no-op, but the call to FreezeMultiXactId might have
7228 : : * ratcheted back NewRelfrozenXid and/or NewRelminMxid trackers
7229 : : * for us (the "freeze page" variants, specifically). That'll
7230 : : * make it safe for our caller to freeze the page later on, while
7231 : : * leaving this particular xmax undisturbed.
7232 : : *
7233 : : * FreezeMultiXactId is _not_ responsible for the "no freeze"
7234 : : * NewRelfrozenXid/NewRelminMxid trackers, though -- that's our
7235 : : * job. A call to heap_tuple_should_freeze for this same tuple
7236 : : * will take place below if 'freeze_required' isn't set already.
7237 : : * (This repeats work from FreezeMultiXactId, but allows "no
7238 : : * freeze" tracker maintenance to happen in only one place.)
7239 : : */
7240 [ - + ]: 1 : Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
7241 [ + - - + ]: 1 : Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
7242 : : }
7243 [ - + ]: 7 : else if (flags & FRM_RETURN_IS_XID)
7244 : : {
7245 : : /*
7246 : : * xmax will become an updater Xid (original MultiXact's updater
7247 : : * member Xid will be carried forward as a simple Xid in Xmax).
7248 : : */
1179 pg@bowt.ie 7249 [ # # ]:UBC 0 : Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
7250 : :
7251 : : /*
7252 : : * NB -- some of these transformations are only valid because we
7253 : : * know the return Xid is a tuple updater (i.e. not merely a
7254 : : * locker). Also note that the only reason we don't explicitly
7255 : : * worry about HEAP_KEYS_UPDATED is that it lives in
7256 : : * t_infomask2 rather than t_infomask.
7257 : : */
4472 alvherre@alvh.no-ip. 7258 : 0 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7259 : 0 : frz->xmax = newxmax;
7260 [ # # ]: 0 : if (flags & FRM_MARK_COMMITTED)
3174 teodor@sigaev.ru 7261 : 0 : frz->t_infomask |= HEAP_XMAX_COMMITTED;
1179 pg@bowt.ie 7262 : 0 : replace_xmax = true;
7263 : : }
4472 alvherre@alvh.no-ip. 7264 [ + + ]:CBC 7 : else if (flags & FRM_RETURN_IS_MULTI)
7265 : : {
7266 : : uint16 newbits;
7267 : : uint16 newbits2;
7268 : :
7269 : : /*
7270 : : * xmax is an old MultiXactId that we have to replace with a new
7271 : : * MultiXactId, to carry forward two or more original member XIDs.
7272 : : */
1179 pg@bowt.ie 7273 [ - + ]: 1 : Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
7274 : :
7275 : : /*
7276 : : * We can't use GetMultiXactIdHintBits directly on the new multi
7277 : : * here; that routine initializes the masks to all zeroes, which
7278 : : * would lose other bits we need. Doing it this way ensures all
7279 : : * unrelated bits remain untouched.
7280 : : */
4472 alvherre@alvh.no-ip. 7281 : 1 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7282 : 1 : frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7283 : 1 : GetMultiXactIdHintBits(newxmax, &newbits, &newbits2);
7284 : 1 : frz->t_infomask |= newbits;
7285 : 1 : frz->t_infomask2 |= newbits2;
7286 : 1 : frz->xmax = newxmax;
1179 pg@bowt.ie 7287 : 1 : replace_xmax = true;
7288 : : }
7289 : : else
7290 : : {
7291 : : /*
7292 : : * Freeze plan for tuple "freezes xmax" in the strictest sense:
7293 : : * it'll leave nothing in xmax (neither an Xid nor a MultiXactId).
7294 : : */
7295 [ - + ]: 6 : Assert(flags & FRM_INVALIDATE_XMAX);
1442 7296 [ - + ]: 6 : Assert(!TransactionIdIsValid(newxmax));
7297 : :
7298 : : /* Will set freeze_xmax flags in freeze plan below */
1179 7299 : 6 : freeze_xmax = true;
7300 : : }
7301 : :
7302 : : /* MultiXactId processing forces freezing (barring FRM_NOOP case) */
1173 7303 [ - + - - : 8 : Assert(pagefrz->freeze_required || (!freeze_xmax && !replace_xmax));
- - ]
7304 : : }
3560 rhaas@postgresql.org 7305 [ + + ]: 5246759 : else if (TransactionIdIsNormal(xid))
7306 : : {
7307 : : /* Raw xmax is normal XID */
1179 pg@bowt.ie 7308 [ - + ]: 277395 : if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
3044 andres@anarazel.de 7309 [ # # ]:UBC 0 : ereport(ERROR,
7310 : : (errcode(ERRCODE_DATA_CORRUPTED),
7311 : : errmsg_internal("found xmax %u from before relfrozenxid %u",
7312 : : xid, cutoffs->relfrozenxid)));
7313 : :
7314 : : /* Will set freeze_xmax flags in freeze plan below */
1167 pg@bowt.ie 7315 :CBC 277395 : freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
7316 : :
7317 : : /*
7318 : : * Verify that xmax aborted if and when freeze plan is executed,
7319 : : * provided it's from an update. (A lock-only xmax can be removed
7320 : : * independent of this, since the lock is released at xact end.)
7321 : : */
7322 [ + + + + ]: 277395 : if (freeze_xmax && !HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
7323 : 2203 : frz->checkflags |= HEAP_FREEZE_CHECK_XMAX_ABORTED;
7324 : : }
1208 7325 [ + - ]: 4969364 : else if (!TransactionIdIsValid(xid))
7326 : : {
7327 : : /* Raw xmax is InvalidTransactionId XID */
7328 [ - + ]: 4969364 : Assert((tuple->t_infomask & HEAP_XMAX_IS_MULTI) == 0);
2872 alvherre@alvh.no-ip. 7329 : 4969364 : xmax_already_frozen = true;
7330 : : }
7331 : : else
2872 alvherre@alvh.no-ip. 7332 [ # # ]:UBC 0 : ereport(ERROR,
7333 : : (errcode(ERRCODE_DATA_CORRUPTED),
7334 : : errmsg_internal("found raw xmax %u (infomask 0x%04x) not invalid and not multi",
7335 : : xid, tuple->t_infomask)));
7336 : :
1179 pg@bowt.ie 7337 [ + + ]:CBC 5246767 : if (freeze_xmin)
7338 : : {
7339 [ - + ]: 3206462 : Assert(!xmin_already_frozen);
7340 : :
7341 : 3206462 : frz->t_infomask |= HEAP_XMIN_FROZEN;
7342 : : }
7343 [ - + ]: 5246767 : if (replace_xvac)
7344 : : {
7345 : : /*
7346 : : * If a MOVED_OFF tuple is not dead, the xvac transaction must have
7347 : : * failed; whereas a non-dead MOVED_IN tuple must mean the xvac
7348 : : * transaction succeeded.
7349 : : */
1173 pg@bowt.ie 7350 [ # # ]:UBC 0 : Assert(pagefrz->freeze_required);
1179 7351 [ # # ]: 0 : if (tuple->t_infomask & HEAP_MOVED_OFF)
7352 : 0 : frz->frzflags |= XLH_INVALID_XVAC;
7353 : : else
7354 : 0 : frz->frzflags |= XLH_FREEZE_XVAC;
7355 : : }
1179 pg@bowt.ie 7356 [ + + ]:CBC 5246767 : if (replace_xmax)
7357 : : {
7358 [ + - - + ]: 1 : Assert(!xmax_already_frozen && !freeze_xmax);
1173 7359 [ - + ]: 1 : Assert(pagefrz->freeze_required);
7360 : :
7361 : : /* Already set replace_xmax flags in freeze plan earlier */
7362 : : }
4490 alvherre@alvh.no-ip. 7363 [ + + ]: 5246767 : if (freeze_xmax)
7364 : : {
1179 pg@bowt.ie 7365 [ + - - + ]: 3065 : Assert(!xmax_already_frozen && !replace_xmax);
7366 : :
4472 alvherre@alvh.no-ip. 7367 : 3065 : frz->xmax = InvalidTransactionId;
7368 : :
7369 : : /*
7370 : : * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
7371 : : * LOCKED. Normalize to INVALID just to be sure no one gets confused.
7372 : : * Also get rid of the HEAP_KEYS_UPDATED bit.
7373 : : */
7374 : 3065 : frz->t_infomask &= ~HEAP_XMAX_BITS;
7375 : 3065 : frz->t_infomask |= HEAP_XMAX_INVALID;
7376 : 3065 : frz->t_infomask2 &= ~HEAP_HOT_UPDATED;
7377 : 3065 : frz->t_infomask2 &= ~HEAP_KEYS_UPDATED;
7378 : : }
7379 : :
7380 : : /*
7381 : : * Determine if this tuple is already totally frozen, or will become
7382 : : * totally frozen (provided caller executes freeze plans for the page)
7383 : : */
1179 pg@bowt.ie 7384 [ + + + + : 9875508 : *totally_frozen = ((freeze_xmin || xmin_already_frozen) &&
+ + ]
7385 [ + + ]: 4628741 : (freeze_xmax || xmax_already_frozen));
7386 : :
1173 7387 [ + + + + : 5246767 : if (!pagefrz->freeze_required && !(xmin_already_frozen &&
+ + ]
7388 : : xmax_already_frozen))
7389 : : {
7390 : : /*
7391 : : * So far no previous tuple from the page made freezing mandatory.
7392 : : * Does this tuple force caller to freeze the entire page?
7393 : : */
7394 : 2688811 : pagefrz->freeze_required =
7395 : 2688811 : heap_tuple_should_freeze(tuple, cutoffs,
7396 : : &pagefrz->NoFreezePageRelfrozenXid,
7397 : : &pagefrz->NoFreezePageRelminMxid);
7398 : : }
7399 : :
7400 : : /* Tell caller if this tuple has a usable freeze plan set in *frz */
1179 7401 [ + + + - : 5246767 : return freeze_xmin || replace_xvac || replace_xmax || freeze_xmax;
+ - + + ]
7402 : : }
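The intended calling sequence spans three functions: prepare a freeze plan per tuple, sanity-check the plans, then execute them inside the same critical section that dirties the buffer and (if needed) emits WAL. A hedged sketch of that flow, loosely modeled on VACUUM's page processing; buffer, cutoffs, and pagefrz are assumed caller state, and want_freeze stands in for VACUUM's opportunistic choice:

/* Illustrative sketch only -- the real flow lives in VACUUM's pruning code. */
HeapTupleFreeze plans[MaxHeapTuplesPerPage];
int			nplans = 0;
Page		page = BufferGetPage(buffer);

for (OffsetNumber offnum = FirstOffsetNumber;
	 offnum <= PageGetMaxOffsetNumber(page);
	 offnum = OffsetNumberNext(offnum))
{
	ItemId		itemid = PageGetItemId(page, offnum);
	HeapTupleHeader htup;
	bool		totally_frozen;

	if (!ItemIdIsNormal(itemid))
		continue;
	htup = (HeapTupleHeader) PageGetItem(page, itemid);
	if (heap_prepare_freeze_tuple(htup, cutoffs, &pagefrz,
								  &plans[nplans], &totally_frozen))
		plans[nplans++].offset = offnum;	/* caller must set 'offset' */
}

if (nplans > 0 && (pagefrz.freeze_required || want_freeze))
{
	heap_pre_freeze_checks(buffer, plans, nplans);
	START_CRIT_SECTION();
	MarkBufferDirty(buffer);
	heap_freeze_prepared_tuples(buffer, plans, nplans);
	/* ... WAL-log the freeze here when RelationNeedsWAL() ... */
	END_CRIT_SECTION();
}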
7403 : :
7404 : : /*
7405 : : * Perform xmin/xmax XID status sanity checks before actually executing freeze
7406 : : * plans.
7407 : : *
7408 : : * heap_prepare_freeze_tuple doesn't perform these checks directly because
7409 : : * pg_xact lookups are relatively expensive. They shouldn't be repeated by
7410 : : * successive VACUUMs that each decide against freezing the same page.
7411 : : */
7412 : : void
711 heikki.linnakangas@i 7413 : 20236 : heap_pre_freeze_checks(Buffer buffer,
7414 : : HeapTupleFreeze *tuples, int ntuples)
7415 : : {
1216 pg@bowt.ie 7416 : 20236 : Page page = BufferGetPage(buffer);
7417 : :
1167 7418 [ + + ]: 953008 : for (int i = 0; i < ntuples; i++)
7419 : : {
7420 : 932772 : HeapTupleFreeze *frz = tuples + i;
7421 : 932772 : ItemId itemid = PageGetItemId(page, frz->offset);
7422 : : HeapTupleHeader htup;
7423 : :
7424 : 932772 : htup = (HeapTupleHeader) PageGetItem(page, itemid);
7425 : :
7426 : : /* Deliberately avoid relying on tuple hint bits here */
7427 [ + + ]: 932772 : if (frz->checkflags & HEAP_FREEZE_CHECK_XMIN_COMMITTED)
7428 : : {
7429 : 932771 : TransactionId xmin = HeapTupleHeaderGetRawXmin(htup);
7430 : :
7431 [ - + ]: 932771 : Assert(!HeapTupleHeaderXminFrozen(htup));
7432 [ - + ]: 932771 : if (unlikely(!TransactionIdDidCommit(xmin)))
1167 pg@bowt.ie 7433 [ # # ]:UBC 0 : ereport(ERROR,
7434 : : (errcode(ERRCODE_DATA_CORRUPTED),
7435 : : errmsg_internal("uncommitted xmin %u needs to be frozen",
7436 : : xmin)));
7437 : : }
7438 : :
7439 : : /*
7440 : : * TransactionIdDidAbort won't work reliably in the presence of XIDs
7441 : : * left behind by transactions that were in progress during a crash,
7442 : : * so we can only check that xmax didn't commit
7443 : : */
1167 pg@bowt.ie 7444 [ + + ]:CBC 932772 : if (frz->checkflags & HEAP_FREEZE_CHECK_XMAX_ABORTED)
7445 : : {
7446 : 830 : TransactionId xmax = HeapTupleHeaderGetRawXmax(htup);
7447 : :
7448 [ - + ]: 830 : Assert(TransactionIdIsNormal(xmax));
7449 [ - + ]: 830 : if (unlikely(TransactionIdDidCommit(xmax)))
1167 pg@bowt.ie 7450 [ # # ]:UBC 0 : ereport(ERROR,
7451 : : (errcode(ERRCODE_DATA_CORRUPTED),
7452 : : errmsg_internal("cannot freeze committed xmax %u",
7453 : : xmax)));
7454 : : }
7455 : : }
711 heikki.linnakangas@i 7456 :CBC 20236 : }
7457 : :
7458 : : /*
7459 : : * Helper which executes freezing of one or more heap tuples on a page on
7460 : : * behalf of the caller. The caller passes an array of tuple plans from
7461 : : * heap_prepare_freeze_tuple. Caller must set 'offset' in each plan for us.
7462 : : * Must be called in a critical section that also marks the buffer dirty and,
7463 : : * if needed, emits WAL.
7464 : : */
7465 : : void
7466 : 20236 : heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
7467 : : {
7468 : 20236 : Page page = BufferGetPage(buffer);
7469 : :
1216 pg@bowt.ie 7470 [ + + ]: 953008 : for (int i = 0; i < ntuples; i++)
7471 : : {
1167 7472 : 932772 : HeapTupleFreeze *frz = tuples + i;
7473 : 932772 : ItemId itemid = PageGetItemId(page, frz->offset);
7474 : : HeapTupleHeader htup;
7475 : :
1216 7476 : 932772 : htup = (HeapTupleHeader) PageGetItem(page, itemid);
1167 7477 : 932772 : heap_execute_freeze_tuple(htup, frz);
7478 : : }
1216 7479 : 20236 : }
7480 : :
7481 : : /*
7482 : : * heap_freeze_tuple
7483 : : * Freeze tuple in place, without WAL logging.
7484 : : *
7485 : : * Useful for callers like CLUSTER that perform their own WAL logging.
7486 : : */
7487 : : bool
3044 andres@anarazel.de 7488 : 361464 : heap_freeze_tuple(HeapTupleHeader tuple,
7489 : : TransactionId relfrozenxid, TransactionId relminmxid,
7490 : : TransactionId FreezeLimit, TransactionId MultiXactCutoff)
7491 : : {
7492 : : HeapTupleFreeze frz;
7493 : : bool do_freeze;
7494 : : bool totally_frozen;
7495 : : struct VacuumCutoffs cutoffs;
7496 : : HeapPageFreeze pagefrz;
7497 : :
1179 pg@bowt.ie 7498 : 361464 : cutoffs.relfrozenxid = relfrozenxid;
7499 : 361464 : cutoffs.relminmxid = relminmxid;
7500 : 361464 : cutoffs.OldestXmin = FreezeLimit;
7501 : 361464 : cutoffs.OldestMxact = MultiXactCutoff;
7502 : 361464 : cutoffs.FreezeLimit = FreezeLimit;
7503 : 361464 : cutoffs.MultiXactCutoff = MultiXactCutoff;
7504 : :
1173 7505 : 361464 : pagefrz.freeze_required = true;
7506 : 361464 : pagefrz.FreezePageRelfrozenXid = FreezeLimit;
7507 : 361464 : pagefrz.FreezePageRelminMxid = MultiXactCutoff;
5 melanieplageman@gmai 7508 :GNC 361464 : pagefrz.FreezePageConflictXid = InvalidTransactionId;
1173 pg@bowt.ie 7509 :CBC 361464 : pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
7510 : 361464 : pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
7511 : :
1179 7512 : 361464 : do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
7513 : : &pagefrz, &frz, &totally_frozen);
7514 : :
7515 : : /*
7516 : : * Note that because this is not a WAL-logged operation, we don't need to
7517 : : * fill in the offset in the freeze record.
7518 : : */
7519 : :
4472 alvherre@alvh.no-ip. 7520 [ + + ]: 361464 : if (do_freeze)
7521 : 256986 : heap_execute_freeze_tuple(tuple, &frz);
7522 : 361464 : return do_freeze;
7523 : : }
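For contrast with the WAL-logged path above, a hedged sketch of how a rewrite-style caller (e.g. CLUSTER's heap rewrite) might apply this to a private tuple copy, with the rs_* cutoff values assumed to come from the caller's rewrite state:

/* Illustrative sketch only; the rs_* cutoff variables are assumed inputs. */
HeapTuple	copied = heap_copytuple(original);	/* private copy, not yet on a page */

if (heap_freeze_tuple(copied->t_data,
					  rs_relfrozenxid, rs_relminmxid,
					  rs_freeze_xid, rs_cutoff_multi))
{
	/* header was frozen in place; the caller's own WAL covers the new page */
}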
7524 : :
7525 : : /*
7526 : : * For a given MultiXactId, return the hint bits that should be set in the
7527 : : * tuple's infomask.
7528 : : *
7529 : : * Normally this should be called for a multixact that was just created, and
7530 : : * so is in our local cache, making the GetMembers call fast.
7531 : : */
7532 : : static void
4799 7533 : 76810 : GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
7534 : : uint16 *new_infomask2)
7535 : : {
7536 : : int nmembers;
7537 : : MultiXactMember *members;
7538 : : int i;
4673 bruce@momjian.us 7539 : 76810 : uint16 bits = HEAP_XMAX_IS_MULTI;
7540 : 76810 : uint16 bits2 = 0;
7541 : 76810 : bool has_update = false;
7542 : 76810 : LockTupleMode strongest = LockTupleKeyShare;
7543 : :
7544 : : /*
7545 : : * We only use this in multis we just created, so they cannot be values
7546 : : * pre-pg_upgrade.
7547 : : */
4247 alvherre@alvh.no-ip. 7548 : 76810 : nmembers = GetMultiXactIdMembers(multi, &members, false, false);
7549 : :
4799 7550 [ + + ]: 1472644 : for (i = 0; i < nmembers; i++)
7551 : : {
7552 : : LockTupleMode mode;
7553 : :
7554 : : /*
7555 : : * Remember the strongest lock mode held by any member of the
7556 : : * multixact.
7557 : : */
4791 7558 : 1395834 : mode = TUPLOCK_from_mxstatus(members[i].status);
7559 [ + + ]: 1395834 : if (mode > strongest)
7560 : 2892 : strongest = mode;
7561 : :
7562 : : /* See what other bits we need */
4799 7563 [ + + + + : 1395834 : switch (members[i].status)
- ]
7564 : : {
7565 : 1393416 : case MultiXactStatusForKeyShare:
7566 : : case MultiXactStatusForShare:
7567 : : case MultiXactStatusForNoKeyUpdate:
7568 : 1393416 : break;
7569 : :
7570 : 52 : case MultiXactStatusForUpdate:
7571 : 52 : bits2 |= HEAP_KEYS_UPDATED;
7572 : 52 : break;
7573 : :
7574 : 2356 : case MultiXactStatusNoKeyUpdate:
7575 : 2356 : has_update = true;
7576 : 2356 : break;
7577 : :
7578 : 10 : case MultiXactStatusUpdate:
7579 : 10 : bits2 |= HEAP_KEYS_UPDATED;
7580 : 10 : has_update = true;
7581 : 10 : break;
7582 : : }
7583 : : }
7584 : :
4791 7585 [ + + + + ]: 76810 : if (strongest == LockTupleExclusive ||
7586 : : strongest == LockTupleNoKeyExclusive)
7587 : 2446 : bits |= HEAP_XMAX_EXCL_LOCK;
7588 [ + + ]: 74364 : else if (strongest == LockTupleShare)
7589 : 443 : bits |= HEAP_XMAX_SHR_LOCK;
7590 [ + - ]: 73921 : else if (strongest == LockTupleKeyShare)
7591 : 73921 : bits |= HEAP_XMAX_KEYSHR_LOCK;
7592 : :
4799 7593 [ + + ]: 76810 : if (!has_update)
7594 : 74444 : bits |= HEAP_XMAX_LOCK_ONLY;
7595 : :
7596 [ + - ]: 76810 : if (nmembers > 0)
7597 : 76810 : pfree(members);
7598 : :
7599 : 76810 : *new_infomask = bits;
7600 : 76810 : *new_infomask2 = bits2;
7601 : 76810 : }
7602 : :
7603 : : /*
7604 : : * MultiXactIdGetUpdateXid
7605 : : *
7606 : : * Given a multixact Xmax and corresponding infomask, which does not have the
7607 : : * HEAP_XMAX_LOCK_ONLY bit set, obtain and return the Xid of the updating
7608 : : * transaction.
7609 : : *
7610 : : * Caller is expected to check the status of the updating transaction, if
7611 : : * necessary.
7612 : : */
7613 : : static TransactionId
7614 : 162055 : MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
7615 : : {
4673 bruce@momjian.us 7616 : 162055 : TransactionId update_xact = InvalidTransactionId;
7617 : : MultiXactMember *members;
7618 : : int nmembers;
7619 : :
4799 alvherre@alvh.no-ip. 7620 [ - + ]: 162055 : Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
7621 [ - + ]: 162055 : Assert(t_infomask & HEAP_XMAX_IS_MULTI);
7622 : :
7623 : : /*
7624 : : * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
7625 : : * pre-pg_upgrade.
7626 : : */
4247 7627 : 162055 : nmembers = GetMultiXactIdMembers(xmax, &members, false, false);
7628 : :
4799 7629 [ + - ]: 162055 : if (nmembers > 0)
7630 : : {
7631 : : int i;
7632 : :
7633 [ + + ]: 3235017 : for (i = 0; i < nmembers; i++)
7634 : : {
7635 : : /* Ignore lockers */
4489 7636 [ + + ]: 3072962 : if (!ISUPDATE_from_mxstatus(members[i].status))
4799 7637 : 2910907 : continue;
7638 : :
7639 : : /* there can be at most one updater */
7640 [ - + ]: 162055 : Assert(update_xact == InvalidTransactionId);
7641 : 162055 : update_xact = members[i].xid;
7642 : : #ifndef USE_ASSERT_CHECKING
7643 : :
7644 : : /*
7645 : : * In an assert-enabled build, this break is compiled out, so we
7646 : : * walk the whole array to ensure there's no other updater.
7647 : : */
7648 : : break;
7649 : : #endif
7650 : : }
7651 : :
7652 : 162055 : pfree(members);
7653 : : }
7654 : :
7655 : 162055 : return update_xact;
7656 : : }
7657 : :
7658 : : /*
7659 : : * HeapTupleGetUpdateXid
7660 : : * As above, but use a HeapTupleHeader
7661 : : *
7662 : : * See also HeapTupleHeaderGetUpdateXid, which can be used without previously
7663 : : * checking the hint bits.
7664 : : */
7665 : : TransactionId
416 peter@eisentraut.org 7666 : 159915 : HeapTupleGetUpdateXid(const HeapTupleHeaderData *tup)
7667 : : {
7668 : 159915 : return MultiXactIdGetUpdateXid(HeapTupleHeaderGetRawXmax(tup),
7669 : 159915 : tup->t_infomask);
7670 : : }
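For callers that haven't checked the hint bits yet, HeapTupleHeaderGetUpdateXid (in access/htup_details.h) performs those checks before deciding whether the member array must be consulted. A hedged sketch of the decision it encodes (effective_update_xid is a hypothetical name, not the real inline function):

/* Illustrative sketch only. */
static TransactionId
effective_update_xid(const HeapTupleHeaderData *tup)
{
	uint16		infomask = tup->t_infomask;

	if (!(infomask & HEAP_XMAX_INVALID) &&
		(infomask & HEAP_XMAX_IS_MULTI) &&
		!(infomask & HEAP_XMAX_LOCK_ONLY))
		return HeapTupleGetUpdateXid(tup);	/* resolve via member array */

	return HeapTupleHeaderGetRawXmax(tup);	/* plain (or invalid) xmax */
}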
7671 : :
7672 : : /*
7673 : : * Does the given multixact conflict with the current transaction grabbing a
7674 : : * tuple lock of the given strength?
7675 : : *
7676 : : * The passed infomask pairs up with the given multixact in the tuple header.
7677 : : *
7678 : : * If current_is_member is not NULL, it is set to 'true' if the current
7679 : : * transaction is a member of the given multixact.
7680 : : */
7681 : : static bool
4097 alvherre@alvh.no-ip. 7682 : 218 : DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask,
7683 : : LockTupleMode lockmode, bool *current_is_member)
7684 : : {
7685 : : int nmembers;
7686 : : MultiXactMember *members;
3949 bruce@momjian.us 7687 : 218 : bool result = false;
7688 : 218 : LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7689 : :
3551 alvherre@alvh.no-ip. 7690 [ - + ]: 218 : if (HEAP_LOCKED_UPGRADED(infomask))
3551 alvherre@alvh.no-ip. 7691 :UBC 0 : return false;
7692 : :
3551 alvherre@alvh.no-ip. 7693 :CBC 218 : nmembers = GetMultiXactIdMembers(multi, &members, false,
4097 7694 : 218 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7695 [ + - ]: 218 : if (nmembers >= 0)
7696 : : {
7697 : : int i;
7698 : :
7699 [ + + ]: 2682 : for (i = 0; i < nmembers; i++)
7700 : : {
7701 : : TransactionId memxid;
7702 : : LOCKMODE memlockmode;
7703 : :
2462 7704 [ + + + + : 2471 : if (result && (current_is_member == NULL || *current_is_member))
+ - ]
7705 : : break;
7706 : :
7707 : 2464 : memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7708 : :
7709 : : /* ignore members from current xact (but track their presence) */
2464 7710 : 2464 : memxid = members[i].xid;
7711 [ + + ]: 2464 : if (TransactionIdIsCurrentTransactionId(memxid))
7712 : : {
2462 7713 [ + + ]: 92 : if (current_is_member != NULL)
7714 : 78 : *current_is_member = true;
7715 : 92 : continue;
7716 : : }
7717 [ + + ]: 2372 : else if (result)
7718 : 8 : continue;
7719 : :
7720 : : /* ignore members that don't conflict with the lock we want */
7721 [ + + ]: 2364 : if (!DoLockModesConflict(memlockmode, wanted))
2464 7722 : 2325 : continue;
7723 : :
4097 7724 [ + + ]: 39 : if (ISUPDATE_from_mxstatus(members[i].status))
7725 : : {
7726 : : /* ignore aborted updaters */
7727 [ + + ]: 17 : if (TransactionIdDidAbort(memxid))
7728 : 1 : continue;
7729 : : }
7730 : : else
7731 : : {
7732 : : /* ignore lockers-only that are no longer in progress */
7733 [ + + ]: 22 : if (!TransactionIdIsInProgress(memxid))
7734 : 7 : continue;
7735 : : }
7736 : :
7737 : : /*
7738 : : * Whatever remains are either live lockers that conflict with our
7739 : : * wanted lock, or updaters that are not aborted. Those conflict
7740 : : * with what we want. Set up to return true, but keep going to
7741 : : * look for the current transaction among the multixact members,
7742 : : * if needed.
7743 : : */
7744 : 31 : result = true;
7745 : : }
7746 : 218 : pfree(members);
7747 : : }
7748 : :
7749 : 218 : return result;
7750 : : }
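
/*
 * Illustrative caller sketch, loosely modeled on the tuple-locking paths
 * elsewhere in this file; the surrounding variables (multi, infomask) are
 * assumed to exist.  A caller asks whether any foreign member conflicts
 * with the desired lock strength, and separately learns whether it is
 * itself a member, since its own locks never require waiting.
 */
bool		current_is_member = false;

if (DoesMultiXactIdConflict(multi, infomask, LockTupleExclusive,
							&current_is_member))
{
	/* some other transaction's member conflicts: sleep, or fail if NOWAIT */
}
else if (current_is_member)
{
	/* only our own (sub)transactions hold conflicting locks: proceed */
}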
7751 : :
7752 : : /*
7753 : : * Do_MultiXactIdWait
7754 : : * Actual implementation for the two functions below.
7755 : : *
7756 : : * 'multi', 'status' and 'infomask' indicate what to sleep on (the status is
7757 : : * needed to ensure we only sleep on conflicting members, and the infomask is
7758 : : * used to optimize multixact access in case it's a lock-only multi); 'nowait'
7759 : : * indicates whether to use conditional lock acquisition, to allow callers to
7760 : : * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up
7761 : : * context information for error messages. 'remaining', if not NULL, receives
7762 : : * the number of members that are still running, including any (non-aborted)
7763 : : * subtransactions of our own transaction. 'logLockFailure' indicates whether
7764 : : * to log details when a lock acquisition fails with 'nowait' enabled.
7765 : : *
7766 : : * We do this by sleeping on each member using XactLockTableWait. Any
7767 : : * members that belong to the current backend are *not* waited for, however;
7768 : : * this would not merely be useless but would lead to Assert failure inside
7769 : : * XactLockTableWait. By the time this returns, it is certain that all
7770 : : * transactions *of other backends* that were members of the MultiXactId
7771 : : * that conflict with the requested status are dead (and no new ones can have
7772 : : * been added, since it is not legal to add members to an existing
7773 : : * MultiXactId).
7774 : : *
7775 : : * But by the time we finish sleeping, someone else may have changed the Xmax
7776 : : * of the containing tuple, so the caller needs to iterate on us somehow.
7777 : : *
7778 : : * Note that in case we return false, the number of remaining members is
7779 : : * not to be trusted.
7780 : : */
7781 : : static bool
4799 7782 : 60 : Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7783 : : uint16 infomask, bool nowait,
7784 : : Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7785 : : int *remaining, bool logLockFailure)
7786 : : {
7787 : 60 : bool result = true;
7788 : : MultiXactMember *members;
7789 : : int nmembers;
7790 : 60 : int remain = 0;
7791 : :
7792 : : /* for pre-pg_upgrade tuples, no need to sleep at all */
3551 7793 [ + - ]: 60 : nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7794 : 60 : GetMultiXactIdMembers(multi, &members, false,
7795 : 60 : HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7796 : :
4799 7797 [ + - ]: 60 : if (nmembers >= 0)
7798 : : {
7799 : : int i;
7800 : :
7801 [ + + ]: 188 : for (i = 0; i < nmembers; i++)
7802 : : {
7803 : 134 : TransactionId memxid = members[i].xid;
7804 : 134 : MultiXactStatus memstatus = members[i].status;
7805 : :
7806 [ + + ]: 134 : if (TransactionIdIsCurrentTransactionId(memxid))
7807 : : {
7808 : 24 : remain++;
7809 : 24 : continue;
7810 : : }
7811 : :
7812 [ + + ]: 110 : if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7813 : 110 : LOCKMODE_from_mxstatus(status)))
7814 : : {
7815 [ + + + - ]: 22 : if (remaining && TransactionIdIsInProgress(memxid))
7816 : 8 : remain++;
7817 : 22 : continue;
7818 : : }
7819 : :
7820 : : /*
7821 : : * This member conflicts with our multi, so we have to sleep (or
7822 : : * return failure, if asked to avoid waiting).
7823 : : *
7824 : : * Note that we don't set up an error context callback ourselves,
7825 : : * but instead we pass the info down to XactLockTableWait. This
7826 : : * might seem a bit wasteful because the context is set up and
7827 : : * torn down for each member of the multixact, but in reality it
7828 : : * should be barely noticeable, and it avoids duplicate code.
7829 : : */
7830 [ + + ]: 88 : if (nowait)
7831 : : {
366 fujii@postgresql.org 7832 : 6 : result = ConditionalXactLockTableWait(memxid, logLockFailure);
4799 alvherre@alvh.no-ip. 7833 [ + - ]: 6 : if (!result)
7834 : 6 : break;
7835 : : }
7836 : : else
4379 7837 : 82 : XactLockTableWait(memxid, rel, ctid, oper);
7838 : : }
7839 : :
4799 7840 : 60 : pfree(members);
7841 : : }
7842 : :
7843 [ + + ]: 60 : if (remaining)
7844 : 10 : *remaining = remain;
7845 : :
7846 : 60 : return result;
7847 : : }
7848 : :
7849 : : /*
7850 : : * MultiXactIdWait
7851 : : * Sleep on a MultiXactId.
7852 : : *
7853 : : * By the time we finish sleeping, someone else may have changed the Xmax
7854 : : * of the containing tuple, so the caller needs to iterate on us somehow.
7855 : : *
7856 : : * We return (in *remaining, if not NULL) the number of members that are still
7857 : : * running, including any (non-aborted) subtransactions of our own transaction.
7858 : : */
7859 : : static void
4379 7860 : 54 : MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
7861 : : Relation rel, const ItemPointerData *ctid, XLTW_Oper oper,
7862 : : int *remaining)
7863 : : {
7864 : 54 : (void) Do_MultiXactIdWait(multi, status, infomask, false,
7865 : : rel, ctid, oper, remaining, false);
4799 7866 : 54 : }
7867 : :
7868 : : /*
7869 : : * ConditionalMultiXactIdWait
7870 : : * As above, but only lock if we can get the lock without blocking.
7871 : : *
7872 : : * By the time we finish sleeping, someone else may have changed the Xmax
7873 : : * of the containing tuple, so the caller needs to iterate on us somehow.
7874 : : *
7875 : : * Returns true if the multixact is now all gone. Returns false if some
7876 : : * member transactions might still be running.
7877 : : *
7878 : : * We return (in *remaining, if not NULL) the number of members that are still
7879 : : * running, including any (non-aborted) subtransactions of our own transaction.
7880 : : */
7881 : : static bool
7882 : 6 : ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
7883 : : uint16 infomask, Relation rel, int *remaining,
7884 : : bool logLockFailure)
7885 : : {
4379 7886 : 6 : return Do_MultiXactIdWait(multi, status, infomask, true,
7887 : : rel, NULL, XLTW_None, remaining, logLockFailure);
7888 : : }
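
/*
 * Illustrative NOWAIT-style usage, loosely modeled on the locking code in
 * this file; multi, status, infomask, rel and logLockFailure are assumed
 * to exist in the caller.
 */
if (!ConditionalMultiXactIdWait(multi, status, infomask, rel,
								NULL, logLockFailure))
	ereport(ERROR,
			(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
			 errmsg("could not obtain lock on row in relation \"%s\"",
					RelationGetRelationName(rel))));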
7889 : :
7890 : : /*
7891 : : * heap_tuple_needs_eventual_freeze
7892 : : *
7893 : : * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
7894 : : * will eventually require freezing (if tuple isn't removed by pruning first).
7895 : : */
7896 : : bool
3666 rhaas@postgresql.org 7897 : 2322151 : heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
7898 : : {
7899 : : TransactionId xid;
7900 : :
7901 : : /*
7902 : : * If xmin is a normal transaction ID, this tuple is definitely not
7903 : : * frozen.
7904 : : */
7905 : 2322151 : xid = HeapTupleHeaderGetXmin(tuple);
7906 [ + + ]: 2322151 : if (TransactionIdIsNormal(xid))
7907 : 18787 : return true;
7908 : :
7909 : : /*
7910 : : * If xmax is a valid xact or multixact, this tuple is also not frozen.
7911 : : */
7912 [ + + ]: 2303364 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
7913 : : {
7914 : : MultiXactId multi;
7915 : :
7916 : 2 : multi = HeapTupleHeaderGetRawXmax(tuple);
7917 [ + - ]: 2 : if (MultiXactIdIsValid(multi))
7918 : 2 : return true;
7919 : : }
7920 : : else
7921 : : {
7922 : 2303362 : xid = HeapTupleHeaderGetRawXmax(tuple);
7923 [ + + ]: 2303362 : if (TransactionIdIsNormal(xid))
7924 : 19 : return true;
7925 : : }
7926 : :
7927 [ - + ]: 2303343 : if (tuple->t_infomask & HEAP_MOVED)
7928 : : {
3666 rhaas@postgresql.org 7929 :UBC 0 : xid = HeapTupleHeaderGetXvac(tuple);
7930 [ # # ]: 0 : if (TransactionIdIsNormal(xid))
7931 : 0 : return true;
7932 : : }
7933 : :
3666 rhaas@postgresql.org 7934 :CBC 2303343 : return false;
7935 : : }
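
/*
 * Illustrative page-level wrapper (hypothetical, not part of this file):
 * how a caller such as VACUUM might use the check above to decide whether
 * an entire page still needs eventual freezing work.
 */
static bool
page_needs_eventual_freeze(Page page)
{
	OffsetNumber offnum,
				maxoff = PageGetMaxOffsetNumber(page);

	for (offnum = FirstOffsetNumber; offnum <= maxoff;
		 offnum = OffsetNumberNext(offnum))
	{
		ItemId		itemid = PageGetItemId(page, offnum);

		/* only normal line pointers carry tuple headers to examine */
		if (!ItemIdIsNormal(itemid))
			continue;

		if (heap_tuple_needs_eventual_freeze((HeapTupleHeader)
											 PageGetItem(page, itemid)))
			return true;
	}

	return false;
}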
7936 : :
7937 : : /*
7938 : : * heap_tuple_should_freeze
7939 : : *
7940 : : * Return value indicates whether our sibling function heap_prepare_freeze_tuple
7941 : : * would (or should) force freezing of the heap page that contains caller's tuple.
7942 : : * Tuple header XIDs/MXIDs < FreezeLimit/MultiXactCutoff trigger freezing.
7943 : : * This includes (xmin, xmax, xvac) fields, as well as MultiXact member XIDs.
7944 : : *
7945 : : * The *NoFreezePageRelfrozenXid and *NoFreezePageRelminMxid input/output
7946 : : * arguments help VACUUM track the oldest extant XID/MXID remaining in rel.
7947 : : * Our working assumption is that caller won't decide to freeze this tuple.
7948 : : * It's up to caller to only ratchet back its own top-level trackers after the
7949 : : * point that it fully commits to not freezing the tuple/page in question.
7950 : : */
7951 : : bool
1173 pg@bowt.ie 7952 : 2689516 : heap_tuple_should_freeze(HeapTupleHeader tuple,
7953 : : const struct VacuumCutoffs *cutoffs,
7954 : : TransactionId *NoFreezePageRelfrozenXid,
7955 : : MultiXactId *NoFreezePageRelminMxid)
7956 : : {
7957 : : TransactionId xid;
7958 : : MultiXactId multi;
1179 7959 : 2689516 : bool freeze = false;
7960 : :
7961 : : /* First deal with xmin */
5242 rhaas@postgresql.org 7962 : 2689516 : xid = HeapTupleHeaderGetXmin(tuple);
1442 pg@bowt.ie 7963 [ + + ]: 2689516 : if (TransactionIdIsNormal(xid))
7964 : : {
1179 7965 [ - + ]: 2688592 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
1173 7966 [ + + ]: 2688592 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7967 : 20195 : *NoFreezePageRelfrozenXid = xid;
1179 7968 [ + + ]: 2688592 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7969 : 18060 : freeze = true;
7970 : : }
7971 : :
7972 : : /* Now deal with xmax */
1442 7973 : 2689516 : xid = InvalidTransactionId;
7974 : 2689516 : multi = InvalidMultiXactId;
7975 [ + + ]: 2689516 : if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
4490 alvherre@alvh.no-ip. 7976 : 2 : multi = HeapTupleHeaderGetRawXmax(tuple);
7977 : : else
1442 pg@bowt.ie 7978 : 2689514 : xid = HeapTupleHeaderGetRawXmax(tuple);
7979 : :
7980 [ + + ]: 2689516 : if (TransactionIdIsNormal(xid))
7981 : : {
1179 7982 [ - + ]: 264321 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
7983 : : /* xmax is a non-permanent XID */
1173 7984 [ + + ]: 264321 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
7985 : 6 : *NoFreezePageRelfrozenXid = xid;
1179 7986 [ + + ]: 264321 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
7987 : 32 : freeze = true;
7988 : : }
1442 7989 [ + + ]: 2425195 : else if (!MultiXactIdIsValid(multi))
7990 : : {
7991 : : /* xmax is a permanent XID or invalid MultiXactId/XID */
7992 : : }
7993 [ - + ]: 2 : else if (HEAP_LOCKED_UPGRADED(tuple->t_infomask))
7994 : : {
7995 : : /* xmax is a pg_upgrade'd MultiXact, which can't have updater XID */
1173 pg@bowt.ie 7996 [ # # ]:UBC 0 : if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
7997 : 0 : *NoFreezePageRelminMxid = multi;
7998 : : /* heap_prepare_freeze_tuple always freezes pg_upgrade'd xmax */
1179 7999 : 0 : freeze = true;
8000 : : }
8001 : : else
8002 : : {
8003 : : /* xmax is a MultiXactId that may have an updater XID */
8004 : : MultiXactMember *members;
8005 : : int nmembers;
8006 : :
1179 pg@bowt.ie 8007 [ - + ]:CBC 2 : Assert(MultiXactIdPrecedesOrEquals(cutoffs->relminmxid, multi));
1173 8008 [ + - ]: 2 : if (MultiXactIdPrecedes(multi, *NoFreezePageRelminMxid))
8009 : 2 : *NoFreezePageRelminMxid = multi;
1179 8010 [ + - ]: 2 : if (MultiXactIdPrecedes(multi, cutoffs->MultiXactCutoff))
8011 : 2 : freeze = true;
8012 : :
8013 : : /* need to check whether any member of the mxact is old */
1442 8014 : 2 : nmembers = GetMultiXactIdMembers(multi, &members, false,
8015 : 2 : HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
8016 : :
8017 [ + + ]: 5 : for (int i = 0; i < nmembers; i++)
8018 : : {
8019 : 3 : xid = members[i].xid;
1179 8020 [ - + ]: 3 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
1173 8021 [ - + ]: 3 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
1173 pg@bowt.ie 8022 :UBC 0 : *NoFreezePageRelfrozenXid = xid;
1179 pg@bowt.ie 8023 [ - + ]:CBC 3 : if (TransactionIdPrecedes(xid, cutoffs->FreezeLimit))
1179 pg@bowt.ie 8024 :UBC 0 : freeze = true;
8025 : : }
1442 pg@bowt.ie 8026 [ + + ]:CBC 2 : if (nmembers > 0)
8027 : 1 : pfree(members);
8028 : : }
8029 : :
5242 rhaas@postgresql.org 8030 [ - + ]: 2689516 : if (tuple->t_infomask & HEAP_MOVED)
8031 : : {
5242 rhaas@postgresql.org 8032 :UBC 0 : xid = HeapTupleHeaderGetXvac(tuple);
1442 pg@bowt.ie 8033 [ # # ]: 0 : if (TransactionIdIsNormal(xid))
8034 : : {
1179 8035 [ # # ]: 0 : Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
1173 8036 [ # # ]: 0 : if (TransactionIdPrecedes(xid, *NoFreezePageRelfrozenXid))
8037 : 0 : *NoFreezePageRelfrozenXid = xid;
8038 : : /* heap_prepare_freeze_tuple forces xvac freezing */
1179 8039 : 0 : freeze = true;
8040 : : }
8041 : : }
8042 : :
1179 pg@bowt.ie 8043 :CBC 2689516 : return freeze;
8044 : : }
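
/*
 * Illustrative VACUUM-side sketch (hypothetical helper): the trackers are
 * ratcheted back unconditionally by the function above, so per its
 * contract a caller must adopt them only once it commits to *not*
 * freezing the tuple.
 */
static bool
consider_tuple_freeze(HeapTupleHeader tuple,
					  const struct VacuumCutoffs *cutoffs,
					  TransactionId *pageRelfrozenXid,
					  MultiXactId *pageRelminMxid)
{
	TransactionId noFreezeXid = *pageRelfrozenXid;
	MultiXactId	noFreezeMxid = *pageRelminMxid;

	if (heap_tuple_should_freeze(tuple, cutoffs,
								 &noFreezeXid, &noFreezeMxid))
		return true;			/* caller freezes; trackers stay put */

	/* caller leaves the tuple alone; adopt the ratcheted-back values */
	*pageRelfrozenXid = noFreezeXid;
	*pageRelminMxid = noFreezeMxid;
	return false;
}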
8045 : :
8046 : : /*
8047 : : * Maintain snapshotConflictHorizon for caller by ratcheting forward its value
8048 : : * using any committed XIDs contained in 'tuple', an obsolescent heap tuple
8049 : : * that caller is in the process of physically removing, e.g. via HOT pruning
8050 : : * or index deletion.
8051 : : *
8052 : : * Caller must initialize its value to InvalidTransactionId, which is
8053 : : * generally interpreted as "definitely no need for a recovery conflict".
8054 : : * Final value must reflect all heap tuples that caller will physically remove
8055 : : * (or remove TID references to) via its ongoing pruning/deletion operation.
8056 : : * ResolveRecoveryConflictWithSnapshot() is passed the final value (taken from
8057 : : * caller's WAL record) by REDO routine when it replays caller's operation.
8058 : : */
8059 : : void
1214 8060 : 1611273 : HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple,
8061 : : TransactionId *snapshotConflictHorizon)
8062 : : {
5930 simon@2ndQuadrant.co 8063 : 1611273 : TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
4799 alvherre@alvh.no-ip. 8064 : 1611273 : TransactionId xmax = HeapTupleHeaderGetUpdateXid(tuple);
5930 simon@2ndQuadrant.co 8065 : 1611273 : TransactionId xvac = HeapTupleHeaderGetXvac(tuple);
8066 : :
5879 tgl@sss.pgh.pa.us 8067 [ - + ]: 1611273 : if (tuple->t_infomask & HEAP_MOVED)
8068 : : {
1214 pg@bowt.ie 8069 [ # # ]:UBC 0 : if (TransactionIdPrecedes(*snapshotConflictHorizon, xvac))
8070 : 0 : *snapshotConflictHorizon = xvac;
8071 : : }
8072 : :
8073 : : /*
8074 : : * Ignore tuples inserted by an aborted transaction or if the tuple was
8075 : : * updated/deleted by the inserting transaction.
8076 : : *
8077 : : * Look for a committed hint bit, or if no xmin bit is set, check clog.
8078 : : */
4466 rhaas@postgresql.org 8079 [ + + ]:CBC 1611273 : if (HeapTupleHeaderXminCommitted(tuple) ||
8080 [ + + + - ]: 97680 : (!HeapTupleHeaderXminInvalid(tuple) && TransactionIdDidCommit(xmin)))
8081 : : {
5575 simon@2ndQuadrant.co 8082 [ + + + + ]: 2880863 : if (xmax != xmin &&
1214 pg@bowt.ie 8083 : 1349734 : TransactionIdFollows(xmax, *snapshotConflictHorizon))
8084 : 97085 : *snapshotConflictHorizon = xmax;
8085 : : }
5930 simon@2ndQuadrant.co 8086 : 1611273 : }
8087 : :
8088 : : #ifdef USE_PREFETCH
8089 : : /*
8090 : : * Helper function for heap_index_delete_tuples. Issues prefetch requests for
8091 : : * prefetch_count buffers. The prefetch_state keeps track of all the buffers
8092 : : * we can prefetch, and which have already been prefetched; each call to this
8093 : : * function picks up where the previous call left off.
8094 : : *
8095 : : * Note: we expect the deltids array to be sorted in an order that groups TIDs
8096 : : * by heap block, with all TIDs for each block appearing together in exactly
8097 : : * one group.
8098 : : */
8099 : : static void
1887 pg@bowt.ie 8100 : 19522 : index_delete_prefetch_buffer(Relation rel,
8101 : : IndexDeletePrefetchState *prefetch_state,
8102 : : int prefetch_count)
8103 : : {
2546 andres@anarazel.de 8104 : 19522 : BlockNumber cur_hblkno = prefetch_state->cur_hblkno;
8105 : 19522 : int count = 0;
8106 : : int i;
1887 pg@bowt.ie 8107 : 19522 : int ndeltids = prefetch_state->ndeltids;
8108 : 19522 : TM_IndexDelete *deltids = prefetch_state->deltids;
8109 : :
2546 andres@anarazel.de 8110 : 19522 : for (i = prefetch_state->next_item;
1887 pg@bowt.ie 8111 [ + + + + ]: 675564 : i < ndeltids && count < prefetch_count;
2546 andres@anarazel.de 8112 : 656042 : i++)
8113 : : {
1887 pg@bowt.ie 8114 : 656042 : ItemPointer htid = &deltids[i].tid;
8115 : :
2546 andres@anarazel.de 8116 [ + + + + ]: 1306307 : if (cur_hblkno == InvalidBlockNumber ||
8117 : 650265 : ItemPointerGetBlockNumber(htid) != cur_hblkno)
8118 : : {
8119 : 17922 : cur_hblkno = ItemPointerGetBlockNumber(htid);
8120 : 17922 : PrefetchBuffer(rel, MAIN_FORKNUM, cur_hblkno);
8121 : 17922 : count++;
8122 : : }
8123 : : }
8124 : :
8125 : : /*
8126 : : * Save the prefetch position so that next time we can continue from that
8127 : : * position.
8128 : : */
8129 : 19522 : prefetch_state->next_item = i;
8130 : 19522 : prefetch_state->cur_hblkno = cur_hblkno;
8131 : 19522 : }
8132 : : #endif
8133 : :
8134 : : /*
8135 : : * Helper function for heap_index_delete_tuples. Checks for index corruption
8136 : : * involving an invalid TID in index AM caller's index page.
8137 : : *
8138 : : * This is an ideal place for these checks. The index AM must hold a buffer
8139 : : * lock on the index page containing the TIDs we examine here, so we don't
8140 : : * have to worry about concurrent VACUUMs at all. We can be sure that the
8141 : : * index is corrupt when htid points directly to an LP_UNUSED item or
8142 : : * heap-only tuple, which is not the case during standard index scans.
8143 : : */
8144 : : static inline void
1592 pg@bowt.ie 8145 : 546997 : index_delete_check_htid(TM_IndexDeleteOp *delstate,
8146 : : Page page, OffsetNumber maxoff,
8147 : : const ItemPointerData *htid, TM_IndexStatus *istatus)
8148 : : {
8149 : 546997 : OffsetNumber indexpagehoffnum = ItemPointerGetOffsetNumber(htid);
8150 : : ItemId iid;
8151 : :
8152 [ + - + - : 546997 : Assert(OffsetNumberIsValid(istatus->idxoffnum));
- + ]
8153 : :
8154 [ - + ]: 546997 : if (unlikely(indexpagehoffnum > maxoff))
1592 pg@bowt.ie 8155 [ # # ]:UBC 0 : ereport(ERROR,
8156 : : (errcode(ERRCODE_INDEX_CORRUPTED),
8157 : : errmsg_internal("heap tid from index tuple (%u,%u) points past end of heap page line pointer array at offset %u of block %u in index \"%s\"",
8158 : : ItemPointerGetBlockNumber(htid),
8159 : : indexpagehoffnum,
8160 : : istatus->idxoffnum, delstate->iblknum,
8161 : : RelationGetRelationName(delstate->irel))));
8162 : :
1592 pg@bowt.ie 8163 :CBC 546997 : iid = PageGetItemId(page, indexpagehoffnum);
8164 [ - + ]: 546997 : if (unlikely(!ItemIdIsUsed(iid)))
1592 pg@bowt.ie 8165 [ # # ]:UBC 0 : ereport(ERROR,
8166 : : (errcode(ERRCODE_INDEX_CORRUPTED),
8167 : : errmsg_internal("heap tid from index tuple (%u,%u) points to unused heap page item at offset %u of block %u in index \"%s\"",
8168 : : ItemPointerGetBlockNumber(htid),
8169 : : indexpagehoffnum,
8170 : : istatus->idxoffnum, delstate->iblknum,
8171 : : RelationGetRelationName(delstate->irel))));
8172 : :
1592 pg@bowt.ie 8173 [ + + ]:CBC 546997 : if (ItemIdHasStorage(iid))
8174 : : {
8175 : : HeapTupleHeader htup;
8176 : :
8177 [ - + ]: 322219 : Assert(ItemIdIsNormal(iid));
8178 : 322219 : htup = (HeapTupleHeader) PageGetItem(page, iid);
8179 : :
8180 [ - + ]: 322219 : if (unlikely(HeapTupleHeaderIsHeapOnly(htup)))
1592 pg@bowt.ie 8181 [ # # ]:UBC 0 : ereport(ERROR,
8182 : : (errcode(ERRCODE_INDEX_CORRUPTED),
8183 : : errmsg_internal("heap tid from index tuple (%u,%u) points to heap-only tuple at offset %u of block %u in index \"%s\"",
8184 : : ItemPointerGetBlockNumber(htid),
8185 : : indexpagehoffnum,
8186 : : istatus->idxoffnum, delstate->iblknum,
8187 : : RelationGetRelationName(delstate->irel))));
8188 : : }
1592 pg@bowt.ie 8189 :CBC 546997 : }
8190 : :
8191 : : /*
8192 : : * heapam implementation of tableam's index_delete_tuples interface.
8193 : : *
8194 : : * This helper function is called by index AMs during index tuple deletion.
8195 : : * See tableam header comments for an explanation of the interface implemented
8196 : : * here and a general theory of operation. Note that each call here is either
8197 : : * a simple index deletion call, or a bottom-up index deletion call.
8198 : : *
8199 : : * It's possible for this to generate a fair amount of I/O, since we may be
8200 : : * deleting hundreds of tuples from a single index block. To amortize that
8201 : : * cost to some degree, this uses prefetching and combines repeat accesses to
8202 : : * the same heap block.
8203 : : */
8204 : : TransactionId
1887 8205 : 5777 : heap_index_delete_tuples(Relation rel, TM_IndexDeleteOp *delstate)
8206 : : {
8207 : : /* Initial assumption is that earlier pruning took care of conflict */
1214 8208 : 5777 : TransactionId snapshotConflictHorizon = InvalidTransactionId;
1901 8209 : 5777 : BlockNumber blkno = InvalidBlockNumber;
2546 andres@anarazel.de 8210 : 5777 : Buffer buf = InvalidBuffer;
1901 pg@bowt.ie 8211 : 5777 : Page page = NULL;
8212 : 5777 : OffsetNumber maxoff = InvalidOffsetNumber;
8213 : : TransactionId priorXmax;
8214 : : #ifdef USE_PREFETCH
8215 : : IndexDeletePrefetchState prefetch_state;
8216 : : int prefetch_distance;
8217 : : #endif
8218 : : SnapshotData SnapshotNonVacuumable;
1887 8219 : 5777 : int finalndeltids = 0,
8220 : 5777 : nblocksaccessed = 0;
8221 : :
8222 : : /* State that's only used in bottom-up index deletion case */
8223 : 5777 : int nblocksfavorable = 0;
8224 : 5777 : int curtargetfreespace = delstate->bottomupfreespace,
8225 : 5777 : lastfreespace = 0,
8226 : 5777 : actualfreespace = 0;
8227 : 5777 : bool bottomup_final_block = false;
8228 : :
8229 : 5777 : InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
8230 : :
8231 : : /* Sort caller's deltids array by TID for further processing */
8232 : 5777 : index_delete_sort(delstate);
8233 : :
8234 : : /*
8235 : : * Bottom-up case: resort deltids array in an order attuned to where the
8236 : : * greatest number of promising TIDs are to be found, and determine how
8237 : : * many blocks from the start of sorted array should be considered
8238 : : * favorable. This will also shrink the deltids array in order to
8239 : : * eliminate completely unfavorable blocks up front.
8240 : : */
8241 [ + + ]: 5777 : if (delstate->bottomup)
8242 : 1987 : nblocksfavorable = bottomup_sort_and_shrink(delstate);
8243 : :
8244 : : #ifdef USE_PREFETCH
8245 : : /* Initialize prefetch state. */
2546 andres@anarazel.de 8246 : 5777 : prefetch_state.cur_hblkno = InvalidBlockNumber;
8247 : 5777 : prefetch_state.next_item = 0;
1887 pg@bowt.ie 8248 : 5777 : prefetch_state.ndeltids = delstate->ndeltids;
8249 : 5777 : prefetch_state.deltids = delstate->deltids;
8250 : :
8251 : : /*
8252 : : * Determine the prefetch distance that we will attempt to maintain.
8253 : : *
8254 : : * Since the caller holds a buffer lock somewhere in rel, we'd better make
8255 : : * sure that isn't a catalog relation before we call code that does
8256 : : * syscache lookups, to avoid risk of deadlock.
8257 : : */
2539 tmunro@postgresql.or 8258 [ + + ]: 5777 : if (IsCatalogRelation(rel))
2190 8259 : 4181 : prefetch_distance = maintenance_io_concurrency;
8260 : : else
8261 : : prefetch_distance =
8262 : 1596 : get_tablespace_maintenance_io_concurrency(rel->rd_rel->reltablespace);
8263 : :
8264 : : /* Cap initial prefetch distance for bottom-up deletion caller */
1887 pg@bowt.ie 8265 [ + + ]: 5777 : if (delstate->bottomup)
8266 : : {
8267 [ - + ]: 1987 : Assert(nblocksfavorable >= 1);
8268 [ - + ]: 1987 : Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
8269 : 1987 : prefetch_distance = Min(prefetch_distance, nblocksfavorable);
8270 : : }
8271 : :
8272 : : /* Start prefetching. */
8273 : 5777 : index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
8274 : : #endif
8275 : :
8276 : : /* Iterate over deltids, determine which to delete, check their horizon */
8277 [ - + ]: 5777 : Assert(delstate->ndeltids > 0);
8278 [ + + ]: 552774 : for (int i = 0; i < delstate->ndeltids; i++)
8279 : : {
8280 : 548984 : TM_IndexDelete *ideltid = &delstate->deltids[i];
8281 : 548984 : TM_IndexStatus *istatus = delstate->status + ideltid->id;
8282 : 548984 : ItemPointer htid = &ideltid->tid;
8283 : : OffsetNumber offnum;
8284 : :
8285 : : /*
8286 : : * Read buffer, and perform required extra steps each time a new block
8287 : : * is encountered. Avoid refetching if it's the same block as the one
8288 : : * from the last htid.
8289 : : */
1901 8290 [ + + + + ]: 1092191 : if (blkno == InvalidBlockNumber ||
8291 : 543207 : ItemPointerGetBlockNumber(htid) != blkno)
8292 : : {
8293 : : /*
8294 : : * Consider giving up early for bottom-up index deletion caller
8295 : : * first. (Only prefetch next-next block afterwards, when it
8296 : : * becomes clear that we're at least going to access the next
8297 : : * block in line.)
8298 : : *
8299 : : * Sometimes the first block frees so much space for bottom-up
8300 : : * caller that the deletion process can end without accessing any
8301 : : * more blocks. It is usually necessary to access 2 or 3 blocks
8302 : : * per bottom-up deletion operation, though.
8303 : : */
1887 8304 [ + + ]: 15732 : if (delstate->bottomup)
8305 : : {
8306 : : /*
8307 : : * We often allow caller to delete a few additional items
8308 : : * whose entries we reached after the point that space target
8309 : : * from caller was satisfied. The cost of accessing the page
8310 : : * was already paid at that point, so it made sense to finish
8311 : : * it off. When that happened, we finalize everything here
8312 : : * (by finishing off the whole bottom-up deletion operation
8313 : : * without needlessly paying the cost of accessing any more
8314 : : * blocks).
8315 : : */
8316 [ + + ]: 4370 : if (bottomup_final_block)
8317 : 122 : break;
8318 : :
8319 : : /*
8320 : : * Give up when we didn't enable our caller to free any
8321 : : * additional space as a result of processing the page that we
8322 : : * just finished up with. This rule is the main way in which
8323 : : * we keep the cost of bottom-up deletion under control.
8324 : : */
8325 [ + + + + ]: 4248 : if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
8326 : 1865 : break;
8327 : 2383 : lastfreespace = actualfreespace; /* for next time */
8328 : :
8329 : : /*
8330 : : * Deletion operation (which is bottom-up) will definitely
8331 : : * access the next block in line. Prepare for that now.
8332 : : *
8333 : : * Decay target free space so that we don't hang on for too
8334 : : * long with a marginal case. (Space target is only truly
8335 : : * helpful when it allows us to recognize that we don't need
8336 : : * to access more than 1 or 2 blocks to satisfy caller due to
8337 : : * agreeable workload characteristics.)
8338 : : *
8339 : : * We are a bit more patient when we encounter contiguous
8340 : : * blocks, though: these are treated as favorable blocks. The
8341 : : * decay process is only applied when the next block in line
8342 : : * is not a favorable/contiguous block. This is not an
8343 : : * exception to the general rule; we still insist on finding
8344 : : * at least one deletable item per block accessed. See
8345 : : * bottomup_nblocksfavorable() for full details of the theory
8346 : : * behind favorable blocks and heap block locality in general.
8347 : : *
8348 : : * Note: The first block in line is always treated as a
8349 : : * favorable block, so the earliest possible point that the
8350 : : * decay can be applied is just before we access the second
8351 : : * block in line. The Assert() verifies this for us.
8352 : : */
8353 [ + + - + ]: 2383 : Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
8354 [ + + ]: 2383 : if (nblocksfavorable > 0)
8355 : 2186 : nblocksfavorable--;
8356 : : else
8357 : 197 : curtargetfreespace /= 2;
8358 : : }
8359 : :
8360 : : /* release old buffer */
8361 [ + + ]: 13745 : if (BufferIsValid(buf))
8362 : 7968 : UnlockReleaseBuffer(buf);
8363 : :
8364 : 13745 : blkno = ItemPointerGetBlockNumber(htid);
1901 8365 : 13745 : buf = ReadBuffer(rel, blkno);
1887 8366 : 13745 : nblocksaccessed++;
8367 [ + + - + ]: 13745 : Assert(!delstate->bottomup ||
8368 : : nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
8369 : :
8370 : : #ifdef USE_PREFETCH
8371 : :
8372 : : /*
8373 : : * To maintain the prefetch distance, prefetch one more page for
8374 : : * each page we read.
8375 : : */
8376 : 13745 : index_delete_prefetch_buffer(rel, &prefetch_state, 1);
8377 : : #endif
8378 : :
1901 8379 : 13745 : LockBuffer(buf, BUFFER_LOCK_SHARE);
8380 : :
8381 : 13745 : page = BufferGetPage(buf);
8382 : 13745 : maxoff = PageGetMaxOffsetNumber(page);
8383 : : }
8384 : :
8385 : : /*
8386 : : * In passing, detect index corruption involving an index page with a
8387 : : * TID that points to a location in the heap that couldn't possibly be
8388 : : * correct. We only do this with actual TIDs from caller's index page
8389 : : * (not items reached by traversing through a HOT chain).
8390 : : */
1592 8391 : 546997 : index_delete_check_htid(delstate, page, maxoff, htid, istatus);
8392 : :
1887 8393 [ + + ]: 546997 : if (istatus->knowndeletable)
8394 [ + - - + ]: 136209 : Assert(!delstate->bottomup && !istatus->promising);
8395 : : else
8396 : : {
8397 : 410788 : ItemPointerData tmp = *htid;
8398 : : HeapTupleData heapTuple;
8399 : :
8400 : : /* Are any tuples from this HOT chain non-vacuumable? */
8401 [ + + ]: 410788 : if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
8402 : : &heapTuple, NULL, true))
8403 : 245023 : continue; /* can't delete entry */
8404 : :
8405 : : /* Caller will delete, since whole HOT chain is vacuumable */
8406 : 165765 : istatus->knowndeletable = true;
8407 : :
8408 : : /* Maintain index free space info for bottom-up deletion case */
8409 [ + + ]: 165765 : if (delstate->bottomup)
8410 : : {
8411 [ - + ]: 7522 : Assert(istatus->freespace > 0);
8412 : 7522 : actualfreespace += istatus->freespace;
8413 [ + + ]: 7522 : if (actualfreespace >= curtargetfreespace)
8414 : 2104 : bottomup_final_block = true;
8415 : : }
8416 : : }
8417 : :
8418 : : /*
8419 : : * Maintain snapshotConflictHorizon value for deletion operation as a
8420 : : * whole by advancing current value using heap tuple headers. This is
8421 : : * loosely based on the logic for pruning a HOT chain.
8422 : : */
1901 8423 : 301974 : offnum = ItemPointerGetOffsetNumber(htid);
8424 : 301974 : priorXmax = InvalidTransactionId; /* cannot check first XMIN */
8425 : : for (;;)
2546 andres@anarazel.de 8426 : 21154 : {
8427 : : ItemId lp;
8428 : : HeapTupleHeader htup;
8429 : :
8430 : : /* Sanity check (pure paranoia) */
1635 pg@bowt.ie 8431 [ - + ]: 323128 : if (offnum < FirstOffsetNumber)
1635 pg@bowt.ie 8432 :UBC 0 : break;
8433 : :
8434 : : /*
8435 : : * An offset past the end of page's line pointer array is possible
8436 : : * when the array was truncated
8437 : : */
1635 pg@bowt.ie 8438 [ - + ]:CBC 323128 : if (offnum > maxoff)
1901 pg@bowt.ie 8439 :UBC 0 : break;
8440 : :
1901 pg@bowt.ie 8441 :CBC 323128 : lp = PageGetItemId(page, offnum);
8442 [ + + ]: 323128 : if (ItemIdIsRedirected(lp))
8443 : : {
8444 : 9498 : offnum = ItemIdGetRedirect(lp);
8445 : 9498 : continue;
8446 : : }
8447 : :
8448 : : /*
8449 : : * We'll often encounter LP_DEAD line pointers (especially with an
8450 : : * entry marked knowndeletable by our caller up front). No heap
8451 : : * tuple headers get examined for an htid that leads us to an
8452 : : * LP_DEAD item. This is okay because the earlier pruning
8453 : : * operation that made the line pointer LP_DEAD in the first place
8454 : : * must have considered the original tuple header as part of
8455 : : * generating its own snapshotConflictHorizon value.
8456 : : *
8457 : : * Relying on XLOG_HEAP2_PRUNE_VACUUM_SCAN records like this is
8458 : : * the same strategy that index vacuuming uses in all cases. Index
8459 : : * VACUUM WAL records don't even have a snapshotConflictHorizon
8460 : : * field of their own for this reason.
8461 : : */
8462 [ + + ]: 313630 : if (!ItemIdIsNormal(lp))
8463 : 198122 : break;
8464 : :
8465 : 115508 : htup = (HeapTupleHeader) PageGetItem(page, lp);
8466 : :
8467 : : /*
8468 : : * Check the tuple XMIN against prior XMAX, if any
8469 : : */
8470 [ + + - + ]: 127164 : if (TransactionIdIsValid(priorXmax) &&
8471 : 11656 : !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
1901 pg@bowt.ie 8472 :UBC 0 : break;
8473 : :
1214 pg@bowt.ie 8474 :CBC 115508 : HeapTupleHeaderAdvanceConflictHorizon(htup,
8475 : : &snapshotConflictHorizon);
8476 : :
8477 : : /*
8478 : : * If the tuple is not HOT-updated, then we are at the end of this
8479 : : * HOT-chain. No need to visit later tuples from the same update
8480 : : * chain (they get their own index entries) -- just move on to
8481 : : * next htid from index AM caller.
8482 : : */
1901 8483 [ + + ]: 115508 : if (!HeapTupleHeaderIsHotUpdated(htup))
8484 : 103852 : break;
8485 : :
8486 : : /* Advance to next HOT chain member */
8487 [ - + ]: 11656 : Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
8488 : 11656 : offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
8489 : 11656 : priorXmax = HeapTupleHeaderGetUpdateXid(htup);
8490 : : }
8491 : :
8492 : : /* Enable further/final shrinking of deltids for caller */
1887 8493 : 301974 : finalndeltids = i + 1;
8494 : : }
8495 : :
8496 : 5777 : UnlockReleaseBuffer(buf);
8497 : :
8498 : : /*
8499 : : * Shrink deltids array to exclude non-deletable entries at the end. This
8500 : : * is not just a minor optimization. Final deltids array size might be
8501 : : * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
8502 : : * ndeltids being zero in all cases with zero total deletable entries.
8503 : : */
8504 [ + + - + ]: 5777 : Assert(finalndeltids > 0 || delstate->bottomup);
8505 : 5777 : delstate->ndeltids = finalndeltids;
8506 : :
1214 8507 : 5777 : return snapshotConflictHorizon;
8508 : : }
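
/*
 * Illustrative index-AM-side sketch with hypothetical variable names: a
 * simple (non-bottom-up) deletion call, where the caller has already
 * collected its TIDs into parallel deltids/status arrays.  Real index AMs
 * reach this function through the tableam wrapper rather than calling it
 * directly.
 */
TM_IndexDeleteOp delstate;

delstate.irel = indexRel;
delstate.iblknum = BufferGetBlockNumber(indexBuf);
delstate.bottomup = false;		/* simple deletion, not bottom-up */
delstate.bottomupfreespace = 0;
delstate.ndeltids = ndeadtids;
delstate.deltids = deltids;		/* TM_IndexDelete array, one per TID */
delstate.status = status;		/* parallel TM_IndexStatus array */

conflict_horizon = heap_index_delete_tuples(heapRel, &delstate);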
8509 : :
8510 : : /*
8511 : : * Specialized inlineable comparison function for index_delete_sort()
8512 : : */
8513 : : static inline int
1887 8514 : 12829655 : index_delete_sort_cmp(TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
8515 : : {
8516 : 12829655 : ItemPointer tid1 = &deltid1->tid;
8517 : 12829655 : ItemPointer tid2 = &deltid2->tid;
8518 : :
8519 : : {
8520 : 12829655 : BlockNumber blk1 = ItemPointerGetBlockNumber(tid1);
8521 : 12829655 : BlockNumber blk2 = ItemPointerGetBlockNumber(tid2);
8522 : :
8523 [ + + ]: 12829655 : if (blk1 != blk2)
8524 [ + + ]: 5251616 : return (blk1 < blk2) ? -1 : 1;
8525 : : }
8526 : : {
8527 : 7578039 : OffsetNumber pos1 = ItemPointerGetOffsetNumber(tid1);
8528 : 7578039 : OffsetNumber pos2 = ItemPointerGetOffsetNumber(tid2);
8529 : :
8530 [ + - ]: 7578039 : if (pos1 != pos2)
8531 [ + + ]: 7578039 : return (pos1 < pos2) ? -1 : 1;
8532 : : }
8533 : :
1598 pg@bowt.ie 8534 :UBC 0 : Assert(false);
8535 : :
8536 : : return 0;
8537 : : }
8538 : :
8539 : : /*
8540 : : * Sort deltids array from delstate by TID. This prepares it for further
8541 : : * processing by heap_index_delete_tuples().
8542 : : *
8543 : : * This operation becomes a noticeable consumer of CPU cycles with some
8544 : : * workloads, so we go to the trouble of specialization/micro optimization.
8545 : : * We use shellsort for this because it's easy to specialize, compiles to
8546 : : * relatively few instructions, and is adaptive to presorted inputs/subsets
8547 : : * (which are typical here).
8548 : : */
8549 : : static void
1887 pg@bowt.ie 8550 :CBC 5777 : index_delete_sort(TM_IndexDeleteOp *delstate)
8551 : : {
8552 : 5777 : TM_IndexDelete *deltids = delstate->deltids;
8553 : 5777 : int ndeltids = delstate->ndeltids;
8554 : :
8555 : : /*
8556 : : * Shellsort gap sequence (taken from Sedgewick-Incerpi paper).
8557 : : *
8558 : : * This implementation is fast with array sizes up to ~4500. This covers
8559 : : * all supported BLCKSZ values.
8560 : : */
8561 : 5777 : const int gaps[9] = {1968, 861, 336, 112, 48, 21, 7, 3, 1};
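	/*
	 * For example (illustrative): with ndeltids == 100, the 1968/861/336/112
	 * gap passes fall through immediately (their starting index is already
	 * past the end of the array), and only the 48/21/7/3/1 passes move any
	 * elements.
	 */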
8562 : :
8563 : : /* Think carefully before changing anything here -- keep swaps cheap */
8564 : : StaticAssertDecl(sizeof(TM_IndexDelete) <= 8,
8565 : : "element size exceeds 8 bytes");
8566 : :
8567 [ + + ]: 57770 : for (int g = 0; g < lengthof(gaps); g++)
8568 : : {
494 dgustafsson@postgres 8569 [ + + ]: 7658776 : for (int hi = gaps[g], i = hi; i < ndeltids; i++)
8570 : : {
1887 pg@bowt.ie 8571 : 7606783 : TM_IndexDelete d = deltids[i];
8572 : 7606783 : int j = i;
8573 : :
8574 [ + + + + ]: 13206792 : while (j >= hi && index_delete_sort_cmp(&deltids[j - hi], &d) >= 0)
8575 : : {
8576 : 5600009 : deltids[j] = deltids[j - hi];
8577 : 5600009 : j -= hi;
8578 : : }
8579 : 7606783 : deltids[j] = d;
8580 : : }
8581 : : }
8582 : 5777 : }
8583 : :
8584 : : /*
8585 : : * Returns how many blocks should be considered favorable/contiguous for a
8586 : : * bottom-up index deletion pass. This is a number of heap blocks that starts
8587 : : * from and includes the first block in line.
8588 : : *
8589 : : * There is always at least one favorable block during bottom-up index
8590 : : * deletion. In the worst case (i.e. with totally random heap blocks) the
8591 : : * first block in line (the only favorable block) can be thought of as a
8592 : : * degenerate array of contiguous blocks that consists of a single block.
8593 : : * heap_index_delete_tuples() will expect this.
8594 : : *
8595 : : * Caller passes blockgroups, a description of the final order that deltids
8596 : : * will be sorted in for heap_index_delete_tuples() bottom-up index deletion
8597 : : * processing. Note that deltids need not actually be sorted just yet (caller
8598 : : * only passes deltids to us so that we can interpret blockgroups).
8599 : : *
8600 : : * You might guess that the existence of contiguous blocks cannot matter much,
8601 : : * since in general the main factor that determines which blocks we visit is
8602 : : * the number of promising TIDs, which is a fixed hint from the index AM.
8603 : : * We're not really targeting the general case, though -- the actual goal is
8604 : : * to adapt our behavior to a wide variety of naturally occurring conditions.
8605 : : * The effects of most of the heuristics we apply are only noticeable in the
8606 : : * aggregate, over time and across many _related_ bottom-up index deletion
8607 : : * passes.
8608 : : *
8609 : : * Deeming certain blocks favorable allows heapam to recognize and adapt to
8610 : : * workloads where heap blocks visited during bottom-up index deletion can be
8611 : : * accessed contiguously, in the sense that each newly visited block is the
8612 : : * neighbor of the block that bottom-up deletion just finished processing (or
8613 : : * close enough to it). It will likely be cheaper to access more favorable
8614 : : * blocks sooner rather than later (e.g. in this pass, not across a series of
8615 : : * related bottom-up passes). Either way it is probably only a matter of time
8616 : : * (or a matter of further correlated version churn) before all blocks that
8617 : : * appear together as a single large batch of favorable blocks get accessed by
8618 : : * _some_ bottom-up pass. Large batches of favorable blocks tend to either
8619 : : * appear almost constantly or not even once (it all depends on per-index
8620 : : * workload characteristics).
8621 : : *
8622 : : * Note that the blockgroups sort order applies a power-of-two bucketing
8623 : : * scheme that creates opportunities for contiguous groups of blocks to get
8624 : : * batched together, at least with workloads that are naturally amenable to
8625 : : * being driven by heap block locality. This doesn't just enhance the spatial
8626 : : * locality of bottom-up heap block processing in the obvious way. It also
8627 : : * enables temporal locality of access, since sorting by heap block number
8628 : : * naturally tends to make the bottom-up processing order deterministic.
8629 : : *
8630 : : * Consider the following example to get a sense of how temporal locality
8631 : : * might matter: There is a heap relation with several indexes, each of which
8632 : : * is low to medium cardinality. It is subject to constant non-HOT updates.
8633 : : * The updates are skewed (in one part of the primary key, perhaps). None of
8634 : : * the indexes are logically modified by the UPDATE statements (if they were
8635 : : * then bottom-up index deletion would not be triggered in the first place).
8636 : : * Naturally, each new round of index tuples (for each heap tuple that gets a
8637 : : * heap_update() call) will have the same heap TID in each and every index.
8638 : : * Since these indexes are low cardinality and never get logically modified,
8639 : : * heapam processing during bottom-up deletion passes will access heap blocks
8640 : : * in approximately sequential order. Temporal locality of access occurs due
8641 : : * to bottom-up deletion passes behaving very similarly across each of the
8642 : : * indexes at any given moment. This keeps the number of buffer misses needed
8643 : : * to visit heap blocks to a minimum.
8644 : : */
8645 : : static int
8646 : 1987 : bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
8647 : : TM_IndexDelete *deltids)
8648 : : {
8649 : 1987 : int64 lastblock = -1;
8650 : 1987 : int nblocksfavorable = 0;
8651 : :
8652 [ - + ]: 1987 : Assert(nblockgroups >= 1);
8653 [ - + ]: 1987 : Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
8654 : :
8655 : : /*
8656 : : * We tolerate heap blocks that will be accessed only slightly out of
8657 : : * physical order. Small blips occur when a pair of almost-contiguous
8658 : : * blocks happen to fall into different buckets (perhaps due only to a
8659 : : * small difference in npromisingtids that the bucketing scheme didn't
8660 : : * quite manage to ignore). We effectively ignore these blips by applying
8661 : : * a small tolerance. The precise tolerance we use is a little arbitrary,
8662 : : * but it works well enough in practice.
8663 : : */
8664 [ + + ]: 6494 : for (int b = 0; b < nblockgroups; b++)
8665 : : {
8666 : 6210 : IndexDeleteCounts *group = blockgroups + b;
8667 : 6210 : TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
8668 : 6210 : BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
8669 : :
8670 [ + + ]: 6210 : if (lastblock != -1 &&
8671 [ + + ]: 4223 : ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
8672 [ + + ]: 3567 : (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
8673 : : break;
8674 : :
8675 : 4507 : nblocksfavorable++;
8676 : 4507 : lastblock = block;
8677 : : }
8678 : :
8679 : : /* Always indicate that there is at least 1 favorable block */
8680 [ - + ]: 1987 : Assert(nblocksfavorable >= 1);
8681 : :
8682 : 1987 : return nblocksfavorable;
8683 : : }
8684 : :
8685 : : /*
8686 : : * qsort comparison function for bottomup_sort_and_shrink()
8687 : : */
8688 : : static int
8689 : 193208 : bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
8690 : : {
8691 : 193208 : const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
8692 : 193208 : const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
8693 : :
8694 : : /*
8695 : : * Most significant field is npromisingtids (which we invert the order of
8696 : : * so as to sort in desc order).
8697 : : *
8698 : : * Caller should have already normalized npromisingtids fields into
8699 : : * power-of-two values (buckets).
8700 : : */
8701 [ + + ]: 193208 : if (group1->npromisingtids > group2->npromisingtids)
8702 : 9139 : return -1;
8703 [ + + ]: 184069 : if (group1->npromisingtids < group2->npromisingtids)
8704 : 11268 : return 1;
8705 : :
8706 : : /*
8707 : : * Tiebreak: desc ntids sort order.
8708 : : *
8709 : : * We cannot expect power-of-two values for ntids fields. We should
8710 : : * behave as if they were already rounded up for us instead.
8711 : : */
8712 [ + + ]: 172801 : if (group1->ntids != group2->ntids)
8713 : : {
8714 : 124366 : uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
8715 : 124366 : uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
8716 : :
8717 [ + + ]: 124366 : if (ntids1 > ntids2)
8718 : 18609 : return -1;
8719 [ + + ]: 105757 : if (ntids1 < ntids2)
8720 : 24322 : return 1;
8721 : : }
8722 : :
8723 : : /*
8724 : : * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
8725 : : * block in deltids array) order.
8726 : : *
8727 : : * This is equivalent to sorting in ascending heap block number order
8728 : : * (among otherwise equal subsets of the array). This approach allows us
8729 : : * to avoid accessing the out-of-line TID. (We rely on the assumption
8730 : : * that the deltids array was sorted in ascending heap TID order when
8731 : : * these offsets to the first TID from each heap block group were formed.)
8732 : : */
8733 [ + + ]: 129870 : if (group1->ifirsttid > group2->ifirsttid)
8734 : 64413 : return 1;
8735 [ + - ]: 65457 : if (group1->ifirsttid < group2->ifirsttid)
8736 : 65457 : return -1;
8737 : :
1887 pg@bowt.ie 8738 :UBC 0 : pg_unreachable();
8739 : :
8740 : : return 0;
8741 : : }
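
/*
 * Illustrative only: the dynamic bucketing above means block groups with
 * ntids of, say, 17 and 30 compare as equal on this tiebreak, since
 * pg_nextpower2_32() rounds both up to 32; the final ascending-ifirsttid
 * tiebreak then decides their order.
 */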
8742 : :
8743 : : /*
8744 : : * heap_index_delete_tuples() helper function for bottom-up deletion callers.
8745 : : *
8746 : : * Sorts deltids array in the order needed for useful processing by bottom-up
8747 : : * deletion. The array should already be sorted in TID order when we're
8748 : : * called. The sort process groups heap TIDs from deltids into heap block
8749 : : * groupings. Earlier/more-promising groups/blocks are usually those that are
8750 : : * known to have the most "promising" TIDs.
8751 : : *
8752 : : * Sets new size of deltids array (ndeltids) in state. deltids will only have
8753 : : * TIDs from the BOTTOMUP_MAX_NBLOCKS most promising heap blocks when we
8754 : : * return. This often means that deltids will be shrunk to a small fraction
8755 : : * of its original size (we eliminate many heap blocks from consideration for
8756 : : * caller up front).
8757 : : *
8758 : : * Returns the number of "favorable" blocks. See bottomup_nblocksfavorable()
8759 : : * for a definition and full details.
8760 : : */
8761 : : static int
1887 pg@bowt.ie 8762 :CBC 1987 : bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
8763 : : {
8764 : : IndexDeleteCounts *blockgroups;
8765 : : TM_IndexDelete *reordereddeltids;
8766 : 1987 : BlockNumber curblock = InvalidBlockNumber;
8767 : 1987 : int nblockgroups = 0;
8768 : 1987 : int ncopied = 0;
8769 : 1987 : int nblocksfavorable = 0;
8770 : :
8771 [ - + ]: 1987 : Assert(delstate->bottomup);
8772 [ - + ]: 1987 : Assert(delstate->ndeltids > 0);
8773 : :
8774 : : /* Calculate per-heap-block count of TIDs */
95 michael@paquier.xyz 8775 :GNC 1987 : blockgroups = palloc_array(IndexDeleteCounts, delstate->ndeltids);
1887 pg@bowt.ie 8776 [ + + ]:CBC 950084 : for (int i = 0; i < delstate->ndeltids; i++)
8777 : : {
8778 : 948097 : TM_IndexDelete *ideltid = &delstate->deltids[i];
8779 : 948097 : TM_IndexStatus *istatus = delstate->status + ideltid->id;
8780 : 948097 : ItemPointer htid = &ideltid->tid;
8781 : 948097 : bool promising = istatus->promising;
8782 : :
8783 [ + + ]: 948097 : if (curblock != ItemPointerGetBlockNumber(htid))
8784 : : {
8785 : : /* New block group */
8786 : 38247 : nblockgroups++;
8787 : :
8788 [ + + - + ]: 38247 : Assert(curblock < ItemPointerGetBlockNumber(htid) ||
8789 : : !BlockNumberIsValid(curblock));
8790 : :
8791 : 38247 : curblock = ItemPointerGetBlockNumber(htid);
8792 : 38247 : blockgroups[nblockgroups - 1].ifirsttid = i;
8793 : 38247 : blockgroups[nblockgroups - 1].ntids = 1;
8794 : 38247 : blockgroups[nblockgroups - 1].npromisingtids = 0;
8795 : : }
8796 : : else
8797 : : {
8798 : 909850 : blockgroups[nblockgroups - 1].ntids++;
8799 : : }
8800 : :
8801 [ + + ]: 948097 : if (promising)
8802 : 125164 : blockgroups[nblockgroups - 1].npromisingtids++;
8803 : : }
8804 : :
8805 : : /*
8806 : : * We're about ready to sort block groups to determine the optimal order
8807 : : * for visiting heap blocks. But before we do, round the number of
8808 : : * promising tuples for each block group up to the next power-of-two,
8809 : : * unless it is very low (less than 4), in which case we round up to 4.
8810 : : * npromisingtids is far too noisy to trust when choosing between a pair
8811 : : * of block groups that both have very low values.
8812 : : *
8813 : : * This scheme divides heap blocks/block groups into buckets. Each bucket
8814 : : * contains blocks that have _approximately_ the same number of promising
8815 : : * TIDs as each other. The goal is to ignore relatively small differences
8816 : : * in the total number of promising entries, so that the whole process can
8817 : : * give a little weight to heapam factors (like heap block locality)
8818 : : * instead. This isn't a trade-off, really -- we have nothing to lose. It
8819 : : * would be foolish to interpret small differences in npromisingtids
8820 : : * values as anything more than noise.
8821 : : *
8822 : : * We tiebreak on nhtids when sorting block group subsets that have the
8823 : : * same npromisingtids, but this has the same issues as npromisingtids,
8824 : : * and so nhtids is subject to the same power-of-two bucketing scheme. The
8825 : : * only reason that we don't fix nhtids in the same way here too is that
8826 : : * we'll need accurate nhtids values after the sort. We handle nhtids
8827 : : * bucketization dynamically instead (in the sort comparator).
8828 : : *
8829 : : * See bottomup_nblocksfavorable() for a full explanation of when and how
8830 : : * heap locality/favorable blocks can significantly influence when and how
8831 : : * heap blocks are accessed.
8832 : : */
8833 [ + + ]: 40234 : for (int b = 0; b < nblockgroups; b++)
8834 : : {
8835 : 38247 : IndexDeleteCounts *group = blockgroups + b;
8836 : :
8837 : : /* Better off falling back on nhtids with low npromisingtids */
8838 [ + + ]: 38247 : if (group->npromisingtids <= 4)
8839 : 32456 : group->npromisingtids = 4;
8840 : : else
8841 : 5791 : group->npromisingtids =
8842 : 5791 : pg_nextpower2_32((uint32) group->npromisingtids);
8843 : : }
8844 : :
8845 : : /* Sort groups and rearrange caller's deltids array */
8846 : 1987 : qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
8847 : : bottomup_sort_and_shrink_cmp);
8848 : 1987 : reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
8849 : :
8850 : 1987 : nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
8851 : : /* Determine number of favorable blocks at the start of final deltids */
8852 : 1987 : nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
8853 : : delstate->deltids);
8854 : :
8855 [ + + ]: 13299 : for (int b = 0; b < nblockgroups; b++)
8856 : : {
8857 : 11312 : IndexDeleteCounts *group = blockgroups + b;
8858 : 11312 : TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
8859 : :
8860 : 11312 : memcpy(reordereddeltids + ncopied, firstdtid,
8861 : 11312 : sizeof(TM_IndexDelete) * group->ntids);
8862 : 11312 : ncopied += group->ntids;
8863 : : }
8864 : :
8865 : : /* Copy final grouped and sorted TIDs back into start of caller's array */
8866 : 1987 : memcpy(delstate->deltids, reordereddeltids,
8867 : : sizeof(TM_IndexDelete) * ncopied);
8868 : 1987 : delstate->ndeltids = ncopied;
8869 : :
8870 : 1987 : pfree(reordereddeltids);
8871 : 1987 : pfree(blockgroups);
8872 : :
8873 : 1987 : return nblocksfavorable;
8874 : : }
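
/*
 * Worked example with illustrative numbers: given block groups
 * A(block 10, npromisingtids 2, ntids 40), B(block 11, 5, 10) and
 * C(block 90, 3, 50), the rounding above maps npromisingtids to A=4,
 * B=8, C=4.  B sorts first on promising TIDs; A and C then tie again on
 * ntids (both round up to 64), so the ascending-ifirsttid tiebreak puts
 * A, whose TIDs appear earlier in the TID-sorted deltids array, ahead
 * of C.
 */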
8875 : :
8876 : : /*
8877 : : * Perform XLogInsert for a heap-visible operation. 'heap_buffer' contains the
8878 : : * block being marked all-visible, and 'vm_buffer' is the buffer containing the
8879 : : * corresponding visibility map block. Both should have already been modified
8880 : : * and dirtied.
8881 : : *
8882 : : * snapshotConflictHorizon comes from the largest xmin on the page being
8883 : : * marked all-visible. REDO routine uses it to generate recovery conflicts.
8884 : : *
8885 : : * If checksums or wal_log_hints are enabled, we may also generate a full-page
8886 : : * image of heap_buffer. Otherwise, we optimize away the FPI (by specifying
8887 : : * REGBUF_NO_IMAGE for the heap buffer), in which case the caller should *not*
8888 : : * update the heap page's LSN.
8889 : : */
8890 : : XLogRecPtr
1079 andres@anarazel.de 8891 : 40364 : log_heap_visible(Relation rel, Buffer heap_buffer, Buffer vm_buffer,
8892 : : TransactionId snapshotConflictHorizon, uint8 vmflags)
8893 : : {
8894 : : xl_heap_visible xlrec;
8895 : : XLogRecPtr recptr;
8896 : : uint8 flags;
8897 : :
4741 simon@2ndQuadrant.co 8898 [ - + ]: 40364 : Assert(BufferIsValid(heap_buffer));
8899 [ - + ]: 40364 : Assert(BufferIsValid(vm_buffer));
8900 : :
1214 pg@bowt.ie 8901 : 40364 : xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
3666 rhaas@postgresql.org 8902 : 40364 : xlrec.flags = vmflags;
1078 andres@anarazel.de 8903 [ + + - + : 40364 : if (RelationIsAccessibleInLogicalDecoding(rel))
+ - - + -
- - - + +
- + - - -
- - - ]
8904 : 57 : xlrec.flags |= VISIBILITYMAP_XLOG_CATALOG_REL;
4133 heikki.linnakangas@i 8905 : 40364 : XLogBeginInsert();
397 peter@eisentraut.org 8906 : 40364 : XLogRegisterData(&xlrec, SizeOfHeapVisible);
8907 : :
4133 heikki.linnakangas@i 8908 : 40364 : XLogRegisterBuffer(0, vm_buffer, 0);
8909 : :
8910 : 40364 : flags = REGBUF_STANDARD;
8911 [ + + + - ]: 40364 : if (!XLogHintBitIsNeeded())
8912 : 3215 : flags |= REGBUF_NO_IMAGE;
8913 : 40364 : XLogRegisterBuffer(1, heap_buffer, flags);
8914 : :
8915 : 40364 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_VISIBLE);
8916 : :
5381 rhaas@postgresql.org 8917 : 40364 : return recptr;
8918 : : }
8919 : :
8920 : : /*
8921 : : * Perform XLogInsert for a heap-update operation. Caller must already
8922 : : * have modified the buffer(s) and marked them dirty.
8923 : : */
8924 : : static XLogRecPtr
4799 alvherre@alvh.no-ip. 8925 : 296331 : log_heap_update(Relation reln, Buffer oldbuf,
8926 : : Buffer newbuf, HeapTuple oldtup, HeapTuple newtup,
8927 : : HeapTuple old_key_tuple,
8928 : : bool all_visible_cleared, bool new_all_visible_cleared)
8929 : : {
8930 : : xl_heap_update xlrec;
8931 : : xl_heap_header xlhdr;
8932 : : xl_heap_header xlhdr_idx;
8933 : : uint8 info;
8934 : : uint16 prefix_suffix[2];
4386 heikki.linnakangas@i 8935 : 296331 : uint16 prefixlen = 0,
8936 : 296331 : suffixlen = 0;
8937 : : XLogRecPtr recptr;
3616 kgrittn@postgresql.o 8938 : 296331 : Page page = BufferGetPage(newbuf);
4478 rhaas@postgresql.org 8939 [ + + - + : 296331 : bool need_tuple_data = RelationIsLogicallyLogged(reln);
+ - - + -
- - - + -
+ + ]
8940 : : bool init;
8941 : : int bufflags;
8942 : :
8943 : : /* Caller should not call me on a non-WAL-logged relation */
5571 8944 [ + - + + : 296331 : Assert(RelationNeedsWAL(reln));
+ - - + ]
8945 : :
4133 heikki.linnakangas@i 8946 : 296331 : XLogBeginInsert();
8947 : :
5879 tgl@sss.pgh.pa.us 8948 [ + + ]: 296331 : if (HeapTupleIsHeapOnly(newtup))
6751 8949 : 144497 : info = XLOG_HEAP_HOT_UPDATE;
8950 : : else
8951 : 151834 : info = XLOG_HEAP_UPDATE;
8952 : :
8953 : : /*
8954 : : * If the old and new tuple are on the same page, we only need to log the
8955 : : * parts of the new tuple that were changed. That saves on the amount of
8956 : : * WAL we need to write. Currently, we just count any unchanged bytes in
8957 : : * the beginning and end of the tuple. That's quick to check, and
8958 : : * perfectly covers the common case that only one field is updated.
8959 : : *
8960 : : * We could do this even if the old and new tuple are on different pages,
8961 : : * but only if we don't make a full-page image of the old page, which is
8962 : : * difficult to know in advance. Also, if the old tuple is corrupt for
8963 : : * some reason, it would allow the corruption to propagate to the new page,
8964 : : * so it seems best to avoid that. Under the general assumption that most
8965 : : * updates tend to create the new tuple version on the same page, there
8966 : : * isn't much to be gained by doing this across pages anyway.
8967 : : *
8968 : : * Skip this if we're taking a full-page image of the new page, as we
8969 : : * don't include the new tuple in the WAL record in that case. Also
8970 : : * disable if effective_wal_level='logical', as logical decoding needs to
8971 : : * be able to read the whole new tuple from the WAL record alone.
8972 : : */
4386 heikki.linnakangas@i 8973 [ + + + + ]: 296331 : if (oldbuf == newbuf && !need_tuple_data &&
8974 [ + + ]: 145120 : !XLogCheckBufferNeedsBackup(newbuf))
8975 : : {
8976 : 144542 : char *oldp = (char *) oldtup->t_data + oldtup->t_data->t_hoff;
8977 : 144542 : char *newp = (char *) newtup->t_data + newtup->t_data->t_hoff;
8978 : 144542 : int oldlen = oldtup->t_len - oldtup->t_data->t_hoff;
8979 : 144542 : int newlen = newtup->t_len - newtup->t_data->t_hoff;
8980 : :
8981 : : /* Check for common prefix between old and new tuple */
8982 [ + + ]: 11691630 : for (prefixlen = 0; prefixlen < Min(oldlen, newlen); prefixlen++)
8983 : : {
8984 [ + + ]: 11667943 : if (newp[prefixlen] != oldp[prefixlen])
8985 : 120855 : break;
8986 : : }
8987 : :
8988 : : /*
8989 : : * Storing the length of the prefix takes 2 bytes, so we need to save
8990 : : * at least 3 bytes or there's no point.
8991 : : */
8992 [ + + ]: 144542 : if (prefixlen < 3)
8993 : 22111 : prefixlen = 0;
8994 : :
8995 : : /* Same for suffix */
8996 [ + + ]: 4882374 : for (suffixlen = 0; suffixlen < Min(oldlen, newlen) - prefixlen; suffixlen++)
8997 : : {
8998 [ + + ]: 4858428 : if (newp[newlen - suffixlen - 1] != oldp[oldlen - suffixlen - 1])
8999 : 120596 : break;
9000 : : }
9001 [ + + ]: 144542 : if (suffixlen < 3)
9002 : 34515 : suffixlen = 0;
9003 : : }
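/*
 * Standalone illustration (not part of heapam.c): the same prefix/suffix
 * delta computation applied to two plain byte buffers, showing how much of
 * an updated tuple's data area can be elided from the WAL record.  All
 * names below are local to this sketch.
 */
#include <stdio.h>
#include <string.h>

static void
delta_lengths(const char *oldp, int oldlen, const char *newp, int newlen,
			  int *prefixlen, int *suffixlen)
{
	int			minlen = (oldlen < newlen) ? oldlen : newlen;
	int			p,
				s;

	/* count matching leading bytes */
	for (p = 0; p < minlen; p++)
		if (oldp[p] != newp[p])
			break;
	/* a 2-byte length field must save at least 3 bytes to pay off */
	if (p < 3)
		p = 0;

	/* count matching trailing bytes, without overlapping the prefix */
	for (s = 0; s < minlen - p; s++)
		if (oldp[oldlen - s - 1] != newp[newlen - s - 1])
			break;
	if (s < 3)
		s = 0;

	*prefixlen = p;
	*suffixlen = s;
}

int
main(void)
{
	const char *oldt = "id=42|name=alice|balance=100|flags=0";
	const char *newt = "id=42|name=alice|balance=175|flags=0";
	int			prefixlen,
				suffixlen;

	delta_lengths(oldt, (int) strlen(oldt), newt, (int) strlen(newt),
				  &prefixlen, &suffixlen);
	/* only newlen - prefixlen - suffixlen bytes go into the WAL record */
	printf("prefix=%d suffix=%d logged=%d of %d bytes\n",
		   prefixlen, suffixlen,
		   (int) strlen(newt) - prefixlen - suffixlen, (int) strlen(newt));
	return 0;
}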
9004 : :
9005 : : /* Prepare main WAL data chain */
4478 rhaas@postgresql.org 9006 : 296331 : xlrec.flags = 0;
9007 [ + + ]: 296331 : if (all_visible_cleared)
3964 andres@anarazel.de 9008 : 1650 : xlrec.flags |= XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED;
4478 rhaas@postgresql.org 9009 [ + + ]: 296331 : if (new_all_visible_cleared)
3964 andres@anarazel.de 9010 : 964 : xlrec.flags |= XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED;
4386 heikki.linnakangas@i 9011 [ + + ]: 296331 : if (prefixlen > 0)
3964 andres@anarazel.de 9012 : 122431 : xlrec.flags |= XLH_UPDATE_PREFIX_FROM_OLD;
4386 heikki.linnakangas@i 9013 [ + + ]: 296331 : if (suffixlen > 0)
3964 andres@anarazel.de 9014 : 110027 : xlrec.flags |= XLH_UPDATE_SUFFIX_FROM_OLD;
4133 heikki.linnakangas@i 9015 [ + + ]: 296331 : if (need_tuple_data)
9016 : : {
3964 andres@anarazel.de 9017 : 47024 : xlrec.flags |= XLH_UPDATE_CONTAINS_NEW_TUPLE;
4133 heikki.linnakangas@i 9018 [ + + ]: 47024 : if (old_key_tuple)
9019 : : {
9020 [ + + ]: 146 : if (reln->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3964 andres@anarazel.de 9021 : 65 : xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_TUPLE;
9022 : : else
9023 : 81 : xlrec.flags |= XLH_UPDATE_CONTAINS_OLD_KEY;
9024 : : }
9025 : : }
9026 : :
9027 : : /* If the new tuple is the first and only tuple on the page... */
4386 heikki.linnakangas@i 9028 [ + + + + ]: 299769 : if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
9029 : 3438 : PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
9030 : : {
9031 : 3166 : info |= XLOG_HEAP_INIT_PAGE;
4133 9032 : 3166 : init = true;
9033 : : }
9034 : : else
9035 : 293165 : init = false;
9036 : :
9037 : : /* Prepare WAL data for the old page */
9038 : 296331 : xlrec.old_offnum = ItemPointerGetOffsetNumber(&oldtup->t_self);
9039 : 296331 : xlrec.old_xmax = HeapTupleHeaderGetRawXmax(oldtup->t_data);
9040 : 592662 : xlrec.old_infobits_set = compute_infobits(oldtup->t_data->t_infomask,
9041 : 296331 : oldtup->t_data->t_infomask2);
9042 : :
9043 : : /* Prepare WAL data for the new page */
9044 : 296331 : xlrec.new_offnum = ItemPointerGetOffsetNumber(&newtup->t_self);
9045 : 296331 : xlrec.new_xmax = HeapTupleHeaderGetRawXmax(newtup->t_data);
9046 : :
9047 : 296331 : bufflags = REGBUF_STANDARD;
9048 [ + + ]: 296331 : if (init)
9049 : 3166 : bufflags |= REGBUF_WILL_INIT;
9050 [ + + ]: 296331 : if (need_tuple_data)
9051 : 47024 : bufflags |= REGBUF_KEEP_DATA;
9052 : :
9053 : 296331 : XLogRegisterBuffer(0, newbuf, bufflags);
9054 [ + + ]: 296331 : if (oldbuf != newbuf)
9055 : 139267 : XLogRegisterBuffer(1, oldbuf, REGBUF_STANDARD);
9056 : :
397 peter@eisentraut.org 9057 : 296331 : XLogRegisterData(&xlrec, SizeOfHeapUpdate);
9058 : :
9059 : : /*
9060 : : * Prepare WAL data for the new tuple.
9061 : : */
4386 heikki.linnakangas@i 9062 [ + + + + ]: 296331 : if (prefixlen > 0 || suffixlen > 0)
9063 : : {
9064 [ + + + + ]: 144076 : if (prefixlen > 0 && suffixlen > 0)
9065 : : {
9066 : 88382 : prefix_suffix[0] = prefixlen;
9067 : 88382 : prefix_suffix[1] = suffixlen;
397 peter@eisentraut.org 9068 : 88382 : XLogRegisterBufData(0, &prefix_suffix, sizeof(uint16) * 2);
9069 : : }
4386 heikki.linnakangas@i 9070 [ + + ]: 55694 : else if (prefixlen > 0)
9071 : : {
397 peter@eisentraut.org 9072 : 34049 : XLogRegisterBufData(0, &prefixlen, sizeof(uint16));
9073 : : }
9074 : : else
9075 : : {
9076 : 21645 : XLogRegisterBufData(0, &suffixlen, sizeof(uint16));
9077 : : }
9078 : : }
9079 : :
4133 heikki.linnakangas@i 9080 : 296331 : xlhdr.t_infomask2 = newtup->t_data->t_infomask2;
9081 : 296331 : xlhdr.t_infomask = newtup->t_data->t_infomask;
9082 : 296331 : xlhdr.t_hoff = newtup->t_data->t_hoff;
4040 tgl@sss.pgh.pa.us 9083 [ - + ]: 296331 : Assert(SizeofHeapTupleHeader + prefixlen + suffixlen <= newtup->t_len);
9084 : :
9085 : : /*
9086 : : * PG73FORMAT: write bitmap [+ padding] [+ oid] + data
9087 : : *
9088 : : * The 'data' doesn't include the common prefix or suffix.
9089 : : */
397 peter@eisentraut.org 9090 : 296331 : XLogRegisterBufData(0, &xlhdr, SizeOfHeapHeader);
4386 heikki.linnakangas@i 9091 [ + + ]: 296331 : if (prefixlen == 0)
9092 : : {
4133 9093 : 173900 : XLogRegisterBufData(0,
397 peter@eisentraut.org 9094 : 173900 : (char *) newtup->t_data + SizeofHeapTupleHeader,
3189 tgl@sss.pgh.pa.us 9095 : 173900 : newtup->t_len - SizeofHeapTupleHeader - suffixlen);
9096 : : }
9097 : : else
9098 : : {
9099 : : /*
9100 : : * Have to write the null bitmap and data after the common prefix as
9101 : : * two separate rdata entries.
9102 : : */
9103 : : /* bitmap [+ padding] [+ oid] */
4040 9104 [ + - ]: 122431 : if (newtup->t_data->t_hoff - SizeofHeapTupleHeader > 0)
9105 : : {
4133 heikki.linnakangas@i 9106 : 122431 : XLogRegisterBufData(0,
397 peter@eisentraut.org 9107 : 122431 : (char *) newtup->t_data + SizeofHeapTupleHeader,
3189 tgl@sss.pgh.pa.us 9108 : 122431 : newtup->t_data->t_hoff - SizeofHeapTupleHeader);
9109 : : }
9110 : :
9111 : : /* data after common prefix */
4133 heikki.linnakangas@i 9112 : 122431 : XLogRegisterBufData(0,
397 peter@eisentraut.org 9113 : 122431 : (char *) newtup->t_data + newtup->t_data->t_hoff + prefixlen,
3189 tgl@sss.pgh.pa.us 9114 : 122431 : newtup->t_len - newtup->t_data->t_hoff - prefixlen - suffixlen);
9115 : : }
9116 : :
9117 : : /* We need to log a tuple identity */
4133 heikki.linnakangas@i 9118 [ + + + + ]: 296331 : if (need_tuple_data && old_key_tuple)
9119 : : {
9120 : : /* not strictly needed, but it makes the record easier to decode */
9121 : 146 : xlhdr_idx.t_infomask2 = old_key_tuple->t_data->t_infomask2;
9122 : 146 : xlhdr_idx.t_infomask = old_key_tuple->t_data->t_infomask;
9123 : 146 : xlhdr_idx.t_hoff = old_key_tuple->t_data->t_hoff;
9124 : :
397 peter@eisentraut.org 9125 : 146 : XLogRegisterData(&xlhdr_idx, SizeOfHeapHeader);
9126 : :
9127 : : /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
4040 tgl@sss.pgh.pa.us 9128 : 146 : XLogRegisterData((char *) old_key_tuple->t_data + SizeofHeapTupleHeader,
9129 : 146 : old_key_tuple->t_len - SizeofHeapTupleHeader);
9130 : : }
9131 : :
9132 : : /* filtering by origin on a row level is much more efficient */
3370 andres@anarazel.de 9133 : 296331 : XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
9134 : :
4133 heikki.linnakangas@i 9135 : 296331 : recptr = XLogInsert(RM_HEAP_ID, info);
9136 : :
7368 neilc@samurai.com 9137 : 296331 : return recptr;
9138 : : }
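/*
 * Standalone illustration (not part of heapam.c): conceptually, redo can
 * reassemble the new tuple's data area from the old tuple plus the bytes
 * that were actually logged, using the prefix/suffix lengths stored in the
 * record.  The values below match the sketch earlier in this function's
 * commentary; all names are local to this example.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *olddata = "id=42|name=alice|balance=100|flags=0";
	const char *logged = "75";	/* the only data bytes in the WAL record */
	int			prefixlen = 26,
				suffixlen = 8;	/* taken from the record's buffer data */
	int			loggedlen = (int) strlen(logged);
	int			oldlen = (int) strlen(olddata);
	char		newdata[64];

	memcpy(newdata, olddata, prefixlen);	/* shared prefix from old tuple */
	memcpy(newdata + prefixlen, logged, loggedlen); /* changed middle */
	memcpy(newdata + prefixlen + loggedlen, /* shared suffix from old tuple */
		   olddata + oldlen - suffixlen, suffixlen);
	newdata[prefixlen + loggedlen + suffixlen] = '\0';

	printf("%s\n", newdata);	/* id=42|name=alice|balance=175|flags=0 */
	return 0;
}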
9139 : :
9140 : : /*
9141 : : * Perform XLogInsert of an XLOG_HEAP2_NEW_CID record
9142 : : *
9143 : : * This is only used when effective_wal_level is logical, and only for
9144 : : * catalog tuples.
9145 : : */
9146 : : static XLogRecPtr
4478 rhaas@postgresql.org 9147 : 24969 : log_heap_new_cid(Relation relation, HeapTuple tup)
9148 : : {
9149 : : xl_heap_new_cid xlrec;
9150 : :
9151 : : XLogRecPtr recptr;
9152 : 24969 : HeapTupleHeader hdr = tup->t_data;
9153 : :
9154 [ - + ]: 24969 : Assert(ItemPointerIsValid(&tup->t_self));
9155 [ - + ]: 24969 : Assert(tup->t_tableOid != InvalidOid);
9156 : :
9157 : 24969 : xlrec.top_xid = GetTopTransactionId();
1348 9158 : 24969 : xlrec.target_locator = relation->rd_locator;
4133 heikki.linnakangas@i 9159 : 24969 : xlrec.target_tid = tup->t_self;
9160 : :
9161 : : /*
9162 : : * If the tuple was inserted & deleted in the same TX, we definitely have
9163 : : * a combo CID, so set both cmin and cmax.
9164 : : */
4478 rhaas@postgresql.org 9165 [ + + ]: 24969 : if (hdr->t_infomask & HEAP_COMBOCID)
9166 : : {
9167 [ - + ]: 2027 : Assert(!(hdr->t_infomask & HEAP_XMAX_INVALID));
4466 9168 [ - + ]: 2027 : Assert(!HeapTupleHeaderXminInvalid(hdr));
4478 9169 : 2027 : xlrec.cmin = HeapTupleHeaderGetCmin(hdr);
9170 : 2027 : xlrec.cmax = HeapTupleHeaderGetCmax(hdr);
9171 : 2027 : xlrec.combocid = HeapTupleHeaderGetRawCommandId(hdr);
9172 : : }
9173 : : /* No combo CID, so only cmin or cmax can be set by this TX */
9174 : : else
9175 : : {
9176 : : /*
9177 : : * Tuple inserted.
9178 : : *
9179 : : * We need to check for LOCK ONLY because multixacts might be
9180 : : * transferred to the new tuple in case of FOR KEY SHARE updates, in
9181 : : * which case there will be an xmax even though the tuple was just
9182 : : * inserted.
9183 : : */
9184 [ + + + + ]: 29875 : if (hdr->t_infomask & HEAP_XMAX_INVALID ||
9185 : 6933 : HEAP_XMAX_IS_LOCKED_ONLY(hdr->t_infomask))
9186 : : {
9187 : 16010 : xlrec.cmin = HeapTupleHeaderGetRawCommandId(hdr);
9188 : 16010 : xlrec.cmax = InvalidCommandId;
9189 : : }
9190 : : /* Tuple from a different tx updated or deleted. */
9191 : : else
9192 : : {
9193 : 6932 : xlrec.cmin = InvalidCommandId;
9194 : 6932 : xlrec.cmax = HeapTupleHeaderGetRawCommandId(hdr);
9195 : : }
9196 : 22942 : xlrec.combocid = InvalidCommandId;
9197 : : }
9198 : :
9199 : : /*
9200 : : * Note that we don't need to register the buffer here, because this
9201 : : * operation does not modify the page. The insert/update/delete that
9202 : : * called us certainly did, but that's WAL-logged separately.
9203 : : */
4133 heikki.linnakangas@i 9204 : 24969 : XLogBeginInsert();
397 peter@eisentraut.org 9205 : 24969 : XLogRegisterData(&xlrec, SizeOfHeapNewCid);
9206 : :
9207 : : /* will be looked at irrespective of origin */
9208 : :
4133 heikki.linnakangas@i 9209 : 24969 : recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_NEW_CID);
9210 : :
4478 rhaas@postgresql.org 9211 : 24969 : return recptr;
9212 : : }
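/*
 * Standalone illustration (not part of PostgreSQL): a toy combo-CID lookup.
 * A heap tuple header has room for only one CommandId, so when the same
 * transaction both inserts and deletes a tuple, the stored raw command id
 * is an index into a backend-local array of (cmin, cmax) pairs -- the pair
 * that the NEW_CID record above preserves for logical decoding.
 */
#include <stdio.h>

typedef unsigned int CommandId;

typedef struct
{
	CommandId	cmin;			/* inserting command */
	CommandId	cmax;			/* deleting command */
} ComboPair;

int
main(void)
{
	/* toy per-transaction mapping; real backends build this on demand */
	ComboPair	combos[] = {{5, 9}, {5, 12}};
	CommandId	raw = 1;		/* raw command id found in the tuple header */

	printf("combo %u -> cmin=%u cmax=%u\n",
		   raw, combos[raw].cmin, combos[raw].cmax);
	return 0;
}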
9213 : :
9214 : : /*
9215 : : * Build a heap tuple representing the configured REPLICA IDENTITY to represent
9216 : : * the old tuple in an UPDATE or DELETE.
9217 : : *
9218 : : * Returns NULL if there's no need to log an identity or if there's no suitable
9219 : : * key defined.
9220 : : *
9221 : : * Pass key_required true if any replica identity columns changed value, or if
9222 : : * any of them contain external data. Deletes must always pass true.
9223 : : *
9224 : : * *copy is set to true if the returned tuple is a modified copy rather than
9225 : : * the same tuple that was passed in.
9226 : : */
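/*
 * Outcome summary (sketch of the cases handled below): a relation that is
 * not logically logged, or one with REPLICA IDENTITY NOTHING, yields NULL;
 * REPLICA IDENTITY FULL yields the whole old tuple (a flattened copy if it
 * has external values); otherwise the result is NULL unless key_required,
 * in which case it is a copy containing only the identity columns -- or
 * NULL again if no identity index is defined.
 */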
9227 : : static HeapTuple
1490 akapila@postgresql.o 9228 : 1802391 : ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
9229 : : bool *copy)
9230 : : {
4478 rhaas@postgresql.org 9231 : 1802391 : TupleDesc desc = RelationGetDescr(relation);
9232 : 1802391 : char replident = relation->rd_rel->relreplident;
9233 : : Bitmapset *idattrs;
9234 : : HeapTuple key_tuple;
9235 : : bool nulls[MaxHeapAttributeNumber];
9236 : : Datum values[MaxHeapAttributeNumber];
9237 : :
9238 : 1802391 : *copy = false;
9239 : :
9240 [ + + - + : 1802391 : if (!RelationIsLogicallyLogged(relation))
+ + - + -
- - - + -
+ + ]
9241 : 1702099 : return NULL;
9242 : :
9243 [ + + ]: 100292 : if (replident == REPLICA_IDENTITY_NOTHING)
9244 : 228 : return NULL;
9245 : :
9246 [ + + ]: 100064 : if (replident == REPLICA_IDENTITY_FULL)
9247 : : {
9248 : : /*
9249 : : * When logging the entire old tuple, it very well could contain
9250 : : * toasted columns. If so, force them to be inlined.
9251 : : */
9252 [ + + ]: 197 : if (HeapTupleHasExternal(tp))
9253 : : {
9254 : 4 : *copy = true;
2386 tgl@sss.pgh.pa.us 9255 : 4 : tp = toast_flatten_tuple(tp, desc);
9256 : : }
4478 rhaas@postgresql.org 9257 : 197 : return tp;
9258 : : }
9259 : :
9260 : : /* if the key isn't required and we're only logging the key, we're done */
1490 akapila@postgresql.o 9261 [ + + ]: 99867 : if (!key_required)
4478 rhaas@postgresql.org 9262 : 46878 : return NULL;
9263 : :
9264 : : /* find out the replica identity columns */
2386 tgl@sss.pgh.pa.us 9265 : 52989 : idattrs = RelationGetIndexAttrBitmap(relation,
9266 : : INDEX_ATTR_BITMAP_IDENTITY_KEY);
9267 : :
9268 : : /*
9269 : : * If there's no defined replica identity columns, treat as !key_required.
9270 : : * (This case should not be reachable from heap_update, since that should
9271 : : * calculate key_required accurately. But heap_delete just passes
9272 : : * constant true for key_required, so we can hit this case in deletes.)
9273 : : */
9274 [ + + ]: 52989 : if (bms_is_empty(idattrs))
9275 : 6021 : return NULL;
9276 : :
9277 : : /*
9278 : : * Construct a new tuple containing only the replica identity columns,
9279 : : * with nulls elsewhere. While we're at it, assert that the replica
9280 : : * identity columns aren't null.
9281 : : */
9282 : 46968 : heap_deform_tuple(tp, desc, values, nulls);
9283 : :
9284 [ + + ]: 150898 : for (int i = 0; i < desc->natts; i++)
9285 : : {
9286 [ + + ]: 103930 : if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
9287 : : idattrs))
9288 [ - + ]: 46980 : Assert(!nulls[i]);
9289 : : else
9290 : 56950 : nulls[i] = true;
9291 : : }
9292 : :
4478 rhaas@postgresql.org 9293 : 46968 : key_tuple = heap_form_tuple(desc, values, nulls);
9294 : 46968 : *copy = true;
9295 : :
2386 tgl@sss.pgh.pa.us 9296 : 46968 : bms_free(idattrs);
9297 : :
9298 : : /*
9299 : : * If the tuple, which at this point contains only indexed columns, still
9300 : : * has toasted columns, force them to be inlined. This is somewhat
9301 : : * unlikely since there are limits on the size of indexed columns, so we
9302 : : * don't duplicate toast_flatten_tuple()'s functionality in the above loop
9303 : : * over the indexed columns, even if it would be more efficient.
9304 : : */
4478 rhaas@postgresql.org 9305 [ + + ]: 46968 : if (HeapTupleHasExternal(key_tuple))
9306 : : {
4331 bruce@momjian.us 9307 : 4 : HeapTuple oldtup = key_tuple;
9308 : :
2386 tgl@sss.pgh.pa.us 9309 : 4 : key_tuple = toast_flatten_tuple(oldtup, desc);
4478 rhaas@postgresql.org 9310 : 4 : heap_freetuple(oldtup);
9311 : : }
9312 : :
9313 : 46968 : return key_tuple;
9314 : : }
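/*
 * Standalone illustration (not part of PostgreSQL): why the loop above
 * probes the bitmapset with "i + 1 - FirstLowInvalidHeapAttributeNumber".
 * Bitmapset members must be non-negative, but system columns have negative
 * attribute numbers, so attribute number N is stored shifted.  The constant
 * below is a stand-in for the real value defined in access/sysattr.h; a
 * plain unsigned word stands in for the bitmapset.
 */
#include <stdio.h>

#define FIRST_LOW_INVALID_ATTNUM (-7)	/* stand-in for sysattr.h */

int
main(void)
{
	unsigned int set = 0;
	int			members[] = {-6, 1, 3}; /* one system, two user columns */

	/* store: shift so every member is non-negative */
	for (int i = 0; i < 3; i++)
		set |= 1u << (members[i] - FIRST_LOW_INVALID_ATTNUM);

	/* probe user columns 1..4, mirroring the loop over desc->natts */
	for (int attno = 1; attno <= 4; attno++)
		printf("attnum %d: %s\n", attno,
			   (set & (1u << (attno - FIRST_LOW_INVALID_ATTNUM))) ?
			   "identity column kept" : "nulled out");
	return 0;
}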
9315 : :
9316 : : /*
9317 : : * HeapCheckForSerializableConflictOut
9318 : : * We are reading a tuple. If it's not visible, there may be a
9319 : : * rw-conflict out with the inserter. Otherwise, if it is visible to us
9320 : : * but has been deleted, there may be a rw-conflict out with the deleter.
9321 : : *
9322 : : * We will determine the top level xid of the writing transaction with which
9323 : : * we may be in conflict, and ask CheckForSerializableConflictOut() to check
9324 : : * for overlap with our own transaction.
9325 : : *
9326 : : * This function should be called just about anywhere in heapam.c where a
9327 : : * tuple has been read. The caller must hold at least a shared lock on the
9328 : : * buffer, because this function might set hint bits on the tuple. There is
9329 : : * currently no known reason to call this function from an index AM.
9330 : : */
9331 : : void
2238 tmunro@postgresql.or 9332 : 32060934 : HeapCheckForSerializableConflictOut(bool visible, Relation relation,
9333 : : HeapTuple tuple, Buffer buffer,
9334 : : Snapshot snapshot)
9335 : : {
9336 : : TransactionId xid;
9337 : : HTSV_Result htsvResult;
9338 : :
9339 [ + + ]: 32060934 : if (!CheckForSerializableConflictOutNeeded(relation, snapshot))
9340 : 32035586 : return;
9341 : :
9342 : : /*
9343 : : * Check to see whether the tuple has been written to by a concurrent
9344 : : * transaction, either to create it not visible to us, or to delete it
9345 : : * while it is visible to us. The "visible" bool indicates whether the
9346 : : * tuple is visible to us, while HeapTupleSatisfiesVacuum checks what else
9347 : : * is going on with it.
9348 : : *
9349 : : * In the event of a concurrently inserted tuple that also happens to have
9350 : : * been concurrently updated (by a separate transaction), the xmin of the
9351 : : * tuple will be used -- not the updater's xid.
9352 : : */
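/*
 * Decision summary (sketch of the switch below; the bailout for an xid
 * older than TransactionXmin in the RECENTLY_DEAD/DELETE_IN_PROGRESS
 * case is omitted here):
 *
 *   HTSV result                        visible   conflicting xid
 *   LIVE                               yes       none (return)
 *   LIVE                               no        xmin
 *   RECENTLY_DEAD/DELETE_IN_PROGRESS   yes       updater's xid
 *   RECENTLY_DEAD/DELETE_IN_PROGRESS   no        xmin
 *   INSERT_IN_PROGRESS                 either    xmin
 *   DEAD                               no        none (return)
 */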
9353 : 25348 : htsvResult = HeapTupleSatisfiesVacuum(tuple, TransactionXmin, buffer);
9354 [ + + + + : 25348 : switch (htsvResult)
- ]
9355 : : {
9356 : 24554 : case HEAPTUPLE_LIVE:
9357 [ + + ]: 24554 : if (visible)
9358 : 24541 : return;
9359 : 13 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9360 : 13 : break;
9361 : 360 : case HEAPTUPLE_RECENTLY_DEAD:
9362 : : case HEAPTUPLE_DELETE_IN_PROGRESS:
2103 pg@bowt.ie 9363 [ + + ]: 360 : if (visible)
9364 : 285 : xid = HeapTupleHeaderGetUpdateXid(tuple->t_data);
9365 : : else
9366 : 75 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9367 : :
9368 [ + + ]: 360 : if (TransactionIdPrecedes(xid, TransactionXmin))
9369 : : {
9370 : : /* This is like the HEAPTUPLE_DEAD case */
9371 [ - + ]: 67 : Assert(!visible);
9372 : 67 : return;
9373 : : }
2238 tmunro@postgresql.or 9374 : 293 : break;
9375 : 324 : case HEAPTUPLE_INSERT_IN_PROGRESS:
9376 : 324 : xid = HeapTupleHeaderGetXmin(tuple->t_data);
9377 : 324 : break;
9378 : 110 : case HEAPTUPLE_DEAD:
2103 pg@bowt.ie 9379 [ - + ]: 110 : Assert(!visible);
2238 tmunro@postgresql.or 9380 : 110 : return;
2238 tmunro@postgresql.or 9381 :UBC 0 : default:
9382 : :
9383 : : /*
9384 : : * The only way to get to this default clause is if a new value is
9385 : : * added to the enum type without adding it to this switch
9386 : : * statement. That's a bug, so elog.
9387 : : */
9388 [ # # ]: 0 : elog(ERROR, "unrecognized return value from HeapTupleSatisfiesVacuum: %u", htsvResult);
9389 : :
9390 : : /*
9391 : : * In spite of having all enum values covered and calling elog on
9392 : : * this default, some compilers think this is a code path which
9393 : : * allows xid to be used below without initialization. Silence
9394 : : * that warning.
9395 : : */
9396 : : xid = InvalidTransactionId;
9397 : : }
9398 : :
2238 tmunro@postgresql.or 9399 [ - + ]:CBC 630 : Assert(TransactionIdIsValid(xid));
9400 [ - + ]: 630 : Assert(TransactionIdFollowsOrEquals(xid, TransactionXmin));
9401 : :
9402 : : /*
9403 : : * Find top level xid. Bail out if xid is too early to be a conflict, or
9404 : : * if it's our own xid.
9405 : : */
9406 [ + + ]: 630 : if (TransactionIdEquals(xid, GetTopTransactionIdIfAny()))
9407 : 60 : return;
9408 : 570 : xid = SubTransGetTopmostTransaction(xid);
9409 [ - + ]: 570 : if (TransactionIdPrecedes(xid, TransactionXmin))
2238 tmunro@postgresql.or 9410 :UBC 0 : return;
9411 : :
2238 tmunro@postgresql.or 9412 :CBC 570 : CheckForSerializableConflictOut(relation, xid, snapshot);
9413 : : }