Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * rewriteheap.c
4 : : * Support functions to rewrite tables.
5 : : *
6 : : * These functions provide a facility to completely rewrite a heap, while
7 : : * preserving visibility information and update chains.
8 : : *
9 : : * INTERFACE
10 : : *
11 : : * The caller is responsible for creating the new heap, all catalog
12 : : * changes, supplying the tuples to be written to the new heap, and
13 : : * rebuilding indexes. The caller must hold AccessExclusiveLock on the
14 : : * target table, because we assume no one else is writing into it.
15 : : *
16 : : * To use the facility:
17 : : *
18 : : * begin_heap_rewrite
19 : : * while (fetch next tuple)
20 : : * {
21 : : * if (tuple is dead)
22 : : * rewrite_heap_dead_tuple
23 : : * else
24 : : * {
25 : : * // do any transformations here if required
26 : : * rewrite_heap_tuple
27 : : * }
28 : : * }
29 : : * end_heap_rewrite
30 : : *
31 : : * The contents of the new relation shouldn't be relied on until after
32 : : * end_heap_rewrite is called.
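 : : *
 : : * For illustration only (a rough sketch, not the exact CLUSTER code; the
 : : * variables scan, buf, OldestXmin, FreezeXid and MultiXactCutoff are
 : : * assumed to be set up by the caller), a driver loop might look roughly
 : : * like this:
 : : *
 : : *   rws = begin_heap_rewrite(old_heap, new_heap, OldestXmin,
 : : *                            FreezeXid, MultiXactCutoff);
 : : *   while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 : : *   {
 : : *       if (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf) == HEAPTUPLE_DEAD)
 : : *           rewrite_heap_dead_tuple(rws, tuple);
 : : *       else
 : : *       {
 : : *           // make a private, writable copy; rewrite_heap_tuple scribbles on it
 : : *           copy = heap_copytuple(tuple);
 : : *           rewrite_heap_tuple(rws, tuple, copy);
 : : *       }
 : : *   }
 : : *   end_heap_rewrite(rws);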
33 : : *
34 : : *
35 : : * IMPLEMENTATION
36 : : *
37 : : * This would be a fairly trivial affair, except that we need to maintain
38 : : * the ctid chains that link versions of an updated tuple together.
39 : : * Since the newly stored tuples will have tids different from the original
40 : : * ones, if we just copied t_ctid fields to the new table the links would
41 : : * be wrong. When we are required to copy a (presumably recently-dead or
42 : : * delete-in-progress) tuple whose ctid doesn't point to itself, we have
43 : : * to substitute the correct ctid instead.
44 : : *
45 : : * For each ctid reference from A -> B, we might encounter either A first
46 : : * or B first. (Note that a tuple in the middle of a chain is both A and B
47 : : * of different pairs.)
48 : : *
49 : : * If we encounter A first, we'll store the tuple in the unresolved_tups
50 : : * hash table. When we later encounter B, we remove A from the hash table,
51 : : * fix the ctid to point to the new location of B, and insert both A and B
52 : : * to the new heap.
53 : : *
54 : : * If we encounter B first, we can insert B to the new heap right away.
55 : : * We then add an entry to the old_new_tid_map hash table showing B's
56 : : * original tid (in the old heap) and new tid (in the new heap).
57 : : * When we later encounter A, we get the new location of B from the table,
58 : : * and can write A immediately with the correct ctid.
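 : : *
 : : * A hypothetical example with made-up TIDs: version A at (0,1) was updated
 : : * to version B at (0,7), so A's t_ctid is (0,7) and A's xmax equals B's
 : : * xmin. If the scan reaches A first, A is stashed in unresolved_tups under
 : : * the key (A's xmax, (0,7)); once B has been written to, say, (5,2) in the
 : : * new heap, A is written with its t_ctid set to (5,2). If instead B is
 : : * reached first and written to (5,2), old_new_tid_map remembers
 : : * (B's xmin, (0,7)) -> (5,2), and A gets the correct ctid as soon as it is
 : : * seen.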
59 : : *
60 : : * Entries in the hash tables can be removed as soon as the later tuple
61 : : * is encountered. That helps to keep the memory usage down. At the end,
62 : : * both tables are usually empty; we should have encountered both A and B
63 : : * of each pair. However, it's possible for A to be RECENTLY_DEAD and B
64 : : * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
65 : : * for deadness using OldestXmin is not exact. In such a case we might
66 : : * encounter B first, and skip it, and find A later. Then A would be added
67 : : * to unresolved_tups, and stay there until end of the rewrite. Since
68 : : * this case is very unusual, we don't worry about the memory usage.
69 : : *
70 : : * Using in-memory hash tables means that we use some memory for each live
71 : : * update chain in the table, from the time we find one end of the
72 : : * reference until we find the other end. That shouldn't be a problem in
73 : : * practice, but if you do something like an UPDATE without a where-clause
74 : : * on a large table, and then run CLUSTER in the same transaction, you
75 : : * could run out of memory. It doesn't seem worthwhile to add support for
76 : : * spill-to-disk, as there shouldn't be that many RECENTLY_DEAD tuples in a
77 : : * table under normal circumstances. Furthermore, in the typical scenario
78 : : * of CLUSTERing on an unchanging key column, we'll see all the versions
79 : : * of a given tuple together anyway, and so the peak memory usage is only
80 : : * proportional to the number of RECENTLY_DEAD versions of a single row, not
 81 : : * to the total in the whole table. Note that if we do fail halfway through a CLUSTER,
82 : : * the old table is still valid, so failure is not catastrophic.
83 : : *
84 : : * We can't use the normal heap_insert function to insert into the new
85 : : * heap, because heap_insert overwrites the visibility information.
86 : : * We use a special-purpose raw_heap_insert function instead, which
87 : : * is optimized for bulk inserting a lot of tuples, knowing that we have
88 : : * exclusive access to the heap. raw_heap_insert builds new pages in
89 : : * local storage. When a page is full, or at the end of the process,
90 : : * we insert it to WAL as a single record and then write it to disk with
91 : : * the bulk smgr writer. Note, however, that any data sent to the new
92 : : * heap's TOAST table will go through the normal bufmgr.
93 : : *
94 : : *
95 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
96 : : * Portions Copyright (c) 1994-5, Regents of the University of California
97 : : *
98 : : * IDENTIFICATION
99 : : * src/backend/access/heap/rewriteheap.c
100 : : *
101 : : *-------------------------------------------------------------------------
102 : : */
103 : : #include "postgres.h"
104 : :
105 : : #include <unistd.h>
106 : :
107 : : #include "access/heapam.h"
108 : : #include "access/heapam_xlog.h"
109 : : #include "access/heaptoast.h"
110 : : #include "access/rewriteheap.h"
111 : : #include "access/transam.h"
112 : : #include "access/xact.h"
113 : : #include "access/xloginsert.h"
114 : : #include "common/file_utils.h"
115 : : #include "lib/ilist.h"
116 : : #include "miscadmin.h"
117 : : #include "pgstat.h"
118 : : #include "replication/slot.h"
119 : : #include "storage/bufmgr.h"
120 : : #include "storage/bulk_write.h"
121 : : #include "storage/fd.h"
122 : : #include "storage/procarray.h"
123 : : #include "utils/memutils.h"
124 : : #include "utils/rel.h"
125 : :
126 : : /*
127 : : * State associated with a rewrite operation. This is opaque to the user
128 : : * of the rewrite facility.
129 : : */
130 : : typedef struct RewriteStateData
131 : : {
132 : : Relation rs_old_rel; /* source heap */
133 : : Relation rs_new_rel; /* destination heap */
134 : : BulkWriteState *rs_bulkstate; /* writer for the destination */
135 : : BulkWriteBuffer rs_buffer; /* page currently being built */
136 : : BlockNumber rs_blockno; /* block where page will go */
137 : : bool rs_logical_rewrite; /* do we need to do logical rewriting */
138 : : TransactionId rs_oldest_xmin; /* oldest xmin used by caller to determine
139 : : * tuple visibility */
140 : : TransactionId rs_freeze_xid; /* Xid that will be used as freeze cutoff
141 : : * point */
142 : : TransactionId rs_logical_xmin; /* Xid that will be used as cutoff point
143 : : * for logical rewrites */
144 : : MultiXactId rs_cutoff_multi; /* MultiXactId that will be used as cutoff
145 : : * point for multixacts */
146 : : MemoryContext rs_cxt; /* for hash tables and entries and tuples in
147 : : * them */
148 : : XLogRecPtr rs_begin_lsn; /* XLogInsertLsn when starting the rewrite */
149 : : HTAB *rs_unresolved_tups; /* unmatched A tuples */
150 : : HTAB *rs_old_new_tid_map; /* unmatched B tuples */
151 : : HTAB *rs_logical_mappings; /* logical remapping files */
152 : : uint32 rs_num_rewrite_mappings; /* # in memory mappings */
153 : : } RewriteStateData;
154 : :
155 : : /*
156 : : * The lookup keys for the hash tables are tuple TID and xmin (we must check
157 : : * both to avoid false matches from dead tuples). Beware that there is
158 : : * probably some padding space in this struct; it must be zeroed out for
159 : : * correct hashtable operation.
160 : : */
161 : : typedef struct
162 : : {
163 : : TransactionId xmin; /* tuple xmin */
164 : : ItemPointerData tid; /* tuple location in old heap */
165 : : } TidHashKey;
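 : :
 : : /*
 : : * Keys are always built the same way before a hash lookup or insert (see
 : : * rewrite_heap_tuple() and rewrite_heap_dead_tuple() below): the whole
 : : * struct is memset() to zero first, and only then are xmin and tid filled
 : : * in, so that any padding bytes hash consistently.
 : : */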
166 : :
167 : : /*
168 : : * Entry structures for the hash tables
169 : : */
170 : : typedef struct
171 : : {
172 : : TidHashKey key; /* expected xmin/old location of B tuple */
173 : : ItemPointerData old_tid; /* A's location in the old heap */
174 : : HeapTuple tuple; /* A's tuple contents */
175 : : } UnresolvedTupData;
176 : :
177 : : typedef UnresolvedTupData *UnresolvedTup;
178 : :
179 : : typedef struct
180 : : {
181 : : TidHashKey key; /* actual xmin/old location of B tuple */
182 : : ItemPointerData new_tid; /* where we put it in the new heap */
183 : : } OldToNewMappingData;
184 : :
185 : : typedef OldToNewMappingData *OldToNewMapping;
186 : :
187 : : /*
188 : : * In-Memory data for an xid that might need logical remapping entries
189 : : * to be logged.
190 : : */
191 : : typedef struct RewriteMappingFile
192 : : {
193 : : TransactionId xid; /* xid that might need to see the row */
194 : : int vfd; /* fd of mappings file */
 195 : : off_t off; /* how far we have written so far */
196 : : dclist_head mappings; /* list of in-memory mappings */
197 : : char path[MAXPGPATH]; /* path, for error messages */
198 : : } RewriteMappingFile;
199 : :
200 : : /*
201 : : * A single In-Memory logical rewrite mapping, hanging off
202 : : * RewriteMappingFile->mappings.
203 : : */
204 : : typedef struct RewriteMappingDataEntry
205 : : {
206 : : LogicalRewriteMappingData map; /* map between old and new location of the
207 : : * tuple */
208 : : dlist_node node;
209 : : } RewriteMappingDataEntry;
210 : :
211 : :
212 : : /* prototypes for internal functions */
213 : : static void raw_heap_insert(RewriteState state, HeapTuple tup);
214 : :
215 : : /* internal logical remapping prototypes */
216 : : static void logical_begin_heap_rewrite(RewriteState state);
217 : : static void logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple);
218 : : static void logical_end_heap_rewrite(RewriteState state);
219 : :
220 : :
221 : : /*
222 : : * Begin a rewrite of a table
223 : : *
224 : : * old_heap old, locked heap relation tuples will be read from
225 : : * new_heap new, locked heap relation to insert tuples to
226 : : * oldest_xmin xid used by the caller to determine which tuples are dead
227 : : * freeze_xid xid before which tuples will be frozen
228 : : * cutoff_multi multixact before which multis will be removed
229 : : *
230 : : * Returns an opaque RewriteState, allocated in current memory context,
231 : : * to be used in subsequent calls to the other functions.
232 : : */
233 : : RewriteState
4257 rhaas@postgresql.org 234 :CBC 274 : begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin,
235 : : TransactionId freeze_xid, MultiXactId cutoff_multi)
236 : : {
237 : : RewriteState state;
238 : : MemoryContext rw_cxt;
239 : : MemoryContext old_cxt;
240 : : HASHCTL hash_ctl;
241 : :
242 : : /*
243 : : * To ease cleanup, make a separate context that will contain the
244 : : * RewriteState struct itself plus all subsidiary data.
245 : : */
6778 tgl@sss.pgh.pa.us 246 : 274 : rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
247 : : "Table rewrite",
248 : : ALLOCSET_DEFAULT_SIZES);
249 : 274 : old_cxt = MemoryContextSwitchTo(rw_cxt);
250 : :
251 : : /* Create and fill in the state struct */
252 : 274 : state = palloc0(sizeof(RewriteStateData));
253 : :
4257 rhaas@postgresql.org 254 : 274 : state->rs_old_rel = old_heap;
6778 tgl@sss.pgh.pa.us 255 : 274 : state->rs_new_rel = new_heap;
613 heikki.linnakangas@i 256 : 274 : state->rs_buffer = NULL;
257 : : /* new_heap needn't be empty, just locked */
6778 tgl@sss.pgh.pa.us 258 : 274 : state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
259 : 274 : state->rs_oldest_xmin = oldest_xmin;
6739 alvherre@alvh.no-ip. 260 : 274 : state->rs_freeze_xid = freeze_xid;
4425 261 : 274 : state->rs_cutoff_multi = cutoff_multi;
6778 tgl@sss.pgh.pa.us 262 : 274 : state->rs_cxt = rw_cxt;
613 heikki.linnakangas@i 263 : 274 : state->rs_bulkstate = smgr_bulk_start_rel(new_heap, MAIN_FORKNUM);
264 : :
265 : : /* Initialize hash tables used to track update chains */
6778 tgl@sss.pgh.pa.us 266 : 274 : hash_ctl.keysize = sizeof(TidHashKey);
267 : 274 : hash_ctl.entrysize = sizeof(UnresolvedTupData);
268 : 274 : hash_ctl.hcxt = state->rs_cxt;
269 : :
270 : 274 : state->rs_unresolved_tups =
271 : 274 : hash_create("Rewrite / Unresolved ctids",
272 : : 128, /* arbitrary initial size */
273 : : &hash_ctl,
274 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
275 : :
276 : 274 : hash_ctl.entrysize = sizeof(OldToNewMappingData);
277 : :
278 : 274 : state->rs_old_new_tid_map =
279 : 274 : hash_create("Rewrite / Old to new tid map",
280 : : 128, /* arbitrary initial size */
281 : : &hash_ctl,
282 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
283 : :
284 : 274 : MemoryContextSwitchTo(old_cxt);
285 : :
4257 rhaas@postgresql.org 286 : 274 : logical_begin_heap_rewrite(state);
287 : :
6778 tgl@sss.pgh.pa.us 288 : 274 : return state;
289 : : }
290 : :
291 : : /*
292 : : * End a rewrite.
293 : : *
294 : : * state and any other resources are freed.
295 : : */
296 : : void
297 : 274 : end_heap_rewrite(RewriteState state)
298 : : {
299 : : HASH_SEQ_STATUS seq_status;
300 : : UnresolvedTup unresolved;
301 : :
302 : : /*
303 : : * Write any remaining tuples in the UnresolvedTups table. If we have any
304 : : * left, they should in fact be dead, but let's err on the safe side.
305 : : */
306 : 274 : hash_seq_init(&seq_status, state->rs_unresolved_tups);
307 : :
308 [ - + ]: 274 : while ((unresolved = hash_seq_search(&seq_status)) != NULL)
309 : : {
6778 tgl@sss.pgh.pa.us 310 :UBC 0 : ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
311 : 0 : raw_heap_insert(state, unresolved->tuple);
312 : : }
313 : :
314 : : /* Write the last page, if any */
613 heikki.linnakangas@i 315 [ + + ]:CBC 274 : if (state->rs_buffer)
316 : : {
317 : 189 : smgr_bulk_write(state->rs_bulkstate, state->rs_blockno, state->rs_buffer, true);
318 : 189 : state->rs_buffer = NULL;
319 : : }
320 : :
321 : 274 : smgr_bulk_finish(state->rs_bulkstate);
322 : :
4257 rhaas@postgresql.org 323 : 274 : logical_end_heap_rewrite(state);
324 : :
325 : : /* Deleting the context frees everything */
6778 tgl@sss.pgh.pa.us 326 : 274 : MemoryContextDelete(state->rs_cxt);
327 : 274 : }
328 : :
329 : : /*
330 : : * Add a tuple to the new heap.
331 : : *
332 : : * Visibility information is copied from the original tuple, except that
333 : : * we "freeze" very-old tuples. Note that since we scribble on new_tuple,
334 : : * it had better be temp storage not a pointer to the original tuple.
335 : : *
336 : : * state opaque state as returned by begin_heap_rewrite
337 : : * old_tuple original tuple in the old heap
338 : : * new_tuple new, rewritten tuple to be inserted to new heap
339 : : */
340 : : void
341 : 364303 : rewrite_heap_tuple(RewriteState state,
342 : : HeapTuple old_tuple, HeapTuple new_tuple)
343 : : {
344 : : MemoryContext old_cxt;
345 : : ItemPointerData old_tid;
346 : : TidHashKey hashkey;
347 : : bool found;
348 : : bool free_new;
349 : :
350 : 364303 : old_cxt = MemoryContextSwitchTo(state->rs_cxt);
351 : :
352 : : /*
353 : : * Copy the original tuple's visibility information into new_tuple.
354 : : *
355 : : * XXX we might later need to copy some t_infomask2 bits, too? Right now,
356 : : * we intentionally clear the HOT status bits.
357 : : */
358 : 364303 : memcpy(&new_tuple->t_data->t_choice.t_heap,
359 : 364303 : &old_tuple->t_data->t_choice.t_heap,
360 : : sizeof(HeapTupleFields));
361 : :
362 : 364303 : new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
6613 363 : 364303 : new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
6778 364 : 364303 : new_tuple->t_data->t_infomask |=
365 : 364303 : old_tuple->t_data->t_infomask & HEAP_XACT_MASK;
366 : :
367 : : /*
368 : : * While we have our hands on the tuple, we may as well freeze any
369 : : * eligible xmin or xmax, so that future VACUUM effort can be saved.
370 : : */
2906 andres@anarazel.de 371 : 364303 : heap_freeze_tuple(new_tuple->t_data,
372 : 364303 : state->rs_old_rel->rd_rel->relfrozenxid,
373 : 364303 : state->rs_old_rel->rd_rel->relminmxid,
374 : : state->rs_freeze_xid,
375 : : state->rs_cutoff_multi);
376 : :
377 : : /*
378 : : * Invalid ctid means that ctid should point to the tuple itself. We'll
379 : : * override it later if the tuple is part of an update chain.
380 : : */
6778 tgl@sss.pgh.pa.us 381 : 364303 : ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);
382 : :
383 : : /*
384 : : * If the tuple has been updated, check the old-to-new mapping hash table.
385 : : */
4661 alvherre@alvh.no-ip. 386 [ + + + - ]: 385437 : if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
387 : 21134 : HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
2761 andres@anarazel.de 388 [ + - ]: 21134 : !HeapTupleHeaderIndicatesMovedPartitions(old_tuple->t_data) &&
6778 tgl@sss.pgh.pa.us 389 [ + + ]: 21134 : !(ItemPointerEquals(&(old_tuple->t_self),
390 : 21134 : &(old_tuple->t_data->t_ctid))))
391 : : {
392 : : OldToNewMapping mapping;
393 : :
394 : 450 : memset(&hashkey, 0, sizeof(hashkey));
4661 alvherre@alvh.no-ip. 395 : 450 : hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
6778 tgl@sss.pgh.pa.us 396 : 450 : hashkey.tid = old_tuple->t_data->t_ctid;
397 : :
398 : : mapping = (OldToNewMapping)
399 : 450 : hash_search(state->rs_old_new_tid_map, &hashkey,
400 : : HASH_FIND, NULL);
401 : :
402 [ + + ]: 450 : if (mapping != NULL)
403 : : {
404 : : /*
405 : : * We've already copied the tuple that t_ctid points to, so we can
406 : : * set the ctid of this tuple to point to the new location, and
407 : : * insert it right away.
408 : : */
409 : 201 : new_tuple->t_data->t_ctid = mapping->new_tid;
410 : :
411 : : /* We don't need the mapping entry anymore */
412 : 201 : hash_search(state->rs_old_new_tid_map, &hashkey,
413 : : HASH_REMOVE, &found);
414 [ - + ]: 201 : Assert(found);
415 : : }
416 : : else
417 : : {
418 : : /*
419 : : * We haven't seen the tuple t_ctid points to yet. Stash this
420 : : * tuple into unresolved_tups to be written later.
421 : : */
422 : : UnresolvedTup unresolved;
423 : :
424 : 249 : unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
425 : : HASH_ENTER, &found);
426 [ - + ]: 249 : Assert(!found);
427 : :
428 : 249 : unresolved->old_tid = old_tuple->t_self;
429 : 249 : unresolved->tuple = heap_copytuple(new_tuple);
430 : :
431 : : /*
432 : : * We can't do anything more now, since we don't know where the
433 : : * tuple will be written.
434 : : */
435 : 249 : MemoryContextSwitchTo(old_cxt);
436 : 249 : return;
437 : : }
438 : : }
439 : :
440 : : /*
441 : : * Now we will write the tuple, and then check to see if it is the B tuple
442 : : * in any new or known pair. When we resolve a known pair, we will be
443 : : * able to write that pair's A tuple, and then we have to check if it
444 : : * resolves some other pair. Hence, we need a loop here.
445 : : */
446 : 364054 : old_tid = old_tuple->t_self;
447 : 364054 : free_new = false;
448 : :
449 : : for (;;)
450 : 249 : {
451 : : ItemPointerData new_tid;
452 : :
453 : : /* Insert the tuple and find out where it's put in new_heap */
454 : 364303 : raw_heap_insert(state, new_tuple);
455 : 364303 : new_tid = new_tuple->t_self;
456 : :
4257 rhaas@postgresql.org 457 : 364303 : logical_rewrite_heap_tuple(state, old_tid, new_tuple);
458 : :
459 : : /*
460 : : * If the tuple is the updated version of a row, and the prior version
461 : : * wouldn't be DEAD yet, then we need to either resolve the prior
462 : : * version (if it's waiting in rs_unresolved_tups), or make an entry
463 : : * in rs_old_new_tid_map (so we can resolve it when we do see it). The
464 : : * previous tuple's xmax would equal this one's xmin, so it's
465 : : * RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
466 : : */
6778 tgl@sss.pgh.pa.us 467 [ + + ]: 364303 : if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
468 [ + + ]: 8005 : !TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
469 : : state->rs_oldest_xmin))
470 : : {
471 : : /*
472 : : * Okay, this is B in an update pair. See if we've seen A.
473 : : */
474 : : UnresolvedTup unresolved;
475 : :
476 : 450 : memset(&hashkey, 0, sizeof(hashkey));
477 : 450 : hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
478 : 450 : hashkey.tid = old_tid;
479 : :
480 : 450 : unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
481 : : HASH_FIND, NULL);
482 : :
483 [ + + ]: 450 : if (unresolved != NULL)
484 : : {
485 : : /*
486 : : * We have seen and memorized the previous tuple already. Now
487 : : * that we know where we inserted the tuple its t_ctid points
488 : : * to, fix its t_ctid and insert it to the new heap.
489 : : */
490 [ + + ]: 249 : if (free_new)
491 : 56 : heap_freetuple(new_tuple);
492 : 249 : new_tuple = unresolved->tuple;
493 : 249 : free_new = true;
494 : 249 : old_tid = unresolved->old_tid;
495 : 249 : new_tuple->t_data->t_ctid = new_tid;
496 : :
497 : : /*
498 : : * We don't need the hash entry anymore, but don't free its
499 : : * tuple just yet.
500 : : */
501 : 249 : hash_search(state->rs_unresolved_tups, &hashkey,
502 : : HASH_REMOVE, &found);
503 [ - + ]: 249 : Assert(found);
504 : :
505 : : /* loop back to insert the previous tuple in the chain */
506 : 249 : continue;
507 : : }
508 : : else
509 : : {
510 : : /*
511 : : * Remember the new tid of this tuple. We'll use it to set the
512 : : * ctid when we find the previous tuple in the chain.
513 : : */
514 : : OldToNewMapping mapping;
515 : :
516 : 201 : mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
517 : : HASH_ENTER, &found);
518 [ - + ]: 201 : Assert(!found);
519 : :
520 : 201 : mapping->new_tid = new_tid;
521 : : }
522 : : }
523 : :
524 : : /* Done with this (chain of) tuples, for now */
525 [ + + ]: 364054 : if (free_new)
526 : 193 : heap_freetuple(new_tuple);
527 : 364054 : break;
528 : : }
529 : :
530 : 364054 : MemoryContextSwitchTo(old_cxt);
531 : : }
532 : :
533 : : /*
534 : : * Register a dead tuple with an ongoing rewrite. Dead tuples are not
535 : : * copied to the new table, but we still make note of them so that we
536 : : * can release some resources earlier.
537 : : *
538 : : * Returns true if a tuple was removed from the unresolved_tups table.
539 : : * This indicates that that tuple, previously thought to be "recently dead",
540 : : * is now known really dead and won't be written to the output.
541 : : */
542 : : bool
543 : 12722 : rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
544 : : {
545 : : /*
546 : : * If we have already seen an earlier tuple in the update chain that
547 : : * points to this tuple, let's forget about that earlier tuple. It's in
 548 : : * fact dead as well; our simple xmax < OldestXmin test in
549 : : * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
550 : : * when xmin of a tuple is greater than xmax, which sounds
551 : : * counter-intuitive but is perfectly valid.
552 : : *
553 : : * We don't bother to try to detect the situation the other way round,
554 : : * when we encounter the dead tuple first and then the recently dead one
555 : : * that points to it. If that happens, we'll have some unmatched entries
556 : : * in the UnresolvedTups hash table at the end. That can happen anyway,
557 : : * because a vacuum might have removed the dead tuple in the chain before
558 : : * us.
559 : : */
560 : : UnresolvedTup unresolved;
561 : : TidHashKey hashkey;
562 : : bool found;
563 : :
564 : 12722 : memset(&hashkey, 0, sizeof(hashkey));
565 : 12722 : hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
566 : 12722 : hashkey.tid = old_tuple->t_self;
567 : :
568 : 12722 : unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
569 : : HASH_FIND, NULL);
570 : :
571 [ - + ]: 12722 : if (unresolved != NULL)
572 : : {
573 : : /* Need to free the contained tuple as well as the hashtable entry */
6778 tgl@sss.pgh.pa.us 574 :UBC 0 : heap_freetuple(unresolved->tuple);
575 : 0 : hash_search(state->rs_unresolved_tups, &hashkey,
576 : : HASH_REMOVE, &found);
577 [ # # ]: 0 : Assert(found);
5500 578 : 0 : return true;
579 : : }
580 : :
5500 tgl@sss.pgh.pa.us 581 :CBC 12722 : return false;
582 : : }
583 : :
584 : : /*
585 : : * Insert a tuple to the new relation. This has to track heap_insert
586 : : * and its subsidiary functions!
587 : : *
588 : : * t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
589 : : * tuple is invalid on entry, it's replaced with the new TID as well (in
590 : : * the inserted data only, not in the caller's copy).
591 : : */
592 : : static void
6778 593 : 364303 : raw_heap_insert(RewriteState state, HeapTuple tup)
594 : : {
595 : : Page page;
596 : : Size pageFreeSpace,
597 : : saveFreeSpace;
598 : : Size len;
599 : : OffsetNumber newoff;
600 : : HeapTuple heaptup;
601 : :
602 : : /*
603 : : * If the new tuple is too big for storage or contains already toasted
604 : : * out-of-line attributes from some other relation, invoke the toaster.
605 : : *
606 : : * Note: below this point, heaptup is the data we actually intend to store
607 : : * into the relation; tup is the caller's original untoasted data.
608 : : */
609 [ - + ]: 364303 : if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
610 : : {
611 : : /* toast table entries should never be recursively toasted */
6778 tgl@sss.pgh.pa.us 612 [ # # ]:UBC 0 : Assert(!HeapTupleHasExternal(tup));
613 : 0 : heaptup = tup;
614 : : }
6778 tgl@sss.pgh.pa.us 615 [ + + + + ]:CBC 364303 : else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
2575 andres@anarazel.de 616 : 299 : {
2351 tgl@sss.pgh.pa.us 617 : 299 : int options = HEAP_INSERT_SKIP_FSM;
618 : :
619 : : /*
620 : : * While rewriting the heap for VACUUM FULL / CLUSTER, make sure data
621 : : * for the TOAST table are not logically decoded. The main heap is
622 : : * WAL-logged as XLOG FPI records, which are not logically decoded.
623 : : */
2526 tomas.vondra@postgre 624 : 299 : options |= HEAP_INSERT_NO_LOGICAL;
625 : :
2216 rhaas@postgresql.org 626 : 299 : heaptup = heap_toast_insert_or_update(state->rs_new_rel, tup, NULL,
627 : : options);
628 : : }
629 : : else
6778 tgl@sss.pgh.pa.us 630 : 364004 : heaptup = tup;
631 : :
3051 632 : 364303 : len = MAXALIGN(heaptup->t_len); /* be conservative */
633 : :
634 : : /*
635 : : * If we're gonna fail for oversize tuple, do it right away
636 : : */
6778 637 [ - + ]: 364303 : if (len > MaxHeapTupleSize)
6778 tgl@sss.pgh.pa.us 638 [ # # ]:UBC 0 : ereport(ERROR,
639 : : (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
640 : : errmsg("row is too big: size %zu, maximum size %zu",
641 : : len, MaxHeapTupleSize)));
642 : :
643 : : /* Compute desired extra freespace due to fillfactor option */
565 akorotkov@postgresql 644 [ + + ]:CBC 364303 : saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
645 : : HEAP_DEFAULT_FILLFACTOR);
646 : :
647 : : /* Now we can check to see if there's enough free space already. */
613 heikki.linnakangas@i 648 : 364303 : page = (Page) state->rs_buffer;
649 [ + + ]: 364303 : if (page)
650 : : {
6613 tgl@sss.pgh.pa.us 651 : 364114 : pageFreeSpace = PageGetHeapFreeSpace(page);
652 : :
6778 653 [ + + ]: 364114 : if (len + saveFreeSpace > pageFreeSpace)
654 : : {
655 : : /*
656 : : * Doesn't fit, so write out the existing page. It always
657 : : * contains a tuple. Hence, unlike RelationGetBufferForTuple(),
658 : : * enforce saveFreeSpace unconditionally.
659 : : */
613 heikki.linnakangas@i 660 : 5082 : smgr_bulk_write(state->rs_bulkstate, state->rs_blockno, state->rs_buffer, true);
661 : 5082 : state->rs_buffer = NULL;
662 : 5082 : page = NULL;
6778 tgl@sss.pgh.pa.us 663 : 5082 : state->rs_blockno++;
664 : : }
665 : : }
666 : :
613 heikki.linnakangas@i 667 [ + + ]: 364303 : if (!page)
668 : : {
669 : : /* Initialize a new empty page */
670 : 5271 : state->rs_buffer = smgr_bulk_get_buf(state->rs_bulkstate);
671 : 5271 : page = (Page) state->rs_buffer;
6778 tgl@sss.pgh.pa.us 672 : 5271 : PageInit(page, BLCKSZ, 0);
673 : : }
674 : :
675 : : /* And now we can insert the tuple into the page */
1 peter@eisentraut.org 676 :GNC 364303 : newoff = PageAddItem(page, heaptup->t_data, heaptup->t_len, InvalidOffsetNumber, false, true);
6778 tgl@sss.pgh.pa.us 677 [ - + ]:CBC 364303 : if (newoff == InvalidOffsetNumber)
6778 tgl@sss.pgh.pa.us 678 [ # # ]:UBC 0 : elog(ERROR, "failed to add tuple");
679 : :
680 : : /* Update caller's t_self to the actual position where it was stored */
6778 tgl@sss.pgh.pa.us 681 :CBC 364303 : ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);
682 : :
683 : : /*
684 : : * Insert the correct position into CTID of the stored tuple, too, if the
685 : : * caller didn't supply a valid CTID.
686 : : */
6557 bruce@momjian.us 687 [ + + ]: 364303 : if (!ItemPointerIsValid(&tup->t_data->t_ctid))
688 : : {
689 : : ItemId newitemid;
690 : : HeapTupleHeader onpage_tup;
691 : :
6778 tgl@sss.pgh.pa.us 692 : 363853 : newitemid = PageGetItemId(page, newoff);
693 : 363853 : onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);
694 : :
695 : 363853 : onpage_tup->t_ctid = tup->t_self;
696 : : }
697 : :
698 : : /* If heaptup is a private copy, release it. */
699 [ + + ]: 364303 : if (heaptup != tup)
700 : 299 : heap_freetuple(heaptup);
701 : 364303 : }
702 : :
703 : : /* ------------------------------------------------------------------------
704 : : * Logical rewrite support
705 : : *
706 : : * When doing logical decoding - which relies on using cmin/cmax of catalog
707 : : * tuples, via xl_heap_new_cid records - heap rewrites have to log enough
708 : : * information to allow the decoding backend to update its internal mapping
709 : : * of (relfilelocator,ctid) => (cmin, cmax) to be correct for the rewritten heap.
710 : : *
711 : : * For that, every time we find a tuple that's been modified in a catalog
712 : : * relation within the xmin horizon of any decoding slot, we log a mapping
713 : : * from the old to the new location.
714 : : *
 715 : : * To deal with rewrites that abort, the filename of a mapping file contains
716 : : * the xid of the transaction performing the rewrite, which then can be
717 : : * checked before being read in.
718 : : *
 719 : : * For efficiency we don't immediately spill every single mapping for a
720 : : * row to disk but only do so in batches when we've collected several of them
721 : : * in memory or when end_heap_rewrite() has been called.
722 : : *
723 : : * Crash-Safety: This module diverts from the usual patterns of doing WAL
724 : : * since it cannot rely on checkpoint flushing out all buffers and thus
725 : : * waiting for exclusive locks on buffers. Usually the XLogInsert() covering
726 : : * buffer modifications is performed while the buffer(s) that are being
727 : : * modified are exclusively locked guaranteeing that both the WAL record and
 728 : : * the modified heap end up on the same side of the checkpoint. But since the
729 : : * mapping files we log aren't in shared_buffers that interlock doesn't work.
730 : : *
731 : : * Instead we simply write the mapping files out to disk, *before* the
732 : : * XLogInsert() is performed. That guarantees that either the XLogInsert() is
733 : : * inserted after the checkpoint's redo pointer or that the checkpoint (via
734 : : * CheckPointLogicalRewriteHeap()) has flushed the (partial) mapping file to
735 : : * disk. That leaves the tail end that has not yet been flushed open to
736 : : * corruption, which is solved by including the current offset in the
737 : : * xl_heap_rewrite_mapping records and truncating the mapping file to it
738 : : * during replay. Every time a rewrite is finished all generated mapping files
739 : : * are synced to disk.
740 : : *
741 : : * Note that if we were only concerned about crash safety we wouldn't have to
742 : : * deal with WAL logging at all - an fsync() at the end of a rewrite would be
743 : : * sufficient for crash safety. Any mapping that hasn't been safely flushed to
744 : : * disk has to be by an aborted (explicitly or via a crash) transaction and is
745 : : * ignored by virtue of the xid in its name being subject to a
 746 : : * TransactionIdDidCommit() check. But we want to support having standbys via
747 : : * physical replication, both for availability and to do logical decoding
748 : : * there.
749 : : * ------------------------------------------------------------------------
750 : : */
751 : :
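 : :
 : : /*
 : : * For orientation (informational, not normative): the mapping files
 : : * described above live under PG_LOGICAL_MAPPINGS_DIR and are named via
 : : * LOGICAL_REWRITE_FORMAT, which encodes (in hex) the database oid, the
 : : * relation oid, the rewrite's start LSN, the mapped xid, and the xid of
 : : * the transaction creating the file; see the snprintf() in
 : : * logical_rewrite_log_mapping() and the matching sscanf() in
 : : * CheckPointLogicalRewriteHeap().
 : : */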
752 : : /*
753 : : * Do preparations for logging logical mappings during a rewrite if
754 : : * necessary. If we detect that we don't need to log anything we'll prevent
755 : : * any further action by the various logical rewrite functions.
756 : : */
757 : : static void
4257 rhaas@postgresql.org 758 : 274 : logical_begin_heap_rewrite(RewriteState state)
759 : : {
760 : : HASHCTL hash_ctl;
761 : : TransactionId logical_xmin;
762 : :
763 : : /*
764 : : * We only need to persist these mappings if the rewritten table can be
 765 : : * accessed during logical decoding; if not, we can skip doing any
766 : : * additional work.
767 : : */
768 : 274 : state->rs_logical_rewrite =
 769 [ + + + - - + - - - - + + - + - - - - - - ]: 274 : RelationIsAccessibleInLogicalDecoding(state->rs_old_rel);
770 : :
771 [ + + ]: 274 : if (!state->rs_logical_rewrite)
772 : 254 : return;
773 : :
774 : 21 : ProcArrayGetReplicationSlotXmin(NULL, &logical_xmin);
775 : :
776 : : /*
777 : : * If there are no logical slots in progress we don't need to do anything,
778 : : * there cannot be any remappings for relevant rows yet. The relation's
779 : : * lock protects us against races.
780 : : */
781 [ + + ]: 21 : if (logical_xmin == InvalidTransactionId)
782 : : {
783 : 1 : state->rs_logical_rewrite = false;
784 : 1 : return;
785 : : }
786 : :
787 : 20 : state->rs_logical_xmin = logical_xmin;
788 : 20 : state->rs_begin_lsn = GetXLogInsertRecPtr();
789 : 20 : state->rs_num_rewrite_mappings = 0;
790 : :
791 : 20 : hash_ctl.keysize = sizeof(TransactionId);
792 : 20 : hash_ctl.entrysize = sizeof(RewriteMappingFile);
793 : 20 : hash_ctl.hcxt = state->rs_cxt;
794 : :
795 : 20 : state->rs_logical_mappings =
796 : 20 : hash_create("Logical rewrite mapping",
797 : : 128, /* arbitrary initial size */
798 : : &hash_ctl,
799 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
800 : : }
801 : :
802 : : /*
803 : : * Flush all logical in-memory mappings to disk, but don't fsync them yet.
804 : : */
805 : : static void
806 : 9 : logical_heap_rewrite_flush_mappings(RewriteState state)
807 : : {
808 : : HASH_SEQ_STATUS seq_status;
809 : : RewriteMappingFile *src;
810 : : dlist_mutable_iter iter;
811 : :
812 [ - + ]: 9 : Assert(state->rs_logical_rewrite);
813 : :
814 : : /* no logical rewrite in progress, no need to iterate over mappings */
815 [ - + ]: 9 : if (state->rs_num_rewrite_mappings == 0)
4257 rhaas@postgresql.org 816 :UBC 0 : return;
817 : :
4257 rhaas@postgresql.org 818 [ - + ]:CBC 9 : elog(DEBUG1, "flushing %u logical rewrite mapping entries",
819 : : state->rs_num_rewrite_mappings);
820 : :
821 : 9 : hash_seq_init(&seq_status, state->rs_logical_mappings);
822 [ + + ]: 99 : while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
823 : : {
824 : : char *waldata;
825 : : char *waldata_start;
826 : : xl_heap_rewrite_mapping xlrec;
827 : : Oid dboid;
828 : : uint32 len;
829 : : int written;
1091 drowley@postgresql.o 830 : 90 : uint32 num_mappings = dclist_count(&src->mappings);
831 : :
832 : : /* this file hasn't got any new mappings */
833 [ - + ]: 90 : if (num_mappings == 0)
4257 rhaas@postgresql.org 834 :UBC 0 : continue;
835 : :
4257 rhaas@postgresql.org 836 [ - + ]:CBC 90 : if (state->rs_old_rel->rd_rel->relisshared)
4257 rhaas@postgresql.org 837 :UBC 0 : dboid = InvalidOid;
838 : : else
4257 rhaas@postgresql.org 839 :CBC 90 : dboid = MyDatabaseId;
840 : :
1091 drowley@postgresql.o 841 : 90 : xlrec.num_mappings = num_mappings;
4257 rhaas@postgresql.org 842 : 90 : xlrec.mapped_rel = RelationGetRelid(state->rs_old_rel);
843 : 90 : xlrec.mapped_xid = src->xid;
844 : 90 : xlrec.mapped_db = dboid;
845 : 90 : xlrec.offset = src->off;
846 : 90 : xlrec.start_lsn = state->rs_begin_lsn;
847 : :
848 : : /* write all mappings consecutively */
1091 drowley@postgresql.o 849 : 90 : len = num_mappings * sizeof(LogicalRewriteMappingData);
4207 tgl@sss.pgh.pa.us 850 : 90 : waldata_start = waldata = palloc(len);
851 : :
852 : : /*
853 : : * collect data we need to write out, but don't modify ondisk data yet
854 : : */
1091 drowley@postgresql.o 855 [ + - + + ]: 813 : dclist_foreach_modify(iter, &src->mappings)
856 : : {
857 : : RewriteMappingDataEntry *pmap;
858 : :
859 : 723 : pmap = dclist_container(RewriteMappingDataEntry, node, iter.cur);
860 : :
4257 rhaas@postgresql.org 861 : 723 : memcpy(waldata, &pmap->map, sizeof(pmap->map));
862 : 723 : waldata += sizeof(pmap->map);
863 : :
864 : : /* remove from the list and free */
1091 drowley@postgresql.o 865 : 723 : dclist_delete_from(&src->mappings, &pmap->node);
4257 rhaas@postgresql.org 866 : 723 : pfree(pmap);
867 : :
868 : : /* update bookkeeping */
869 : 723 : state->rs_num_rewrite_mappings--;
870 : : }
871 : :
1091 drowley@postgresql.o 872 [ - + ]: 90 : Assert(dclist_count(&src->mappings) == 0);
4207 tgl@sss.pgh.pa.us 873 [ - + ]: 90 : Assert(waldata == waldata_start + len);
874 : :
875 : : /*
876 : : * Note that we deviate from the usual WAL coding practices here,
877 : : * check the above "Logical rewrite support" comment for reasoning.
878 : : */
2547 tmunro@postgresql.or 879 : 90 : written = FileWrite(src->vfd, waldata_start, len, src->off,
880 : : WAIT_EVENT_LOGICAL_REWRITE_WRITE);
4257 rhaas@postgresql.org 881 [ - + ]: 90 : if (written != len)
4257 rhaas@postgresql.org 882 [ # # ]:UBC 0 : ereport(ERROR,
883 : : (errcode_for_file_access(),
884 : : errmsg("could not write to file \"%s\", wrote %d of %d: %m", src->path,
885 : : written, len)));
4257 rhaas@postgresql.org 886 :CBC 90 : src->off += len;
887 : :
3995 heikki.linnakangas@i 888 : 90 : XLogBeginInsert();
259 peter@eisentraut.org 889 : 90 : XLogRegisterData(&xlrec, sizeof(xlrec));
3995 heikki.linnakangas@i 890 : 90 : XLogRegisterData(waldata_start, len);
891 : :
892 : : /* write xlog record */
893 : 90 : XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE);
894 : :
4207 tgl@sss.pgh.pa.us 895 : 90 : pfree(waldata_start);
896 : : }
4257 rhaas@postgresql.org 897 [ - + ]: 9 : Assert(state->rs_num_rewrite_mappings == 0);
898 : : }
899 : :
900 : : /*
901 : : * Logical remapping part of end_heap_rewrite().
902 : : */
903 : : static void
904 : 274 : logical_end_heap_rewrite(RewriteState state)
905 : : {
906 : : HASH_SEQ_STATUS seq_status;
907 : : RewriteMappingFile *src;
908 : :
909 : : /* done, no logical rewrite in progress */
910 [ + + ]: 274 : if (!state->rs_logical_rewrite)
911 : 254 : return;
912 : :
913 : : /* writeout remaining in-memory entries */
4193 bruce@momjian.us 914 [ + + ]: 20 : if (state->rs_num_rewrite_mappings > 0)
4257 rhaas@postgresql.org 915 : 9 : logical_heap_rewrite_flush_mappings(state);
916 : :
917 : : /* Iterate over all mappings we have written and fsync the files. */
918 : 20 : hash_seq_init(&seq_status, state->rs_logical_mappings);
919 [ + + ]: 110 : while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
920 : : {
3146 921 [ - + ]: 90 : if (FileSync(src->vfd, WAIT_EVENT_LOGICAL_REWRITE_SYNC) != 0)
2535 tmunro@postgresql.or 922 [ # # ]:UBC 0 : ereport(data_sync_elevel(ERROR),
923 : : (errcode_for_file_access(),
924 : : errmsg("could not fsync file \"%s\": %m", src->path)));
4257 rhaas@postgresql.org 925 :CBC 90 : FileClose(src->vfd);
926 : : }
927 : : /* memory context cleanup will deal with the rest */
928 : : }
929 : :
930 : : /*
931 : : * Log a single (old->new) mapping for 'xid'.
932 : : */
933 : : static void
934 : 723 : logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
935 : : LogicalRewriteMappingData *map)
936 : : {
937 : : RewriteMappingFile *src;
938 : : RewriteMappingDataEntry *pmap;
939 : : Oid relid;
940 : : bool found;
941 : :
942 : 723 : relid = RelationGetRelid(state->rs_old_rel);
943 : :
944 : : /* look for existing mappings for this 'mapped' xid */
945 : 723 : src = hash_search(state->rs_logical_mappings, &xid,
946 : : HASH_ENTER, &found);
947 : :
948 : : /*
949 : : * We haven't yet had the need to map anything for this xid, create
950 : : * per-xid data structures.
951 : : */
952 [ + + ]: 723 : if (!found)
953 : : {
954 : : char path[MAXPGPATH];
955 : : Oid dboid;
956 : :
957 [ - + ]: 90 : if (state->rs_old_rel->rd_rel->relisshared)
4257 rhaas@postgresql.org 958 :UBC 0 : dboid = InvalidOid;
959 : : else
4257 rhaas@postgresql.org 960 :CBC 90 : dboid = MyDatabaseId;
961 : :
962 : 90 : snprintf(path, MAXPGPATH,
963 : : "%s/" LOGICAL_REWRITE_FORMAT,
964 : : PG_LOGICAL_MAPPINGS_DIR, dboid, relid,
1708 peter@eisentraut.org 965 : 90 : LSN_FORMAT_ARGS(state->rs_begin_lsn),
966 : : xid, GetCurrentTransactionId());
967 : :
1091 drowley@postgresql.o 968 : 90 : dclist_init(&src->mappings);
4257 rhaas@postgresql.org 969 : 90 : src->off = 0;
970 : 90 : memcpy(src->path, path, sizeof(path));
971 : 90 : src->vfd = PathNameOpenFile(path,
972 : : O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
973 [ - + ]: 90 : if (src->vfd < 0)
4257 rhaas@postgresql.org 974 [ # # ]:UBC 0 : ereport(ERROR,
975 : : (errcode_for_file_access(),
976 : : errmsg("could not create file \"%s\": %m", path)));
977 : : }
978 : :
4257 rhaas@postgresql.org 979 :CBC 723 : pmap = MemoryContextAlloc(state->rs_cxt,
980 : : sizeof(RewriteMappingDataEntry));
981 : 723 : memcpy(&pmap->map, map, sizeof(LogicalRewriteMappingData));
1091 drowley@postgresql.o 982 : 723 : dclist_push_tail(&src->mappings, &pmap->node);
4257 rhaas@postgresql.org 983 : 723 : state->rs_num_rewrite_mappings++;
984 : :
985 : : /*
 986 : : * Write out the buffer every time we have too many in-memory entries across all
987 : : * mapping files.
988 : : */
4193 bruce@momjian.us 989 [ - + ]: 723 : if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ )
4257 rhaas@postgresql.org 990 :UBC 0 : logical_heap_rewrite_flush_mappings(state);
4257 rhaas@postgresql.org 991 :CBC 723 : }
992 : :
993 : : /*
994 : : * Perform logical remapping for a tuple that's mapped from old_tid to
995 : : * new_tuple->t_self by rewrite_heap_tuple() if necessary for the tuple.
996 : : */
997 : : static void
998 : 364303 : logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
999 : : HeapTuple new_tuple)
1000 : : {
1001 : 364303 : ItemPointerData new_tid = new_tuple->t_self;
4193 bruce@momjian.us 1002 : 364303 : TransactionId cutoff = state->rs_logical_xmin;
1003 : : TransactionId xmin;
1004 : : TransactionId xmax;
1005 : 364303 : bool do_log_xmin = false;
1006 : 364303 : bool do_log_xmax = false;
1007 : : LogicalRewriteMappingData map;
1008 : :
1009 : : /* no logical rewrite in progress, we don't need to log anything */
4257 rhaas@postgresql.org 1010 [ + + ]: 364303 : if (!state->rs_logical_rewrite)
1011 : 363595 : return;
1012 : :
1013 : 26704 : xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
1014 : : /* use *GetUpdateXid to correctly deal with multixacts */
1015 : 26704 : xmax = HeapTupleHeaderGetUpdateXid(new_tuple->t_data);
1016 : :
1017 : : /*
1018 : : * Log the mapping iff the tuple has been created recently.
1019 : : */
1020 [ + + + - ]: 26704 : if (TransactionIdIsNormal(xmin) && !TransactionIdPrecedes(xmin, cutoff))
1021 : 535 : do_log_xmin = true;
1022 : :
1023 [ + + ]: 26704 : if (!TransactionIdIsNormal(xmax))
1024 : : {
1025 : : /*
1026 : : * no xmax is set, can't have any permanent ones, so this check is
1027 : : * sufficient
1028 : : */
1029 : : }
1030 [ + - ]: 501 : else if (HEAP_XMAX_IS_LOCKED_ONLY(new_tuple->t_data->t_infomask))
1031 : : {
1032 : : /* only locked, we don't care */
1033 : : }
1034 [ + - ]: 501 : else if (!TransactionIdPrecedes(xmax, cutoff))
1035 : : {
1036 : : /* tuple has been deleted recently, log */
1037 : 501 : do_log_xmax = true;
1038 : : }
1039 : :
1040 : : /* if neither needs to be logged, we're done */
1041 [ + + + + ]: 26704 : if (!do_log_xmin && !do_log_xmax)
1042 : 25996 : return;
1043 : :
1044 : : /* fill out mapping information */
1210 1045 : 708 : map.old_locator = state->rs_old_rel->rd_locator;
4257 1046 : 708 : map.old_tid = old_tid;
1210 1047 : 708 : map.new_locator = state->rs_new_rel->rd_locator;
4257 1048 : 708 : map.new_tid = new_tid;
1049 : :
1050 : : /* ---
1051 : : * Now persist the mapping for the individual xids that are affected. We
1052 : : * need to log for both xmin and xmax if they aren't the same transaction
1053 : : * since the mapping files are per "affected" xid.
1054 : : * We don't muster all that much effort detecting whether xmin and xmax
1055 : : * are actually the same transaction, we just check whether the xid is the
1056 : : * same disregarding subtransactions. Logging too much is relatively
1057 : : * harmless and we could never do the check fully since subtransaction
1058 : : * data is thrown away during restarts.
1059 : : * ---
1060 : : */
1061 [ + + ]: 708 : if (do_log_xmin)
1062 : 535 : logical_rewrite_log_mapping(state, xmin, &map);
1063 : : /* separately log mapping for xmax unless it'd be redundant */
1064 [ + + + + ]: 708 : if (do_log_xmax && !TransactionIdEquals(xmin, xmax))
1065 : 188 : logical_rewrite_log_mapping(state, xmax, &map);
1066 : : }
1067 : :
1068 : : /*
1069 : : * Replay XLOG_HEAP2_REWRITE records
1070 : : */
1071 : : void
3995 heikki.linnakangas@i 1072 :UBC 0 : heap_xlog_logical_rewrite(XLogReaderState *r)
1073 : : {
1074 : : char path[MAXPGPATH];
1075 : : int fd;
1076 : : xl_heap_rewrite_mapping *xlrec;
1077 : : uint32 len;
1078 : : char *data;
1079 : :
4257 rhaas@postgresql.org 1080 : 0 : xlrec = (xl_heap_rewrite_mapping *) XLogRecGetData(r);
1081 : :
1082 : 0 : snprintf(path, MAXPGPATH,
1083 : : "%s/" LOGICAL_REWRITE_FORMAT,
1084 : : PG_LOGICAL_MAPPINGS_DIR, xlrec->mapped_db, xlrec->mapped_rel,
1708 peter@eisentraut.org 1085 : 0 : LSN_FORMAT_ARGS(xlrec->start_lsn),
3995 heikki.linnakangas@i 1086 : 0 : xlrec->mapped_xid, XLogRecGetXid(r));
1087 : :
4257 rhaas@postgresql.org 1088 : 0 : fd = OpenTransientFile(path,
1089 : : O_CREAT | O_WRONLY | PG_BINARY);
1090 [ # # ]: 0 : if (fd < 0)
1091 [ # # ]: 0 : ereport(ERROR,
1092 : : (errcode_for_file_access(),
1093 : : errmsg("could not create file \"%s\": %m", path)));
1094 : :
1095 : : /*
1096 : : * Truncate all data that's not guaranteed to have been safely fsynced (by
1097 : : * previous record or by the last checkpoint).
1098 : : */
3146 1099 : 0 : pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_TRUNCATE);
4257 1100 [ # # ]: 0 : if (ftruncate(fd, xlrec->offset) != 0)
1101 [ # # ]: 0 : ereport(ERROR,
1102 : : (errcode_for_file_access(),
1103 : : errmsg("could not truncate file \"%s\" to %u: %m",
1104 : : path, (uint32) xlrec->offset)));
3146 1105 : 0 : pgstat_report_wait_end();
1106 : :
4257 1107 : 0 : data = XLogRecGetData(r) + sizeof(*xlrec);
1108 : :
1109 : 0 : len = xlrec->num_mappings * sizeof(LogicalRewriteMappingData);
1110 : :
1111 : : /* write out tail end of mapping file (again) */
2641 michael@paquier.xyz 1112 : 0 : errno = 0;
3146 rhaas@postgresql.org 1113 : 0 : pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_WRITE);
1125 tmunro@postgresql.or 1114 [ # # ]: 0 : if (pg_pwrite(fd, data, len, xlrec->offset) != len)
1115 : : {
1116 : : /* if write didn't set errno, assume problem is no disk space */
2682 michael@paquier.xyz 1117 [ # # ]: 0 : if (errno == 0)
1118 : 0 : errno = ENOSPC;
4257 rhaas@postgresql.org 1119 [ # # ]: 0 : ereport(ERROR,
1120 : : (errcode_for_file_access(),
1121 : : errmsg("could not write to file \"%s\": %m", path)));
1122 : : }
3146 1123 : 0 : pgstat_report_wait_end();
1124 : :
1125 : : /*
1126 : : * Now fsync all previously written data. We could improve things and only
1127 : : * do this for the last write to a file, but the required bookkeeping
1128 : : * doesn't seem worth the trouble.
1129 : : */
1130 : 0 : pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_SYNC);
4257 1131 [ # # ]: 0 : if (pg_fsync(fd) != 0)
2535 tmunro@postgresql.or 1132 [ # # ]: 0 : ereport(data_sync_elevel(ERROR),
1133 : : (errcode_for_file_access(),
1134 : : errmsg("could not fsync file \"%s\": %m", path)));
3146 rhaas@postgresql.org 1135 : 0 : pgstat_report_wait_end();
1136 : :
2306 peter@eisentraut.org 1137 [ # # ]: 0 : if (CloseTransientFile(fd) != 0)
2425 michael@paquier.xyz 1138 [ # # ]: 0 : ereport(ERROR,
1139 : : (errcode_for_file_access(),
1140 : : errmsg("could not close file \"%s\": %m", path)));
4257 rhaas@postgresql.org 1141 : 0 : }
1142 : :
1143 : : /* ---
1144 : : * Perform a checkpoint for logical rewrite mappings
1145 : : *
1146 : : * This serves two tasks:
1147 : : * 1) Remove all mappings not needed anymore based on the logical restart LSN
1148 : : * 2) Flush all remaining mappings to disk, so that replay after a checkpoint
1149 : : * only has to deal with the parts of a mapping that have been written out
1150 : : * after the checkpoint started.
1151 : : * ---
1152 : : */
1153 : : void
4257 rhaas@postgresql.org 1154 :CBC 1701 : CheckPointLogicalRewriteHeap(void)
1155 : : {
1156 : : XLogRecPtr cutoff;
1157 : : XLogRecPtr redo;
1158 : : DIR *mappings_dir;
1159 : : struct dirent *mapping_de;
1160 : : char path[MAXPGPATH + sizeof(PG_LOGICAL_MAPPINGS_DIR)];
1161 : :
1162 : : /*
 1163 : : * We start off with a minimum of the last redo pointer. No new decoding
1164 : : * slot will start before that, so that's a safe upper bound for removal.
1165 : : */
1166 : 1701 : redo = GetRedoRecPtr();
1167 : :
1168 : : /* now check for the restart ptrs from existing slots */
1169 : 1701 : cutoff = ReplicationSlotsComputeLogicalRestartLSN();
1170 : :
1171 : : /* don't start earlier than the restart lsn */
1172 [ + + + + ]: 1701 : if (cutoff != InvalidXLogRecPtr && redo < cutoff)
1173 : 1 : cutoff = redo;
1174 : :
424 michael@paquier.xyz 1175 : 1701 : mappings_dir = AllocateDir(PG_LOGICAL_MAPPINGS_DIR);
1176 [ + + ]: 5283 : while ((mapping_de = ReadDir(mappings_dir, PG_LOGICAL_MAPPINGS_DIR)) != NULL)
1177 : : {
1178 : : Oid dboid;
1179 : : Oid relid;
1180 : : XLogRecPtr lsn;
1181 : : TransactionId rewrite_xid;
1182 : : TransactionId create_xid;
1183 : : uint32 hi,
1184 : : lo;
1185 : : PGFileType de_type;
1186 : :
4257 rhaas@postgresql.org 1187 [ + + ]: 3582 : if (strcmp(mapping_de->d_name, ".") == 0 ||
1188 [ + + ]: 1881 : strcmp(mapping_de->d_name, "..") == 0)
1189 : 3402 : continue;
1190 : :
424 michael@paquier.xyz 1191 : 180 : snprintf(path, sizeof(path), "%s/%s", PG_LOGICAL_MAPPINGS_DIR, mapping_de->d_name);
1152 1192 : 180 : de_type = get_dirent_type(path, mapping_de, false, DEBUG1);
1193 : :
1194 [ + - - + ]: 180 : if (de_type != PGFILETYPE_ERROR && de_type != PGFILETYPE_REG)
4257 rhaas@postgresql.org 1195 :UBC 0 : continue;
1196 : :
1197 : : /* Skip over files that cannot be ours. */
4257 rhaas@postgresql.org 1198 [ - + ]:CBC 180 : if (strncmp(mapping_de->d_name, "map-", 4) != 0)
4257 rhaas@postgresql.org 1199 :UBC 0 : continue;
1200 : :
4257 rhaas@postgresql.org 1201 [ - + ]:CBC 180 : if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
1202 : : &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6)
4193 bruce@momjian.us 1203 [ # # ]:UBC 0 : elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);
1204 : :
4257 rhaas@postgresql.org 1205 :CBC 180 : lsn = ((uint64) hi) << 32 | lo;
1206 : :
1207 [ + + - + ]: 180 : if (lsn < cutoff || cutoff == InvalidXLogRecPtr)
1208 : : {
1209 [ - + ]: 90 : elog(DEBUG1, "removing logical rewrite file \"%s\"", path);
1210 [ - + ]: 90 : if (unlink(path) < 0)
4257 rhaas@postgresql.org 1211 [ # # ]:UBC 0 : ereport(ERROR,
1212 : : (errcode_for_file_access(),
1213 : : errmsg("could not remove file \"%s\": %m", path)));
1214 : : }
1215 : : else
1216 : : {
1217 : : /* on some operating systems fsyncing a file requires O_RDWR */
2211 michael@paquier.xyz 1218 :CBC 90 : int fd = OpenTransientFile(path, O_RDWR | PG_BINARY);
1219 : :
1220 : : /*
1221 : : * The file cannot vanish due to concurrency since this function
1222 : : * is the only one removing logical mappings and only one
1223 : : * checkpoint can be in progress at a time.
1224 : : */
4257 rhaas@postgresql.org 1225 [ - + ]: 90 : if (fd < 0)
4257 rhaas@postgresql.org 1226 [ # # ]:UBC 0 : ereport(ERROR,
1227 : : (errcode_for_file_access(),
1228 : : errmsg("could not open file \"%s\": %m", path)));
1229 : :
1230 : : /*
1231 : : * We could try to avoid fsyncing files that either haven't
1232 : : * changed or have only been created since the checkpoint's start,
1233 : : * but it's currently not deemed worth the effort.
1234 : : */
3146 rhaas@postgresql.org 1235 :CBC 90 : pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_CHECKPOINT_SYNC);
1236 [ - + ]: 90 : if (pg_fsync(fd) != 0)
2535 tmunro@postgresql.or 1237 [ # # ]:UBC 0 : ereport(data_sync_elevel(ERROR),
1238 : : (errcode_for_file_access(),
1239 : : errmsg("could not fsync file \"%s\": %m", path)));
3146 rhaas@postgresql.org 1240 :CBC 90 : pgstat_report_wait_end();
1241 : :
2306 peter@eisentraut.org 1242 [ - + ]: 90 : if (CloseTransientFile(fd) != 0)
2425 michael@paquier.xyz 1243 [ # # ]:UBC 0 : ereport(ERROR,
1244 : : (errcode_for_file_access(),
1245 : : errmsg("could not close file \"%s\": %m", path)));
1246 : : }
1247 : : }
4257 rhaas@postgresql.org 1248 :CBC 1701 : FreeDir(mappings_dir);
1249 : :
1250 : : /* persist directory entries to disk */
424 michael@paquier.xyz 1251 : 1701 : fsync_fname(PG_LOGICAL_MAPPINGS_DIR, true);
4257 rhaas@postgresql.org 1252 : 1701 : }
|