Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : * slotsync.c
3 : : * Functionality for synchronizing slots to a standby server from the
4 : : * primary server.
5 : : *
6 : : * Copyright (c) 2024-2026, PostgreSQL Global Development Group
7 : : *
8 : : * IDENTIFICATION
9 : : * src/backend/replication/logical/slotsync.c
10 : : *
11 : : * This file contains the code for slot synchronization on a physical standby
12 : : * to fetch logical failover slots information from the primary server, create
13 : : * the slots on the standby and synchronize them periodically.
14 : : *
15 : : * Slot synchronization can be performed either automatically by enabling slot
16 : : * sync worker or manually by calling SQL function pg_sync_replication_slots().
17 : : *
18 : : * If the WAL corresponding to the remote's restart_lsn is not available on the
19 : : * physical standby or the remote's catalog_xmin precedes the oldest xid for
20 : : * which it is guaranteed that rows wouldn't have been removed, then we cannot
21 : : * create the local standby slot because that would mean moving the local slot
22 : : * backward and decoding won't be possible via such a slot. In this case, the
23 : : * slot will be marked as RS_TEMPORARY. Once the primary server catches up,
24 : : * the slot will be marked as RS_PERSISTENT (which means sync-ready) after
25 : : * which slot sync worker can perform the sync periodically or user can call
26 : : * pg_sync_replication_slots() periodically to perform the syncs.
27 : : *
28 : : * If synchronized slots fail to build a consistent snapshot from the
29 : : * restart_lsn before reaching confirmed_flush_lsn, they would become
30 : : * unreliable after promotion due to potential data loss from changes
31 : : * before reaching a consistent point. This can happen because the slots can
32 : : * be synced at some random time and we may not reach the consistent point
33 : : * at the same WAL location as the primary. So, we mark such slots as
34 : : * RS_TEMPORARY. Once the decoding from corresponding LSNs can reach a
35 : : * consistent point, they will be marked as RS_PERSISTENT.
36 : : *
37 : : * If the WAL prior to the remote slot's confirmed_flush_lsn has not been
38 : : * flushed on the standby, the slot is marked as RS_TEMPORARY. Once the standby
39 : : * catches up and flushes that WAL, the slot will be marked as RS_PERSISTENT.
40 : : *
41 : : * The slot sync worker waits for some time before the next synchronization,
42 : : * with the duration varying based on whether any slots were updated during
43 : : * the last cycle. Refer to the comments above wait_for_slot_activity() for
44 : : * more details.
45 : : *
46 : : * If the SQL function pg_sync_replication_slots() is used to sync the slots,
47 : : * and if the slots are not ready to be synced and are marked as RS_TEMPORARY
48 : : * because of any of the reasons mentioned above, then the SQL function also
49 : : * waits and retries until the slots are marked as RS_PERSISTENT (which means
50 : : * sync-ready). Refer to the comments in SyncReplicationSlots() for more
51 : : * details.
52 : : *
53 : : * Any standby synchronized slots will be dropped if they no longer need
54 : : * to be synchronized. See comment atop drop_local_obsolete_slots() for more
55 : : * details.
56 : : *---------------------------------------------------------------------------
57 : : */
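
/*
 * A minimal, hypothetical standby configuration under which the slot sync
 * worker can run; these are the GUCs validated by ValidateSlotSyncParams()
 * and CheckAndGetDbnameFromConninfo() below, with example values only:
 *
 *   sync_replication_slots = on
 *   hot_standby_feedback = on
 *   primary_slot_name = 'phys_slot'                      -- hypothetical name
 *   primary_conninfo = 'host=primary user=repl dbname=postgres'
 *
 * Alternatively, a one-off synchronization can be requested on the standby
 * with: SELECT pg_sync_replication_slots();
 */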
58 : :
59 : : #include "postgres.h"
60 : :
61 : : #include <time.h>
62 : :
63 : : #include "access/xlog_internal.h"
64 : : #include "access/xlogrecovery.h"
65 : : #include "catalog/pg_database.h"
66 : : #include "libpq/pqsignal.h"
67 : : #include "pgstat.h"
68 : : #include "postmaster/interrupt.h"
69 : : #include "replication/logical.h"
70 : : #include "replication/slotsync.h"
71 : : #include "replication/snapbuild.h"
72 : : #include "storage/ipc.h"
73 : : #include "storage/lmgr.h"
74 : : #include "storage/proc.h"
75 : : #include "storage/procarray.h"
76 : : #include "storage/subsystems.h"
77 : : #include "tcop/tcopprot.h"
78 : : #include "utils/builtins.h"
79 : : #include "utils/memutils.h"
80 : : #include "utils/pg_lsn.h"
81 : : #include "utils/ps_status.h"
82 : : #include "utils/timeout.h"
83 : : #include "utils/wait_event.h"
84 : :
85 : : /*
86 : : * Struct for sharing information to control slot synchronization.
87 : : *
88 : : * The 'pid' is either the slot sync worker's pid or the pid of the backend
89 : : * running the SQL function pg_sync_replication_slots(). On promotion, the startup
90 : : * process sets 'stopSignaled' and uses this 'pid' to signal the synchronizing
91 : : * process with PROCSIG_SLOTSYNC_MESSAGE and also to wake it up so that the
92 : : * process can immediately stop its synchronizing work.
93 : : * Setting 'stopSignaled', on the other hand, handles the race
94 : : * condition where the postmaster has not yet noticed the promotion and thus may
95 : : * end up restarting the slot sync worker. If 'stopSignaled' is set, the worker
96 : : * will exit in such a case. The SQL function pg_sync_replication_slots() will
97 : : * also error out if this flag is set. Note that we don't need to reset this
98 : : * variable as after promotion the slot sync worker won't be restarted because
99 : : * the pmState changes to PM_RUN from PM_HOT_STANDBY and we don't support
100 : : * demoting primary without restarting the server.
101 : : * See LaunchMissingBackgroundProcesses.
102 : : *
103 : : * The 'syncing' flag is needed to prevent concurrent slot syncs, which could
104 : : * otherwise overwrite a slot.
105 : : *
106 : : * The 'last_start_time' is needed by postmaster to start the slot sync worker
107 : : * once per SLOTSYNC_RESTART_INTERVAL_SEC. In cases where an immediate restart
108 : : * is expected (e.g., slot sync GUCs change), slot sync worker will reset
109 : : * last_start_time before exiting, so that postmaster can start the worker
110 : : * without waiting for SLOTSYNC_RESTART_INTERVAL_SEC.
111 : : */
112 : : typedef struct SlotSyncCtxStruct
113 : : {
114 : : pid_t pid;
115 : : bool stopSignaled;
116 : : bool syncing;
117 : : time_t last_start_time;
118 : : slock_t mutex;
119 : : } SlotSyncCtxStruct;
120 : :
121 : : static SlotSyncCtxStruct *SlotSyncCtx = NULL;
122 : :
123 : : static void SlotSyncShmemRequest(void *arg);
124 : : static void SlotSyncShmemInit(void *arg);
125 : :
126 : : const ShmemCallbacks SlotSyncShmemCallbacks = {
127 : : .request_fn = SlotSyncShmemRequest,
128 : : .init_fn = SlotSyncShmemInit,
129 : : };
130 : :
131 : : /* GUC variable */
132 : : bool sync_replication_slots = false;
133 : :
134 : : /*
135 : : * The sleep time (ms) between slot-sync cycles varies dynamically
136 : : * (within a MIN/MAX range) according to slot activity. See
137 : : * wait_for_slot_activity() for details.
138 : : */
139 : : #define MIN_SLOTSYNC_WORKER_NAPTIME_MS 200
140 : : #define MAX_SLOTSYNC_WORKER_NAPTIME_MS 30000 /* 30s */
141 : :
142 : : static long sleep_ms = MIN_SLOTSYNC_WORKER_NAPTIME_MS;
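
/*
 * Illustrative sketch of the adjustment performed between cycles, assuming
 * the nap time resets to the minimum whenever the previous cycle updated at
 * least one slot and otherwise doubles up to the maximum (see
 * wait_for_slot_activity()):
 *
 *   sleep_ms = some_slot_updated ? MIN_SLOTSYNC_WORKER_NAPTIME_MS
 *       : Min(sleep_ms * 2, MAX_SLOTSYNC_WORKER_NAPTIME_MS);
 */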
143 : :
144 : : /* The restart interval for slot sync work used by postmaster */
145 : : #define SLOTSYNC_RESTART_INTERVAL_SEC 10
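
/*
 * Illustrative sketch, assuming the postmaster allows a new worker start only
 * once this interval has elapsed since 'last_start_time':
 *
 *   now - SlotSyncCtx->last_start_time >= SLOTSYNC_RESTART_INTERVAL_SEC
 *
 * which is why a worker wanting an immediate restart resets last_start_time
 * to 0 before exiting (see slotsync_reread_config()).
 */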
146 : :
147 : : /*
148 : : * Flag to tell if we are syncing replication slots. Unlike the 'syncing' flag
149 : : * in SlotSyncCtxStruct, this flag is true only if the current process is
150 : : * performing slot synchronization.
151 : : */
152 : : static bool syncing_slots = false;
153 : :
154 : : /*
155 : : * Interrupt flag set when PROCSIG_SLOTSYNC_MESSAGE is received, asking the
156 : : * slotsync worker or pg_sync_replication_slots() to stop because
157 : : * standby promotion has been triggered.
158 : : */
159 : : volatile sig_atomic_t SlotSyncShutdownPending = false;
160 : :
161 : : /*
162 : : * Structure to hold information fetched from the primary server about a logical
163 : : * replication slot.
164 : : */
165 : : typedef struct RemoteSlot
166 : : {
167 : : char *name;
168 : : char *plugin;
169 : : char *database;
170 : : bool two_phase;
171 : : bool failover;
172 : : XLogRecPtr restart_lsn;
173 : : XLogRecPtr confirmed_lsn;
174 : : XLogRecPtr two_phase_at;
175 : : TransactionId catalog_xmin;
176 : :
177 : : /* RS_INVAL_NONE if valid, or the reason of invalidation */
178 : : ReplicationSlotInvalidationCause invalidated;
179 : : } RemoteSlot;
180 : :
181 : : static void slotsync_failure_callback(int code, Datum arg);
182 : : static void update_synced_slots_inactive_since(void);
183 : :
184 : : /*
185 : : * Update slot sync skip stats. This function requires the caller to acquire
186 : : * the slot.
187 : : */
188 : : static void
158 akapila@postgresql.o 189 :GNC 48 : update_slotsync_skip_stats(SlotSyncSkipReason skip_reason)
190 : : {
191 : : ReplicationSlot *slot;
192 : :
193 [ - + ]: 48 : Assert(MyReplicationSlot);
194 : :
195 : 48 : slot = MyReplicationSlot;
196 : :
197 : : /*
198 : : * Update the slot sync related stats in pg_stat_replication_slots when a
199 : : * slot sync is skipped
200 : : */
201 [ + + ]: 48 : if (skip_reason != SS_SKIP_NONE)
202 : 7 : pgstat_report_replslotsync(slot);
203 : :
204 : : /* Update the slot sync skip reason */
205 [ + + ]: 48 : if (slot->slotsync_skip_reason != skip_reason)
206 : : {
207 : 4 : SpinLockAcquire(&slot->mutex);
208 : 4 : slot->slotsync_skip_reason = skip_reason;
209 : 4 : SpinLockRelease(&slot->mutex);
210 : : }
211 : 48 : }
212 : :
213 : : /*
214 : : * If necessary, update the local synced slot's metadata based on the data
215 : : * from the remote slot.
216 : : *
217 : : * If no update was needed (the data of the remote slot is the same as the
218 : : * local slot) return false, otherwise true.
219 : : */
220 : : static bool
82 221 : 48 : update_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid)
222 : : {
811 akapila@postgresql.o 223 :CBC 48 : ReplicationSlot *slot = MyReplicationSlot;
753 224 : 48 : bool updated_xmin_or_lsn = false;
225 : 48 : bool updated_config = false;
158 akapila@postgresql.o 226 :GNC 48 : SlotSyncSkipReason skip_reason = SS_SKIP_NONE;
82 227 : 48 : XLogRecPtr latestFlushPtr = GetStandbyFlushRecPtr(NULL);
228 : :
811 akapila@postgresql.o 229 [ - + ]:CBC 48 : Assert(slot->data.invalidated == RS_INVAL_NONE);
230 : :
231 : : /*
232 : : * Make sure that concerned WAL is received and flushed before syncing
233 : : * slot to target lsn received from the primary server.
234 : : */
82 akapila@postgresql.o 235 [ - + ]:GNC 48 : if (remote_slot->confirmed_lsn > latestFlushPtr)
236 : : {
82 akapila@postgresql.o 237 :UNC 0 : update_slotsync_skip_stats(SS_SKIP_WAL_NOT_FLUSHED);
238 : :
239 : : /*
240 : : * Can get here only if GUC 'synchronized_standby_slots' on the
241 : : * primary server was not configured correctly.
242 : : */
60 243 [ # # ]: 0 : ereport(LOG,
244 : : errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
245 : : errmsg("skipping slot synchronization because the received slot sync"
246 : : " LSN %X/%08X for slot \"%s\" is ahead of the standby position %X/%08X",
247 : : LSN_FORMAT_ARGS(remote_slot->confirmed_lsn),
248 : : remote_slot->name,
249 : : LSN_FORMAT_ARGS(latestFlushPtr)));
250 : :
82 251 : 0 : return false;
252 : : }
253 : :
254 : : /*
255 : : * Don't overwrite if we already have a newer catalog_xmin and
256 : : * restart_lsn.
257 : : */
753 akapila@postgresql.o 258 [ + + + + ]:CBC 91 : if (remote_slot->restart_lsn < slot->data.restart_lsn ||
259 : 43 : TransactionIdPrecedes(remote_slot->catalog_xmin,
260 : : slot->data.catalog_xmin))
261 : : {
262 : : /* Update slot sync skip stats */
158 akapila@postgresql.o 263 :GNC 7 : update_slotsync_skip_stats(SS_SKIP_WAL_OR_ROWS_REMOVED);
264 : :
265 : : /*
266 : : * This can happen in following situations:
267 : : *
268 : : * If the slot is temporary, it means either the initial WAL location
269 : : * reserved for the local slot is ahead of the remote slot's
270 : : * restart_lsn or the initial xmin_horizon computed for the local slot
271 : : * is ahead of the remote slot.
272 : : *
273 : : * If the slot is persistent, both restart_lsn and catalog_xmin of the
274 : : * synced slot could still be ahead of the remote slot. Since we use
275 : : * slot advance functionality to keep snapbuild/slot updated, it is
276 : : * possible that the restart_lsn and catalog_xmin are advanced to a
277 : : * later position than it has on the primary. This can happen when
278 : : * slot advancing machinery finds running xacts record after reaching
279 : : * the consistent state at a later point than the primary where it
280 : : * serializes the snapshot and updates the restart_lsn.
281 : : *
282 : : * We LOG the message if the slot is temporary as it can help the user
283 : : * to understand why the slot is not sync-ready. In the case of a
284 : : * persistent slot, it would be a more common case and won't directly
285 : : * impact the users, so we used DEBUG1 level to log the message.
286 : : */
753 akapila@postgresql.o 287 [ + - + - ]:CBC 7 : ereport(slot->data.persistency == RS_TEMPORARY ? LOG : DEBUG1,
288 : : errmsg("could not synchronize replication slot \"%s\"",
289 : : remote_slot->name),
290 : : errdetail("Synchronization could lead to data loss, because the remote slot needs WAL at LSN %X/%08X and catalog xmin %u, but the standby has LSN %X/%08X and catalog xmin %u.",
291 : : LSN_FORMAT_ARGS(remote_slot->restart_lsn),
292 : : remote_slot->catalog_xmin,
293 : : LSN_FORMAT_ARGS(slot->data.restart_lsn),
294 : : slot->data.catalog_xmin));
295 : :
296 : : /*
297 : : * Skip updating the configuration. This is required to avoid syncing
298 : : * two_phase_at without syncing confirmed_lsn. Otherwise, the prepared
299 : : * transaction between old confirmed_lsn and two_phase_at will
300 : : * unexpectedly get decoded and sent to the downstream after
301 : : * promotion. See comments in ReorderBufferFinishPrepared.
302 : : */
371 303 : 7 : return false;
304 : : }
305 : :
306 : : /*
307 : : * Attempt to sync LSNs and xmins only if remote slot is ahead of local
308 : : * slot.
309 : : */
310 [ + + ]: 41 : if (remote_slot->confirmed_lsn > slot->data.confirmed_flush ||
311 [ + + - + ]: 57 : remote_slot->restart_lsn > slot->data.restart_lsn ||
312 : 28 : TransactionIdFollows(remote_slot->catalog_xmin,
313 : : slot->data.catalog_xmin))
314 : : {
315 : : /*
316 : : * We can't directly copy the remote slot's LSN or xmin unless there
317 : : * exists a consistent snapshot at that point. Otherwise, after
318 : : * promotion, the slots may not reach a consistent point before the
319 : : * confirmed_flush_lsn which can lead to a data loss. To avoid data
320 : : * loss, we let slot machinery advance the slot which ensures that
321 : : * snapbuilder/slot statuses are updated properly.
322 : : */
762 323 [ + + ]: 13 : if (SnapBuildSnapshotExists(remote_slot->restart_lsn))
324 : : {
325 : : /*
326 : : * Update the slot info directly if there is a serialized snapshot
327 : : * at the restart_lsn, as the slot can quickly reach consistency
328 : : * at restart_lsn by restoring the snapshot.
329 : : */
330 [ - + ]: 3 : SpinLockAcquire(&slot->mutex);
331 : 3 : slot->data.restart_lsn = remote_slot->restart_lsn;
332 : 3 : slot->data.confirmed_flush = remote_slot->confirmed_lsn;
333 : 3 : slot->data.catalog_xmin = remote_slot->catalog_xmin;
334 : 3 : SpinLockRelease(&slot->mutex);
335 : :
22 336 : 3 : updated_xmin_or_lsn = true;
337 : : }
338 : : else
339 : : {
340 : : bool found_consistent_snapshot;
341 : 10 : XLogRecPtr old_confirmed_lsn = slot->data.confirmed_flush;
342 : 10 : XLogRecPtr old_restart_lsn = slot->data.restart_lsn;
343 : 10 : XLogRecPtr old_catalog_xmin = slot->data.catalog_xmin;
344 : :
762 345 : 10 : LogicalSlotAdvanceAndCheckSnapState(remote_slot->confirmed_lsn,
346 : : &found_consistent_snapshot);
347 : :
348 : : /* Sanity check */
753 349 [ - + ]: 10 : if (slot->data.confirmed_flush != remote_slot->confirmed_lsn)
753 akapila@postgresql.o 350 [ # # ]:UBC 0 : ereport(ERROR,
351 : : errmsg_internal("synchronized confirmed_flush for slot \"%s\" differs from remote slot",
352 : : remote_slot->name),
353 : : errdetail_internal("Remote slot has LSN %X/%08X but local slot has LSN %X/%08X.",
354 : : LSN_FORMAT_ARGS(remote_slot->confirmed_lsn),
355 : : LSN_FORMAT_ARGS(slot->data.confirmed_flush)));
356 : :
357 : : /*
358 : : * If we can't reach a consistent snapshot, the slot won't be
359 : : * persisted. See update_and_persist_local_synced_slot().
360 : : */
82 akapila@postgresql.o 361 [ - + ]:GNC 10 : if (!found_consistent_snapshot)
362 : : {
82 akapila@postgresql.o 363 [ # # ]:UNC 0 : Assert(MyReplicationSlot->data.persistency == RS_TEMPORARY);
364 : :
365 [ # # ]: 0 : ereport(LOG,
366 : : errmsg("could not synchronize replication slot \"%s\"",
367 : : remote_slot->name),
368 : : errdetail("Synchronization could lead to data loss, because the standby could not build a consistent snapshot to decode WALs at LSN %X/%08X.",
369 : : LSN_FORMAT_ARGS(slot->data.restart_lsn)));
370 : :
158 371 : 0 : skip_reason = SS_SKIP_NO_CONSISTENT_SNAPSHOT;
372 : : }
373 : :
374 : : /*
375 : : * It is possible that the slot's xmin or LSNs are not updated
376 : : * when the synced slot has reached a consistent snapshot state or
377 : : * cannot build one at all.
378 : : */
22 akapila@postgresql.o 379 :CBC 10 : updated_xmin_or_lsn = (old_confirmed_lsn != slot->data.confirmed_flush ||
380 [ - + - - ]: 10 : old_restart_lsn != slot->data.restart_lsn ||
22 akapila@postgresql.o 381 [ # # ]:UBC 0 : old_catalog_xmin != slot->data.catalog_xmin);
382 : : }
383 : : }
384 : :
385 : : /* Update slot sync skip stats */
158 akapila@postgresql.o 386 :GNC 41 : update_slotsync_skip_stats(skip_reason);
387 : :
762 akapila@postgresql.o 388 [ + - ]:CBC 41 : if (remote_dbid != slot->data.database ||
389 [ + + ]: 41 : remote_slot->two_phase != slot->data.two_phase ||
390 [ + - ]: 40 : remote_slot->failover != slot->data.failover ||
397 391 [ + - ]: 40 : strcmp(remote_slot->plugin, NameStr(slot->data.plugin)) != 0 ||
392 [ - + ]: 40 : remote_slot->two_phase_at != slot->data.two_phase_at)
393 : : {
394 : : NameData plugin_name;
395 : :
396 : : /* Avoid expensive operations while holding a spinlock. */
762 397 : 1 : namestrcpy(&plugin_name, remote_slot->plugin);
398 : :
399 [ - + ]: 1 : SpinLockAcquire(&slot->mutex);
400 : 1 : slot->data.plugin = plugin_name;
401 : 1 : slot->data.database = remote_dbid;
402 : 1 : slot->data.two_phase = remote_slot->two_phase;
397 403 : 1 : slot->data.two_phase_at = remote_slot->two_phase_at;
762 404 : 1 : slot->data.failover = remote_slot->failover;
405 : 1 : SpinLockRelease(&slot->mutex);
406 : :
753 407 : 1 : updated_config = true;
408 : :
409 : : /*
410 : : * Ensure that there is no risk of sending prepared transactions
411 : : * unexpectedly after the promotion.
412 : : */
371 413 [ - + ]: 1 : Assert(slot->data.two_phase_at <= slot->data.confirmed_flush);
414 : : }
415 : :
416 : : /*
417 : : * We have to write the changed xmin to disk *before* we change the
418 : : * in-memory value, otherwise after a crash we wouldn't know that some
419 : : * catalog tuples might have been removed already.
420 : : */
753 421 [ + + + + ]: 41 : if (updated_config || updated_xmin_or_lsn)
422 : : {
423 : 14 : ReplicationSlotMarkDirty();
424 : 14 : ReplicationSlotSave();
425 : : }
426 : :
427 : : /*
428 : : * Now the new xmin is safely on disk, we can let the global value
429 : : * advance. We do not take ProcArrayLock or similar since we only advance
430 : : * xmin here and there's not much harm done by a concurrent computation
431 : : * missing that.
432 : : */
433 [ + + ]: 41 : if (updated_xmin_or_lsn)
434 : : {
435 [ - + ]: 13 : SpinLockAcquire(&slot->mutex);
436 : 13 : slot->effective_catalog_xmin = remote_slot->catalog_xmin;
437 : 13 : SpinLockRelease(&slot->mutex);
438 : :
439 : 13 : ReplicationSlotsComputeRequiredXmin(false);
440 : 13 : ReplicationSlotsComputeRequiredLSN();
441 : : }
442 : :
443 [ + + + + ]: 41 : return updated_config || updated_xmin_or_lsn;
444 : : }
445 : :
446 : : /*
447 : : * Get the list of local logical slots that are synchronized from the
448 : : * primary server.
449 : : */
450 : : static List *
811 451 : 29 : get_local_synced_slots(void)
452 : : {
453 : 29 : List *local_slots = NIL;
454 : :
455 : 29 : LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
456 : :
28 alvherre@kurilemu.de 457 [ + + ]:GNC 464 : for (int i = 0; i < max_replication_slots + max_repack_replication_slots; i++)
458 : : {
811 akapila@postgresql.o 459 :CBC 435 : ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
460 : :
461 : : /* Check if it is a synchronized slot */
462 [ + + + + ]: 435 : if (s->in_use && s->data.synced)
463 : : {
464 [ - + ]: 42 : Assert(SlotIsLogical(s));
465 : 42 : local_slots = lappend(local_slots, s);
466 : : }
467 : : }
468 : :
469 : 29 : LWLockRelease(ReplicationSlotControlLock);
470 : :
471 : 29 : return local_slots;
472 : : }
473 : :
474 : : /*
475 : : * Helper function to check if local_slot is required to be retained.
476 : : *
477 : : * Return false either if local_slot does not exist in the remote_slots list
478 : : * or is invalidated while the corresponding remote slot is still valid,
479 : : * otherwise true.
480 : : */
481 : : static bool
482 : 42 : local_sync_slot_required(ReplicationSlot *local_slot, List *remote_slots)
483 : : {
484 : 42 : bool remote_exists = false;
485 : 42 : bool locally_invalidated = false;
486 : :
487 [ + - + + : 103 : foreach_ptr(RemoteSlot, remote_slot, remote_slots)
+ + ]
488 : : {
489 [ + + ]: 59 : if (strcmp(remote_slot->name, NameStr(local_slot->data.name)) == 0)
490 : : {
491 : 40 : remote_exists = true;
492 : :
493 : : /*
494 : : * If remote slot is not invalidated but local slot is marked as
495 : : * invalidated, then set locally_invalidated flag.
496 : : */
497 [ - + ]: 40 : SpinLockAcquire(&local_slot->mutex);
498 : 40 : locally_invalidated =
499 [ + - ]: 80 : (remote_slot->invalidated == RS_INVAL_NONE) &&
500 [ + + ]: 40 : (local_slot->data.invalidated != RS_INVAL_NONE);
501 : 40 : SpinLockRelease(&local_slot->mutex);
502 : :
503 : 40 : break;
504 : : }
505 : : }
506 : :
507 [ + + + + ]: 42 : return (remote_exists && !locally_invalidated);
508 : : }
509 : :
510 : : /*
511 : : * Drop local obsolete slots.
512 : : *
513 : : * Drop the local slots that no longer need to be synced i.e. these either do
514 : : * not exist on the primary or are no longer enabled for failover.
515 : : *
516 : : * Additionally, drop any slots that are valid on the primary but got
517 : : * invalidated on the standby. This situation may occur due to the following
518 : : * reasons:
519 : : * - The 'max_slot_wal_keep_size' on the standby is insufficient to retain WAL
520 : : * records from the restart_lsn of the slot.
521 : : * - 'primary_slot_name' is temporarily reset to null and the physical slot is
522 : : * removed.
523 : : * These dropped slots will get recreated in next sync-cycle and it is okay to
524 : : * drop and recreate such slots as long as these are not consumable on the
525 : : * standby (which is the case currently).
526 : : *
527 : : * Note: Change of 'wal_level' on the primary server to a level lower than
528 : : * logical may also result in slot invalidation and removal on the standby.
529 : : * This is because such 'wal_level' change is only possible if the logical
530 : : * slots are removed on the primary server, so it's expected to see the
531 : : * slots being invalidated and removed on the standby too (and re-created
532 : : * if they are re-created on the primary server).
533 : : */
534 : : static void
535 : 29 : drop_local_obsolete_slots(List *remote_slot_list)
536 : : {
537 : 29 : List *local_slots = get_local_synced_slots();
538 : :
539 [ + + + + : 100 : foreach_ptr(ReplicationSlot, local_slot, local_slots)
+ + ]
540 : : {
541 : : /* Drop the local slot if it is not required to be retained. */
542 [ + + ]: 42 : if (!local_sync_slot_required(local_slot, remote_slot_list))
543 : : {
544 : : bool synced_slot;
545 : :
546 : : /*
547 : : * Use shared lock to prevent a conflict with
548 : : * ReplicationSlotsDropDBSlots(), trying to drop the same slot
549 : : * during a drop-database operation.
550 : : */
551 : 3 : LockSharedObject(DatabaseRelationId, local_slot->data.database,
552 : : 0, AccessShareLock);
553 : :
554 : : /*
555 : : * In the small window between getting the slot to drop and
556 : : * locking the database, there is a possibility of a parallel
557 : : * database drop by the startup process and the creation of a new
558 : : * slot by the user. This new user-created slot may end up using
559 : : * the same shared memory as that of 'local_slot'. Thus check if
560 : : * local_slot is still the synced one before performing actual
561 : : * drop.
562 : : */
563 [ - + ]: 3 : SpinLockAcquire(&local_slot->mutex);
564 [ + - + - ]: 3 : synced_slot = local_slot->in_use && local_slot->data.synced;
565 : 3 : SpinLockRelease(&local_slot->mutex);
566 : :
567 [ + - ]: 3 : if (synced_slot)
568 : : {
459 569 : 3 : ReplicationSlotAcquire(NameStr(local_slot->data.name), true, false);
811 570 : 3 : ReplicationSlotDropAcquired();
571 : : }
572 : :
573 : 3 : UnlockSharedObject(DatabaseRelationId, local_slot->data.database,
574 : : 0, AccessShareLock);
575 : :
576 [ + - ]: 3 : ereport(LOG,
577 : : errmsg("dropped replication slot \"%s\" of database with OID %u",
578 : : NameStr(local_slot->data.name),
579 : : local_slot->data.database));
580 : : }
581 : : }
582 : 29 : }
583 : :
584 : : /*
585 : : * Reserve WAL for the currently active local slot using the specified WAL
586 : : * location (restart_lsn).
587 : : *
588 : : * If the given WAL location has been removed or is at risk of removal,
589 : : * reserve WAL using the oldest segment that is non-removable.
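 *
 * For illustration (hypothetical LSNs): if the remote restart_lsn is
 * 0/3000000 but the local redo pointer has already advanced to 0/5000000 and
 * no local slot holds an older position, the new slot would start at
 * 0/5000000 rather than at the remote value.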
590 : : */
591 : : static void
592 : 9 : reserve_wal_for_local_slot(XLogRecPtr restart_lsn)
593 : : {
594 : : XLogRecPtr slot_min_lsn;
595 : : XLogRecPtr min_safe_lsn;
596 : : XLogSegNo segno;
597 : 9 : ReplicationSlot *slot = MyReplicationSlot;
598 : :
599 [ - + ]: 9 : Assert(slot != NULL);
180 alvherre@kurilemu.de 600 [ - + ]: 9 : Assert(!XLogRecPtrIsValid(slot->data.restart_lsn));
601 : :
602 : : /*
603 : : * Acquire an exclusive lock to prevent the checkpoint process from
604 : : * concurrently calculating the minimum slot LSN (see
605 : : * CheckPointReplicationSlots), ensuring that if WAL reservation occurs
606 : : * first, the checkpoint must wait for the restart_lsn update before
607 : : * calculating the minimum LSN.
608 : : *
609 : : * Note: Unlike ReplicationSlotReserveWal(), this lock does not protect a
610 : : * newly synced slot from being invalidated if a concurrent checkpoint has
611 : : * invoked CheckPointReplicationSlots() before the WAL reservation here.
612 : : * This can happen because the initial restart_lsn received from the
613 : : * remote server can precede the redo pointer. Therefore, when selecting
614 : : * the initial restart_lsn, we consider using the redo pointer or the
615 : : * minimum slot LSN (if those values are greater than the remote
616 : : * restart_lsn) instead of relying solely on the remote value.
617 : : */
98 akapila@postgresql.o 618 : 9 : LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);
619 : :
620 : : /*
621 : : * Determine the minimum non-removable LSN by comparing the redo pointer
622 : : * with the minimum slot LSN.
623 : : *
624 : : * The minimum slot LSN is considered because the redo pointer advances at
625 : : * every checkpoint, even when replication slots are present on the
626 : : * standby. In such scenarios, the redo pointer can exceed the remote
627 : : * restart_lsn, while WALs preceding the remote restart_lsn remain
628 : : * protected by a local replication slot.
629 : : */
630 : 9 : min_safe_lsn = GetRedoRecPtr();
631 : 9 : slot_min_lsn = XLogGetReplicationSlotMinimumLSN();
632 : :
633 [ + + - + ]: 9 : if (XLogRecPtrIsValid(slot_min_lsn) && min_safe_lsn > slot_min_lsn)
98 akapila@postgresql.o 634 :UBC 0 : min_safe_lsn = slot_min_lsn;
635 : :
636 : : /*
637 : : * If the minimum safe LSN is greater than the given restart_lsn, use it
638 : : * as the initial restart_lsn for the newly synced slot. Otherwise, use
639 : : * the given remote restart_lsn.
640 : : */
98 akapila@postgresql.o 641 [ - + ]:CBC 9 : SpinLockAcquire(&slot->mutex);
642 : 9 : slot->data.restart_lsn = Max(restart_lsn, min_safe_lsn);
643 : 9 : SpinLockRelease(&slot->mutex);
644 : :
645 : 9 : ReplicationSlotsComputeRequiredLSN();
646 : :
647 : 9 : XLByteToSeg(slot->data.restart_lsn, segno, wal_segment_size);
648 [ - + ]: 9 : if (XLogGetLastRemovedSegno() >= segno)
98 akapila@postgresql.o 649 [ # # ]:UBC 0 : elog(ERROR, "WAL required by replication slot %s has been removed concurrently",
650 : : NameStr(slot->data.name));
651 : :
98 akapila@postgresql.o 652 :CBC 9 : LWLockRelease(ReplicationSlotAllocationLock);
811 653 : 9 : }
654 : :
655 : : /*
656 : : * If the remote restart_lsn and catalog_xmin have caught up with the
657 : : * local ones, then update the LSNs and persist the local synced slot for
658 : : * future synchronization; otherwise, do nothing.
659 : : *
660 : : * *slot_persistence_pending is set to true if any of the slots fail to
661 : : * persist.
662 : : *
663 : : * Return true if the slot is marked as RS_PERSISTENT (sync-ready), otherwise
664 : : * false.
665 : : */
666 : : static bool
141 akapila@postgresql.o 667 :GNC 14 : update_and_persist_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid,
668 : : bool *slot_persistence_pending)
669 : : {
811 akapila@postgresql.o 670 :CBC 14 : ReplicationSlot *slot = MyReplicationSlot;
671 : :
672 : : /* Slotsync skip stats are handled in function update_local_synced_slot() */
82 akapila@postgresql.o 673 :GNC 14 : (void) update_local_synced_slot(remote_slot, remote_dbid);
674 : :
675 : : /*
676 : : * Check if the slot cannot be synchronized. Refer to the comment atop the
677 : : * file for details on this check.
678 : : */
679 [ + + ]: 14 : if (slot->slotsync_skip_reason != SS_SKIP_NONE)
680 : : {
681 : : /*
682 : : * We reach this point when the remote slot didn't catch up to locally
683 : : * reserved position, or it cannot reach the consistent point from the
684 : : * restart_lsn, or the WAL prior to the remote confirmed flush LSN has
685 : : * not been received and flushed.
686 : : *
687 : : * We do not drop the slot because the restart_lsn and confirmed_lsn
688 : : * can be ahead of the current location when recreating the slot in
689 : : * the next cycle. It may take more time to create such a slot or
690 : : * reach the consistent point. Therefore, we keep this slot and
691 : : * attempt the synchronization in the next cycle.
692 : : *
693 : : * We also update the slot_persistence_pending parameter, so the SQL
694 : : * function can retry.
695 : : */
141 696 [ + + ]: 7 : if (slot_persistence_pending)
697 : 2 : *slot_persistence_pending = true;
698 : :
803 akapila@postgresql.o 699 :GBC 7 : return false;
700 : : }
701 : :
811 akapila@postgresql.o 702 :CBC 7 : ReplicationSlotPersist();
703 : :
704 [ + - ]: 7 : ereport(LOG,
705 : : errmsg("newly created replication slot \"%s\" is sync-ready now",
706 : : remote_slot->name));
707 : :
803 708 : 7 : return true;
709 : : }
710 : :
711 : : /*
712 : : * Synchronize a single slot to the given position.
713 : : *
714 : : * This creates a new slot if there is no existing one and updates the
715 : : * metadata of the slot as per the data received from the primary server.
716 : : *
717 : : * The slot is created as a temporary slot and stays in the same state until the
718 : : * remote_slot catches up with locally reserved position and local slot is
719 : : * updated. The slot is then persisted and is considered as sync-ready for
720 : : * periodic syncs.
721 : : *
722 : : * *slot_persistence_pending is set to true if any of the slots fail to
723 : : * persist.
724 : : *
725 : : * Returns TRUE if the local slot is updated.
726 : : */
727 : : static bool
141 akapila@postgresql.o 728 :GNC 48 : synchronize_one_slot(RemoteSlot *remote_slot, Oid remote_dbid,
729 : : bool *slot_persistence_pending)
730 : : {
731 : : ReplicationSlot *slot;
803 akapila@postgresql.o 732 :CBC 48 : bool slot_updated = false;
733 : :
734 : : /* Search for the named slot */
811 735 [ + + ]: 48 : if ((slot = SearchNamedReplicationSlot(remote_slot->name, true)))
736 : : {
737 : : bool synced;
738 : :
739 [ - + ]: 39 : SpinLockAcquire(&slot->mutex);
740 : 39 : synced = slot->data.synced;
741 : 39 : SpinLockRelease(&slot->mutex);
742 : :
743 : : /* User-created slot with the same name exists, raise ERROR. */
744 [ - + ]: 39 : if (!synced)
811 akapila@postgresql.o 745 [ # # ]:UBC 0 : ereport(ERROR,
746 : : errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
747 : : errmsg("exiting from slot synchronization because same"
748 : : " name slot \"%s\" already exists on the standby",
749 : : remote_slot->name));
750 : :
751 : : /*
752 : : * The slot has been synchronized before.
753 : : *
754 : : * It is important to acquire the slot here before checking
755 : : * invalidation. If we don't acquire the slot first, there could be a
756 : : * race condition that the local slot could be invalidated just after
757 : : * checking the 'invalidated' flag here and we could end up
758 : : * overwriting 'invalidated' flag to remote_slot's value. See
759 : : * InvalidatePossiblyObsoleteSlot() where it invalidates slot directly
760 : : * if the slot is not acquired by other processes.
761 : : *
762 : : * XXX: If it ever turns out that slot acquire/release is costly for
763 : : * cases when none of the slot properties is changed then we can do a
764 : : * pre-check to ensure that at least one of the slot properties is
765 : : * changed before acquiring the slot.
766 : : */
459 akapila@postgresql.o 767 :CBC 39 : ReplicationSlotAcquire(remote_slot->name, true, false);
768 : :
811 769 [ - + ]: 39 : Assert(slot == MyReplicationSlot);
770 : :
771 : : /*
774 : : * Copy the invalidation cause from remote only if the local slot is not
775 : : * invalidated locally; we don't want to overwrite an existing one.
774 : : */
775 [ + - ]: 39 : if (slot->data.invalidated == RS_INVAL_NONE &&
776 [ - + ]: 39 : remote_slot->invalidated != RS_INVAL_NONE)
777 : : {
811 akapila@postgresql.o 778 [ # # ]:UBC 0 : SpinLockAcquire(&slot->mutex);
779 : 0 : slot->data.invalidated = remote_slot->invalidated;
780 : 0 : SpinLockRelease(&slot->mutex);
781 : :
782 : : /* Make sure the invalidated state persists across server restart */
783 : 0 : ReplicationSlotMarkDirty();
784 : 0 : ReplicationSlotSave();
785 : :
803 786 : 0 : slot_updated = true;
787 : : }
788 : :
789 : : /* Skip the sync of an invalidated slot */
811 akapila@postgresql.o 790 [ - + ]:CBC 39 : if (slot->data.invalidated != RS_INVAL_NONE)
791 : : {
158 akapila@postgresql.o 792 :UNC 0 : update_slotsync_skip_stats(SS_SKIP_INVALID);
793 : :
811 akapila@postgresql.o 794 :UBC 0 : ReplicationSlotRelease();
803 795 : 0 : return slot_updated;
796 : : }
797 : :
798 : : /* Slot not ready yet, let's attempt to make it sync-ready now. */
811 akapila@postgresql.o 799 [ + + ]:CBC 39 : if (slot->data.persistency == RS_TEMPORARY)
800 : : {
803 801 : 5 : slot_updated = update_and_persist_local_synced_slot(remote_slot,
802 : : remote_dbid,
803 : : slot_persistence_pending);
804 : : }
805 : :
806 : : /* Slot ready for sync, so sync it. */
807 : : else
808 : : {
809 : : /*
810 : : * Sanity check: As long as the invalidations are handled
811 : : * appropriately as above, this should never happen.
812 : : *
813 : : * We don't need to check restart_lsn here. See the comments in
814 : : * update_local_synced_slot() for details.
815 : : */
753 816 [ - + ]: 34 : if (remote_slot->confirmed_lsn < slot->data.confirmed_flush)
753 akapila@postgresql.o 817 [ # # ]:UBC 0 : ereport(ERROR,
818 : : errmsg_internal("cannot synchronize local slot \"%s\"",
819 : : remote_slot->name),
820 : : errdetail_internal("Local slot's start streaming location LSN(%X/%08X) is ahead of remote slot's LSN(%X/%08X).",
821 : : LSN_FORMAT_ARGS(slot->data.confirmed_flush),
822 : : LSN_FORMAT_ARGS(remote_slot->confirmed_lsn)));
823 : :
82 akapila@postgresql.o 824 :GNC 34 : slot_updated = update_local_synced_slot(remote_slot, remote_dbid);
825 : : }
826 : : }
827 : : /* Otherwise create the slot first. */
828 : : else
829 : : {
830 : : NameData plugin_name;
811 akapila@postgresql.o 831 :CBC 9 : TransactionId xmin_horizon = InvalidTransactionId;
832 : :
833 : : /* Skip creating the local slot if remote_slot is invalidated already */
834 [ - + ]: 9 : if (remote_slot->invalidated != RS_INVAL_NONE)
803 akapila@postgresql.o 835 :UBC 0 : return false;
836 : :
837 : : /*
838 : : * We create temporary slots instead of ephemeral slots here because
839 : : * we want the slots to survive after releasing them. This is done to
840 : : * avoid dropping and re-creating the slots in each synchronization
841 : : * cycle if the restart_lsn or catalog_xmin of the remote slot has not
842 : : * caught up.
843 : : */
811 akapila@postgresql.o 844 :CBC 9 : ReplicationSlotCreate(remote_slot->name, true, RS_TEMPORARY,
845 : 9 : remote_slot->two_phase,
846 : : false,
847 : 9 : remote_slot->failover,
848 : : true);
849 : :
850 : : /* For shorter lines. */
851 : 9 : slot = MyReplicationSlot;
852 : :
853 : : /* Avoid expensive operations while holding a spinlock. */
854 : 9 : namestrcpy(&plugin_name, remote_slot->plugin);
855 : :
856 [ - + ]: 9 : SpinLockAcquire(&slot->mutex);
857 : 9 : slot->data.database = remote_dbid;
858 : 9 : slot->data.plugin = plugin_name;
859 : 9 : SpinLockRelease(&slot->mutex);
860 : :
861 : 9 : reserve_wal_for_local_slot(remote_slot->restart_lsn);
862 : :
126 msawada@postgresql.o 863 : 9 : LWLockAcquire(ReplicationSlotControlLock, LW_EXCLUSIVE);
811 akapila@postgresql.o 864 : 9 : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
865 : 9 : xmin_horizon = GetOldestSafeDecodingTransactionId(true);
866 [ - + ]: 9 : SpinLockAcquire(&slot->mutex);
867 : 9 : slot->effective_catalog_xmin = xmin_horizon;
868 : 9 : slot->data.catalog_xmin = xmin_horizon;
869 : 9 : SpinLockRelease(&slot->mutex);
870 : 9 : ReplicationSlotsComputeRequiredXmin(true);
871 : 9 : LWLockRelease(ProcArrayLock);
126 msawada@postgresql.o 872 : 9 : LWLockRelease(ReplicationSlotControlLock);
873 : :
141 akapila@postgresql.o 874 :GNC 9 : update_and_persist_local_synced_slot(remote_slot, remote_dbid,
875 : : slot_persistence_pending);
876 : :
803 akapila@postgresql.o 877 :CBC 9 : slot_updated = true;
878 : : }
879 : :
811 880 : 48 : ReplicationSlotRelease();
881 : :
803 882 : 48 : return slot_updated;
883 : : }
884 : :
885 : : /*
886 : : * Fetch remote slots.
887 : : *
888 : : * If slot_names is NIL, fetches all failover logical slots from the
889 : : * primary server, otherwise fetches only the ones with names in slot_names.
890 : : *
891 : : * Returns a list of remote slot information structures, or NIL if none
892 : : * are found.
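 *
 * For illustration, with hypothetical slot_names = ("s1", "s2") the
 * constructed query would be roughly:
 *
 *   SELECT slot_name, plugin, confirmed_flush_lsn, restart_lsn, catalog_xmin,
 *          two_phase, two_phase_at, failover, database, invalidation_reason
 *     FROM pg_catalog.pg_replication_slots
 *     WHERE failover and NOT temporary AND slot_name IN ('s1', 's2')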
893 : : */
894 : : static List *
141 akapila@postgresql.o 895 :GNC 31 : fetch_remote_slots(WalReceiverConn *wrconn, List *slot_names)
896 : : {
897 : : #define SLOTSYNC_COLUMN_COUNT 10
811 akapila@postgresql.o 898 :CBC 31 : Oid slotRow[SLOTSYNC_COLUMN_COUNT] = {TEXTOID, TEXTOID, LSNOID,
899 : : LSNOID, XIDOID, BOOLOID, LSNOID, BOOLOID, TEXTOID, TEXTOID};
900 : :
901 : : WalRcvExecResult *res;
902 : : TupleTableSlot *tupslot;
903 : 31 : List *remote_slot_list = NIL;
904 : : StringInfoData query;
905 : :
141 akapila@postgresql.o 906 :GNC 31 : initStringInfo(&query);
907 : 31 : appendStringInfoString(&query,
908 : : "SELECT slot_name, plugin, confirmed_flush_lsn,"
909 : : " restart_lsn, catalog_xmin, two_phase,"
910 : : " two_phase_at, failover,"
911 : : " database, invalidation_reason"
912 : : " FROM pg_catalog.pg_replication_slots"
913 : : " WHERE failover and NOT temporary");
914 : :
915 [ + + ]: 31 : if (slot_names != NIL)
916 : : {
917 : 2 : bool first_slot = true;
918 : :
919 : : /*
920 : : * Construct the query to fetch only the specified slots
921 : : */
922 : 2 : appendStringInfoString(&query, " AND slot_name IN (");
923 : :
924 [ + - + + : 6 : foreach_ptr(char, slot_name, slot_names)
+ + ]
925 : : {
926 [ - + ]: 2 : if (!first_slot)
141 akapila@postgresql.o 927 :UNC 0 : appendStringInfoString(&query, ", ");
928 : :
22 drowley@postgresql.o 929 :GNC 2 : appendStringInfoString(&query, quote_literal_cstr(slot_name));
141 akapila@postgresql.o 930 : 2 : first_slot = false;
931 : : }
932 : 2 : appendStringInfoChar(&query, ')');
933 : : }
934 : :
935 : : /* Execute the query */
936 : 31 : res = walrcv_exec(wrconn, query.data, SLOTSYNC_COLUMN_COUNT, slotRow);
937 : 31 : pfree(query.data);
811 akapila@postgresql.o 938 [ + + ]:CBC 31 : if (res->status != WALRCV_OK_TUPLES)
939 [ + - ]: 2 : ereport(ERROR,
940 : : errmsg("could not fetch failover logical slots info from the primary server: %s",
941 : : res->err));
942 : :
943 : 29 : tupslot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple);
944 [ + + ]: 77 : while (tuplestore_gettupleslot(res->tuplestore, true, false, tupslot))
945 : : {
946 : : bool isnull;
146 michael@paquier.xyz 947 :GNC 48 : RemoteSlot *remote_slot = palloc0_object(RemoteSlot);
948 : : Datum d;
811 akapila@postgresql.o 949 :CBC 48 : int col = 0;
950 : :
951 : 48 : remote_slot->name = TextDatumGetCString(slot_getattr(tupslot, ++col,
952 : : &isnull));
953 [ - + ]: 48 : Assert(!isnull);
954 : :
955 : 48 : remote_slot->plugin = TextDatumGetCString(slot_getattr(tupslot, ++col,
956 : : &isnull));
957 [ - + ]: 48 : Assert(!isnull);
958 : :
959 : : /*
960 : : * It is possible to get null values for LSN and Xmin if slot is
961 : : * invalidated on the primary server, so handle accordingly.
962 : : */
963 : 48 : d = slot_getattr(tupslot, ++col, &isnull);
964 [ + - ]: 48 : remote_slot->confirmed_lsn = isnull ? InvalidXLogRecPtr :
965 : 48 : DatumGetLSN(d);
966 : :
967 : 48 : d = slot_getattr(tupslot, ++col, &isnull);
968 [ + - ]: 48 : remote_slot->restart_lsn = isnull ? InvalidXLogRecPtr : DatumGetLSN(d);
969 : :
970 : 48 : d = slot_getattr(tupslot, ++col, &isnull);
971 [ + - ]: 48 : remote_slot->catalog_xmin = isnull ? InvalidTransactionId :
972 : 48 : DatumGetTransactionId(d);
973 : :
974 : 48 : remote_slot->two_phase = DatumGetBool(slot_getattr(tupslot, ++col,
975 : : &isnull));
976 [ - + ]: 48 : Assert(!isnull);
977 : :
397 978 : 48 : d = slot_getattr(tupslot, ++col, &isnull);
979 [ + + ]: 48 : remote_slot->two_phase_at = isnull ? InvalidXLogRecPtr : DatumGetLSN(d);
980 : :
811 981 : 48 : remote_slot->failover = DatumGetBool(slot_getattr(tupslot, ++col,
982 : : &isnull));
983 [ - + ]: 48 : Assert(!isnull);
984 : :
985 : 48 : remote_slot->database = TextDatumGetCString(slot_getattr(tupslot,
986 : : ++col, &isnull));
987 [ - + ]: 48 : Assert(!isnull);
988 : :
989 : 48 : d = slot_getattr(tupslot, ++col, &isnull);
990 [ - + ]: 48 : remote_slot->invalidated = isnull ? RS_INVAL_NONE :
811 akapila@postgresql.o 991 :UBC 0 : GetSlotInvalidationCause(TextDatumGetCString(d));
992 : :
993 : : /* Sanity check */
811 akapila@postgresql.o 994 [ - + ]:CBC 48 : Assert(col == SLOTSYNC_COLUMN_COUNT);
995 : :
996 : : /*
997 : : * If restart_lsn, confirmed_lsn or catalog_xmin is invalid but the
998 : : * slot is valid, that means we have fetched the remote_slot in its
999 : : * RS_EPHEMERAL state. In such a case, don't sync it; we can always
1000 : : * sync it in the next sync cycle when the remote_slot is persisted
1001 : : * and has valid lsn(s) and xmin values.
1002 : : *
1003 : : * XXX: In future, if we plan to expose 'slot->data.persistency' in
1004 : : * pg_replication_slots view, then we can avoid fetching RS_EPHEMERAL
1005 : : * slots in the first place.
1006 : : */
180 alvherre@kurilemu.de 1007 [ + - ]:GNC 48 : if ((!XLogRecPtrIsValid(remote_slot->restart_lsn) ||
1008 [ + - ]: 48 : !XLogRecPtrIsValid(remote_slot->confirmed_lsn) ||
811 akapila@postgresql.o 1009 [ - + ]:CBC 48 : !TransactionIdIsValid(remote_slot->catalog_xmin)) &&
811 akapila@postgresql.o 1010 [ # # ]:UBC 0 : remote_slot->invalidated == RS_INVAL_NONE)
1011 : 0 : pfree(remote_slot);
1012 : : else
1013 : : /* Create list of remote slots */
811 akapila@postgresql.o 1014 :CBC 48 : remote_slot_list = lappend(remote_slot_list, remote_slot);
1015 : :
1016 : 48 : ExecClearTuple(tupslot);
1017 : : }
1018 : :
141 akapila@postgresql.o 1019 :GNC 29 : walrcv_clear_result(res);
1020 : :
1021 : 29 : return remote_slot_list;
1022 : : }
1023 : :
1024 : : /*
1025 : : * Synchronize slots.
1026 : : *
1027 : : * This function takes a list of remote slots and synchronizes them locally. It
1028 : : * creates the slots if not present on the standby and updates existing ones.
1029 : : *
1030 : : * If slot_persistence_pending is not NULL, it will be set to true if one or
1031 : : * more slots could not be persisted. This allows callers such as
1032 : : * SyncReplicationSlots() to retry those slots.
1033 : : *
1034 : : * Returns TRUE if any of the slots gets updated in this sync-cycle.
1035 : : */
1036 : : static bool
1037 : 29 : synchronize_slots(WalReceiverConn *wrconn, List *remote_slot_list,
1038 : : bool *slot_persistence_pending)
1039 : : {
1040 : 29 : bool some_slot_updated = false;
1041 : :
1042 : : /* Drop local slots that no longer need to be synced. */
811 akapila@postgresql.o 1043 :CBC 29 : drop_local_obsolete_slots(remote_slot_list);
1044 : :
1045 : : /* Now sync the slots locally */
1046 [ + - + + : 106 : foreach_ptr(RemoteSlot, remote_slot, remote_slot_list)
+ + ]
1047 : : {
1048 : 48 : Oid remote_dbid = get_database_oid(remote_slot->database, false);
1049 : :
1050 : : /*
1051 : : * Use shared lock to prevent a conflict with
1052 : : * ReplicationSlotsDropDBSlots(), trying to drop the same slot during
1053 : : * a drop-database operation.
1054 : : */
1055 : 48 : LockSharedObject(DatabaseRelationId, remote_dbid, 0, AccessShareLock);
1056 : :
141 akapila@postgresql.o 1057 :GNC 48 : some_slot_updated |= synchronize_one_slot(remote_slot, remote_dbid,
1058 : : slot_persistence_pending);
1059 : :
811 akapila@postgresql.o 1060 :CBC 48 : UnlockSharedObject(DatabaseRelationId, remote_dbid, 0, AccessShareLock);
1061 : : }
1062 : :
803 1063 : 29 : return some_slot_updated;
1064 : : }
1065 : :
1066 : : /*
1067 : : * Checks the remote server info.
1068 : : *
1069 : : * We ensure that the 'primary_slot_name' exists on the remote server and the
1070 : : * remote server is not a standby node.
1071 : : */
1072 : : static void
811 1073 : 15 : validate_remote_info(WalReceiverConn *wrconn)
1074 : : {
1075 : : #define PRIMARY_INFO_OUTPUT_COL_COUNT 2
1076 : : WalRcvExecResult *res;
1077 : 15 : Oid slotRow[PRIMARY_INFO_OUTPUT_COL_COUNT] = {BOOLOID, BOOLOID};
1078 : : StringInfoData cmd;
1079 : : bool isnull;
1080 : : TupleTableSlot *tupslot;
1081 : : bool remote_in_recovery;
1082 : : bool primary_slot_valid;
803 1083 : 15 : bool started_tx = false;
1084 : :
811 1085 : 15 : initStringInfo(&cmd);
1086 : 15 : appendStringInfo(&cmd,
1087 : : "SELECT pg_is_in_recovery(), count(*) = 1"
1088 : : " FROM pg_catalog.pg_replication_slots"
1089 : : " WHERE slot_type='physical' AND slot_name=%s",
1090 : : quote_literal_cstr(PrimarySlotName));
1091 : :
1092 : : /* The syscache access in walrcv_exec() needs a transaction env. */
803 1093 [ + + ]: 15 : if (!IsTransactionState())
1094 : : {
1095 : 6 : StartTransactionCommand();
1096 : 6 : started_tx = true;
1097 : : }
1098 : :
811 1099 : 15 : res = walrcv_exec(wrconn, cmd.data, PRIMARY_INFO_OUTPUT_COL_COUNT, slotRow);
1100 : 15 : pfree(cmd.data);
1101 : :
1102 [ - + ]: 15 : if (res->status != WALRCV_OK_TUPLES)
811 akapila@postgresql.o 1103 [ # # ]:UBC 0 : ereport(ERROR,
1104 : : errmsg("could not fetch primary slot name \"%s\" info from the primary server: %s",
1105 : : PrimarySlotName, res->err),
1106 : : errhint("Check if \"primary_slot_name\" is configured correctly."));
1107 : :
811 akapila@postgresql.o 1108 :CBC 15 : tupslot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple);
1109 [ - + ]: 15 : if (!tuplestore_gettupleslot(res->tuplestore, true, false, tupslot))
811 akapila@postgresql.o 1110 [ # # ]:UBC 0 : elog(ERROR,
1111 : : "failed to fetch tuple for the primary server slot specified by \"primary_slot_name\"");
1112 : :
811 akapila@postgresql.o 1113 :CBC 15 : remote_in_recovery = DatumGetBool(slot_getattr(tupslot, 1, &isnull));
1114 [ - + ]: 15 : Assert(!isnull);
1115 : :
1116 : : /*
1117 : : * Slot sync is currently not supported on a cascading standby. This is
1118 : : * because if we allow it, the primary server needs to wait for all the
1119 : : * cascading standbys, otherwise, logical subscribers can still be ahead
1120 : : * of one of the cascading standbys which we plan to promote. Thus, to
1121 : : * avoid this additional complexity, we restrict it for the time being.
1122 : : */
1123 [ + + ]: 15 : if (remote_in_recovery)
1124 [ + - ]: 1 : ereport(ERROR,
1125 : : errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1126 : : errmsg("cannot synchronize replication slots from a standby server"));
1127 : :
1128 : 14 : primary_slot_valid = DatumGetBool(slot_getattr(tupslot, 2, &isnull));
1129 [ - + ]: 14 : Assert(!isnull);
1130 : :
1131 [ - + ]: 14 : if (!primary_slot_valid)
811 akapila@postgresql.o 1132 [ # # ]:UBC 0 : ereport(ERROR,
1133 : : errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1134 : : /* translator: second %s is a GUC variable name */
1135 : : errmsg("replication slot \"%s\" specified by \"%s\" does not exist on primary server",
1136 : : PrimarySlotName, "primary_slot_name"));
1137 : :
811 akapila@postgresql.o 1138 :CBC 14 : ExecClearTuple(tupslot);
1139 : 14 : walrcv_clear_result(res);
1140 : :
803 1141 [ + + ]: 14 : if (started_tx)
1142 : 6 : CommitTransactionCommand();
811 1143 : 14 : }
1144 : :
1145 : : /*
1146 : : * Checks if dbname is specified in 'primary_conninfo'.
1147 : : *
1148 : : * Error out if it is not specified; otherwise return it.
1149 : : */
1150 : : char *
803 1151 : 16 : CheckAndGetDbnameFromConninfo(void)
1152 : : {
1153 : : char *dbname;
1154 : :
1155 : : /*
1156 : : * The slot synchronization needs a database connection for walrcv_exec to
1157 : : * work.
1158 : : */
1159 : 16 : dbname = walrcv_get_dbname_from_conninfo(PrimaryConnInfo);
1160 [ + + ]: 16 : if (dbname == NULL)
1161 [ + - ]: 1 : ereport(ERROR,
1162 : : errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1163 : :
1164 : : /*
1165 : : * translator: first %s is a connection option; second %s is a GUC
1166 : : * variable name
1167 : : */
1168 : : errmsg("replication slot synchronization requires \"%s\" to be specified in \"%s\"",
1169 : : "dbname", "primary_conninfo"));
1170 : 15 : return dbname;
1171 : : }
1172 : :
1173 : : /*
1174 : : * Return true if all necessary GUCs for slot synchronization are set
1175 : : * appropriately, otherwise, return false.
1176 : : */
1177 : : bool
1178 : 23 : ValidateSlotSyncParams(int elevel)
1179 : : {
1180 : : /*
1181 : : * Logical slot sync/creation requires logical decoding to be enabled.
1182 : : */
133 msawada@postgresql.o 1183 [ - + ]:GNC 23 : if (!IsLogicalDecodingEnabled())
1184 : : {
274 fujii@postgresql.org 1185 [ # # ]:UBC 0 : ereport(elevel,
1186 : : errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1187 : : errmsg("replication slot synchronization requires \"effective_wal_level\" >= \"logical\" on the primary"),
1188 : : errhint("To enable logical decoding on primary, set \"wal_level\" >= \"logical\" or create at least one logical slot when \"wal_level\" = \"replica\"."));
1189 : :
1190 : 0 : return false;
1191 : : }
1192 : :
1193 : : /*
1194 : : * A physical replication slot (primary_slot_name) is required on the
1195 : : * primary to ensure that the rows needed by the standby are not removed
1196 : : * after restarting, so that the synchronized slot on the standby will not
1197 : : * be invalidated.
1198 : : */
811 akapila@postgresql.o 1199 [ + - - + ]:CBC 23 : if (PrimarySlotName == NULL || *PrimarySlotName == '\0')
1200 : : {
803 akapila@postgresql.o 1201 [ # # ]:UBC 0 : ereport(elevel,
1202 : : errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1203 : : /* translator: %s is a GUC variable name */
1204 : : errmsg("replication slot synchronization requires \"%s\" to be set", "primary_slot_name"));
1205 : 0 : return false;
1206 : : }
1207 : :
1208 : : /*
1209 : : * hot_standby_feedback must be enabled to cooperate with the physical
1210 : : * replication slot, which allows informing the primary about the xmin and
1211 : : * catalog_xmin values on the standby.
1212 : : */
811 akapila@postgresql.o 1213 [ + + ]:CBC 23 : if (!hot_standby_feedback)
1214 : : {
803 1215 [ + - ]: 1 : ereport(elevel,
1216 : : errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1217 : : /* translator: %s is a GUC variable name */
1218 : : errmsg("replication slot synchronization requires \"%s\" to be enabled",
1219 : : "hot_standby_feedback"));
1220 : 1 : return false;
1221 : : }
1222 : :
1223 : : /*
1224 : : * The primary_conninfo is required to make connection to primary for
1225 : : * getting slots information.
1226 : : */
811 1227 [ + - - + ]: 22 : if (PrimaryConnInfo == NULL || *PrimaryConnInfo == '\0')
1228 : : {
803 akapila@postgresql.o 1229 [ # # ]:UBC 0 : ereport(elevel,
1230 : : errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1231 : : /* translator: %s is a GUC variable name */
1232 : : errmsg("replication slot synchronization requires \"%s\" to be set",
1233 : : "primary_conninfo"));
1234 : 0 : return false;
1235 : : }
1236 : :
803 akapila@postgresql.o 1237 :CBC 22 : return true;
1238 : : }
1239 : :
1240 : : /*
1241 : : * Re-read the config file for slot synchronization.
1242 : : *
1243 : : * If relevant GUCs have changed, exit when called from the slot sync worker,
1244 : : * or throw an error when called from the SQL function pg_sync_replication_slots().
1245 : : */
1246 : : static void
1247 : 1 : slotsync_reread_config(void)
1248 : : {
1249 : 1 : char *old_primary_conninfo = pstrdup(PrimaryConnInfo);
1250 : 1 : char *old_primary_slotname = pstrdup(PrimarySlotName);
1251 : 1 : bool old_sync_replication_slots = sync_replication_slots;
1252 : 1 : bool old_hot_standby_feedback = hot_standby_feedback;
1253 : : bool conninfo_changed;
1254 : : bool primary_slotname_changed;
145 1255 : 1 : bool is_slotsync_worker = AmLogicalSlotSyncWorkerProcess();
1256 : 1 : bool parameter_changed = false;
1257 : :
1258 [ + - ]: 1 : if (is_slotsync_worker)
1259 [ - + ]: 1 : Assert(sync_replication_slots);
1260 : :
803 1261 : 1 : ConfigReloadPending = false;
1262 : 1 : ProcessConfigFile(PGC_SIGHUP);
1263 : :
1264 : 1 : conninfo_changed = strcmp(old_primary_conninfo, PrimaryConnInfo) != 0;
1265 : 1 : primary_slotname_changed = strcmp(old_primary_slotname, PrimarySlotName) != 0;
1266 : 1 : pfree(old_primary_conninfo);
1267 : 1 : pfree(old_primary_slotname);
1268 : :
1269 [ - + ]: 1 : if (old_sync_replication_slots != sync_replication_slots)
1270 : : {
145 akapila@postgresql.o 1271 [ # # ]:UBC 0 : if (is_slotsync_worker)
1272 : : {
1273 [ # # ]: 0 : ereport(LOG,
1274 : : /* translator: %s is a GUC variable name */
1275 : : errmsg("replication slot synchronization worker will stop because \"%s\" is disabled",
1276 : : "sync_replication_slots"));
1277 : :
1278 : 0 : proc_exit(0);
1279 : : }
1280 : :
1281 : 0 : parameter_changed = true;
1282 : : }
1283 : : else
1284 : : {
145 akapila@postgresql.o 1285 [ + - + - ]:CBC 1 : if (conninfo_changed ||
1286 : 1 : primary_slotname_changed ||
1287 [ + - ]: 1 : (old_hot_standby_feedback != hot_standby_feedback))
1288 : : {
1289 : :
1290 [ + - ]: 1 : if (is_slotsync_worker)
1291 : : {
1292 [ + - ]: 1 : ereport(LOG,
1293 : : errmsg("replication slot synchronization worker will restart because of a parameter change"));
1294 : :
1295 : : /*
1296 : : * Reset the last-start time for this worker so that the
1297 : : * postmaster can restart it without waiting for
1298 : : * SLOTSYNC_RESTART_INTERVAL_SEC.
1299 : : */
1300 : 1 : SlotSyncCtx->last_start_time = 0;
1301 : :
1302 : 1 : proc_exit(0);
1303 : : }
1304 : :
145 akapila@postgresql.o 1305 :UBC 0 : parameter_changed = true;
1306 : : }
1307 : : }
1308 : :
1309 : : /*
1310 : : * If we have reached here with a parameter change, we must be running in
1311 : : * SQL function, emit error in such a case.
1312 : : */
1313 [ # # ]: 0 : if (parameter_changed)
1314 : : {
1315 [ # # ]: 0 : Assert(!is_slotsync_worker);
1316 [ # # ]: 0 : ereport(ERROR,
1317 : : errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1318 : : errmsg("replication slot synchronization will stop because of a parameter change"));
1319 : : }
1320 : :
803 1321 : 0 : }
1322 : :
1323 : : /*
1324 : : * Handle receipt of an interrupt indicating a slotsync shutdown message.
1325 : : *
1326 : : * This is called within the SIGUSR1 handler. All we do here is set a flag
1327 : : * that will cause the next CHECK_FOR_INTERRUPTS() to invoke
1328 : : * ProcessSlotSyncMessage().
1329 : : */
1330 : : void
27 fujii@postgresql.org 1331 :CBC 1 : HandleSlotSyncMessageInterrupt(void)
1332 : : {
1333 : 1 : InterruptPending = true;
1334 : 1 : SlotSyncShutdownPending = true;
1335 : : /* latch will be set by procsignal_sigusr1_handler */
1336 : 1 : }
1337 : :
1338 : : /*
1339 : : * Handle a PROCSIG_SLOTSYNC_MESSAGE signal, called from ProcessInterrupts().
1340 : : *
1341 : : * If the current process is the slotsync background worker, log a message
1342 : : * and exit cleanly. If it is a backend executing pg_sync_replication_slots(),
1343 : : * raise an error, unless the sync has already finished, in which case there
1344 : : * is no need to interrupt the caller.
1345 : : */
1346 : : void
1347 : 1 : ProcessSlotSyncMessage(void)
1348 : : {
1349 : 1 : SlotSyncShutdownPending = false;
1350 : :
1351 [ + - ]: 1 : if (AmLogicalSlotSyncWorkerProcess())
1352 : : {
1353 [ + - ]: 1 : ereport(LOG,
1354 : : errmsg("replication slot synchronization worker will stop because promotion is triggered"));
1355 : 1 : proc_exit(0);
1356 : : }
1357 : : else
1358 : : {
1359 : : /*
1360 : : * If sync has already completed, there is no need to interrupt the
1361 : : * caller with an error.
1362 : : */
27 fujii@postgresql.org 1363 [ # # ]:UBC 0 : if (!IsSyncingReplicationSlots())
1364 : 0 : return;
1365 : :
1366 [ # # ]: 0 : ereport(ERROR,
1367 : : errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1368 : : errmsg("replication slot synchronization will stop because promotion is triggered"));
1369 : : }
1370 : : }
1371 : :
1372 : : /*
1373 : : * Connection cleanup function for slotsync worker.
1374 : : *
1375 : : * Called on slotsync worker exit.
1376 : : */
1377 : : static void
740 akapila@postgresql.o 1378 :CBC 6 : slotsync_worker_disconnect(int code, Datum arg)
1379 : : {
1380 : 6 : WalReceiverConn *wrconn = (WalReceiverConn *) DatumGetPointer(arg);
1381 : :
1382 : 6 : walrcv_disconnect(wrconn);
1383 : 6 : }
1384 : :
1385 : : /*
1386 : : * Cleanup function for slotsync worker.
1387 : : *
1388 : : * Called on slotsync worker exit.
1389 : : */
1390 : : static void
803 1391 : 6 : slotsync_worker_onexit(int code, Datum arg)
1392 : : {
1393 : : /*
1394 : : * We need to do slots cleanup here just like WalSndErrorCleanup() does.
1395 : : *
1396 : : * The startup process during promotion invokes ShutDownSlotSync() which
1397 : : * waits for slot sync to finish and it does that by checking the
1398 : : * 'syncing' flag. Thus the slot sync worker must be done with slots'
1399 : : * release and cleanup to avoid any dangling temporary slots or active
1400 : : * slots before it marks itself as finished syncing.
1401 : : */
1402 : :
1403 : : /* Make sure active replication slots are released */
740 1404 [ - + ]: 6 : if (MyReplicationSlot != NULL)
740 akapila@postgresql.o 1405 :UBC 0 : ReplicationSlotRelease();
1406 : :
1407 : : /* Also cleanup the temporary slots. */
740 akapila@postgresql.o 1408 :CBC 6 : ReplicationSlotCleanup(false);
1409 : :
803 1410 [ - + ]: 6 : SpinLockAcquire(&SlotSyncCtx->mutex);
1411 : :
1412 : 6 : SlotSyncCtx->pid = InvalidPid;
1413 : :
1414 : : /*
1415 : : * If syncing_slots is true, it indicates that the process errored out
1416 : : * without resetting the flag. So, we need to clean up shared memory and
1417 : : * reset the flag here.
1418 : : */
740 1419 [ + - ]: 6 : if (syncing_slots)
1420 : : {
1421 : 6 : SlotSyncCtx->syncing = false;
1422 : 6 : syncing_slots = false;
1423 : : }
1424 : :
803 1425 : 6 : SpinLockRelease(&SlotSyncCtx->mutex);
1426 : 6 : }
1427 : :
1428 : : /*
1429                 : :  * Sleep for long enough that the slots on the primary are likely to have been
1430                 : :  * updated.
1431                 : :  *
1432                 : :  * If there is no slot activity, the wait time between sync cycles doubles
1433                 : :  * (up to a maximum of 30s). If there is some slot activity, the wait time
1434                 : :  * between sync cycles is reset to the minimum (200ms).
1435 : : */
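                 : : /*
                 : :  * A worked example (a sketch based only on the limits quoted above): with
                 : :  * no slot activity the sleep grows 200ms -> 400ms -> 800ms -> ... until it
                 : :  * is capped at 30s, while any updated slot resets the next sleep to 200ms.
                 : :  */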
1436 : : static void
1437 : 21 : wait_for_slot_activity(bool some_slot_updated)
1438 : : {
1439 : : int rc;
1440 : :
1441 [ + + ]: 21 : if (!some_slot_updated)
1442 : : {
1443 : : /*
1444 : : * No slots were updated, so double the sleep time, but not beyond the
1445 : : * maximum allowable value.
1446 : : */
796 1447 : 12 : sleep_ms = Min(sleep_ms * 2, MAX_SLOTSYNC_WORKER_NAPTIME_MS);
1448 : : }
1449 : : else
1450 : : {
1451 : : /*
1452 : : * Some slots were updated since the last sleep, so reset the sleep
1453 : : * time.
1454 : : */
1455 : 9 : sleep_ms = MIN_SLOTSYNC_WORKER_NAPTIME_MS;
1456 : : }
1457 : :
803 1458 : 21 : rc = WaitLatch(MyLatch,
1459 : : WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
1460 : : sleep_ms,
1461 : : WAIT_EVENT_REPLICATION_SLOTSYNC_MAIN);
1462 : :
1463 [ + + ]: 21 : if (rc & WL_LATCH_SET)
1464 : 3 : ResetLatch(MyLatch);
1465 : 21 : }
1466 : :
1467 : : /*
1468 : : * Emit an error if a concurrent sync call is in progress.
1469 : : * Otherwise, advertise that a sync is in progress.
1470 : : */
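                 : : /*
                 : :  * This is called with MyProcPid both by the slot sync worker (from
                 : :  * ReplSlotSyncWorkerMain()) and by a backend running
                 : :  * pg_sync_replication_slots() (from SyncReplicationSlots()).
                 : :  */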
1471 : : static void
145 1472 : 15 : check_and_set_sync_info(pid_t sync_process_pid)
1473 : : {
740 1474 [ - + ]: 15 : SpinLockAcquire(&SlotSyncCtx->mutex);
1475 : :
1476 : : /*
1477 : : * Exit immediately if promotion has been triggered. This guards against
1478 : : * a new worker (or a call to pg_sync_replication_slots()) that starts
1479 : : * after the old worker was stopped by ShutDownSlotSync().
1480 : : */
27 fujii@postgresql.org 1481 [ - + ]: 15 : if (SlotSyncCtx->stopSignaled)
1482 : : {
27 fujii@postgresql.org 1483 :UBC 0 : SpinLockRelease(&SlotSyncCtx->mutex);
1484 : :
1485 [ # # ]: 0 : if (AmLogicalSlotSyncWorkerProcess())
1486 : : {
1487 [ # # ]: 0 : ereport(DEBUG1,
1488 : : errmsg("replication slot synchronization worker will not start because promotion was triggered"));
1489 : :
1490 : 0 : proc_exit(0);
1491 : : }
1492 : : else
1493 : : {
1494 : : /*
1495 : : * For the backend executing SQL function
1496 : : * pg_sync_replication_slots().
1497 : : */
1498 [ # # ]: 0 : ereport(ERROR,
1499 : : errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1500 : : errmsg("replication slot synchronization will not start because promotion was triggered"));
1501 : : }
1502 : : }
1503 : :
740 akapila@postgresql.o 1504 [ - + ]:CBC 15 : if (SlotSyncCtx->syncing)
1505 : : {
740 akapila@postgresql.o 1506 :UBC 0 : SpinLockRelease(&SlotSyncCtx->mutex);
1507 [ # # ]: 0 : ereport(ERROR,
1508 : : errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1509 : : errmsg("cannot synchronize replication slots concurrently"));
1510 : : }
1511 : :
1512 : : /* The pid must not be already assigned in SlotSyncCtx */
145 akapila@postgresql.o 1513 [ - + ]:CBC 15 : Assert(SlotSyncCtx->pid == InvalidPid);
1514 : :
740 1515 : 15 : SlotSyncCtx->syncing = true;
1516 : :
1517 : : /*
1518 : : * Advertise the required PID so that the startup process can kill the
1519 : : * slot sync process on promotion.
1520 : : */
145 1521 : 15 : SlotSyncCtx->pid = sync_process_pid;
1522 : :
740 1523 : 15 : SpinLockRelease(&SlotSyncCtx->mutex);
1524 : :
1525 : 15 : syncing_slots = true;
1526 : 15 : }
1527 : :
1528 : : /*
1529 : : * Reset syncing flag.
1530 : : */
1531 : : static void
153 nathan@postgresql.or 1532 :GNC 9 : reset_syncing_flag(void)
1533 : : {
740 akapila@postgresql.o 1534 [ - + ]:CBC 9 : SpinLockAcquire(&SlotSyncCtx->mutex);
1535 : 9 : SlotSyncCtx->syncing = false;
145 1536 : 9 : SlotSyncCtx->pid = InvalidPid;
740 1537 : 9 : SpinLockRelease(&SlotSyncCtx->mutex);
1538 : :
1539 : 9 : syncing_slots = false;
236 peter@eisentraut.org 1540 : 9 : }
1541 : :
1542 : : /*
1543 : : * The main loop of our worker process.
1544 : : *
1545                 : :  * It connects to the primary server and periodically fetches logical failover
1546                 : :  * slot information in order to create and sync the slots.
1547 : : *
1548 : : * Note: If any changes are made here, check if the corresponding SQL
1549 : : * function logic in SyncReplicationSlots() also needs to be changed.
1550 : : */
1551 : : void
438 1552 : 6 : ReplSlotSyncWorkerMain(const void *startup_data, size_t startup_data_len)
1553 : : {
803 akapila@postgresql.o 1554 : 6 : WalReceiverConn *wrconn = NULL;
1555 : : char *dbname;
1556 : : char *err;
1557 : : sigjmp_buf local_sigjmp_buf;
1558 : : StringInfoData app_name;
1559 : :
778 heikki.linnakangas@i 1560 [ - + ]: 6 : Assert(startup_data_len == 0);
1561 : :
1562 : : /* Release postmaster's working memory context */
29 fujii@postgresql.org 1563 [ + - ]:GNC 6 : if (PostmasterContext)
1564 : : {
1565 : 6 : MemoryContextDelete(PostmasterContext);
1566 : 6 : PostmasterContext = NULL;
1567 : : }
1568 : :
803 akapila@postgresql.o 1569 :CBC 6 : init_ps_display(NULL);
1570 : :
672 heikki.linnakangas@i 1571 [ - + ]: 6 : Assert(GetProcessingMode() == InitProcessing);
1572 : :
1573 : : /*
1574 : : * Create a per-backend PGPROC struct in shared memory. We must do this
1575 : : * before we access any shared memory.
1576 : : */
803 akapila@postgresql.o 1577 : 6 : InitProcess();
1578 : :
1579 : : /*
1580 : : * Early initialization.
1581 : : */
1582 : 6 : BaseInit();
1583 : :
1584 [ - + ]: 6 : Assert(SlotSyncCtx != NULL);
1585 : :
1586 : : /*
1587 : : * If an exception is encountered, processing resumes here.
1588 : : *
1589 : : * We just need to clean up, report the error, and go away.
1590 : : *
1591 : : * If we do not have this handling here, then since this worker process
1592 : : * operates at the bottom of the exception stack, ERRORs turn into FATALs.
1593 : : * Therefore, we create our own exception handler to catch ERRORs.
1594 : : */
1595 [ + + ]: 6 : if (sigsetjmp(local_sigjmp_buf, 1) != 0)
1596 : : {
1597 : : /* since not using PG_TRY, must reset error stack by hand */
1598 : 2 : error_context_stack = NULL;
1599 : :
1600 : : /* Prevents interrupts while cleaning up */
1601 : 2 : HOLD_INTERRUPTS();
1602 : :
1603 : : /* Report the error to the server log */
1604 : 2 : EmitErrorReport();
1605 : :
1606 : : /*
1607 : : * We can now go away. Note that because we called InitProcess, a
1608 : : * callback was registered to do ProcKill, which will clean up
1609 : : * necessary state.
1610 : : */
1611 : 2 : proc_exit(0);
1612 : : }
1613 : :
1614 : : /* We can now handle ereport(ERROR) */
1615 : 6 : PG_exception_stack = &local_sigjmp_buf;
1616 : :
1617 : : /* Setup signal handling */
740 1618 : 6 : pqsignal(SIGHUP, SignalHandlerForConfigReload);
147 1619 : 6 : pqsignal(SIGINT, StatementCancelHandler);
740 1620 : 6 : pqsignal(SIGTERM, die);
1621 : 6 : pqsignal(SIGFPE, FloatExceptionHandler);
1622 : 6 : pqsignal(SIGUSR1, procsignal_sigusr1_handler);
21 andrew@dunslane.net 1623 :GNC 6 : pqsignal(SIGUSR2, PG_SIG_IGN);
1624 : 6 : pqsignal(SIGPIPE, PG_SIG_IGN);
1625 : 6 : pqsignal(SIGCHLD, PG_SIG_DFL);
1626 : :
740 akapila@postgresql.o 1627 :CBC 6 : check_and_set_sync_info(MyProcPid);
1628 : :
1629 [ + - ]: 6 : ereport(LOG, errmsg("slot sync worker started"));
1630 : :
1631 : : /* Register it as soon as SlotSyncCtx->pid is initialized. */
1632 : 6 : before_shmem_exit(slotsync_worker_onexit, (Datum) 0);
1633 : :
1634 : : /*
1635                 : : 	 * Establish the SIGALRM handler and initialize the timeout module. This is
1636                 : : 	 * needed by InitPostgres to register various timeouts.
1637 : : */
1638 : 6 : InitializeTimeouts();
1639 : :
1640 : : /* Load the libpq-specific functions */
1641 : 6 : load_file("libpqwalreceiver", false);
1642 : :
1643 : : /*
1644 : : * Unblock signals (they were blocked when the postmaster forked us)
1645 : : */
803 1646 : 6 : sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);
1647 : :
1648 : : /*
1649 : : * Set always-secure search path, so malicious users can't redirect user
1650 : : * code (e.g. operators).
1651 : : *
1652 : : * It's not strictly necessary since we won't be scanning or writing to
1653 : : * any user table locally, but it's good to retain it here for added
1654 : : * precaution.
1655 : : */
796 1656 : 6 : SetConfigOption("search_path", "", PGC_SUSET, PGC_S_OVERRIDE);
1657 : :
803 1658 : 6 : dbname = CheckAndGetDbnameFromConninfo();
1659 : :
1660 : : /*
1661                 : : 	 * Connect to the database specified by the user in primary_conninfo. We
1662                 : : 	 * need a database connection for walrcv_exec to work, and we use it to
1663                 : : 	 * fetch slot information from the remote node. See comments atop
1664 : : * libpqrcv_exec.
1665 : : *
1666 : : * We do not specify a specific user here since the slot sync worker will
1667 : : * operate as a superuser. This is safe because the slot sync worker does
1668 : : * not interact with user tables, eliminating the risk of executing
1669 : : * arbitrary code within triggers.
1670 : : */
1671 : 6 : InitPostgres(dbname, InvalidOid, NULL, InvalidOid, 0, NULL);
1672 : :
1673 : 6 : SetProcessingMode(NormalProcessing);
1674 : :
1675 : 6 : initStringInfo(&app_name);
1676 [ + - ]: 6 : if (cluster_name[0])
1677 : 6 : appendStringInfo(&app_name, "%s_%s", cluster_name, "slotsync worker");
1678 : : else
755 drowley@postgresql.o 1679 :UBC 0 : appendStringInfoString(&app_name, "slotsync worker");
1680 : :
1681 : : /*
1682 : : * Establish the connection to the primary server for slot
1683 : : * synchronization.
1684 : : */
803 akapila@postgresql.o 1685 :CBC 6 : wrconn = walrcv_connect(PrimaryConnInfo, false, false, false,
1686 : : app_name.data, &err);
1687 : :
1688 [ - + ]: 6 : if (!wrconn)
803 akapila@postgresql.o 1689 [ # # ]:UBC 0 : ereport(ERROR,
1690 : : errcode(ERRCODE_CONNECTION_FAILURE),
1691 : : errmsg("synchronization worker \"%s\" could not connect to the primary server: %s",
1692 : : app_name.data, err));
1693 : :
244 akapila@postgresql.o 1694 :CBC 6 : pfree(app_name.data);
1695 : :
1696 : : /*
1697 : : * Register the disconnection callback.
1698 : : *
1699                 : : 	 * XXX: This could be combined with the earlier registration of
1700                 : : 	 * slotsync_worker_onexit(), but that would require making the connection
1701                 : : 	 * global, and we want to avoid introducing a global for this purpose.
1702 : : */
740 1703 : 6 : before_shmem_exit(slotsync_worker_disconnect, PointerGetDatum(wrconn));
1704 : :
1705 : : /*
1706                 : : 	 * Using the specified primary server connection, check that we are not a
1707                 : : 	 * cascading standby and that the slot configured in 'primary_slot_name'
1708                 : : 	 * exists on the primary server.
1709 : : */
803 1710 : 6 : validate_remote_info(wrconn);
1711 : :
1712 : : /* Main loop to synchronize slots */
1713 : : for (;;)
1714 : 19 : {
1715 : 25 : bool some_slot_updated = false;
141 akapila@postgresql.o 1716 :GNC 25 : bool started_tx = false;
1717 : : List *remote_slots;
1718 : :
27 fujii@postgresql.org 1719 [ + + ]:CBC 25 : CHECK_FOR_INTERRUPTS();
1720 : :
1721 [ + + ]: 22 : if (ConfigReloadPending)
1722 : 1 : slotsync_reread_config();
1723 : :
1724 : : /*
1725                 : : 		 * The syscache access in fetch_remote_slots() needs a transaction
1726                 : : 		 * environment.
1727 : : */
141 akapila@postgresql.o 1728 [ + - ]:GNC 21 : if (!IsTransactionState())
1729 : : {
1730 : 21 : StartTransactionCommand();
1731 : 21 : started_tx = true;
1732 : : }
1733 : :
1734 : 21 : remote_slots = fetch_remote_slots(wrconn, NIL);
1735 : 19 : some_slot_updated = synchronize_slots(wrconn, remote_slots, NULL);
1736 : 19 : list_free_deep(remote_slots);
1737 : :
1738 [ + - ]: 19 : if (started_tx)
1739 : 19 : CommitTransactionCommand();
1740 : :
803 akapila@postgresql.o 1741 :CBC 19 : wait_for_slot_activity(some_slot_updated);
1742 : : }
1743 : :
1744 : : /*
1745 : : * The slot sync worker can't get here because it will only stop when it
1746 : : * receives a stop request from the startup process, or when there is an
1747 : : * error.
1748 : : */
1749 : : Assert(false);
1750 : : }
1751 : :
1752 : : /*
1753 : : * Update the inactive_since property for synced slots.
1754 : : *
1755                 : :  * Note that this function is currently called when we shut down the slot
1756                 : :  * sync machinery.
1757 : : */
1758 : : static void
760 1759 : 1010 : update_synced_slots_inactive_since(void)
1760 : : {
1761 : 1010 : TimestampTz now = 0;
1762 : :
1763 : : /*
1764                 : : 	 * We need to update inactive_since only when promoting the standby, so
1765                 : : 	 * that inactive_since is interpreted correctly if the standby gets promoted
1766                 : : 	 * without a restart. We don't want the slots to appear inactive for a
1767 : : * long time after promotion if they haven't been synchronized recently.
1768 : : * Whoever acquires the slot, i.e., makes the slot active, will reset it.
1769 : : */
1770 [ + + ]: 1010 : if (!StandbyMode)
1771 : 957 : return;
1772 : :
1773 : : /* The slot sync worker or the SQL function mustn't be running by now */
740 1774 [ + - - + ]: 53 : Assert((SlotSyncCtx->pid == InvalidPid) && !SlotSyncCtx->syncing);
1775 : :
760 1776 : 53 : LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
1777 : :
28 alvherre@kurilemu.de 1778 [ + + ]:GNC 836 : for (int i = 0; i < max_replication_slots + max_repack_replication_slots; i++)
1779 : : {
760 akapila@postgresql.o 1780 :CBC 783 : ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
1781 : :
1782 : : /* Check if it is a synchronized slot */
1783 [ + + + + ]: 783 : if (s->in_use && s->data.synced)
1784 : : {
1785 [ - + ]: 3 : Assert(SlotIsLogical(s));
1786 : :
1787 : : /* The slot must not be acquired by any process */
84 heikki.linnakangas@i 1788 [ - + ]:GNC 3 : Assert(s->active_proc == INVALID_PROC_NUMBER);
1789 : :
1790 : : /* Use the same inactive_since time for all the slots. */
760 akapila@postgresql.o 1791 [ + + ]:CBC 3 : if (now == 0)
1792 : 2 : now = GetCurrentTimestamp();
1793 : :
454 1794 : 3 : ReplicationSlotSetInactiveSince(s, now, true);
1795 : : }
1796 : : }
1797 : :
760 1798 : 53 : LWLockRelease(ReplicationSlotControlLock);
1799 : : }
1800 : :
1801 : : /*
1802 : : * Shut down slot synchronization.
1803 : : *
1804 : : * This function sets stopSignaled=true and wakes up the slot sync process
1805                 : :  * (either the worker or a backend running the SQL function pg_sync_replication_slots())
1806                 : :  * so that the worker can exit or the SQL function pg_sync_replication_slots() can
1807                 : :  * finish. It also waits until the slot sync worker has exited or
1808                 : :  * pg_sync_replication_slots() has finished.
1809 : : */
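                 : : /*
                 : :  * As noted in the cleanup callbacks above, this is invoked by the startup
                 : :  * process during promotion.
                 : :  */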
1810 : : void
803 1811 : 1010 : ShutDownSlotSync(void)
1812 : : {
1813 : : pid_t sync_process_pid;
1814 : :
1815 [ - + ]: 1010 : SpinLockAcquire(&SlotSyncCtx->mutex);
1816 : :
1817 : 1010 : SlotSyncCtx->stopSignaled = true;
1818 : :
1819 : : /*
1820 : : * Return if neither the slot sync worker is running nor the function
1821 : : * pg_sync_replication_slots() is executing.
1822 : : */
740 1823 [ + + ]: 1010 : if (!SlotSyncCtx->syncing)
1824 : : {
803 1825 : 1009 : SpinLockRelease(&SlotSyncCtx->mutex);
760 1826 : 1009 : update_synced_slots_inactive_since();
803 1827 : 1009 : return;
1828 : : }
1829 : :
145 1830 : 1 : sync_process_pid = SlotSyncCtx->pid;
1831 : :
803 1832 : 1 : SpinLockRelease(&SlotSyncCtx->mutex);
1833 : :
1834 : : /*
1835 : : * Signal process doing slotsync, if any, asking it to stop.
1836 : : */
145 1837 [ + - ]: 1 : if (sync_process_pid != InvalidPid)
27 fujii@postgresql.org 1838 : 1 : SendProcSignal(sync_process_pid, PROCSIG_SLOTSYNC_MESSAGE,
1839 : : INVALID_PROC_NUMBER);
1840 : :
1841 : : /* Wait for slot sync to end */
1842 : : for (;;)
803 akapila@postgresql.o 1843 :UBC 0 : {
1844 : : int rc;
1845 : :
1846 : : /* Wait a bit, we don't expect to have to wait long */
803 akapila@postgresql.o 1847 :CBC 1 : rc = WaitLatch(MyLatch,
1848 : : WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
1849 : : 10L, WAIT_EVENT_REPLICATION_SLOTSYNC_SHUTDOWN);
1850 : :
1851 [ - + ]: 1 : if (rc & WL_LATCH_SET)
1852 : : {
803 akapila@postgresql.o 1853 :UBC 0 : ResetLatch(MyLatch);
1854 [ # # ]: 0 : CHECK_FOR_INTERRUPTS();
1855 : : }
1856 : :
803 akapila@postgresql.o 1857 [ - + ]:CBC 1 : SpinLockAcquire(&SlotSyncCtx->mutex);
1858 : :
1859 : : /* Ensure that no process is syncing the slots. */
740 1860 [ + - ]: 1 : if (!SlotSyncCtx->syncing)
803 1861 : 1 : break;
1862 : :
803 akapila@postgresql.o 1863 :UBC 0 : SpinLockRelease(&SlotSyncCtx->mutex);
1864 : : }
1865 : :
803 akapila@postgresql.o 1866 :CBC 1 : SpinLockRelease(&SlotSyncCtx->mutex);
1867 : :
760 1868 : 1 : update_synced_slots_inactive_since();
1869 : : }
1870 : :
1871 : : /*
1872 : : * SlotSyncWorkerCanRestart
1873 : : *
1874                 : :  * Return true, indicating the worker is allowed to restart, if at least
1875                 : :  * SLOTSYNC_RESTART_INTERVAL_SEC seconds have passed since it was last launched.
1876                 : :  * Otherwise return false.
1877 : : *
1878 : : * This is a safety valve to protect against continuous respawn attempts if the
1879                 : :  * worker is dying immediately at launch. Note that since we retry launching
1880                 : :  * the worker from the postmaster main loop, we will get another
1881                 : :  * chance later.
1882 : : */
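                 : : /*
                 : :  * A sketch of the throttling, assuming SLOTSYNC_RESTART_INTERVAL_SEC were
                 : :  * 10 (an illustrative value, not necessarily the real one): a worker
                 : :  * launched at t=0s that dies at t=3s cannot be relaunched until t>=10s;
                 : :  * until then this function returns false and the postmaster retries later.
                 : :  */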
1883 : : bool
803 1884 : 12 : SlotSyncWorkerCanRestart(void)
1885 : : {
1886 : 12 : time_t curtime = time(NULL);
1887 : :
1888 : : /*
1889 : : * If first time through, or time somehow went backwards, always update
1890 : : * last_start_time to match the current clock and allow worker start.
1891 : : * Otherwise allow it only once enough time has elapsed.
1892 : : */
195 tgl@sss.pgh.pa.us 1893 [ + + ]:GNC 12 : if (SlotSyncCtx->last_start_time == 0 ||
1894 [ + - ]: 6 : curtime < SlotSyncCtx->last_start_time ||
1895 [ - + ]: 6 : curtime - SlotSyncCtx->last_start_time >= SLOTSYNC_RESTART_INTERVAL_SEC)
1896 : : {
1897 : 6 : SlotSyncCtx->last_start_time = curtime;
1898 : 6 : return true;
1899 : : }
1900 : 6 : return false;
1901 : : }
1902 : :
1903 : : /*
1904                 : :  * Is the current process syncing replication slots?
1905                 : :  *
1906                 : :  * It could be either a backend executing the SQL function or the slot sync worker.
1907 : : */
1908 : : bool
811 akapila@postgresql.o 1909 :CBC 78 : IsSyncingReplicationSlots(void)
1910 : : {
1911 : 78 : return syncing_slots;
1912 : : }
1913 : :
1914 : : /*
1915 : : * Register shared memory space needed for slot synchronization.
1916 : : */
1917 : : static void
29 heikki.linnakangas@i 1918 :GNC 1244 : SlotSyncShmemRequest(void *arg)
1919 : : {
1920 : 1244 : ShmemRequestStruct(.name = "Slot Sync Data",
1921 : : .size = sizeof(SlotSyncCtxStruct),
1922 : : .ptr = (void **) &SlotSyncCtx,
1923 : : );
811 akapila@postgresql.o 1924 :GIC 1244 : }
1925 : :
1926 : : /*
1927 : : * Initialize shared memory for slot synchronization.
1928 : : */
1929 : : static void
29 heikki.linnakangas@i 1930 :GNC 1241 : SlotSyncShmemInit(void *arg)
1931 : : {
1932 : 1241 : memset(SlotSyncCtx, 0, sizeof(SlotSyncCtxStruct));
1933 : 1241 : SlotSyncCtx->pid = InvalidPid;
1934 : 1241 : SpinLockInit(&SlotSyncCtx->mutex);
811 akapila@postgresql.o 1935 :CBC 1241 : }
1936 : :
1937 : : /*
1938 : : * Error cleanup callback for slot sync SQL function.
1939 : : */
1940 : : static void
1941 : 1 : slotsync_failure_callback(int code, Datum arg)
1942 : : {
1943 : 1 : WalReceiverConn *wrconn = (WalReceiverConn *) DatumGetPointer(arg);
1944 : :
1945 : : /*
1946 : : * We need to do slots cleanup here just like WalSndErrorCleanup() does.
1947 : : *
1948 : : * The startup process during promotion invokes ShutDownSlotSync() which
1949 : : * waits for slot sync to finish and it does that by checking the
1950 : : * 'syncing' flag. Thus the SQL function must be done with slots' release
1951 : : * and cleanup to avoid any dangling temporary slots or active slots
1952 : : * before it marks itself as finished syncing.
1953 : : */
1954 : :
1955 : : /* Make sure active replication slots are released */
740 1956 [ - + ]: 1 : if (MyReplicationSlot != NULL)
740 akapila@postgresql.o 1957 :UBC 0 : ReplicationSlotRelease();
1958 : :
1959 : : /* Also cleanup the synced temporary slots. */
740 akapila@postgresql.o 1960 :CBC 1 : ReplicationSlotCleanup(true);
1961 : :
1962 : : /*
1963                 : : 	 * If syncing_slots is true, it indicates that the process errored out
1964                 : : 	 * without resetting the flag. So, we need to clean up shared memory and
1965                 : : 	 * reset the flag here.
1966 : : */
1967 [ + - ]: 1 : if (syncing_slots)
1968 : 1 : reset_syncing_flag();
1969 : :
811 1970 : 1 : walrcv_disconnect(wrconn);
1971 : 1 : }
1972 : :
1973 : : /*
1974 : : * Helper function to extract slot names from a list of remote slots
1975 : : */
1976 : : static List *
141 akapila@postgresql.o 1977 :GNC 1 : extract_slot_names(List *remote_slots)
1978 : : {
1979 : 1 : List *slot_names = NIL;
1980 : :
1981 [ + - + + : 3 : foreach_ptr(RemoteSlot, remote_slot, remote_slots)
+ + ]
1982 : : {
1983 : : char *slot_name;
1984 : :
1985 : 1 : slot_name = pstrdup(remote_slot->name);
1986 : 1 : slot_names = lappend(slot_names, slot_name);
1987 : : }
1988 : :
1989 : 1 : return slot_names;
1990 : : }
1991 : :
1992 : : /*
1993                 : :  * Synchronize the failover-enabled replication slots using the specified
1994                 : :  * primary server connection.
1995 : : *
1996 : : * Repeatedly fetches and updates replication slot information from the
1997 : : * primary until all slots are at least "sync ready".
1998 : : *
1999 : : * Exits early if promotion is triggered or certain critical
2000 : : * configuration parameters have changed.
2001 : : */
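                 : : /*
                 : :  * Typical usage sketch: a client connected to the standby runs
                 : :  *
                 : :  *     SELECT pg_sync_replication_slots();
                 : :  *
                 : :  * and the backend executing that SQL function ends up here with an
                 : :  * already-established connection to the primary (wrconn).
                 : :  */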
2002 : : void
811 akapila@postgresql.o 2003 :CBC 9 : SyncReplicationSlots(WalReceiverConn *wrconn)
2004 : : {
2005 [ + + ]: 9 : PG_ENSURE_ERROR_CLEANUP(slotsync_failure_callback, PointerGetDatum(wrconn));
2006 : : {
141 akapila@postgresql.o 2007 :GNC 9 : List *remote_slots = NIL;
2008 : 9 : List *slot_names = NIL; /* List of slot names to track */
2009 : :
145 akapila@postgresql.o 2010 :CBC 9 : check_and_set_sync_info(MyProcPid);
2011 : :
811 2012 : 9 : validate_remote_info(wrconn);
2013 : :
2014 : : /* Retry until all the slots are sync-ready */
2015 : : for (;;)
141 akapila@postgresql.o 2016 :GNC 2 : {
2017 : 10 : bool slot_persistence_pending = false;
2018 : 10 : bool some_slot_updated = false;
2019 : :
2020 : : /* Check for interrupts and config changes */
27 fujii@postgresql.org 2021 [ - + ]: 10 : CHECK_FOR_INTERRUPTS();
2022 : :
2023 [ - + ]: 10 : if (ConfigReloadPending)
27 fujii@postgresql.org 2024 :UNC 0 : slotsync_reread_config();
2025 : :
2026 : : /* We must be in a valid transaction state */
141 akapila@postgresql.o 2027 [ - + ]:GNC 10 : Assert(IsTransactionState());
2028 : :
2029 : : /*
2030 : : * Fetch remote slot info for the given slot_names. If slot_names
2031 : : * is NIL, fetch all failover-enabled slots. Note that we reuse
2032 : : * slot_names from the first iteration; re-fetching all failover
2033 : : * slots each time could cause an endless loop. Instead of
2034 : : * reprocessing only the pending slots in each iteration, it's
2035 : : * better to process all the slots received in the first
2036 : : * iteration. This ensures that by the time we're done, all slots
2037 : : * reflect the latest values.
2038 : : */
2039 : 10 : remote_slots = fetch_remote_slots(wrconn, slot_names);
2040 : :
2041 : : /* Attempt to synchronize slots */
2042 : 10 : some_slot_updated = synchronize_slots(wrconn, remote_slots,
2043 : : &slot_persistence_pending);
2044 : :
2045 : : /*
2046 : : * If slot_persistence_pending is true, extract slot names for
2047 : : * future iterations (only needed if we haven't done it yet)
2048 : : */
2049 [ + + + + ]: 10 : if (slot_names == NIL && slot_persistence_pending)
2050 : 1 : slot_names = extract_slot_names(remote_slots);
2051 : :
2052 : : /* Free the current remote_slots list */
2053 : 10 : list_free_deep(remote_slots);
2054 : :
2055                 : : 			/* Done if all slots are persisted, i.e., are sync-ready */
2056 [ + + ]: 10 : if (!slot_persistence_pending)
2057 : 8 : break;
2058 : :
2059                 : : 			/* wait before retrying */
2060 : 2 : wait_for_slot_activity(some_slot_updated);
2061 : : }
2062 : :
2063 [ + + ]: 8 : if (slot_names)
2064 : 1 : list_free_deep(slot_names);
2065 : :
2066 : : /* Cleanup the synced temporary slots */
740 akapila@postgresql.o 2067 :CBC 8 : ReplicationSlotCleanup(true);
2068 : :
2069 : : /* We are done with sync, so reset sync flag */
2070 : 8 : reset_syncing_flag();
2071 : : }
811 2072 [ - + ]: 9 : PG_END_ENSURE_ERROR_CLEANUP(slotsync_failure_callback, PointerGetDatum(wrconn));
2073 : 8 : }
|