Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * walsender.c
4 : : *
5 : : * The WAL sender process (walsender) is new as of Postgres 9.0. It takes
6 : : * care of sending XLOG from the primary server to a single recipient.
7 : : * (Note that there can be more than one walsender process concurrently.)
8 : : * It is started by the postmaster when the walreceiver of a standby server
9 : : * connects to the primary server and requests XLOG streaming replication.
10 : : *
11 : : * A walsender is similar to a regular backend, ie. there is a one-to-one
12 : : * relationship between a connection and a walsender process, but instead
13 : : * of processing SQL queries, it understands a small set of special
14 : : * replication-mode commands. The START_REPLICATION command begins streaming
15 : : * WAL to the client. While streaming, the walsender keeps reading XLOG
16 : : * records from the disk and sends them to the standby server over the
17 : : * COPY protocol, until either side ends the replication by exiting COPY
18 : : * mode (or until the connection is closed).
19 : : *
20 : : * Normal termination is by SIGTERM, which instructs the walsender to
21 : : * close the connection and exit(0) at the next convenient moment. Emergency
22 : : * termination is by SIGQUIT; like any backend, the walsender will simply
23 : : * abort and exit on SIGQUIT. A close of the connection and a FATAL error
24 : : * are treated as not a crash but approximately normal termination;
25 : : * the walsender will exit quickly without sending any more XLOG records.
26 : : *
27 : : * If the server is shut down, checkpointer sends us
28 : : * PROCSIG_WALSND_INIT_STOPPING after all regular backends have exited. If
29 : : * the backend is idle or runs an SQL query this causes the backend to
30 : : * shutdown, if logical replication is in progress all existing WAL records
31 : : * are processed followed by a shutdown. Otherwise this causes the walsender
32 : : * to switch to the "stopping" state. In this state, the walsender will reject
33 : : * any further replication commands. The checkpointer begins the shutdown
34 : : * checkpoint once all walsenders are confirmed as stopping. When the shutdown
35 : : * checkpoint finishes, the postmaster sends us SIGUSR2. This instructs
36 : : * walsender to send any outstanding WAL, including the shutdown checkpoint
37 : : * record, wait for it to be replicated to the standby, and then exit.
38 : : * This waiting time can be limited by the wal_sender_shutdown_timeout
39 : : * parameter.
40 : : *
41 : : *
42 : : * Portions Copyright (c) 2010-2026, PostgreSQL Global Development Group
43 : : *
44 : : * IDENTIFICATION
45 : : * src/backend/replication/walsender.c
46 : : *
47 : : *-------------------------------------------------------------------------
48 : : */
49 : : #include "postgres.h"
50 : :
51 : : #include <signal.h>
52 : : #include <unistd.h>
53 : :
54 : : #include "access/timeline.h"
55 : : #include "access/transam.h"
56 : : #include "access/twophase.h"
57 : : #include "access/xact.h"
58 : : #include "access/xlog_internal.h"
59 : : #include "access/xlogreader.h"
60 : : #include "access/xlogrecovery.h"
61 : : #include "access/xlogutils.h"
62 : : #include "backup/basebackup.h"
63 : : #include "backup/basebackup_incremental.h"
64 : : #include "catalog/pg_authid.h"
65 : : #include "catalog/pg_type.h"
66 : : #include "commands/defrem.h"
67 : : #include "funcapi.h"
68 : : #include "libpq/libpq.h"
69 : : #include "libpq/pqformat.h"
70 : : #include "libpq/protocol.h"
71 : : #include "miscadmin.h"
72 : : #include "nodes/replnodes.h"
73 : : #include "pgstat.h"
74 : : #include "postmaster/interrupt.h"
75 : : #include "replication/decode.h"
76 : : #include "replication/logical.h"
77 : : #include "replication/slotsync.h"
78 : : #include "replication/slot.h"
79 : : #include "replication/snapbuild.h"
80 : : #include "replication/syncrep.h"
81 : : #include "replication/walreceiver.h"
82 : : #include "replication/walsender.h"
83 : : #include "replication/walsender_private.h"
84 : : #include "storage/condition_variable.h"
85 : : #include "storage/aio_subsys.h"
86 : : #include "storage/fd.h"
87 : : #include "storage/ipc.h"
88 : : #include "storage/pmsignal.h"
89 : : #include "storage/proc.h"
90 : : #include "storage/procarray.h"
91 : : #include "storage/subsystems.h"
92 : : #include "tcop/dest.h"
93 : : #include "tcop/tcopprot.h"
94 : : #include "utils/acl.h"
95 : : #include "utils/builtins.h"
96 : : #include "utils/guc.h"
97 : : #include "utils/lsyscache.h"
98 : : #include "utils/memutils.h"
99 : : #include "utils/pg_lsn.h"
100 : : #include "utils/pgstat_internal.h"
101 : : #include "utils/ps_status.h"
102 : : #include "utils/timeout.h"
103 : : #include "utils/timestamp.h"
104 : : #include "utils/wait_event.h"
105 : :
106 : : /* Minimum interval used by walsender for stats flushes, in ms */
107 : : #define WALSENDER_STATS_FLUSH_INTERVAL 1000
108 : :
109 : : /*
110 : : * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
111 : : *
112 : : * We don't have a good idea of what a good value would be; there's some
113 : : * overhead per message in both walsender and walreceiver, but on the other
114 : : * hand sending large batches makes walsender less responsive to signals
115 : : * because signals are checked only between messages. 128kB (with
116 : : * default 8k blocks) seems like a reasonable guess for now.
117 : : */
118 : : #define MAX_SEND_SIZE (XLOG_BLCKSZ * 16)
119 : :
120 : : /* Array of WalSnds in shared memory */
121 : : WalSndCtlData *WalSndCtl = NULL;
122 : :
123 : : static void WalSndShmemRequest(void *arg);
124 : : static void WalSndShmemInit(void *arg);
125 : :
126 : : const ShmemCallbacks WalSndShmemCallbacks = {
127 : : .request_fn = WalSndShmemRequest,
128 : : .init_fn = WalSndShmemInit,
129 : : };
130 : :
131 : : /* My slot in the shared memory array */
132 : : WalSnd *MyWalSnd = NULL;
133 : :
134 : : /* Global state */
135 : : bool am_walsender = false; /* Am I a walsender process? */
136 : : bool am_cascading_walsender = false; /* Am I cascading WAL to another
137 : : * standby? */
138 : : bool am_db_walsender = false; /* Connected to a database? */
139 : :
140 : : /* GUC variables */
141 : : int max_wal_senders = 10; /* the maximum number of concurrent
142 : : * walsenders */
143 : : int wal_sender_timeout = 60 * 1000; /* maximum time to send one WAL
144 : : * data message */
145 : :
146 : : int wal_sender_shutdown_timeout = -1; /* maximum time to wait during
147 : : * shutdown for WAL
148 : : * replication */
149 : :
150 : : bool log_replication_commands = false;
151 : :
152 : : /*
153 : : * State for WalSndWakeupRequest
154 : : */
155 : : bool wake_wal_senders = false;
156 : :
157 : : /*
158 : : * xlogreader used for replication. Note that a WAL sender doing physical
159 : : * replication does not need xlogreader to read WAL, but it needs one to
160 : : * keep a state of its work.
161 : : */
162 : : static XLogReaderState *xlogreader = NULL;
163 : :
164 : : /*
165 : : * If the UPLOAD_MANIFEST command is used to provide a backup manifest in
166 : : * preparation for an incremental backup, uploaded_manifest will be point
167 : : * to an object containing information about its contexts, and
168 : : * uploaded_manifest_mcxt will point to the memory context that contains
169 : : * that object and all of its subordinate data. Otherwise, both values will
170 : : * be NULL.
171 : : */
172 : : static IncrementalBackupInfo *uploaded_manifest = NULL;
173 : : static MemoryContext uploaded_manifest_mcxt = NULL;
174 : :
175 : : /*
176 : : * These variables keep track of the state of the timeline we're currently
177 : : * sending. sendTimeLine identifies the timeline. If sendTimeLineIsHistoric,
178 : : * the timeline is not the latest timeline on this server, and the server's
179 : : * history forked off from that timeline at sendTimeLineValidUpto.
180 : : */
181 : : static TimeLineID sendTimeLine = 0;
182 : : static TimeLineID sendTimeLineNextTLI = 0;
183 : : static bool sendTimeLineIsHistoric = false;
184 : : static XLogRecPtr sendTimeLineValidUpto = InvalidXLogRecPtr;
185 : :
186 : : /*
187 : : * How far have we sent WAL already? This is also advertised in
188 : : * MyWalSnd->sentPtr. (Actually, this is the next WAL location to send.)
189 : : */
190 : : static XLogRecPtr sentPtr = InvalidXLogRecPtr;
191 : :
192 : : /* Buffers for constructing outgoing messages and processing reply messages. */
193 : : static StringInfoData output_message;
194 : : static StringInfoData reply_message;
195 : : static StringInfoData tmpbuf;
196 : :
197 : : /* Timestamp of last ProcessRepliesIfAny(). */
198 : : static TimestampTz last_processing = 0;
199 : :
200 : : /*
201 : : * Timestamp of last ProcessRepliesIfAny() that saw a reply from the
202 : : * standby. Set to 0 if wal_sender_timeout doesn't need to be active.
203 : : */
204 : : static TimestampTz last_reply_timestamp = 0;
205 : :
206 : : /* Have we sent a heartbeat message asking for reply, since last reply? */
207 : : static bool waiting_for_ping_response = false;
208 : :
209 : : /* Timestamp when walsender received the shutdown request */
210 : : static TimestampTz shutdown_request_timestamp = 0;
211 : :
212 : : /*
213 : : * Set after queueing the CommandComplete message that ends WAL streaming
214 : : * during shutdown. This prevents WalSndDone() and WalSndDoneImmediate()
215 : : * from queueing the same message twice.
216 : : */
217 : : static bool shutdown_stream_done_queued = false;
218 : :
219 : : /*
220 : : * While streaming WAL in Copy mode, streamingDoneSending is set to true
221 : : * after we have sent CopyDone. We should not send any more CopyData messages
222 : : * after that. streamingDoneReceiving is set to true when we receive CopyDone
223 : : * from the other end. When both become true, it's time to exit Copy mode.
224 : : */
225 : : static bool streamingDoneSending;
226 : : static bool streamingDoneReceiving;
227 : :
228 : : /* Are we there yet? */
229 : : static bool WalSndCaughtUp = false;
230 : :
231 : : /* Flags set by signal handlers for later service in main loop */
232 : : static volatile sig_atomic_t got_SIGUSR2 = false;
233 : : static volatile sig_atomic_t got_STOPPING = false;
234 : :
235 : : /*
236 : : * This is set while we are streaming. When not set
237 : : * PROCSIG_WALSND_INIT_STOPPING signal will be handled like SIGTERM. When set,
238 : : * the main loop is responsible for checking got_STOPPING and terminating when
239 : : * it's set (after streaming any remaining WAL).
240 : : */
241 : : static volatile sig_atomic_t replication_active = false;
242 : :
243 : : static LogicalDecodingContext *logical_decoding_ctx = NULL;
244 : :
245 : : /* A sample associating a WAL location with the time it was written. */
246 : : typedef struct
247 : : {
248 : : XLogRecPtr lsn;
249 : : TimestampTz time;
250 : : } WalTimeSample;
251 : :
252 : : /* The size of our buffer of time samples. */
253 : : #define LAG_TRACKER_BUFFER_SIZE 8192
254 : :
255 : : /* A mechanism for tracking replication lag. */
256 : : typedef struct
257 : : {
258 : : XLogRecPtr last_lsn;
259 : : WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE];
260 : : int write_head;
261 : : int read_heads[NUM_SYNC_REP_WAIT_MODE];
262 : : WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
263 : :
264 : : /*
265 : : * Overflow entries for read heads that collide with the write head.
266 : : *
267 : : * When the cyclic buffer fills (write head is about to collide with a
268 : : * read head), we save that read head's current sample here and mark it as
269 : : * using overflow (read_heads[i] = -1). This allows the write head to
270 : : * continue advancing while the overflowed mode continues lag computation
271 : : * using the saved sample.
272 : : *
273 : : * Once the standby's reported LSN advances past the overflow entry's LSN,
274 : : * we transition back to normal buffer-based tracking.
275 : : */
276 : : WalTimeSample overflowed[NUM_SYNC_REP_WAIT_MODE];
277 : : } LagTracker;
278 : :
279 : : static LagTracker *lag_tracker;
280 : :
281 : : /* Signal handlers */
282 : : static void WalSndLastCycleHandler(SIGNAL_ARGS);
283 : :
284 : : /* Prototypes for private functions */
285 : : typedef void (*WalSndSendDataCallback) (void);
286 : : static void WalSndLoop(WalSndSendDataCallback send_data);
287 : : static void InitWalSenderSlot(void);
288 : : static void WalSndKill(int code, Datum arg);
289 : : pg_noreturn static void WalSndShutdown(void);
290 : : static void XLogSendPhysical(void);
291 : : static void XLogSendLogical(void);
292 : : pg_noreturn static void WalSndDoneImmediate(void);
293 : : static void WalSndDone(WalSndSendDataCallback send_data);
294 : : static void IdentifySystem(void);
295 : : static void UploadManifest(void);
296 : : static bool HandleUploadManifestPacket(StringInfo buf, off_t *offset,
297 : : IncrementalBackupInfo *ib);
298 : : static void ReadReplicationSlot(ReadReplicationSlotCmd *cmd);
299 : : static void CreateReplicationSlot(CreateReplicationSlotCmd *cmd);
300 : : static void DropReplicationSlot(DropReplicationSlotCmd *cmd);
301 : : static void StartReplication(StartReplicationCmd *cmd);
302 : : static void StartLogicalReplication(StartReplicationCmd *cmd);
303 : : static void ProcessStandbyMessage(void);
304 : : static void ProcessStandbyReplyMessage(void);
305 : : static void ProcessStandbyHSFeedbackMessage(void);
306 : : static void ProcessStandbyPSRequestMessage(void);
307 : : static void ProcessRepliesIfAny(void);
308 : : static void ProcessPendingWrites(void);
309 : : static void WalSndKeepalive(bool requestReply, XLogRecPtr writePtr);
310 : : static void WalSndKeepaliveIfNecessary(void);
311 : : static void WalSndCheckTimeOut(void);
312 : : static void WalSndCheckShutdownTimeout(void);
313 : : static long WalSndComputeSleeptime(TimestampTz now);
314 : : static void WalSndWait(uint32 socket_events, long timeout, uint32 wait_event);
315 : : static void WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write);
316 : : static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write);
317 : : static void WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
318 : : bool skipped_xact);
319 : : static XLogRecPtr WalSndWaitForWal(XLogRecPtr loc);
320 : : static void LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time);
321 : : static TimeOffset LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now);
322 : : static bool TransactionIdInRecentPast(TransactionId xid, uint32 epoch);
323 : :
324 : : static void WalSndSegmentOpen(XLogReaderState *state, XLogSegNo nextSegNo,
325 : : TimeLineID *tli_p);
326 : :
327 : :
328 : : /* Initialize walsender process before entering the main command loop */
329 : : void
4960 heikki.linnakangas@i 330 :CBC 1279 : InitWalSender(void)
331 : : {
5404 simon@2ndQuadrant.co 332 : 1279 : am_cascading_walsender = RecoveryInProgress();
333 : :
334 : : /* Create a per-walsender data structure in shared memory */
4960 heikki.linnakangas@i 335 : 1279 : InitWalSenderSlot();
336 : :
337 : : /* need resource owner for e.g. basebackups */
574 andres@anarazel.de 338 : 1279 : CreateAuxProcessResourceOwner();
339 : :
340 : : /*
341 : : * Let postmaster know that we're a WAL sender. Once we've declared us as
342 : : * a WAL sender process, postmaster will let us outlive the bgwriter and
343 : : * kill us last in the shutdown sequence, so we get a chance to stream all
344 : : * remaining WAL at shutdown, including the shutdown checkpoint. Note that
345 : : * there's no going back, and we mustn't write any WAL records after this.
346 : : */
4891 heikki.linnakangas@i 347 : 1279 : MarkPostmasterChildWalSender();
348 : 1279 : SendPostmasterSignal(PMSIGNAL_ADVANCE_STATE_MACHINE);
349 : :
350 : : /*
351 : : * If the client didn't specify a database to connect to, show in PGPROC
352 : : * that our advertised xmin should affect vacuum horizons in all
353 : : * databases. This allows physical replication clients to send hot
354 : : * standby feedback that will delay vacuum cleanup in all databases.
355 : : */
1481 tgl@sss.pgh.pa.us 356 [ + + ]: 1279 : if (MyDatabaseId == InvalidOid)
357 : : {
358 [ - + ]: 506 : Assert(MyProc->xmin == InvalidTransactionId);
359 : 506 : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
360 : 506 : MyProc->statusFlags |= PROC_AFFECTS_ALL_HORIZONS;
361 : 506 : ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags;
362 : 506 : LWLockRelease(ProcArrayLock);
363 : : }
364 : :
365 : : /* Initialize empty timestamp buffer for lag tracking. */
2758 tmunro@postgresql.or 366 : 1279 : lag_tracker = MemoryContextAllocZero(TopMemoryContext, sizeof(LagTracker));
5954 heikki.linnakangas@i 367 : 1279 : }
368 : :
369 : : /*
370 : : * Clean up after an error.
371 : : *
372 : : * WAL sender processes don't use transactions like regular backends do.
373 : : * This function does any cleanup required after an error in a WAL sender
374 : : * process, similar to what transaction abort does in a regular backend.
375 : : */
376 : : void
3916 andres@anarazel.de 377 : 49 : WalSndErrorCleanup(void)
378 : : {
4477 rhaas@postgresql.org 379 : 49 : LWLockReleaseAll();
3451 380 : 49 : ConditionVariableCancelSleep();
3708 381 : 49 : pgstat_report_wait_end();
414 andres@anarazel.de 382 : 49 : pgaio_error_cleanup();
383 : :
2181 alvherre@alvh.no-ip. 384 [ + + + + ]: 49 : if (xlogreader != NULL && xlogreader->seg.ws_file >= 0)
2183 385 : 6 : wal_segment_close(xlogreader);
386 : :
4477 rhaas@postgresql.org 387 [ + + ]: 49 : if (MyReplicationSlot != NULL)
388 : 15 : ReplicationSlotRelease();
389 : :
740 akapila@postgresql.o 390 : 49 : ReplicationSlotCleanup(false);
391 : :
4891 heikki.linnakangas@i 392 : 49 : replication_active = false;
393 : :
394 : : /*
395 : : * If there is a transaction in progress, it will clean up our
396 : : * ResourceOwner, but if a replication command set up a resource owner
397 : : * without a transaction, we've got to clean that up now.
398 : : */
2223 rhaas@postgresql.org 399 [ + + ]: 49 : if (!IsTransactionOrTransactionBlock())
574 andres@anarazel.de 400 : 48 : ReleaseAuxProcessResources(false);
401 : :
3256 402 [ + - - + ]: 49 : if (got_STOPPING || got_SIGUSR2)
4960 heikki.linnakangas@i 403 :UBC 0 : proc_exit(0);
404 : :
405 : : /* Revert back to startup state */
4891 heikki.linnakangas@i 406 :CBC 49 : WalSndSetState(WALSNDSTATE_STARTUP);
5954 407 : 49 : }
408 : :
409 : : /*
410 : : * Handle a client's connection abort in an orderly manner.
411 : : */
412 : : static void
4439 rhaas@postgresql.org 413 : 1 : WalSndShutdown(void)
414 : : {
415 : : /*
416 : : * Reset whereToSendOutput to prevent ereport from attempting to send any
417 : : * more messages to the standby.
418 : : */
419 [ + - ]: 1 : if (whereToSendOutput == DestRemote)
420 : 1 : whereToSendOutput = DestNone;
421 : :
422 : 1 : proc_exit(0);
423 : : }
424 : :
425 : : /*
426 : : * Handle the IDENTIFY_SYSTEM command.
427 : : */
428 : : static void
5590 magnus@hagander.net 429 : 803 : IdentifySystem(void)
430 : : {
431 : : char sysid[32];
432 : : char xloc[MAXFNAMELEN];
433 : : XLogRecPtr logptr;
4439 rhaas@postgresql.org 434 : 803 : char *dbname = NULL;
435 : : DestReceiver *dest;
436 : : TupOutputState *tstate;
437 : : TupleDesc tupdesc;
438 : : Datum values[4];
1389 peter@eisentraut.org 439 : 803 : bool nulls[4] = {0};
440 : : TimeLineID currTLI;
441 : :
442 : : /*
443 : : * Reply with a result set with one row, four columns. First col is system
444 : : * ID, second is timeline ID, third is current xlog location and the
445 : : * fourth contains the database name if we are connected to one.
446 : : */
447 : :
5590 magnus@hagander.net 448 : 803 : snprintf(sysid, sizeof(sysid), UINT64_FORMAT,
449 : : GetSystemIdentifier());
450 : :
4891 heikki.linnakangas@i 451 : 803 : am_cascading_walsender = RecoveryInProgress();
452 [ + + ]: 803 : if (am_cascading_walsender)
1642 rhaas@postgresql.org 453 : 66 : logptr = GetStandbyFlushRecPtr(&currTLI);
454 : : else
455 : 737 : logptr = GetFlushRecPtr(&currTLI);
456 : :
302 alvherre@kurilemu.de 457 :GNC 803 : snprintf(xloc, sizeof(xloc), "%X/%08X", LSN_FORMAT_ARGS(logptr));
458 : :
4439 rhaas@postgresql.org 459 [ + + ]:CBC 803 : if (MyDatabaseId != InvalidOid)
460 : : {
461 : 284 : MemoryContext cur = CurrentMemoryContext;
462 : :
463 : : /* syscache access needs a transaction env. */
464 : 284 : StartTransactionCommand();
465 : 284 : dbname = get_database_name(MyDatabaseId);
466 : : /* copy dbname out of TX context */
673 tgl@sss.pgh.pa.us 467 : 284 : dbname = MemoryContextStrdup(cur, dbname);
4439 rhaas@postgresql.org 468 : 284 : CommitTransactionCommand();
469 : : }
470 : :
3380 471 : 803 : dest = CreateDestReceiver(DestRemoteSimple);
472 : :
473 : : /* need a tuple descriptor representing four columns */
2723 andres@anarazel.de 474 : 803 : tupdesc = CreateTemplateTupleDesc(4);
3380 rhaas@postgresql.org 475 : 803 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "systemid",
476 : : TEXTOID, -1, 0);
477 : 803 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "timeline",
478 : : INT8OID, -1, 0);
479 : 803 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "xlogpos",
480 : : TEXTOID, -1, 0);
481 : 803 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 4, "dbname",
482 : : TEXTOID, -1, 0);
50 drowley@postgresql.o 483 :GNC 803 : TupleDescFinalize(tupdesc);
484 : :
485 : : /* prepare for projection of tuples */
2728 andres@anarazel.de 486 :CBC 803 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
487 : :
488 : : /* column 1: system identifier */
3380 rhaas@postgresql.org 489 : 803 : values[0] = CStringGetTextDatum(sysid);
490 : :
491 : : /* column 2: timeline */
1401 peter@eisentraut.org 492 : 803 : values[1] = Int64GetDatum(currTLI);
493 : :
494 : : /* column 3: wal location */
3280 peter_e@gmx.net 495 : 803 : values[2] = CStringGetTextDatum(xloc);
496 : :
497 : : /* column 4: database name, or NULL if none */
4439 rhaas@postgresql.org 498 [ + + ]: 803 : if (dbname)
3380 499 : 284 : values[3] = CStringGetTextDatum(dbname);
500 : : else
501 : 519 : nulls[3] = true;
502 : :
503 : : /* send it to dest */
504 : 803 : do_tup_output(tstate, values, nulls);
505 : :
506 : 803 : end_tup_output(tstate);
5590 magnus@hagander.net 507 : 803 : }
508 : :
509 : : /* Handle READ_REPLICATION_SLOT command */
510 : : static void
1653 michael@paquier.xyz 511 : 6 : ReadReplicationSlot(ReadReplicationSlotCmd *cmd)
512 : : {
513 : : #define READ_REPLICATION_SLOT_COLS 3
514 : : ReplicationSlot *slot;
515 : : DestReceiver *dest;
516 : : TupOutputState *tstate;
517 : : TupleDesc tupdesc;
1389 peter@eisentraut.org 518 : 6 : Datum values[READ_REPLICATION_SLOT_COLS] = {0};
519 : : bool nulls[READ_REPLICATION_SLOT_COLS];
520 : :
1653 michael@paquier.xyz 521 : 6 : tupdesc = CreateTemplateTupleDesc(READ_REPLICATION_SLOT_COLS);
522 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "slot_type",
523 : : TEXTOID, -1, 0);
524 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "restart_lsn",
525 : : TEXTOID, -1, 0);
526 : : /* TimeLineID is unsigned, so int4 is not wide enough. */
527 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "restart_tli",
528 : : INT8OID, -1, 0);
50 drowley@postgresql.o 529 :GNC 6 : TupleDescFinalize(tupdesc);
530 : :
1389 peter@eisentraut.org 531 :CBC 6 : memset(nulls, true, READ_REPLICATION_SLOT_COLS * sizeof(bool));
532 : :
1653 michael@paquier.xyz 533 : 6 : LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
534 : 6 : slot = SearchNamedReplicationSlot(cmd->slotname, false);
535 [ + + - + ]: 6 : if (slot == NULL || !slot->in_use)
536 : : {
537 : 2 : LWLockRelease(ReplicationSlotControlLock);
538 : : }
539 : : else
540 : : {
541 : : ReplicationSlot slot_contents;
542 : 4 : int i = 0;
543 : :
544 : : /* Copy slot contents while holding spinlock */
545 [ - + ]: 4 : SpinLockAcquire(&slot->mutex);
546 : 4 : slot_contents = *slot;
547 : 4 : SpinLockRelease(&slot->mutex);
548 : 4 : LWLockRelease(ReplicationSlotControlLock);
549 : :
550 [ + + ]: 4 : if (OidIsValid(slot_contents.data.database))
551 [ + - ]: 1 : ereport(ERROR,
552 : : errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
553 : : errmsg("cannot use %s with a logical replication slot",
554 : : "READ_REPLICATION_SLOT"));
555 : :
556 : : /* slot type */
557 : 3 : values[i] = CStringGetTextDatum("physical");
558 : 3 : nulls[i] = false;
559 : 3 : i++;
560 : :
561 : : /* start LSN */
180 alvherre@kurilemu.de 562 [ + - ]:GNC 3 : if (XLogRecPtrIsValid(slot_contents.data.restart_lsn))
563 : : {
564 : : char xloc[64];
565 : :
302 566 : 3 : snprintf(xloc, sizeof(xloc), "%X/%08X",
1653 michael@paquier.xyz 567 :CBC 3 : LSN_FORMAT_ARGS(slot_contents.data.restart_lsn));
568 : 3 : values[i] = CStringGetTextDatum(xloc);
569 : 3 : nulls[i] = false;
570 : : }
571 : 3 : i++;
572 : :
573 : : /* timeline this WAL was produced on */
180 alvherre@kurilemu.de 574 [ + - ]:GNC 3 : if (XLogRecPtrIsValid(slot_contents.data.restart_lsn))
575 : : {
576 : : TimeLineID slots_position_timeline;
577 : : TimeLineID current_timeline;
1653 michael@paquier.xyz 578 :CBC 3 : List *timeline_history = NIL;
579 : :
580 : : /*
581 : : * While in recovery, use as timeline the currently-replaying one
582 : : * to get the LSN position's history.
583 : : */
584 [ - + ]: 3 : if (RecoveryInProgress())
1653 michael@paquier.xyz 585 :UBC 0 : (void) GetXLogReplayRecPtr(¤t_timeline);
586 : : else
1642 rhaas@postgresql.org 587 :CBC 3 : current_timeline = GetWALInsertionTimeLine();
588 : :
1653 michael@paquier.xyz 589 : 3 : timeline_history = readTimeLineHistory(current_timeline);
590 : 3 : slots_position_timeline = tliOfPointInHistory(slot_contents.data.restart_lsn,
591 : : timeline_history);
592 : 3 : values[i] = Int64GetDatum((int64) slots_position_timeline);
593 : 3 : nulls[i] = false;
594 : : }
595 : 3 : i++;
596 : :
597 [ - + ]: 3 : Assert(i == READ_REPLICATION_SLOT_COLS);
598 : : }
599 : :
600 : 5 : dest = CreateDestReceiver(DestRemoteSimple);
601 : 5 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
602 : 5 : do_tup_output(tstate, values, nulls);
603 : 5 : end_tup_output(tstate);
604 : 5 : }
605 : :
606 : :
607 : : /*
608 : : * Handle TIMELINE_HISTORY command.
609 : : */
610 : : static void
4891 heikki.linnakangas@i 611 : 15 : SendTimeLineHistory(TimeLineHistoryCmd *cmd)
612 : : {
613 : : DestReceiver *dest;
614 : : TupleDesc tupdesc;
615 : : StringInfoData buf;
616 : : char histfname[MAXFNAMELEN];
617 : : char path[MAXPGPATH];
618 : : int fd;
619 : : off_t histfilelen;
620 : : off_t bytesleft;
621 : : Size len;
622 : :
1401 peter@eisentraut.org 623 : 15 : dest = CreateDestReceiver(DestRemoteSimple);
624 : :
625 : : /*
626 : : * Reply with a result set with one row, and two columns. The first col is
627 : : * the name of the history file, 2nd is the contents.
628 : : */
629 : 15 : tupdesc = CreateTemplateTupleDesc(2);
630 : 15 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "filename", TEXTOID, -1, 0);
631 : 15 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "content", TEXTOID, -1, 0);
50 drowley@postgresql.o 632 :GNC 15 : TupleDescFinalize(tupdesc);
633 : :
4891 heikki.linnakangas@i 634 :CBC 15 : TLHistoryFileName(histfname, cmd->timeline);
635 : 15 : TLHistoryFilePath(path, cmd->timeline);
636 : :
637 : : /* Send a RowDescription message */
1401 peter@eisentraut.org 638 : 15 : dest->rStartup(dest, CMD_SELECT, tupdesc);
639 : :
640 : : /* Send a DataRow message */
987 nathan@postgresql.or 641 : 15 : pq_beginmessage(&buf, PqMsg_DataRow);
3128 andres@anarazel.de 642 : 15 : pq_sendint16(&buf, 2); /* # of columns */
3843 alvherre@alvh.no-ip. 643 : 15 : len = strlen(histfname);
3128 andres@anarazel.de 644 : 15 : pq_sendint32(&buf, len); /* col1 len */
3843 alvherre@alvh.no-ip. 645 : 15 : pq_sendbytes(&buf, histfname, len);
646 : :
3146 peter_e@gmx.net 647 : 15 : fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
4891 heikki.linnakangas@i 648 [ - + ]: 15 : if (fd < 0)
4891 heikki.linnakangas@i 649 [ # # ]:UBC 0 : ereport(ERROR,
650 : : (errcode_for_file_access(),
651 : : errmsg("could not open file \"%s\": %m", path)));
652 : :
653 : : /* Determine file length and send it to client */
4891 heikki.linnakangas@i 654 :CBC 15 : histfilelen = lseek(fd, 0, SEEK_END);
655 [ - + ]: 15 : if (histfilelen < 0)
4891 heikki.linnakangas@i 656 [ # # ]:UBC 0 : ereport(ERROR,
657 : : (errcode_for_file_access(),
658 : : errmsg("could not seek to end of file \"%s\": %m", path)));
4891 heikki.linnakangas@i 659 [ - + ]:CBC 15 : if (lseek(fd, 0, SEEK_SET) != 0)
4891 heikki.linnakangas@i 660 [ # # ]:UBC 0 : ereport(ERROR,
661 : : (errcode_for_file_access(),
662 : : errmsg("could not seek to beginning of file \"%s\": %m", path)));
663 : :
3128 andres@anarazel.de 664 :CBC 15 : pq_sendint32(&buf, histfilelen); /* col2 len */
665 : :
4891 heikki.linnakangas@i 666 : 15 : bytesleft = histfilelen;
667 [ + + ]: 30 : while (bytesleft > 0)
668 : : {
669 : : PGAlignedBlock rbuf;
670 : : int nread;
671 : :
3335 rhaas@postgresql.org 672 : 15 : pgstat_report_wait_start(WAIT_EVENT_WALSENDER_TIMELINE_HISTORY_READ);
2803 tgl@sss.pgh.pa.us 673 : 15 : nread = read(fd, rbuf.data, sizeof(rbuf));
3335 rhaas@postgresql.org 674 : 15 : pgstat_report_wait_end();
2848 michael@paquier.xyz 675 [ - + ]: 15 : if (nread < 0)
4891 heikki.linnakangas@i 676 [ # # ]:UBC 0 : ereport(ERROR,
677 : : (errcode_for_file_access(),
678 : : errmsg("could not read file \"%s\": %m",
679 : : path)));
2848 michael@paquier.xyz 680 [ - + ]:CBC 15 : else if (nread == 0)
2848 michael@paquier.xyz 681 [ # # ]:UBC 0 : ereport(ERROR,
682 : : (errcode(ERRCODE_DATA_CORRUPTED),
683 : : errmsg("could not read file \"%s\": read %d of %zu",
684 : : path, nread, (Size) bytesleft)));
685 : :
2803 tgl@sss.pgh.pa.us 686 :CBC 15 : pq_sendbytes(&buf, rbuf.data, nread);
4891 heikki.linnakangas@i 687 : 15 : bytesleft -= nread;
688 : : }
689 : :
2495 peter@eisentraut.org 690 [ - + ]: 15 : if (CloseTransientFile(fd) != 0)
2614 michael@paquier.xyz 691 [ # # ]:UBC 0 : ereport(ERROR,
692 : : (errcode_for_file_access(),
693 : : errmsg("could not close file \"%s\": %m", path)));
694 : :
4891 heikki.linnakangas@i 695 :CBC 15 : pq_endmessage(&buf);
696 : 15 : }
697 : :
698 : : /*
699 : : * Handle UPLOAD_MANIFEST command.
700 : : */
701 : : static void
867 rhaas@postgresql.org 702 : 12 : UploadManifest(void)
703 : : {
704 : : MemoryContext mcxt;
705 : : IncrementalBackupInfo *ib;
706 : 12 : off_t offset = 0;
707 : : StringInfoData buf;
708 : :
709 : : /*
710 : : * parsing the manifest will use the cryptohash stuff, which requires a
711 : : * resource owner
712 : : */
574 andres@anarazel.de 713 [ - + ]: 12 : Assert(AuxProcessResourceOwner != NULL);
714 [ + - - + ]: 12 : Assert(CurrentResourceOwner == AuxProcessResourceOwner ||
715 : : CurrentResourceOwner == NULL);
716 : 12 : CurrentResourceOwner = AuxProcessResourceOwner;
717 : :
718 : : /* Prepare to read manifest data into a temporary context. */
867 rhaas@postgresql.org 719 : 12 : mcxt = AllocSetContextCreate(CurrentMemoryContext,
720 : : "incremental backup information",
721 : : ALLOCSET_DEFAULT_SIZES);
722 : 12 : ib = CreateIncrementalBackupInfo(mcxt);
723 : :
724 : : /* Send a CopyInResponse message */
657 nathan@postgresql.or 725 : 12 : pq_beginmessage(&buf, PqMsg_CopyInResponse);
867 rhaas@postgresql.org 726 : 12 : pq_sendbyte(&buf, 0);
727 : 12 : pq_sendint16(&buf, 0);
728 : 12 : pq_endmessage_reuse(&buf);
729 : 12 : pq_flush();
730 : :
731 : : /* Receive packets from client until done. */
732 [ + + ]: 48 : while (HandleUploadManifestPacket(&buf, &offset, ib))
733 : : ;
734 : :
735 : : /* Finish up manifest processing. */
736 : 11 : FinalizeIncrementalManifest(ib);
737 : :
738 : : /*
739 : : * Discard any old manifest information and arrange to preserve the new
740 : : * information we just got.
741 : : *
742 : : * We assume that MemoryContextDelete and MemoryContextSetParent won't
743 : : * fail, and thus we shouldn't end up bailing out of here in such a way as
744 : : * to leave dangling pointers.
745 : : */
746 [ - + ]: 11 : if (uploaded_manifest_mcxt != NULL)
867 rhaas@postgresql.org 747 :UBC 0 : MemoryContextDelete(uploaded_manifest_mcxt);
867 rhaas@postgresql.org 748 :CBC 11 : MemoryContextSetParent(mcxt, CacheMemoryContext);
749 : 11 : uploaded_manifest = ib;
750 : 11 : uploaded_manifest_mcxt = mcxt;
751 : :
752 : : /* clean up the resource owner we created */
574 andres@anarazel.de 753 : 11 : ReleaseAuxProcessResources(true);
867 rhaas@postgresql.org 754 : 11 : }
755 : :
756 : : /*
757 : : * Process one packet received during the handling of an UPLOAD_MANIFEST
758 : : * operation.
759 : : *
760 : : * 'buf' is scratch space. This function expects it to be initialized, doesn't
761 : : * care what the current contents are, and may override them with completely
762 : : * new contents.
763 : : *
764 : : * The return value is true if the caller should continue processing
765 : : * additional packets and false if the UPLOAD_MANIFEST operation is complete.
766 : : */
767 : : static bool
768 : 48 : HandleUploadManifestPacket(StringInfo buf, off_t *offset,
769 : : IncrementalBackupInfo *ib)
770 : : {
771 : : int mtype;
772 : : int maxmsglen;
773 : :
774 : 48 : HOLD_CANCEL_INTERRUPTS();
775 : :
776 : 48 : pq_startmsgread();
777 : 48 : mtype = pq_getbyte();
778 [ - + ]: 48 : if (mtype == EOF)
867 rhaas@postgresql.org 779 [ # # ]:UBC 0 : ereport(ERROR,
780 : : (errcode(ERRCODE_CONNECTION_FAILURE),
781 : : errmsg("unexpected EOF on client connection with an open transaction")));
782 : :
867 rhaas@postgresql.org 783 [ + + - ]:CBC 48 : switch (mtype)
784 : : {
286 nathan@postgresql.or 785 :GNC 37 : case PqMsg_CopyData:
867 rhaas@postgresql.org 786 :CBC 37 : maxmsglen = PQ_LARGE_MESSAGE_LIMIT;
787 : 37 : break;
286 nathan@postgresql.or 788 :GNC 11 : case PqMsg_CopyDone:
789 : : case PqMsg_CopyFail:
790 : : case PqMsg_Flush:
791 : : case PqMsg_Sync:
867 rhaas@postgresql.org 792 :CBC 11 : maxmsglen = PQ_SMALL_MESSAGE_LIMIT;
793 : 11 : break;
867 rhaas@postgresql.org 794 :UBC 0 : default:
795 [ # # ]: 0 : ereport(ERROR,
796 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
797 : : errmsg("unexpected message type 0x%02X during COPY from stdin",
798 : : mtype)));
799 : : maxmsglen = 0; /* keep compiler quiet */
800 : : break;
801 : : }
802 : :
803 : : /* Now collect the message body */
867 rhaas@postgresql.org 804 [ - + ]:CBC 48 : if (pq_getmessage(buf, maxmsglen))
867 rhaas@postgresql.org 805 [ # # ]:UBC 0 : ereport(ERROR,
806 : : (errcode(ERRCODE_CONNECTION_FAILURE),
807 : : errmsg("unexpected EOF on client connection with an open transaction")));
867 rhaas@postgresql.org 808 [ - + ]:CBC 48 : RESUME_CANCEL_INTERRUPTS();
809 : :
810 : : /* Process the message */
811 [ + + - - : 48 : switch (mtype)
- ]
812 : : {
286 nathan@postgresql.or 813 :GNC 37 : case PqMsg_CopyData:
867 rhaas@postgresql.org 814 :CBC 37 : AppendIncrementalManifestData(ib, buf->data, buf->len);
815 : 36 : return true;
816 : :
286 nathan@postgresql.or 817 :GNC 11 : case PqMsg_CopyDone:
867 rhaas@postgresql.org 818 :CBC 11 : return false;
819 : :
286 nathan@postgresql.or 820 :UNC 0 : case PqMsg_Sync:
821 : : case PqMsg_Flush:
822 : : /* Ignore these while in CopyOut mode as we do elsewhere. */
867 rhaas@postgresql.org 823 :UBC 0 : return true;
824 : :
286 nathan@postgresql.or 825 :UNC 0 : case PqMsg_CopyFail:
867 rhaas@postgresql.org 826 [ # # ]:UBC 0 : ereport(ERROR,
827 : : (errcode(ERRCODE_QUERY_CANCELED),
828 : : errmsg("COPY from stdin failed: %s",
829 : : pq_getmsgstring(buf))));
830 : : }
831 : :
832 : : /* Not reached. */
833 : 0 : Assert(false);
834 : : return false;
835 : : }
836 : :
837 : : /*
838 : : * Handle START_REPLICATION command.
839 : : *
840 : : * At the moment, this never returns, but an ereport(ERROR) will take us back
841 : : * to the main loop.
842 : : */
843 : : static void
4891 heikki.linnakangas@i 844 :CBC 302 : StartReplication(StartReplicationCmd *cmd)
845 : : {
846 : : StringInfoData buf;
847 : : XLogRecPtr FlushPtr;
848 : : TimeLineID FlushTLI;
849 : :
850 : : /* create xlogreader for physical replication */
2157 michael@paquier.xyz 851 : 302 : xlogreader =
1821 tmunro@postgresql.or 852 : 302 : XLogReaderAllocate(wal_segment_size, NULL,
853 : 302 : XL_ROUTINE(.segment_open = WalSndSegmentOpen,
854 : : .segment_close = wal_segment_close),
855 : : NULL);
856 : :
2157 michael@paquier.xyz 857 [ - + ]: 302 : if (!xlogreader)
2157 michael@paquier.xyz 858 [ # # ]:UBC 0 : ereport(ERROR,
859 : : (errcode(ERRCODE_OUT_OF_MEMORY),
860 : : errmsg("out of memory"),
861 : : errdetail("Failed while allocating a WAL reading processor.")));
862 : :
863 : : /*
864 : : * We assume here that we're logging enough information in the WAL for
865 : : * log-shipping, since this is checked in PostmasterMain().
866 : : *
867 : : * NOTE: wal_level can only change at shutdown, so in most cases it is
868 : : * difficult for there to be WAL data that we can still see that was
869 : : * written at wal_level='minimal'.
870 : : */
871 : :
4477 rhaas@postgresql.org 872 [ + + ]:CBC 302 : if (cmd->slotname)
873 : : {
459 akapila@postgresql.o 874 : 199 : ReplicationSlotAcquire(cmd->slotname, true, true);
3920 andres@anarazel.de 875 [ - + ]: 197 : if (SlotIsLogical(MyReplicationSlot))
4477 rhaas@postgresql.org 876 [ # # ]:UBC 0 : ereport(ERROR,
877 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
878 : : errmsg("cannot use a logical replication slot for physical replication")));
879 : :
880 : : /*
881 : : * We don't need to verify the slot's restart_lsn here; instead we
882 : : * rely on the caller requesting the starting point to use. If the
883 : : * WAL segment doesn't exist, we'll fail later.
884 : : */
885 : : }
886 : :
887 : : /*
888 : : * Select the timeline. If it was given explicitly by the client, use
889 : : * that. Otherwise use the timeline of the last replayed record.
890 : : */
1761 jdavis@postgresql.or 891 :CBC 300 : am_cascading_walsender = RecoveryInProgress();
4884 heikki.linnakangas@i 892 [ + + ]: 300 : if (am_cascading_walsender)
1642 rhaas@postgresql.org 893 : 16 : FlushPtr = GetStandbyFlushRecPtr(&FlushTLI);
894 : : else
895 : 284 : FlushPtr = GetFlushRecPtr(&FlushTLI);
896 : :
4891 heikki.linnakangas@i 897 [ + + ]: 300 : if (cmd->timeline != 0)
898 : : {
899 : : XLogRecPtr switchpoint;
900 : :
901 : 299 : sendTimeLine = cmd->timeline;
1642 rhaas@postgresql.org 902 [ + + ]: 299 : if (sendTimeLine == FlushTLI)
903 : : {
4891 heikki.linnakangas@i 904 : 290 : sendTimeLineIsHistoric = false;
905 : 290 : sendTimeLineValidUpto = InvalidXLogRecPtr;
906 : : }
907 : : else
908 : : {
909 : : List *timeLineHistory;
910 : :
911 : 9 : sendTimeLineIsHistoric = true;
912 : :
913 : : /*
914 : : * Check that the timeline the client requested exists, and the
915 : : * requested start location is on that timeline.
916 : : */
1642 rhaas@postgresql.org 917 : 9 : timeLineHistory = readTimeLineHistory(FlushTLI);
4856 heikki.linnakangas@i 918 : 9 : switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
919 : : &sendTimeLineNextTLI);
4891 920 : 9 : list_free_deep(timeLineHistory);
921 : :
922 : : /*
923 : : * Found the requested timeline in the history. Check that
924 : : * requested startpoint is on that timeline in our history.
925 : : *
926 : : * This is quite loose on purpose. We only check that we didn't
927 : : * fork off the requested timeline before the switchpoint. We
928 : : * don't check that we switched *to* it before the requested
929 : : * starting point. This is because the client can legitimately
930 : : * request to start replication from the beginning of the WAL
931 : : * segment that contains switchpoint, but on the new timeline, so
932 : : * that it doesn't end up with a partial segment. If you ask for
933 : : * too old a starting point, you'll get an error later when we
934 : : * fail to find the requested WAL segment in pg_wal.
935 : : *
936 : : * XXX: we could be more strict here and only allow a startpoint
937 : : * that's older than the switchpoint, if it's still in the same
938 : : * WAL segment.
939 : : */
180 alvherre@kurilemu.de 940 [ + - ]:GNC 9 : if (XLogRecPtrIsValid(switchpoint) &&
4876 alvherre@alvh.no-ip. 941 [ - + ]:CBC 9 : switchpoint < cmd->startpoint)
942 : : {
4891 heikki.linnakangas@i 943 [ # # ]:UBC 0 : ereport(ERROR,
944 : : errmsg("requested starting point %X/%08X on timeline %u is not in this server's history",
945 : : LSN_FORMAT_ARGS(cmd->startpoint),
946 : : cmd->timeline),
947 : : errdetail("This server's history forked from timeline %u at %X/%08X.",
948 : : cmd->timeline,
949 : : LSN_FORMAT_ARGS(switchpoint)));
950 : : }
4891 heikki.linnakangas@i 951 :CBC 9 : sendTimeLineValidUpto = switchpoint;
952 : : }
953 : : }
954 : : else
955 : : {
1642 rhaas@postgresql.org 956 : 1 : sendTimeLine = FlushTLI;
4891 heikki.linnakangas@i 957 : 1 : sendTimeLineValidUpto = InvalidXLogRecPtr;
958 : 1 : sendTimeLineIsHistoric = false;
959 : : }
960 : :
961 : 300 : streamingDoneSending = streamingDoneReceiving = false;
962 : :
963 : : /* If there is nothing to stream, don't even enter COPY mode */
4856 964 [ + + + - ]: 300 : if (!sendTimeLineIsHistoric || cmd->startpoint < sendTimeLineValidUpto)
965 : : {
966 : : /*
967 : : * When we first start replication the standby will be behind the
968 : : * primary. For some applications, for example synchronous
969 : : * replication, it is important to have a clear state for this initial
970 : : * catchup mode, so we can trigger actions when we change streaming
971 : : * state later. We may stay in this state for a long time, which is
972 : : * exactly why we want to be able to monitor whether or not we are
973 : : * still here.
974 : : */
4891 975 : 300 : WalSndSetState(WALSNDSTATE_CATCHUP);
976 : :
977 : : /* Send a CopyBothResponse message, and start streaming */
987 nathan@postgresql.or 978 : 300 : pq_beginmessage(&buf, PqMsg_CopyBothResponse);
4891 heikki.linnakangas@i 979 : 300 : pq_sendbyte(&buf, 0);
3128 andres@anarazel.de 980 : 300 : pq_sendint16(&buf, 0);
4891 heikki.linnakangas@i 981 : 300 : pq_endmessage(&buf);
982 : 300 : pq_flush();
983 : :
984 : : /*
985 : : * Don't allow a request to stream from a future point in WAL that
986 : : * hasn't been flushed to disk in this server yet.
987 : : */
4876 alvherre@alvh.no-ip. 988 [ - + ]: 300 : if (FlushPtr < cmd->startpoint)
989 : : {
4891 heikki.linnakangas@i 990 [ # # ]:UBC 0 : ereport(ERROR,
991 : : errmsg("requested starting point %X/%08X is ahead of the WAL flush position of this server %X/%08X",
992 : : LSN_FORMAT_ARGS(cmd->startpoint),
993 : : LSN_FORMAT_ARGS(FlushPtr)));
994 : : }
995 : :
996 : : /* Start streaming from the requested point */
4891 heikki.linnakangas@i 997 :CBC 300 : sentPtr = cmd->startpoint;
998 : :
999 : : /* Initialize shared memory status, too */
3231 alvherre@alvh.no-ip. 1000 [ - + ]: 300 : SpinLockAcquire(&MyWalSnd->mutex);
1001 : 300 : MyWalSnd->sentPtr = sentPtr;
1002 : 300 : SpinLockRelease(&MyWalSnd->mutex);
1003 : :
4891 heikki.linnakangas@i 1004 : 300 : SyncRepInitConfig();
1005 : :
1006 : : /* Main loop of walsender */
1007 : 300 : replication_active = true;
1008 : :
4439 rhaas@postgresql.org 1009 : 300 : WalSndLoop(XLogSendPhysical);
1010 : :
4891 heikki.linnakangas@i 1011 : 162 : replication_active = false;
3256 andres@anarazel.de 1012 [ - + ]: 162 : if (got_STOPPING)
4891 heikki.linnakangas@i 1013 :UBC 0 : proc_exit(0);
4891 heikki.linnakangas@i 1014 :CBC 162 : WalSndSetState(WALSNDSTATE_STARTUP);
1015 : :
4856 1016 [ + - - + ]: 162 : Assert(streamingDoneSending && streamingDoneReceiving);
1017 : : }
1018 : :
4477 rhaas@postgresql.org 1019 [ + + ]: 162 : if (cmd->slotname)
1020 : 147 : ReplicationSlotRelease();
1021 : :
1022 : : /*
1023 : : * Copy is finished now. Send a single-row result set indicating the next
1024 : : * timeline.
1025 : : */
4856 heikki.linnakangas@i 1026 [ + + ]: 162 : if (sendTimeLineIsHistoric)
1027 : : {
1028 : : char startpos_str[8 + 1 + 8 + 1];
1029 : : DestReceiver *dest;
1030 : : TupOutputState *tstate;
1031 : : TupleDesc tupdesc;
1032 : : Datum values[2];
1389 peter@eisentraut.org 1033 : 11 : bool nulls[2] = {0};
1034 : :
302 alvherre@kurilemu.de 1035 :GNC 11 : snprintf(startpos_str, sizeof(startpos_str), "%X/%08X",
1897 peter@eisentraut.org 1036 :CBC 11 : LSN_FORMAT_ARGS(sendTimeLineValidUpto));
1037 : :
3380 rhaas@postgresql.org 1038 : 11 : dest = CreateDestReceiver(DestRemoteSimple);
1039 : :
1040 : : /*
1041 : : * Need a tuple descriptor representing two columns. int8 may seem
1042 : : * like a surprising data type for this, but in theory int4 would not
1043 : : * be wide enough for this, as TimeLineID is unsigned.
1044 : : */
2723 andres@anarazel.de 1045 : 11 : tupdesc = CreateTemplateTupleDesc(2);
3380 rhaas@postgresql.org 1046 : 11 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",
1047 : : INT8OID, -1, 0);
1048 : 11 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "next_tli_startpos",
1049 : : TEXTOID, -1, 0);
50 drowley@postgresql.o 1050 :GNC 11 : TupleDescFinalize(tupdesc);
1051 : :
1052 : : /* prepare for projection of tuple */
2728 andres@anarazel.de 1053 :CBC 11 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
1054 : :
3380 rhaas@postgresql.org 1055 : 11 : values[0] = Int64GetDatum((int64) sendTimeLineNextTLI);
1056 : 11 : values[1] = CStringGetTextDatum(startpos_str);
1057 : :
1058 : : /* send it to dest */
1059 : 11 : do_tup_output(tstate, values, nulls);
1060 : :
1061 : 11 : end_tup_output(tstate);
1062 : : }
1063 : :
1064 : : /* Send CommandComplete message */
2057 alvherre@alvh.no-ip. 1065 : 162 : EndReplicationCommand("START_STREAMING");
5590 magnus@hagander.net 1066 : 162 : }
1067 : :
1068 : : /*
1069 : : * XLogReaderRoutine->page_read callback for logical decoding contexts, as a
1070 : : * walsender process.
1071 : : *
1072 : : * Inside the walsender we can do better than read_local_xlog_page,
1073 : : * which has to do a plain sleep/busy loop, because the walsender's latch gets
1074 : : * set every time WAL is flushed.
1075 : : */
1076 : : static int
1821 tmunro@postgresql.or 1077 : 19900 : logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
1078 : : XLogRecPtr targetRecPtr, char *cur_page)
1079 : : {
1080 : : XLogRecPtr flushptr;
1081 : : int count;
1082 : : WALReadError errinfo;
1083 : : XLogSegNo segno;
1084 : : TimeLineID currTLI;
1085 : :
1086 : : /*
1087 : : * Make sure we have enough WAL available before retrieving the current
1088 : : * timeline.
1089 : : */
1123 andres@anarazel.de 1090 : 19900 : flushptr = WalSndWaitForWal(targetPagePtr + reqLen);
1091 : :
1092 : : /* Fail if not enough (implies we are going to shut down) */
665 akapila@postgresql.o 1093 [ + + ]: 19682 : if (flushptr < targetPagePtr + reqLen)
1094 : 3367 : return -1;
1095 : :
1096 : : /*
1097 : : * Since logical decoding is also permitted on a standby server, we need
1098 : : * to check if the server is in recovery to decide how to get the current
1099 : : * timeline ID (so that it also covers the promotion or timeline change
1100 : : * cases). We must determine am_cascading_walsender after waiting for the
1101 : : * required WAL so that it is correct when the walsender wakes up after a
1102 : : * promotion.
1103 : : */
1123 andres@anarazel.de 1104 : 16315 : am_cascading_walsender = RecoveryInProgress();
1105 : :
1106 [ + + ]: 16315 : if (am_cascading_walsender)
1107 : 167 : GetXLogReplayRecPtr(&currTLI);
1108 : : else
1109 : 16148 : currTLI = GetWALInsertionTimeLine();
1110 : :
1642 rhaas@postgresql.org 1111 : 16315 : XLogReadDetermineTimeline(state, targetPagePtr, reqLen, currTLI);
1112 : 16315 : sendTimeLineIsHistoric = (state->currTLI != currTLI);
3331 simon@2ndQuadrant.co 1113 : 16315 : sendTimeLine = state->currTLI;
1114 : 16315 : sendTimeLineValidUpto = state->currTLIValidUntil;
1115 : 16315 : sendTimeLineNextTLI = state->nextTLI;
1116 : :
3231 tgl@sss.pgh.pa.us 1117 [ + + ]: 16315 : if (targetPagePtr + XLOG_BLCKSZ <= flushptr)
1118 : 14258 : count = XLOG_BLCKSZ; /* more than one block available */
1119 : : else
1120 : 2057 : count = flushptr - targetPagePtr; /* part of the page available */
1121 : :
1122 : : /* now actually read the data, we know it's there */
1821 tmunro@postgresql.or 1123 [ - + ]: 16315 : if (!WALRead(state,
1124 : : cur_page,
1125 : : targetPagePtr,
1126 : : count,
1127 : : currTLI, /* Pass the current TLI because only
1128 : : * WalSndSegmentOpen controls whether new TLI
1129 : : * is needed. */
1130 : : &errinfo))
2353 alvherre@alvh.no-ip. 1131 :UBC 0 : WALReadRaiseError(&errinfo);
1132 : :
1133 : : /*
1134 : : * After reading into the buffer, check that what we read was valid. We do
1135 : : * this after reading, because even though the segment was present when we
1136 : : * opened it, it might get recycled or removed while we read it. The
1137 : : * read() succeeds in that case, but the data we tried to read might
1138 : : * already have been overwritten with new WAL records.
1139 : : */
2183 alvherre@alvh.no-ip. 1140 :CBC 16315 : XLByteToSeg(targetPagePtr, segno, state->segcxt.ws_segsize);
1141 : 16315 : CheckXLogRemoved(segno, state->seg.ws_tli);
1142 : :
1821 tmunro@postgresql.or 1143 : 16315 : return count;
1144 : : }
1145 : :
1146 : : /*
1147 : : * Process extra options given to CREATE_REPLICATION_SLOT.
1148 : : */
1149 : : static void
3339 peter_e@gmx.net 1150 : 513 : parseCreateReplSlotOptions(CreateReplicationSlotCmd *cmd,
1151 : : bool *reserve_wal,
1152 : : CRSSnapshotAction *snapshot_action,
1153 : : bool *two_phase, bool *failover)
1154 : : {
1155 : : ListCell *lc;
1156 : 513 : bool snapshot_action_given = false;
1157 : 513 : bool reserve_wal_given = false;
1770 akapila@postgresql.o 1158 : 513 : bool two_phase_given = false;
827 1159 : 513 : bool failover_given = false;
1160 : :
1161 : : /* Parse options */
3275 bruce@momjian.us 1162 [ + + + + : 1039 : foreach(lc, cmd->options)
+ + ]
1163 : : {
3339 peter_e@gmx.net 1164 : 526 : DefElem *defel = (DefElem *) lfirst(lc);
1165 : :
1673 rhaas@postgresql.org 1166 [ + + ]: 526 : if (strcmp(defel->defname, "snapshot") == 0)
1167 : : {
1168 : : char *action;
1169 : :
3339 peter_e@gmx.net 1170 [ + - - + ]: 362 : if (snapshot_action_given || cmd->kind != REPLICATION_KIND_LOGICAL)
3339 peter_e@gmx.net 1171 [ # # ]:UBC 0 : ereport(ERROR,
1172 : : (errcode(ERRCODE_SYNTAX_ERROR),
1173 : : errmsg("conflicting or redundant options")));
1174 : :
1673 rhaas@postgresql.org 1175 :CBC 362 : action = defGetString(defel);
3339 peter_e@gmx.net 1176 : 362 : snapshot_action_given = true;
1177 : :
1673 rhaas@postgresql.org 1178 [ + + ]: 362 : if (strcmp(action, "export") == 0)
1179 : 1 : *snapshot_action = CRS_EXPORT_SNAPSHOT;
1180 [ + + ]: 361 : else if (strcmp(action, "nothing") == 0)
1181 : 149 : *snapshot_action = CRS_NOEXPORT_SNAPSHOT;
1182 [ + - ]: 212 : else if (strcmp(action, "use") == 0)
1183 : 212 : *snapshot_action = CRS_USE_SNAPSHOT;
1184 : : else
3330 peter_e@gmx.net 1185 [ # # ]:UBC 0 : ereport(ERROR,
1186 : : (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1187 : : errmsg("unrecognized value for %s option \"%s\": \"%s\"",
1188 : : "CREATE_REPLICATION_SLOT", defel->defname, action)));
1189 : : }
3339 peter_e@gmx.net 1190 [ + + ]:CBC 164 : else if (strcmp(defel->defname, "reserve_wal") == 0)
1191 : : {
1192 [ + - - + ]: 150 : if (reserve_wal_given || cmd->kind != REPLICATION_KIND_PHYSICAL)
3339 peter_e@gmx.net 1193 [ # # ]:UBC 0 : ereport(ERROR,
1194 : : (errcode(ERRCODE_SYNTAX_ERROR),
1195 : : errmsg("conflicting or redundant options")));
1196 : :
3339 peter_e@gmx.net 1197 :CBC 150 : reserve_wal_given = true;
1673 rhaas@postgresql.org 1198 : 150 : *reserve_wal = defGetBoolean(defel);
1199 : : }
1770 akapila@postgresql.o 1200 [ + + ]: 14 : else if (strcmp(defel->defname, "two_phase") == 0)
1201 : : {
1202 [ + - - + ]: 2 : if (two_phase_given || cmd->kind != REPLICATION_KIND_LOGICAL)
1770 akapila@postgresql.o 1203 [ # # ]:UBC 0 : ereport(ERROR,
1204 : : (errcode(ERRCODE_SYNTAX_ERROR),
1205 : : errmsg("conflicting or redundant options")));
1770 akapila@postgresql.o 1206 :CBC 2 : two_phase_given = true;
1673 rhaas@postgresql.org 1207 : 2 : *two_phase = defGetBoolean(defel);
1208 : : }
827 akapila@postgresql.o 1209 [ + - ]: 12 : else if (strcmp(defel->defname, "failover") == 0)
1210 : : {
1211 [ + - - + ]: 12 : if (failover_given || cmd->kind != REPLICATION_KIND_LOGICAL)
827 akapila@postgresql.o 1212 [ # # ]:UBC 0 : ereport(ERROR,
1213 : : (errcode(ERRCODE_SYNTAX_ERROR),
1214 : : errmsg("conflicting or redundant options")));
827 akapila@postgresql.o 1215 :CBC 12 : failover_given = true;
1216 : 12 : *failover = defGetBoolean(defel);
1217 : : }
1218 : : else
3339 peter_e@gmx.net 1219 [ # # ]:UBC 0 : elog(ERROR, "unrecognized option: %s", defel->defname);
1220 : : }
3339 peter_e@gmx.net 1221 :CBC 513 : }
1222 : :
1223 : : /*
1224 : : * Create a new replication slot.
1225 : : */
1226 : : static void
4477 rhaas@postgresql.org 1227 : 513 : CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
1228 : : {
4439 1229 : 513 : const char *snapshot_name = NULL;
1230 : : char xloc[MAXFNAMELEN];
1231 : : char *slot_name;
3339 peter_e@gmx.net 1232 : 513 : bool reserve_wal = false;
1770 akapila@postgresql.o 1233 : 513 : bool two_phase = false;
827 1234 : 513 : bool failover = false;
3330 peter_e@gmx.net 1235 : 513 : CRSSnapshotAction snapshot_action = CRS_EXPORT_SNAPSHOT;
1236 : : DestReceiver *dest;
1237 : : TupOutputState *tstate;
1238 : : TupleDesc tupdesc;
1239 : : Datum values[4];
1389 peter@eisentraut.org 1240 : 513 : bool nulls[4] = {0};
1241 : :
4477 rhaas@postgresql.org 1242 [ - + ]: 513 : Assert(!MyReplicationSlot);
1243 : :
827 akapila@postgresql.o 1244 : 513 : parseCreateReplSlotOptions(cmd, &reserve_wal, &snapshot_action, &two_phase,
1245 : : &failover);
1246 : :
4439 rhaas@postgresql.org 1247 [ + + ]: 513 : if (cmd->kind == REPLICATION_KIND_PHYSICAL)
1248 : : {
3435 peter_e@gmx.net 1249 : 151 : ReplicationSlotCreate(cmd->slotname, false,
1889 akapila@postgresql.o 1250 [ + + ]: 151 : cmd->temporary ? RS_TEMPORARY : RS_PERSISTENT,
1251 : : false, false, false, false);
1252 : :
896 michael@paquier.xyz 1253 [ + + ]: 150 : if (reserve_wal)
1254 : : {
1255 : 149 : ReplicationSlotReserveWal();
1256 : :
1257 : 149 : ReplicationSlotMarkDirty();
1258 : :
1259 : : /* Write this slot to disk if it's a permanent one. */
1260 [ + + ]: 149 : if (!cmd->temporary)
1261 : 4 : ReplicationSlotSave();
1262 : : }
1263 : : }
1264 : : else
1265 : : {
1266 : : LogicalDecodingContext *ctx;
1267 : 362 : bool need_full_snapshot = false;
1268 : :
1269 [ - + ]: 362 : Assert(cmd->kind == REPLICATION_KIND_LOGICAL);
1270 : :
28 alvherre@kurilemu.de 1271 :GNC 362 : CheckLogicalDecodingRequirements(false);
1272 : :
1273 : : /*
1274 : : * Initially create persistent slot as ephemeral - that allows us to
1275 : : * nicely handle errors during initialization because it'll get
1276 : : * dropped if this transaction fails. We'll make it persistent at the
1277 : : * end. Temporary slots can be created as temporary from beginning as
1278 : : * they get dropped on error as well.
1279 : : */
3435 peter_e@gmx.net 1280 :CBC 362 : ReplicationSlotCreate(cmd->slotname, true,
1889 akapila@postgresql.o 1281 [ - + ]: 362 : cmd->temporary ? RS_TEMPORARY : RS_EPHEMERAL,
1282 : : two_phase, false, failover, false);
1283 : :
1284 : : /*
1285 : : * Do options check early so that we can bail before calling the
1286 : : * DecodingContextFindStartpoint which can take long time.
1287 : : */
3330 peter_e@gmx.net 1288 [ + + ]: 362 : if (snapshot_action == CRS_EXPORT_SNAPSHOT)
1289 : : {
1290 [ - + ]: 1 : if (IsTransactionBlock())
3330 peter_e@gmx.net 1291 [ # # ]:UBC 0 : ereport(ERROR,
1292 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1293 : : (errmsg("%s must not be called inside a transaction",
1294 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'export')")));
1295 : :
3295 andres@anarazel.de 1296 :CBC 1 : need_full_snapshot = true;
1297 : : }
3330 peter_e@gmx.net 1298 [ + + ]: 361 : else if (snapshot_action == CRS_USE_SNAPSHOT)
1299 : : {
1300 [ - + ]: 212 : if (!IsTransactionBlock())
3330 peter_e@gmx.net 1301 [ # # ]:UBC 0 : ereport(ERROR,
1302 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1303 : : (errmsg("%s must be called inside a transaction",
1304 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1305 : :
3330 peter_e@gmx.net 1306 [ - + ]:CBC 212 : if (XactIsoLevel != XACT_REPEATABLE_READ)
3330 peter_e@gmx.net 1307 [ # # ]:UBC 0 : ereport(ERROR,
1308 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1309 : : (errmsg("%s must be called in REPEATABLE READ isolation mode transaction",
1310 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1261 akapila@postgresql.o 1311 [ - + ]:CBC 212 : if (!XactReadOnly)
1261 akapila@postgresql.o 1312 [ # # ]:UBC 0 : ereport(ERROR,
1313 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1314 : : (errmsg("%s must be called in a read-only transaction",
1315 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1316 : :
3330 peter_e@gmx.net 1317 [ - + ]:CBC 212 : if (FirstSnapshotSet)
3330 peter_e@gmx.net 1318 [ # # ]:UBC 0 : ereport(ERROR,
1319 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1320 : : (errmsg("%s must be called before any query",
1321 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1322 : :
3330 peter_e@gmx.net 1323 [ - + ]:CBC 212 : if (IsSubTransaction())
3330 peter_e@gmx.net 1324 [ # # ]:UBC 0 : ereport(ERROR,
1325 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1326 : : (errmsg("%s must not be called in a subtransaction",
1327 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1328 : :
3295 andres@anarazel.de 1329 :CBC 212 : need_full_snapshot = true;
1330 : : }
1331 : :
1332 : : /*
1333 : : * Ensure the logical decoding is enabled before initializing the
1334 : : * logical decoding context.
1335 : : */
133 msawada@postgresql.o 1336 :GNC 362 : EnsureLogicalDecodingEnabled();
1337 [ - + ]: 362 : Assert(IsLogicalDecodingEnabled());
1338 : :
3295 andres@anarazel.de 1339 :CBC 362 : ctx = CreateInitDecodingContext(cmd->plugin, NIL, need_full_snapshot,
1340 : : false,
1341 : : InvalidXLogRecPtr,
1821 tmunro@postgresql.or 1342 : 362 : XL_ROUTINE(.page_read = logical_read_xlog_page,
1343 : : .segment_open = WalSndSegmentOpen,
1344 : : .segment_close = wal_segment_close),
1345 : : WalSndPrepareWrite, WalSndWriteData,
1346 : : WalSndUpdateProgress);
1347 : :
1348 : : /*
1349 : : * Signal that we don't need the timeout mechanism. We're just
1350 : : * creating the replication slot and don't yet accept feedback
1351 : : * messages or send keepalives. As we possibly need to wait for
1352 : : * further WAL the walsender would otherwise possibly be killed too
1353 : : * soon.
1354 : : */
4359 andres@anarazel.de 1355 : 362 : last_reply_timestamp = 0;
1356 : :
1357 : : /* build initial snapshot, might take a while */
4439 rhaas@postgresql.org 1358 : 362 : DecodingContextFindStartpoint(ctx);
1359 : :
1360 : : /*
1361 : : * Export or use the snapshot if we've been asked to do so.
1362 : : *
1363 : : * NB. We will convert the snapbuild.c kind of snapshot to normal
1364 : : * snapshot when doing this.
1365 : : */
3330 peter_e@gmx.net 1366 [ + + ]: 362 : if (snapshot_action == CRS_EXPORT_SNAPSHOT)
1367 : : {
3339 1368 : 1 : snapshot_name = SnapBuildExportSnapshot(ctx->snapshot_builder);
1369 : : }
3330 1370 [ + + ]: 361 : else if (snapshot_action == CRS_USE_SNAPSHOT)
1371 : : {
1372 : : Snapshot snap;
1373 : :
3327 tgl@sss.pgh.pa.us 1374 : 212 : snap = SnapBuildInitialSnapshot(ctx->snapshot_builder);
3330 peter_e@gmx.net 1375 : 212 : RestoreTransactionSnapshot(snap, MyProc);
1376 : : }
1377 : :
1378 : : /* don't need the decoding context anymore */
4439 rhaas@postgresql.org 1379 : 362 : FreeDecodingContext(ctx);
1380 : :
3435 peter_e@gmx.net 1381 [ + - ]: 362 : if (!cmd->temporary)
1382 : 362 : ReplicationSlotPersist();
1383 : : }
1384 : :
302 alvherre@kurilemu.de 1385 :GNC 512 : snprintf(xloc, sizeof(xloc), "%X/%08X",
1897 peter@eisentraut.org 1386 :CBC 512 : LSN_FORMAT_ARGS(MyReplicationSlot->data.confirmed_flush));
1387 : :
3380 rhaas@postgresql.org 1388 : 512 : dest = CreateDestReceiver(DestRemoteSimple);
1389 : :
1390 : : /*----------
1391 : : * Need a tuple descriptor representing four columns:
1392 : : * - first field: the slot name
1393 : : * - second field: LSN at which we became consistent
1394 : : * - third field: exported snapshot's name
1395 : : * - fourth field: output plugin
1396 : : */
2723 andres@anarazel.de 1397 : 512 : tupdesc = CreateTemplateTupleDesc(4);
3380 rhaas@postgresql.org 1398 : 512 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "slot_name",
1399 : : TEXTOID, -1, 0);
1400 : 512 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "consistent_point",
1401 : : TEXTOID, -1, 0);
1402 : 512 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "snapshot_name",
1403 : : TEXTOID, -1, 0);
1404 : 512 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 4, "output_plugin",
1405 : : TEXTOID, -1, 0);
50 drowley@postgresql.o 1406 :GNC 512 : TupleDescFinalize(tupdesc);
1407 : :
1408 : : /* prepare for projection of tuples */
2728 andres@anarazel.de 1409 :CBC 512 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
1410 : :
1411 : : /* slot_name */
3380 rhaas@postgresql.org 1412 : 512 : slot_name = NameStr(MyReplicationSlot->data.name);
1413 : 512 : values[0] = CStringGetTextDatum(slot_name);
1414 : :
1415 : : /* consistent wal location */
3280 peter_e@gmx.net 1416 : 512 : values[1] = CStringGetTextDatum(xloc);
1417 : :
1418 : : /* snapshot name, or NULL if none */
4439 rhaas@postgresql.org 1419 [ + + ]: 512 : if (snapshot_name != NULL)
3380 1420 : 1 : values[2] = CStringGetTextDatum(snapshot_name);
1421 : : else
1422 : 511 : nulls[2] = true;
1423 : :
1424 : : /* plugin, or NULL if none */
4439 1425 [ + + ]: 512 : if (cmd->plugin != NULL)
3380 1426 : 362 : values[3] = CStringGetTextDatum(cmd->plugin);
1427 : : else
1428 : 150 : nulls[3] = true;
1429 : :
1430 : : /* send it to dest */
1431 : 512 : do_tup_output(tstate, values, nulls);
1432 : 512 : end_tup_output(tstate);
1433 : :
4477 1434 : 512 : ReplicationSlotRelease();
1435 : 512 : }
1436 : :
1437 : : /*
1438 : : * Get rid of a replication slot that is no longer wanted.
1439 : : */
1440 : : static void
1441 : 295 : DropReplicationSlot(DropReplicationSlotCmd *cmd)
1442 : : {
3168 alvherre@alvh.no-ip. 1443 : 295 : ReplicationSlotDrop(cmd->slotname, !cmd->wait);
4477 rhaas@postgresql.org 1444 : 292 : }
1445 : :
1446 : : /*
1447 : : * Change the definition of a replication slot.
1448 : : */
1449 : : static void
650 akapila@postgresql.o 1450 : 7 : AlterReplicationSlot(AlterReplicationSlotCmd *cmd)
1451 : : {
827 1452 : 7 : bool failover_given = false;
650 1453 : 7 : bool two_phase_given = false;
1454 : : bool failover;
1455 : : bool two_phase;
1456 : :
1457 : : /* Parse options */
827 1458 [ + - + + : 21 : foreach_ptr(DefElem, defel, cmd->options)
+ + ]
1459 : : {
1460 [ + + ]: 7 : if (strcmp(defel->defname, "failover") == 0)
1461 : : {
1462 [ - + ]: 6 : if (failover_given)
827 akapila@postgresql.o 1463 [ # # ]:UBC 0 : ereport(ERROR,
1464 : : (errcode(ERRCODE_SYNTAX_ERROR),
1465 : : errmsg("conflicting or redundant options")));
827 akapila@postgresql.o 1466 :CBC 6 : failover_given = true;
650 1467 : 6 : failover = defGetBoolean(defel);
1468 : : }
1469 [ + - ]: 1 : else if (strcmp(defel->defname, "two_phase") == 0)
1470 : : {
1471 [ - + ]: 1 : if (two_phase_given)
650 akapila@postgresql.o 1472 [ # # ]:UBC 0 : ereport(ERROR,
1473 : : (errcode(ERRCODE_SYNTAX_ERROR),
1474 : : errmsg("conflicting or redundant options")));
650 akapila@postgresql.o 1475 :CBC 1 : two_phase_given = true;
1476 : 1 : two_phase = defGetBoolean(defel);
1477 : : }
1478 : : else
827 akapila@postgresql.o 1479 [ # # ]:UBC 0 : elog(ERROR, "unrecognized option: %s", defel->defname);
1480 : : }
1481 : :
650 akapila@postgresql.o 1482 [ + + + + ]:CBC 7 : ReplicationSlotAlter(cmd->slotname,
1483 : : failover_given ? &failover : NULL,
1484 : : two_phase_given ? &two_phase : NULL);
827 1485 : 5 : }
1486 : :
/*
 * Load previously initiated logical slot and prepare for sending data (via
 * WalSndLoop).
 *
 * Does not return until streaming ends: WalSndLoop runs until the client
 * exits COPY mode or the walsender is told to stop.
 */
static void
StartLogicalReplication(StartReplicationCmd *cmd)
{
	StringInfoData buf;
	QueryCompletion qc;

	/* make sure that our requirements are still fulfilled */
	CheckLogicalDecodingRequirements(false);

	Assert(!MyReplicationSlot);

	ReplicationSlotAcquire(cmd->slotname, true, true);

	/*
	 * Force a disconnect, so that the decoding code doesn't need to care
	 * about an eventual switch from running in recovery, to running in a
	 * normal environment. Client code is expected to handle reconnects.
	 */
	if (am_cascading_walsender && !RecoveryInProgress())
	{
		ereport(LOG,
				(errmsg("terminating walsender process after promotion")));
		got_STOPPING = true;
	}

	/*
	 * Create our decoding context, making it start at the previously ack'ed
	 * position.
	 *
	 * Do this before sending a CopyBothResponse message, so that any errors
	 * are reported early.
	 */
	logical_decoding_ctx =
		CreateDecodingContext(cmd->startpoint, cmd->options, false,
							  XL_ROUTINE(.page_read = logical_read_xlog_page,
										 .segment_open = WalSndSegmentOpen,
										 .segment_close = wal_segment_close),
							  WalSndPrepareWrite, WalSndWriteData,
							  WalSndUpdateProgress);
	xlogreader = logical_decoding_ctx->reader;

	WalSndSetState(WALSNDSTATE_CATCHUP);

	/* Send a CopyBothResponse message, and start streaming */
	pq_beginmessage(&buf, PqMsg_CopyBothResponse);
	pq_sendbyte(&buf, 0);
	pq_sendint16(&buf, 0);
	pq_endmessage(&buf);
	pq_flush();

	/* Start reading WAL from the oldest required WAL. */
	XLogBeginRead(logical_decoding_ctx->reader,
				  MyReplicationSlot->data.restart_lsn);

	/*
	 * Report the location after which we'll send out further commits as the
	 * current sentPtr.
	 *
	 * NOTE(review): local sentPtr starts at confirmed_flush while the
	 * shared-memory sentPtr below starts at restart_lsn; the two are
	 * deliberately different starting points — confirm before "unifying".
	 */
	sentPtr = MyReplicationSlot->data.confirmed_flush;

	/* Also update the sent position status in shared memory */
	SpinLockAcquire(&MyWalSnd->mutex);
	MyWalSnd->sentPtr = MyReplicationSlot->data.restart_lsn;
	SpinLockRelease(&MyWalSnd->mutex);

	replication_active = true;

	SyncRepInitConfig();

	/* Main loop of walsender */
	WalSndLoop(XLogSendLogical);

	FreeDecodingContext(logical_decoding_ctx);
	ReplicationSlotRelease();

	replication_active = false;
	/* On a shutdown request, exit the process instead of returning. */
	if (got_STOPPING)
		proc_exit(0);
	WalSndSetState(WALSNDSTATE_STARTUP);

	/* Get out of COPY mode (CommandComplete). */
	SetQueryCompletion(&qc, CMDTAG_COPY, 0);
	EndCommand(&qc, DestRemote, false);
}
1575 : :
1576 : : /*
1577 : : * LogicalDecodingContext 'prepare_write' callback.
1578 : : *
1579 : : * Prepare a write into a StringInfo.
1580 : : *
1581 : : * Don't do anything lasting in here, it's quite possible that nothing will be done
1582 : : * with the data.
1583 : : */
1584 : : static void
1585 : 205695 : WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write)
1586 : : {
1587 : : /* can't have sync rep confused by sending the same LSN several times */
1588 [ + + ]: 205695 : if (!last_write)
1589 : 437 : lsn = InvalidXLogRecPtr;
1590 : :
1591 : 205695 : resetStringInfo(ctx->out);
1592 : :
272 nathan@postgresql.or 1593 :GNC 205695 : pq_sendbyte(ctx->out, PqReplMsg_WALData);
4439 rhaas@postgresql.org 1594 :CBC 205695 : pq_sendint64(ctx->out, lsn); /* dataStart */
1595 : 205695 : pq_sendint64(ctx->out, lsn); /* walEnd */
1596 : :
1597 : : /*
1598 : : * Fill out the sendtime later, just as it's done in XLogSendPhysical, but
1599 : : * reserve space here.
1600 : : */
4382 bruce@momjian.us 1601 : 205695 : pq_sendint64(ctx->out, 0); /* sendtime */
4439 rhaas@postgresql.org 1602 : 205695 : }
1603 : :
/*
 * LogicalDecodingContext 'write' callback.
 *
 * Actually write out data previously prepared by WalSndPrepareWrite out to
 * the network. Take as long as needed, but process replies from the other
 * side and check timeouts during that.
 */
static void
WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
				bool last_write)
{
	TimestampTz now;

	/*
	 * Fill the send timestamp last, so that it is taken as late as possible.
	 * This is somewhat ugly, but the protocol is set as it's already used for
	 * several releases by streaming physical replication.
	 *
	 * The offset skips the 1-byte message type plus the two int64 LSN fields
	 * (dataStart, walEnd) written by WalSndPrepareWrite, landing exactly on
	 * the reserved sendtime slot.
	 */
	resetStringInfo(&tmpbuf);
	now = GetCurrentTimestamp();
	pq_sendint64(&tmpbuf, now);
	memcpy(&ctx->out->data[1 + sizeof(int64) + sizeof(int64)],
		   tmpbuf.data, sizeof(int64));

	/* output previously gathered data in a CopyData packet */
	pq_putmessage_noblock(PqMsg_CopyData, ctx->out->data, ctx->out->len);

	CHECK_FOR_INTERRUPTS();

	/* Try to flush pending output to the client */
	if (pq_flush_if_writable() != 0)
		WalSndShutdown();

	/* Try taking fast path unless we get too close to walsender timeout. */
	if (now < TimestampTzPlusMilliseconds(last_reply_timestamp,
										  wal_sender_timeout / 2) &&
		!pq_is_send_pending())
	{
		/* Everything flushed and timeout far away: skip the slow path. */
		return;
	}

	/* If we have pending write here, go to slow path */
	ProcessPendingWrites();
}
1648 : :
1649 : : /*
1650 : : * Handle configuration reload.
1651 : : *
1652 : : * Process the pending configuration file reload and reinitializes synchronous
1653 : : * replication settings. Also releases any waiters that may now be satisfied due
1654 : : * to changes in synchronous replication requirements.
1655 : : */
1656 : : static void
91 fujii@postgresql.org 1657 :GNC 1124715 : WalSndHandleConfigReload(void)
1658 : : {
1659 [ + + ]: 1124715 : if (!ConfigReloadPending)
1660 : 1124678 : return;
1661 : :
1662 : 37 : ConfigReloadPending = false;
1663 : 37 : ProcessConfigFile(PGC_SIGHUP);
1664 : 37 : SyncRepInitConfig();
1665 : :
1666 : : /*
1667 : : * Recheck and release any now-satisfied waiters after config reload
1668 : : * changes synchronous replication requirements (e.g., reducing the number
1669 : : * of sync standbys or changing the standby names).
1670 : : */
1671 [ + + ]: 37 : if (!am_cascading_walsender)
1672 : 34 : SyncRepReleaseWaiters();
1673 : : }
1674 : :
/*
 * Wait until there is no pending write. Also process replies from the other
 * side and check timeouts during that.
 *
 * Loops until the libpq output buffer has been fully flushed to the client,
 * interleaving reply processing, timeout checks and keepalives so the
 * connection stays healthy while we block on a slow receiver.
 */
static void
ProcessPendingWrites(void)
{
	for (;;)
	{
		long		sleeptime;

		/* Check for input from the client */
		ProcessRepliesIfAny();

		/* die if timeout was reached */
		WalSndCheckTimeOut();

		/*
		 * During shutdown, die if the shutdown timeout expires. Call this
		 * before WalSndComputeSleeptime() so the timeout is considered when
		 * computing sleep time.
		 */
		WalSndCheckShutdownTimeout();

		/* Send keepalive if the time has come */
		WalSndKeepaliveIfNecessary();

		/* Done once nothing remains buffered for the client. */
		if (!pq_is_send_pending())
			break;

		sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());

		/* Sleep until something happens or we time out */
		WalSndWait(WL_SOCKET_WRITEABLE | WL_SOCKET_READABLE, sleeptime,
				   WAIT_EVENT_WAL_SENDER_WRITE_DATA);

		/* Clear any already-pending wakeups */
		ResetLatch(MyLatch);

		CHECK_FOR_INTERRUPTS();

		/* Process any requests or signals received recently */
		WalSndHandleConfigReload();

		/* Try to flush pending output to the client */
		if (pq_flush_if_writable() != 0)
			WalSndShutdown();
	}

	/* reactivate latch so WalSndLoop knows to continue */
	SetLatch(MyLatch);
}
1727 : :
/*
 * LogicalDecodingContext 'update_progress' callback.
 *
 * Write the current position to the lag tracker (see XLogSendPhysical).
 *
 * When skipping empty transactions, send a keepalive message if necessary.
 */
static void
WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
					 bool skipped_xact)
{
	/* Persists across calls to rate-limit lag-tracker writes. */
	static TimestampTz sendTime = 0;
	TimestampTz now = GetCurrentTimestamp();
	bool		pending_writes = false;
	bool		end_xact = ctx->end_xact;

	/*
	 * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
	 * avoid flooding the lag tracker when we commit frequently.
	 *
	 * We don't have a mechanism to get the ack for any LSN other than end
	 * xact LSN from the downstream. So, we track lag only for end of
	 * transaction LSN.
	 */
#define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000
	if (end_xact && TimestampDifferenceExceeds(sendTime, now,
											   WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS))
	{
		LagTrackerWrite(lsn, now);
		sendTime = now;
	}

	/*
	 * When skipping empty transactions in synchronous replication, we send a
	 * keepalive message to avoid delaying such transactions.
	 *
	 * It is okay to check sync_standbys_status without lock here as in the
	 * worst case we will just send an extra keepalive message when it is
	 * really not required.
	 */
	if (skipped_xact &&
		SyncRepRequested() &&
		(((volatile WalSndCtlData *) WalSndCtl)->sync_standbys_status & SYNC_STANDBY_DEFINED))
	{
		WalSndKeepalive(false, lsn);

		/* Try to flush pending output to the client */
		if (pq_flush_if_writable() != 0)
			WalSndShutdown();

		/* If we have pending write here, make sure it's actually flushed */
		if (pq_is_send_pending())
			pending_writes = true;
	}

	/*
	 * Process pending writes if any or try to send a keepalive if required.
	 * We don't need to try sending keep alive messages at the transaction end
	 * as that will be done at a later point in time. This is required only
	 * for large transactions where we don't send any changes to the
	 * downstream and the receiver can timeout due to that.
	 */
	if (pending_writes || (!end_xact &&
						   now >= TimestampTzPlusMilliseconds(last_reply_timestamp,
															  wal_sender_timeout / 2)))
		ProcessPendingWrites();
}
1795 : :
1796 : : /*
1797 : : * Wake up the logical walsender processes with logical failover slots if the
1798 : : * currently acquired physical slot is specified in synchronized_standby_slots GUC.
1799 : : */
1800 : : void
788 akapila@postgresql.o 1801 : 41949 : PhysicalWakeupLogicalWalSnd(void)
1802 : : {
1803 [ + - - + ]: 41949 : Assert(MyReplicationSlot && SlotIsPhysical(MyReplicationSlot));
1804 : :
1805 : : /*
1806 : : * If we are running in a standby, there is no need to wake up walsenders.
1807 : : * This is because we do not support syncing slots to cascading standbys,
1808 : : * so, there are no walsenders waiting for standbys to catch up.
1809 : : */
1810 [ + + ]: 41949 : if (RecoveryInProgress())
1811 : 51 : return;
1812 : :
673 1813 [ + + ]: 41898 : if (SlotExistsInSyncStandbySlots(NameStr(MyReplicationSlot->data.name)))
788 1814 : 8 : ConditionVariableBroadcast(&WalSndCtl->wal_confirm_rcv_cv);
1815 : : }
1816 : :
1817 : : /*
1818 : : * Returns true if not all standbys have caught up to the flushed position
1819 : : * (flushed_lsn) when the current acquired slot is a logical failover
1820 : : * slot and we are streaming; otherwise, returns false.
1821 : : *
1822 : : * If returning true, the function sets the appropriate wait event in
1823 : : * wait_event; otherwise, wait_event is set to 0.
1824 : : */
1825 : : static bool
1826 : 19520 : NeedToWaitForStandbys(XLogRecPtr flushed_lsn, uint32 *wait_event)
1827 : : {
1828 [ + + ]: 19520 : int elevel = got_STOPPING ? ERROR : WARNING;
1829 : : bool failover_slot;
1830 : :
1831 [ + + + + ]: 19520 : failover_slot = (replication_active && MyReplicationSlot->data.failover);
1832 : :
1833 : : /*
1834 : : * Note that after receiving the shutdown signal, an ERROR is reported if
1835 : : * any slots are dropped, invalidated, or inactive. This measure is taken
1836 : : * to prevent the walsender from waiting indefinitely.
1837 : : */
1838 [ + + + + ]: 19520 : if (failover_slot && !StandbySlotsHaveCaughtup(flushed_lsn, elevel))
1839 : : {
1840 : 12 : *wait_event = WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION;
1841 : 12 : return true;
1842 : : }
1843 : :
1844 : 19508 : *wait_event = 0;
1845 : 19508 : return false;
1846 : : }
1847 : :
1848 : : /*
1849 : : * Returns true if we need to wait for WALs to be flushed to disk, or if not
1850 : : * all standbys have caught up to the flushed position (flushed_lsn) when the
1851 : : * current acquired slot is a logical failover slot and we are
1852 : : * streaming; otherwise, returns false.
1853 : : *
1854 : : * If returning true, the function sets the appropriate wait event in
1855 : : * wait_event; otherwise, wait_event is set to 0.
1856 : : */
1857 : : static bool
1858 : 26372 : NeedToWaitForWal(XLogRecPtr target_lsn, XLogRecPtr flushed_lsn,
1859 : : uint32 *wait_event)
1860 : : {
1861 : : /* Check if we need to wait for WALs to be flushed to disk */
1862 [ + + ]: 26372 : if (target_lsn > flushed_lsn)
1863 : : {
1864 : 10048 : *wait_event = WAIT_EVENT_WAL_SENDER_WAIT_FOR_WAL;
1865 : 10048 : return true;
1866 : : }
1867 : :
1868 : : /* Check if the standby slots have caught up to the flushed position */
1869 : 16324 : return NeedToWaitForStandbys(flushed_lsn, wait_event);
1870 : : }
1871 : :
/*
 * Wait till WAL < loc is flushed to disk so it can be safely sent to client.
 *
 * If the walsender holds a logical failover slot, we also wait for all the
 * specified streaming replication standby servers to confirm receipt of WAL
 * up to RecentFlushPtr. It is beneficial to wait here for the confirmation
 * up to RecentFlushPtr rather than waiting before transmitting each change
 * to logical subscribers, which is already covered by RecentFlushPtr.
 *
 * Returns end LSN of flushed WAL. Normally this will be >= loc, but if we
 * detect a shutdown request (either from postmaster or client) we will return
 * early, so caller must always check.
 */
static XLogRecPtr
WalSndWaitForWal(XLogRecPtr loc)
{
	int			wakeEvents;
	uint32		wait_event = 0;
	/* Cached across calls; enables the lock-free fast path below. */
	static XLogRecPtr RecentFlushPtr = InvalidXLogRecPtr;
	TimestampTz last_flush = 0;

	/*
	 * Fast path to avoid acquiring the spinlock in case we already know we
	 * have enough WAL available and all the standby servers have confirmed
	 * receipt of WAL up to RecentFlushPtr. This is particularly interesting
	 * if we're far behind.
	 */
	if (XLogRecPtrIsValid(RecentFlushPtr) &&
		!NeedToWaitForWal(loc, RecentFlushPtr, &wait_event))
		return RecentFlushPtr;

	/*
	 * Within the loop, we wait for the necessary WALs to be flushed to disk
	 * first, followed by waiting for standbys to catch up if there are enough
	 * WALs (see NeedToWaitForWal()) or upon receiving the shutdown signal.
	 */
	for (;;)
	{
		bool		wait_for_standby_at_stop = false;
		long		sleeptime;
		TimestampTz now;

		/* Clear any already-pending wakeups */
		ResetLatch(MyLatch);

		CHECK_FOR_INTERRUPTS();

		/* Process any requests or signals received recently */
		WalSndHandleConfigReload();

		/* Check for input from the client */
		ProcessRepliesIfAny();

		/*
		 * If we're shutting down, trigger pending WAL to be written out,
		 * otherwise we'd possibly end up waiting for WAL that never gets
		 * written, because walwriter has shut down already.
		 *
		 * Note that GetXLogInsertEndRecPtr() is used to obtain the WAL flush
		 * request location instead of GetXLogInsertRecPtr(). Because if the
		 * last WAL record ends at a page boundary, GetXLogInsertRecPtr() can
		 * return an LSN pointing past the page header, which may cause
		 * XLogFlush() to report an error.
		 */
		if (got_STOPPING && !RecoveryInProgress())
			XLogFlush(GetXLogInsertEndRecPtr());

		/*
		 * To avoid the scenario where standbys need to catch up to a newer
		 * WAL location in each iteration, we update our idea of the currently
		 * flushed position only if we are not waiting for standbys to catch
		 * up.
		 */
		if (wait_event != WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION)
		{
			if (!RecoveryInProgress())
				RecentFlushPtr = GetFlushRecPtr(NULL);
			else
				RecentFlushPtr = GetXLogReplayRecPtr(NULL);
		}

		/*
		 * If postmaster asked us to stop and the standby slots have caught up
		 * to the flushed position, don't wait anymore.
		 *
		 * It's important to do this check after the recomputation of
		 * RecentFlushPtr, so we can send all remaining data before shutting
		 * down.
		 */
		if (got_STOPPING)
		{
			if (NeedToWaitForStandbys(RecentFlushPtr, &wait_event))
				wait_for_standby_at_stop = true;
			else
				break;
		}

		/*
		 * We only send regular messages to the client for full decoded
		 * transactions, but a synchronous replication and walsender shutdown
		 * possibly are waiting for a later location. So, before sleeping, we
		 * send a ping containing the flush location. If the receiver is
		 * otherwise idle, this keepalive will trigger a reply. Processing the
		 * reply will update these MyWalSnd locations.
		 */
		if (MyWalSnd->flush < sentPtr &&
			MyWalSnd->write < sentPtr &&
			!waiting_for_ping_response)
			WalSndKeepalive(false, InvalidXLogRecPtr);

		/*
		 * Exit the loop if already caught up and doesn't need to wait for
		 * standby slots.
		 */
		if (!wait_for_standby_at_stop &&
			!NeedToWaitForWal(loc, RecentFlushPtr, &wait_event))
			break;

		/*
		 * Waiting for new WAL or waiting for standbys to catch up. Since we
		 * need to wait, we're now caught up.
		 */
		WalSndCaughtUp = true;

		/*
		 * Try to flush any pending output to the client.
		 */
		if (pq_flush_if_writable() != 0)
			WalSndShutdown();

		/*
		 * If we have received CopyDone from the client, sent CopyDone
		 * ourselves, and the output buffer is empty, it's time to exit
		 * streaming, so fail the current WAL fetch request.
		 */
		if (streamingDoneReceiving && streamingDoneSending &&
			!pq_is_send_pending())
			break;

		/* die if timeout was reached */
		WalSndCheckTimeOut();

		/*
		 * During shutdown, die if the shutdown timeout expires. Call this
		 * before WalSndComputeSleeptime() so the timeout is considered when
		 * computing sleep time.
		 */
		WalSndCheckShutdownTimeout();

		/* Send keepalive if the time has come */
		WalSndKeepaliveIfNecessary();

		/*
		 * Sleep until something happens or we time out. Also wait for the
		 * socket becoming writable, if there's still pending output.
		 * Otherwise we might sit on sendable output data while waiting for
		 * new WAL to be generated. (But if we have nothing to send, we don't
		 * want to wake on socket-writable.)
		 */
		now = GetCurrentTimestamp();
		sleeptime = WalSndComputeSleeptime(now);

		wakeEvents = WL_SOCKET_READABLE;

		if (pq_is_send_pending())
			wakeEvents |= WL_SOCKET_WRITEABLE;

		Assert(wait_event != 0);

		/* Report IO statistics, if needed */
		if (TimestampDifferenceExceeds(last_flush, now,
									   WALSENDER_STATS_FLUSH_INTERVAL))
		{
			pgstat_flush_io(false);
			(void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
			last_flush = now;
		}

		WalSndWait(wakeEvents, sleeptime, wait_event);
	}

	/* reactivate latch so WalSndLoop knows to continue */
	SetLatch(MyLatch);
	return RecentFlushPtr;
}
2057 : :
2058 : : /*
2059 : : * Execute an incoming replication command.
2060 : : *
2061 : : * Returns true if the cmd_string was recognized as WalSender command, false
2062 : : * if not.
2063 : : */
2064 : : bool
4960 heikki.linnakangas@i 2065 : 5807 : exec_replication_command(const char *cmd_string)
2066 : : {
2067 : : yyscan_t scanner;
2068 : : int parse_rc;
2069 : : Node *cmd_node;
2070 : : const char *cmdtag;
379 tgl@sss.pgh.pa.us 2071 : 5807 : MemoryContext old_context = CurrentMemoryContext;
2072 : :
2073 : : /* We save and re-use the cmd_context across calls */
2074 : : static MemoryContext cmd_context = NULL;
2075 : :
2076 : : /*
2077 : : * If WAL sender has been told that shutdown is getting close, switch its
2078 : : * status accordingly to handle the next replication commands correctly.
2079 : : */
3256 andres@anarazel.de 2080 [ - + ]: 5807 : if (got_STOPPING)
3256 andres@anarazel.de 2081 :UBC 0 : WalSndSetState(WALSNDSTATE_STOPPING);
2082 : :
2083 : : /*
2084 : : * Throw error if in stopping mode. We need prevent commands that could
2085 : : * generate WAL while the shutdown checkpoint is being written. To be
2086 : : * safe, we just prohibit all new commands.
2087 : : */
3256 andres@anarazel.de 2088 [ - + ]:CBC 5807 : if (MyWalSnd->state == WALSNDSTATE_STOPPING)
3256 andres@anarazel.de 2089 [ # # ]:UBC 0 : ereport(ERROR,
2090 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2091 : : errmsg("cannot execute new commands while WAL sender is in stopping mode")));
2092 : :
2093 : : /*
2094 : : * CREATE_REPLICATION_SLOT ... LOGICAL exports a snapshot until the next
2095 : : * command arrives. Clean up the old stuff if there's anything.
2096 : : */
4439 rhaas@postgresql.org 2097 :CBC 5807 : SnapBuildClearExportedSnapshot();
2098 : :
4960 heikki.linnakangas@i 2099 [ - + ]: 5807 : CHECK_FOR_INTERRUPTS();
2100 : :
2101 : : /*
2102 : : * Prepare to parse and execute the command.
2103 : : *
2104 : : * Because replication command execution can involve beginning or ending
2105 : : * transactions, we need a working context that will survive that, so we
2106 : : * make it a child of TopMemoryContext. That in turn creates a hazard of
2107 : : * long-lived memory leaks if we lose track of the working context. We
2108 : : * deal with that by creating it only once per walsender, and resetting it
2109 : : * for each new command. (Normally this reset is a no-op, but if the
2110 : : * prior exec_replication_command call failed with an error, it won't be.)
2111 : : *
2112 : : * This is subtler than it looks. The transactions we manage can extend
2113 : : * across replication commands, indeed SnapBuildClearExportedSnapshot
2114 : : * might have just ended one. Because transaction exit will revert to the
2115 : : * memory context that was current at transaction start, we need to be
2116 : : * sure that that context is still valid. That motivates re-using the
2117 : : * same cmd_context rather than making a new one each time.
2118 : : */
379 tgl@sss.pgh.pa.us 2119 [ + + ]: 5807 : if (cmd_context == NULL)
2120 : 1278 : cmd_context = AllocSetContextCreate(TopMemoryContext,
2121 : : "Replication command context",
2122 : : ALLOCSET_DEFAULT_SIZES);
2123 : : else
2124 : 4529 : MemoryContextReset(cmd_context);
2125 : :
2126 : 5807 : MemoryContextSwitchTo(cmd_context);
2127 : :
519 peter@eisentraut.org 2128 : 5807 : replication_scanner_init(cmd_string, &scanner);
2129 : :
2130 : : /*
2131 : : * Is it a WalSender command?
2132 : : */
2133 [ + + ]: 5807 : if (!replication_scanner_is_replication_command(scanner))
2134 : : {
2135 : : /* Nope; clean up and get out. */
2136 : 2571 : replication_scanner_finish(scanner);
2137 : :
2059 tgl@sss.pgh.pa.us 2138 : 2571 : MemoryContextSwitchTo(old_context);
379 2139 : 2571 : MemoryContextReset(cmd_context);
2140 : :
2141 : : /* XXX this is a pretty random place to make this check */
1562 2142 [ - + ]: 2571 : if (MyDatabaseId == InvalidOid)
1562 tgl@sss.pgh.pa.us 2143 [ # # ]:UBC 0 : ereport(ERROR,
2144 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2145 : : errmsg("cannot execute SQL commands in WAL sender for physical replication")));
2146 : :
2147 : : /* Tell the caller that this wasn't a WalSender command. */
2059 tgl@sss.pgh.pa.us 2148 :CBC 2571 : return false;
2149 : : }
2150 : :
2151 : : /*
2152 : : * Looks like a WalSender command, so parse it.
2153 : : */
466 peter@eisentraut.org 2154 : 3236 : parse_rc = replication_yyparse(&cmd_node, scanner);
1562 tgl@sss.pgh.pa.us 2155 [ - + ]: 3236 : if (parse_rc != 0)
1562 tgl@sss.pgh.pa.us 2156 [ # # ]:UBC 0 : ereport(ERROR,
2157 : : (errcode(ERRCODE_SYNTAX_ERROR),
2158 : : errmsg_internal("replication command parser returned %d",
2159 : : parse_rc)));
519 peter@eisentraut.org 2160 :CBC 3236 : replication_scanner_finish(scanner);
2161 : :
2162 : : /*
2163 : : * Report query to various monitoring facilities. For this purpose, we
2164 : : * report replication commands just like SQL commands.
2165 : : */
2059 tgl@sss.pgh.pa.us 2166 : 3236 : debug_query_string = cmd_string;
2167 : :
2168 : 3236 : pgstat_report_activity(STATE_RUNNING, cmd_string);
2169 : :
2170 : : /*
2171 : : * Log replication command if log_replication_commands is enabled. Even
2172 : : * when it's disabled, log the command with DEBUG1 level for backward
2173 : : * compatibility.
2174 : : */
2175 [ + - + - ]: 3236 : ereport(log_replication_commands ? LOG : DEBUG1,
2176 : : (errmsg("received replication command: %s", cmd_string)));
2177 : :
2178 : : /*
2179 : : * Disallow replication commands in aborted transaction blocks.
2180 : : */
2181 [ - + ]: 3236 : if (IsAbortedTransactionBlockState())
3330 peter_e@gmx.net 2182 [ # # ]:UBC 0 : ereport(ERROR,
2183 : : (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
2184 : : errmsg("current transaction is aborted, "
2185 : : "commands ignored until end of transaction block")));
2186 : :
3330 peter_e@gmx.net 2187 [ - + ]:CBC 3236 : CHECK_FOR_INTERRUPTS();
2188 : :
2189 : : /*
2190 : : * Allocate buffers that will be used for each outgoing and incoming
2191 : : * message. We do this just once per command to reduce palloc overhead.
2192 : : */
3359 fujii@postgresql.org 2193 : 3236 : initStringInfo(&output_message);
2194 : 3236 : initStringInfo(&reply_message);
2195 : 3236 : initStringInfo(&tmpbuf);
2196 : :
5590 magnus@hagander.net 2197 [ + + + + : 3236 : switch (cmd_node->type)
+ + + + +
+ - ]
2198 : : {
2199 : 803 : case T_IdentifySystemCmd:
2057 alvherre@alvh.no-ip. 2200 : 803 : cmdtag = "IDENTIFY_SYSTEM";
tgl@sss.pgh.pa.us 2201 : 803 : set_ps_display(cmdtag);
5590 magnus@hagander.net 2202 : 803 : IdentifySystem();
2057 alvherre@alvh.no-ip. 2203 : 803 : EndReplicationCommand(cmdtag);
5590 magnus@hagander.net 2204 : 803 : break;
2205 : :
1653 michael@paquier.xyz 2206 : 6 : case T_ReadReplicationSlotCmd:
2207 : 6 : cmdtag = "READ_REPLICATION_SLOT";
2208 : 6 : set_ps_display(cmdtag);
2209 : 6 : ReadReplicationSlot((ReadReplicationSlotCmd *) cmd_node);
2210 : 5 : EndReplicationCommand(cmdtag);
2211 : 5 : break;
2212 : :
5590 magnus@hagander.net 2213 : 201 : case T_BaseBackupCmd:
2057 alvherre@alvh.no-ip. 2214 : 201 : cmdtag = "BASE_BACKUP";
tgl@sss.pgh.pa.us 2215 : 201 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2216 : 201 : PreventInTransactionBlock(true, cmdtag);
867 rhaas@postgresql.org 2217 : 201 : SendBaseBackup((BaseBackupCmd *) cmd_node, uploaded_manifest);
2057 alvherre@alvh.no-ip. 2218 : 175 : EndReplicationCommand(cmdtag);
5581 magnus@hagander.net 2219 : 175 : break;
2220 : :
4477 rhaas@postgresql.org 2221 : 513 : case T_CreateReplicationSlotCmd:
2057 alvherre@alvh.no-ip. 2222 : 513 : cmdtag = "CREATE_REPLICATION_SLOT";
tgl@sss.pgh.pa.us 2223 : 513 : set_ps_display(cmdtag);
4477 rhaas@postgresql.org 2224 : 513 : CreateReplicationSlot((CreateReplicationSlotCmd *) cmd_node);
2057 alvherre@alvh.no-ip. 2225 : 512 : EndReplicationCommand(cmdtag);
4477 rhaas@postgresql.org 2226 : 512 : break;
2227 : :
2228 : 295 : case T_DropReplicationSlotCmd:
2057 alvherre@alvh.no-ip. 2229 : 295 : cmdtag = "DROP_REPLICATION_SLOT";
tgl@sss.pgh.pa.us 2230 : 295 : set_ps_display(cmdtag);
4477 rhaas@postgresql.org 2231 : 295 : DropReplicationSlot((DropReplicationSlotCmd *) cmd_node);
2057 alvherre@alvh.no-ip. 2232 : 292 : EndReplicationCommand(cmdtag);
4477 rhaas@postgresql.org 2233 : 292 : break;
2234 : :
827 akapila@postgresql.o 2235 : 7 : case T_AlterReplicationSlotCmd:
2236 : 7 : cmdtag = "ALTER_REPLICATION_SLOT";
2237 : 7 : set_ps_display(cmdtag);
2238 : 7 : AlterReplicationSlot((AlterReplicationSlotCmd *) cmd_node);
2239 : 5 : EndReplicationCommand(cmdtag);
2240 : 5 : break;
2241 : :
4477 rhaas@postgresql.org 2242 : 756 : case T_StartReplicationCmd:
2243 : : {
2244 : 756 : StartReplicationCmd *cmd = (StartReplicationCmd *) cmd_node;
2245 : :
2057 alvherre@alvh.no-ip. 2246 : 756 : cmdtag = "START_REPLICATION";
tgl@sss.pgh.pa.us 2247 : 756 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2248 : 756 : PreventInTransactionBlock(true, cmdtag);
2249 : :
4477 rhaas@postgresql.org 2250 [ + + ]: 756 : if (cmd->kind == REPLICATION_KIND_PHYSICAL)
2251 : 302 : StartReplication(cmd);
2252 : : else
4439 2253 : 454 : StartLogicalReplication(cmd);
2254 : :
2255 : : /* dupe, but necessary per libpqrcv_endstreaming */
2029 alvherre@alvh.no-ip. 2256 : 367 : EndReplicationCommand(cmdtag);
2257 : :
2157 michael@paquier.xyz 2258 [ - + ]: 367 : Assert(xlogreader != NULL);
4477 rhaas@postgresql.org 2259 : 367 : break;
2260 : : }
2261 : :
4891 heikki.linnakangas@i 2262 : 15 : case T_TimeLineHistoryCmd:
2057 alvherre@alvh.no-ip. 2263 : 15 : cmdtag = "TIMELINE_HISTORY";
tgl@sss.pgh.pa.us 2264 : 15 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2265 : 15 : PreventInTransactionBlock(true, cmdtag);
4891 heikki.linnakangas@i 2266 : 15 : SendTimeLineHistory((TimeLineHistoryCmd *) cmd_node);
2057 alvherre@alvh.no-ip. 2267 : 15 : EndReplicationCommand(cmdtag);
4891 heikki.linnakangas@i 2268 : 15 : break;
2269 : :
3388 rhaas@postgresql.org 2270 : 628 : case T_VariableShowStmt:
2271 : : {
2272 : 628 : DestReceiver *dest = CreateDestReceiver(DestRemoteSimple);
2273 : 628 : VariableShowStmt *n = (VariableShowStmt *) cmd_node;
2274 : :
2057 alvherre@alvh.no-ip. 2275 : 628 : cmdtag = "SHOW";
tgl@sss.pgh.pa.us 2276 : 628 : set_ps_display(cmdtag);
2277 : :
2278 : : /* syscache access needs a transaction environment */
2577 michael@paquier.xyz 2279 : 628 : StartTransactionCommand();
3388 rhaas@postgresql.org 2280 : 628 : GetPGVariable(n->name, dest);
2577 michael@paquier.xyz 2281 : 628 : CommitTransactionCommand();
2057 alvherre@alvh.no-ip. 2282 : 628 : EndReplicationCommand(cmdtag);
2283 : : }
3388 rhaas@postgresql.org 2284 : 628 : break;
2285 : :
867 2286 : 12 : case T_UploadManifestCmd:
2287 : 12 : cmdtag = "UPLOAD_MANIFEST";
2288 : 12 : set_ps_display(cmdtag);
2289 : 12 : PreventInTransactionBlock(true, cmdtag);
2290 : 12 : UploadManifest();
2291 : 11 : EndReplicationCommand(cmdtag);
2292 : 11 : break;
2293 : :
5590 magnus@hagander.net 2294 :UBC 0 : default:
4891 heikki.linnakangas@i 2295 [ # # ]: 0 : elog(ERROR, "unrecognized replication command node tag: %u",
2296 : : cmd_node->type);
2297 : : }
2298 : :
2299 : : /*
2300 : : * Done. Revert to caller's memory context, and clean out the cmd_context
2301 : : * to recover memory right away.
2302 : : */
5590 magnus@hagander.net 2303 :CBC 2813 : MemoryContextSwitchTo(old_context);
379 tgl@sss.pgh.pa.us 2304 : 2813 : MemoryContextReset(cmd_context);
2305 : :
2306 : : /*
2307 : : * We need not update ps display or pg_stat_activity, because PostgresMain
2308 : : * will reset those to "idle". But we must reset debug_query_string to
2309 : : * ensure it doesn't become a dangling pointer.
2310 : : */
2059 2311 : 2813 : debug_query_string = NULL;
2312 : :
3330 peter_e@gmx.net 2313 : 2813 : return true;
2314 : : }
2315 : :
/*
 * Process any incoming messages while streaming. Also checks if the remote
 * end has closed the connection.
 *
 * Reads client messages without blocking, dispatching CopyData payloads to
 * ProcessStandbyMessage(), handling CopyDone handshaking, and exiting the
 * process on Terminate or connection loss.  Updates last_processing, and
 * last_reply_timestamp if at least one message was consumed.
 */
static void
ProcessRepliesIfAny(void)
{
	unsigned char firstchar;
	int			maxmsglen;
	int			r;
	bool		received = false;

	/* Remember when we last checked for client input (timeout bookkeeping). */
	last_processing = GetCurrentTimestamp();

	/*
	 * If we already received a CopyDone from the frontend, any subsequent
	 * message is the beginning of a new command, and should be processed in
	 * the main processing loop.
	 */
	while (!streamingDoneReceiving)
	{
		pq_startmsgread();
		r = pq_getbyte_if_available(&firstchar);
		if (r < 0)
		{
			/* unexpected error or EOF */
			ereport(COMMERROR,
					(errcode(ERRCODE_PROTOCOL_VIOLATION),
					 errmsg("unexpected EOF on standby connection")));
			proc_exit(0);
		}
		if (r == 0)
		{
			/* no data available without blocking */
			pq_endmsgread();
			break;
		}

		/* Validate message type and set packet size limit */
		switch (firstchar)
		{
			case PqMsg_CopyData:
				maxmsglen = PQ_LARGE_MESSAGE_LIMIT;
				break;
			case PqMsg_CopyDone:
			case PqMsg_Terminate:
				maxmsglen = PQ_SMALL_MESSAGE_LIMIT;
				break;
			default:
				ereport(FATAL,
						(errcode(ERRCODE_PROTOCOL_VIOLATION),
						 errmsg("invalid standby message type \"%c\"",
								firstchar)));
				maxmsglen = 0;	/* keep compiler quiet */
				break;
		}

		/* Read the message contents */
		resetStringInfo(&reply_message);
		if (pq_getmessage(&reply_message, maxmsglen))
		{
			ereport(COMMERROR,
					(errcode(ERRCODE_PROTOCOL_VIOLATION),
					 errmsg("unexpected EOF on standby connection")));
			proc_exit(0);
		}

		/* ... and process it */
		switch (firstchar)
		{
				/*
				 * PqMsg_CopyData means a standby reply wrapped in a CopyData
				 * packet.
				 */
			case PqMsg_CopyData:
				ProcessStandbyMessage();
				received = true;
				break;

				/*
				 * PqMsg_CopyDone means the standby requested to finish
				 * streaming. Reply with CopyDone, if we had not sent that
				 * already.
				 */
			case PqMsg_CopyDone:
				if (!streamingDoneSending)
				{
					pq_putmessage_noblock(PqMsg_CopyDone, NULL, 0);
					streamingDoneSending = true;
				}

				streamingDoneReceiving = true;
				received = true;
				break;

				/*
				 * PqMsg_Terminate means that the standby is closing down the
				 * socket.
				 */
			case PqMsg_Terminate:
				proc_exit(0);

			default:
				/* firstchar was already validated above */
				Assert(false);	/* NOT REACHED */
		}
	}

	/*
	 * Save the last reply timestamp if we've received at least one reply.
	 * Any reply also clears the "waiting for ping response" state.
	 */
	if (received)
	{
		last_reply_timestamp = last_processing;
		waiting_for_ping_response = false;
	}
}
2432 : :
2433 : : /*
2434 : : * Process a status update message received from standby.
2435 : : */
2436 : : static void
5555 simon@2ndQuadrant.co 2437 : 109336 : ProcessStandbyMessage(void)
2438 : : {
2439 : : char msgtype;
2440 : :
2441 : : /*
2442 : : * Check message type from the first byte.
2443 : : */
5558 rhaas@postgresql.org 2444 : 109336 : msgtype = pq_getmsgbyte(&reply_message);
2445 : :
5555 simon@2ndQuadrant.co 2446 [ + + + - ]: 109336 : switch (msgtype)
2447 : : {
272 nathan@postgresql.or 2448 :GNC 107363 : case PqReplMsg_StandbyStatusUpdate:
5555 simon@2ndQuadrant.co 2449 :CBC 107363 : ProcessStandbyReplyMessage();
2450 : 107363 : break;
2451 : :
272 nathan@postgresql.or 2452 :GNC 154 : case PqReplMsg_HotStandbyFeedback:
5555 simon@2ndQuadrant.co 2453 :CBC 154 : ProcessStandbyHSFeedbackMessage();
2454 : 154 : break;
2455 : :
272 nathan@postgresql.or 2456 :GNC 1819 : case PqReplMsg_PrimaryStatusRequest:
286 akapila@postgresql.o 2457 : 1819 : ProcessStandbyPSRequestMessage();
2458 : 1819 : break;
2459 : :
5555 simon@2ndQuadrant.co 2460 :UBC 0 : default:
2461 [ # # ]: 0 : ereport(COMMERROR,
2462 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
2463 : : errmsg("unexpected message type \"%c\"", msgtype)));
2464 : 0 : proc_exit(0);
2465 : : }
5555 simon@2ndQuadrant.co 2466 :CBC 109336 : }
2467 : :
2468 : : /*
2469 : : * Remember that a walreceiver just confirmed receipt of lsn `lsn`.
2470 : : */
2471 : : static void
4477 rhaas@postgresql.org 2472 : 93346 : PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
2473 : : {
4382 bruce@momjian.us 2474 : 93346 : bool changed = false;
3864 rhaas@postgresql.org 2475 : 93346 : ReplicationSlot *slot = MyReplicationSlot;
2476 : :
180 alvherre@kurilemu.de 2477 [ - + ]:GNC 93346 : Assert(XLogRecPtrIsValid(lsn));
4477 rhaas@postgresql.org 2478 [ - + ]:CBC 93346 : SpinLockAcquire(&slot->mutex);
2479 [ + + ]: 93346 : if (slot->data.restart_lsn != lsn)
2480 : : {
2481 : 41942 : changed = true;
2482 : 41942 : slot->data.restart_lsn = lsn;
2483 : : }
2484 : 93346 : SpinLockRelease(&slot->mutex);
2485 : :
2486 [ + + ]: 93346 : if (changed)
2487 : : {
2488 : 41942 : ReplicationSlotMarkDirty();
2489 : 41942 : ReplicationSlotsComputeRequiredLSN();
788 akapila@postgresql.o 2490 : 41942 : PhysicalWakeupLogicalWalSnd();
2491 : : }
2492 : :
2493 : : /*
2494 : : * One could argue that the slot should be saved to disk now, but that'd
2495 : : * be energy wasted - the worst thing lost information could cause here is
2496 : : * to give wrong information in a statistics view - we'll just potentially
2497 : : * be more conservative in removing files.
2498 : : */
4477 rhaas@postgresql.org 2499 : 93346 : }
2500 : :
/*
 * Regular reply from standby advising of WAL locations on standby server.
 *
 * Parses the standby-status-update payload (write/flush/apply LSNs, reply
 * timestamp, reply-requested flag), records the positions and lag estimates
 * in this walsender's shared-memory slot, releases syncrep waiters, and
 * advances the replication slot's confirmed position on flush.
 */
static void
ProcessStandbyReplyMessage(void)
{
	XLogRecPtr	writePtr,
				flushPtr,
				applyPtr;
	bool		replyRequested;
	TimeOffset	writeLag,
				flushLag,
				applyLag;
	bool		clearLagTimes;
	TimestampTz now;
	TimestampTz replyTime;

	/* Positions from the previous reply, for detecting an idle standby. */
	static XLogRecPtr prevWritePtr = InvalidXLogRecPtr;
	static XLogRecPtr prevFlushPtr = InvalidXLogRecPtr;
	static XLogRecPtr prevApplyPtr = InvalidXLogRecPtr;

	/* the caller already consumed the msgtype byte */
	writePtr = pq_getmsgint64(&reply_message);
	flushPtr = pq_getmsgint64(&reply_message);
	applyPtr = pq_getmsgint64(&reply_message);
	replyTime = pq_getmsgint64(&reply_message);
	replyRequested = pq_getmsgbyte(&reply_message);

	if (message_level_is_interesting(DEBUG2))
	{
		char	   *replyTimeStr;

		/* Copy because timestamptz_to_str returns a static buffer */
		replyTimeStr = pstrdup(timestamptz_to_str(replyTime));

		elog(DEBUG2, "write %X/%08X flush %X/%08X apply %X/%08X%s reply_time %s",
			 LSN_FORMAT_ARGS(writePtr),
			 LSN_FORMAT_ARGS(flushPtr),
			 LSN_FORMAT_ARGS(applyPtr),
			 replyRequested ? " (reply requested)" : "",
			 replyTimeStr);

		pfree(replyTimeStr);
	}

	/* See if we can compute the round-trip lag for these positions. */
	now = GetCurrentTimestamp();
	writeLag = LagTrackerRead(SYNC_REP_WAIT_WRITE, writePtr, now);
	flushLag = LagTrackerRead(SYNC_REP_WAIT_FLUSH, flushPtr, now);
	applyLag = LagTrackerRead(SYNC_REP_WAIT_APPLY, applyPtr, now);

	/*
	 * If the standby reports that it has fully replayed the WAL, and the
	 * write/flush/apply positions remain unchanged across two consecutive
	 * reply messages, forget the lag times measured when it last
	 * wrote/flushed/applied a WAL record.
	 *
	 * The second message with unchanged positions typically results from
	 * wal_receiver_status_interval expiring on the standby, so lag values are
	 * usually cleared after that interval when there is no activity. This
	 * avoids displaying stale lag data until more WAL traffic arrives.
	 */
	clearLagTimes = (applyPtr == sentPtr && flushPtr == sentPtr &&
					 writePtr == prevWritePtr && flushPtr == prevFlushPtr &&
					 applyPtr == prevApplyPtr);

	prevWritePtr = writePtr;
	prevFlushPtr = flushPtr;
	prevApplyPtr = applyPtr;

	/* Send a reply if the standby requested one. */
	if (replyRequested)
		WalSndKeepalive(false, InvalidXLogRecPtr);

	/*
	 * Update shared state for this WalSender process based on reply data from
	 * standby.
	 */
	{
		WalSnd	   *walsnd = MyWalSnd;

		SpinLockAcquire(&walsnd->mutex);
		walsnd->write = writePtr;
		walsnd->flush = flushPtr;
		walsnd->apply = applyPtr;
		/*
		 * A lag of -1 apparently means LagTrackerRead had no fresh sample;
		 * then keep the previous value unless we're clearing stale lag times.
		 * (NOTE(review): -1 semantics inferred from this usage — confirm
		 * against LagTrackerRead.)
		 */
		if (writeLag != -1 || clearLagTimes)
			walsnd->writeLag = writeLag;
		if (flushLag != -1 || clearLagTimes)
			walsnd->flushLag = flushLag;
		if (applyLag != -1 || clearLagTimes)
			walsnd->applyLag = applyLag;
		walsnd->replyTime = replyTime;
		SpinLockRelease(&walsnd->mutex);
	}

	if (!am_cascading_walsender)
		SyncRepReleaseWaiters();

	/*
	 * Advance our local xmin horizon when the client confirmed a flush.
	 */
	if (MyReplicationSlot && XLogRecPtrIsValid(flushPtr))
	{
		if (SlotIsLogical(MyReplicationSlot))
			LogicalConfirmReceivedLocation(flushPtr);
		else
			PhysicalConfirmReceivedLocation(flushPtr);
	}
}
2610 : :
2611 : : /* compute new replication slot xmin horizon if needed */
2612 : : static void
3328 simon@2ndQuadrant.co 2613 : 66 : PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin, TransactionId feedbackCatalogXmin)
2614 : : {
4382 bruce@momjian.us 2615 : 66 : bool changed = false;
3864 rhaas@postgresql.org 2616 : 66 : ReplicationSlot *slot = MyReplicationSlot;
2617 : :
4477 2618 [ - + ]: 66 : SpinLockAcquire(&slot->mutex);
2091 andres@anarazel.de 2619 : 66 : MyProc->xmin = InvalidTransactionId;
2620 : :
2621 : : /*
2622 : : * For physical replication we don't need the interlock provided by xmin
2623 : : * and effective_xmin since the consequences of a missed increase are
2624 : : * limited to query cancellations, so set both at once.
2625 : : */
4477 rhaas@postgresql.org 2626 [ + + + + ]: 66 : if (!TransactionIdIsNormal(slot->data.xmin) ||
2627 [ + + ]: 29 : !TransactionIdIsNormal(feedbackXmin) ||
2628 : 29 : TransactionIdPrecedes(slot->data.xmin, feedbackXmin))
2629 : : {
2630 : 47 : changed = true;
2631 : 47 : slot->data.xmin = feedbackXmin;
2632 : 47 : slot->effective_xmin = feedbackXmin;
2633 : : }
3328 simon@2ndQuadrant.co 2634 [ + + + + ]: 66 : if (!TransactionIdIsNormal(slot->data.catalog_xmin) ||
2635 [ + + ]: 14 : !TransactionIdIsNormal(feedbackCatalogXmin) ||
2636 : 14 : TransactionIdPrecedes(slot->data.catalog_xmin, feedbackCatalogXmin))
2637 : : {
2638 : 53 : changed = true;
2639 : 53 : slot->data.catalog_xmin = feedbackCatalogXmin;
2640 : 53 : slot->effective_catalog_xmin = feedbackCatalogXmin;
2641 : : }
4477 rhaas@postgresql.org 2642 : 66 : SpinLockRelease(&slot->mutex);
2643 : :
2644 [ + + ]: 66 : if (changed)
2645 : : {
2646 : 57 : ReplicationSlotMarkDirty();
4446 2647 : 57 : ReplicationSlotsComputeRequiredXmin(false);
2648 : : }
5555 simon@2ndQuadrant.co 2649 : 66 : }
2650 : :
2651 : : /*
2652 : : * Check that the provided xmin/epoch are sane, that is, not in the future
2653 : : * and not so far back as to be already wrapped around.
2654 : : *
2655 : : * Epoch of nextXid should be same as standby, or if the counter has
2656 : : * wrapped, then one greater than standby.
2657 : : *
2658 : : * This check doesn't care about whether clog exists for these xids
2659 : : * at all.
2660 : : */
2661 : : static bool
3328 2662 : 65 : TransactionIdInRecentPast(TransactionId xid, uint32 epoch)
2663 : : {
2664 : : FullTransactionId nextFullXid;
2665 : : TransactionId nextXid;
2666 : : uint32 nextEpoch;
2667 : :
2595 tmunro@postgresql.or 2668 : 65 : nextFullXid = ReadNextFullTransactionId();
2669 : 65 : nextXid = XidFromFullTransactionId(nextFullXid);
2670 : 65 : nextEpoch = EpochFromFullTransactionId(nextFullXid);
2671 : :
3328 simon@2ndQuadrant.co 2672 [ + - ]: 65 : if (xid <= nextXid)
2673 : : {
2674 [ - + ]: 65 : if (epoch != nextEpoch)
3328 simon@2ndQuadrant.co 2675 :UBC 0 : return false;
2676 : : }
2677 : : else
2678 : : {
2679 [ # # ]: 0 : if (epoch + 1 != nextEpoch)
2680 : 0 : return false;
2681 : : }
2682 : :
3328 simon@2ndQuadrant.co 2683 [ - + ]:CBC 65 : if (!TransactionIdPrecedesOrEquals(xid, nextXid))
3275 bruce@momjian.us 2684 :UBC 0 : return false; /* epoch OK, but it's wrapped around */
2685 : :
3328 simon@2ndQuadrant.co 2686 :CBC 65 : return true;
2687 : : }
2688 : :
/*
 * Hot Standby feedback
 *
 * Parses a hot-standby-feedback message (reply time, xmin/epoch,
 * catalog_xmin/epoch) and reserves the requested horizons either via the
 * replication slot or via this walsender's PGPROC xmin, so that vacuum does
 * not remove rows the standby still needs.  Invalid (zero) feedback values
 * release the reservation.
 */
static void
ProcessStandbyHSFeedbackMessage(void)
{
	TransactionId feedbackXmin;
	uint32		feedbackEpoch;
	TransactionId feedbackCatalogXmin;
	uint32		feedbackCatalogEpoch;
	TimestampTz replyTime;

	/*
	 * Decipher the reply message. The caller already consumed the msgtype
	 * byte. See XLogWalRcvSendHSFeedback() in walreceiver.c for the creation
	 * of this message.
	 */
	replyTime = pq_getmsgint64(&reply_message);
	feedbackXmin = pq_getmsgint(&reply_message, 4);
	feedbackEpoch = pq_getmsgint(&reply_message, 4);
	feedbackCatalogXmin = pq_getmsgint(&reply_message, 4);
	feedbackCatalogEpoch = pq_getmsgint(&reply_message, 4);

	if (message_level_is_interesting(DEBUG2))
	{
		char	   *replyTimeStr;

		/* Copy because timestamptz_to_str returns a static buffer */
		replyTimeStr = pstrdup(timestamptz_to_str(replyTime));

		elog(DEBUG2, "hot standby feedback xmin %u epoch %u, catalog_xmin %u epoch %u reply_time %s",
			 feedbackXmin,
			 feedbackEpoch,
			 feedbackCatalogXmin,
			 feedbackCatalogEpoch,
			 replyTimeStr);

		pfree(replyTimeStr);
	}

	/*
	 * Update shared state for this WalSender process based on reply data from
	 * standby.
	 */
	{
		WalSnd	   *walsnd = MyWalSnd;

		SpinLockAcquire(&walsnd->mutex);
		walsnd->replyTime = replyTime;
		SpinLockRelease(&walsnd->mutex);
	}

	/*
	 * Unset WalSender's xmins if the feedback message values are invalid.
	 * This happens when the downstream turned hot_standby_feedback off.
	 */
	if (!TransactionIdIsNormal(feedbackXmin)
		&& !TransactionIdIsNormal(feedbackCatalogXmin))
	{
		MyProc->xmin = InvalidTransactionId;
		if (MyReplicationSlot != NULL)
			PhysicalReplicationSlotNewXmin(feedbackXmin, feedbackCatalogXmin);
		return;
	}

	/*
	 * Check that the provided xmin/epoch are sane, that is, not in the future
	 * and not so far back as to be already wrapped around. Ignore if not.
	 */
	if (TransactionIdIsNormal(feedbackXmin) &&
		!TransactionIdInRecentPast(feedbackXmin, feedbackEpoch))
		return;

	if (TransactionIdIsNormal(feedbackCatalogXmin) &&
		!TransactionIdInRecentPast(feedbackCatalogXmin, feedbackCatalogEpoch))
		return;

	/*
	 * Set the WalSender's xmin equal to the standby's requested xmin, so that
	 * the xmin will be taken into account by GetSnapshotData() /
	 * ComputeXidHorizons(). This will hold back the removal of dead rows and
	 * thereby prevent the generation of cleanup conflicts on the standby
	 * server.
	 *
	 * There is a small window for a race condition here: although we just
	 * checked that feedbackXmin precedes nextXid, the nextXid could have
	 * gotten advanced between our fetching it and applying the xmin below,
	 * perhaps far enough to make feedbackXmin wrap around. In that case the
	 * xmin we set here would be "in the future" and have no effect. No point
	 * in worrying about this since it's too late to save the desired data
	 * anyway. Assuming that the standby sends us an increasing sequence of
	 * xmins, this could only happen during the first reply cycle, else our
	 * own xmin would prevent nextXid from advancing so far.
	 *
	 * We don't bother taking the ProcArrayLock here. Setting the xmin field
	 * is assumed atomic, and there's no real need to prevent concurrent
	 * horizon determinations. (If we're moving our xmin forward, this is
	 * obviously safe, and if we're moving it backwards, well, the data is at
	 * risk already since a VACUUM could already have determined the horizon.)
	 *
	 * If we're using a replication slot we reserve the xmin via that,
	 * otherwise via the walsender's PGPROC entry. We can only track the
	 * catalog xmin separately when using a slot, so we store the least of the
	 * two provided when not using a slot.
	 *
	 * XXX: It might make sense to generalize the ephemeral slot concept and
	 * always use the slot mechanism to handle the feedback xmin.
	 */
	if (MyReplicationSlot != NULL)	/* XXX: persistency configurable? */
		PhysicalReplicationSlotNewXmin(feedbackXmin, feedbackCatalogXmin);
	else
	{
		if (TransactionIdIsNormal(feedbackCatalogXmin)
			&& TransactionIdPrecedes(feedbackCatalogXmin, feedbackXmin))
			MyProc->xmin = feedbackCatalogXmin;
		else
			MyProc->xmin = feedbackXmin;
	}
}
2808 : :
/*
 * Process the request for a primary status update message.
 *
 * Replies with a PrimaryStatusUpdate CopyData message containing the current
 * write LSN, the oldest xid in commit (as a full xid), the next full xid,
 * and the current timestamp.
 */
static void
ProcessStandbyPSRequestMessage(void)
{
	XLogRecPtr	lsn = InvalidXLogRecPtr;
	TransactionId oldestXidInCommit;
	TransactionId oldestGXidInCommit;
	FullTransactionId nextFullXid;
	FullTransactionId fullOldestXidInCommit;
	WalSnd	   *walsnd = MyWalSnd;
	TimestampTz replyTime;

	/*
	 * This shouldn't happen because we don't support getting primary status
	 * message from standby.
	 */
	if (RecoveryInProgress())
		elog(ERROR, "the primary status is unavailable during recovery");

	/* The caller already consumed the msgtype byte. */
	replyTime = pq_getmsgint64(&reply_message);

	/*
	 * Update shared state for this WalSender process based on reply data from
	 * standby.
	 */
	SpinLockAcquire(&walsnd->mutex);
	walsnd->replyTime = replyTime;
	SpinLockRelease(&walsnd->mutex);

	/*
	 * Consider transactions in the current database, as only these are the
	 * ones replicated.
	 */
	oldestXidInCommit = GetOldestActiveTransactionId(true, false);
	oldestGXidInCommit = TwoPhaseGetOldestXidInCommit();

	/*
	 * Update the oldest xid for standby transmission if an older prepared
	 * transaction exists and is currently in commit phase.
	 */
	if (TransactionIdIsValid(oldestGXidInCommit) &&
		TransactionIdPrecedes(oldestGXidInCommit, oldestXidInCommit))
		oldestXidInCommit = oldestGXidInCommit;

	/* Widen the 32-bit xid to a full xid using the current next-xid epoch. */
	nextFullXid = ReadNextFullTransactionId();
	fullOldestXidInCommit = FullTransactionIdFromAllowableAt(nextFullXid,
															 oldestXidInCommit);
	lsn = GetXLogWriteRecPtr();

	elog(DEBUG2, "sending primary status");

	/* construct the message... */
	resetStringInfo(&output_message);
	pq_sendbyte(&output_message, PqReplMsg_PrimaryStatusUpdate);
	pq_sendint64(&output_message, lsn);
	pq_sendint64(&output_message, (int64) U64FromFullTransactionId(fullOldestXidInCommit));
	pq_sendint64(&output_message, (int64) U64FromFullTransactionId(nextFullXid));
	pq_sendint64(&output_message, GetCurrentTimestamp());

	/* ... and send it wrapped in CopyData */
	pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
}
2873 : :
2874 : : /*
2875 : : * Compute how long send/receive loops should sleep.
2876 : : *
2877 : : * If wal_sender_timeout is enabled we want to wake up in time to send
2878 : : * keepalives and to abort the connection if wal_sender_timeout has been
2879 : : * reached.
2880 : : *
2881 : : * If wal_sender_shutdown_timeout is enabled, during shutdown, we want to
2882 : : * wake up in time to exit when it expires.
2883 : : */
2884 : : static long
4439 rhaas@postgresql.org 2885 :CBC 92843 : WalSndComputeSleeptime(TimestampTz now)
2886 : : {
2887 : : TimestampTz wakeup_time;
3240 tgl@sss.pgh.pa.us 2888 : 92843 : long sleeptime = 10000; /* 10 s */
2889 : :
4359 andres@anarazel.de 2890 [ + - + + ]: 92843 : if (wal_sender_timeout > 0 && last_reply_timestamp > 0)
2891 : : {
2892 : : /*
2893 : : * At the latest stop sleeping once wal_sender_timeout has been
2894 : : * reached.
2895 : : */
4439 rhaas@postgresql.org 2896 : 92772 : wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
2897 : : wal_sender_timeout);
2898 : :
2899 : : /*
2900 : : * If no ping has been sent yet, wakeup when it's time to do so.
2901 : : * WalSndKeepaliveIfNecessary() wants to send a keepalive once half of
2902 : : * the timeout passed without a response.
2903 : : */
2904 [ + + ]: 92772 : if (!waiting_for_ping_response)
2905 : 91999 : wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
2906 : : wal_sender_timeout / 2);
2907 : :
2908 : : /* Compute relative time until wakeup. */
2002 tgl@sss.pgh.pa.us 2909 : 92772 : sleeptime = TimestampDifferenceMilliseconds(now, wakeup_time);
2910 : : }
2911 : :
29 fujii@postgresql.org 2912 [ + + + + ]:GNC 92843 : if (shutdown_request_timestamp != 0 && wal_sender_shutdown_timeout > 0)
2913 : : {
2914 : : long shutdown_sleeptime;
2915 : :
2916 : 4 : wakeup_time = TimestampTzPlusMilliseconds(shutdown_request_timestamp,
2917 : : wal_sender_shutdown_timeout);
2918 : :
2919 : 4 : shutdown_sleeptime = TimestampDifferenceMilliseconds(now, wakeup_time);
2920 : :
2921 : : /* Choose the earliest wakeup. */
2922 [ + - ]: 4 : if (shutdown_sleeptime < sleeptime)
2923 : 4 : sleeptime = shutdown_sleeptime;
2924 : : }
2925 : :
4439 rhaas@postgresql.org 2926 :CBC 92843 : return sleeptime;
2927 : : }
2928 : :
2929 : : /*
2930 : : * Check whether there have been responses by the client within
2931 : : * wal_sender_timeout and shutdown if not. Using last_processing as the
2932 : : * reference point avoids counting server-side stalls against the client.
2933 : : * However, a long server-side stall can make WalSndKeepaliveIfNecessary()
2934 : : * postdate last_processing by more than wal_sender_timeout. If that happens,
2935 : : * the client must reply almost immediately to avoid a timeout. This rarely
2936 : : * affects the default configuration, under which clients spontaneously send a
2937 : : * message every standby_message_timeout = wal_sender_timeout/6 = 10s. We
2938 : : * could eliminate that problem by recognizing timeout expiration at
2939 : : * wal_sender_timeout/2 after the keepalive.
2940 : : */
2941 : : static void
2804 noah@leadboat.com 2942 : 1118786 : WalSndCheckTimeOut(void)
2943 : : {
2944 : : TimestampTz timeout;
2945 : :
2946 : : /* don't bail out if we're doing something that doesn't require timeouts */
4359 andres@anarazel.de 2947 [ + + ]: 1118786 : if (last_reply_timestamp <= 0)
2948 : 28 : return;
2949 : :
4439 rhaas@postgresql.org 2950 : 1118758 : timeout = TimestampTzPlusMilliseconds(last_reply_timestamp,
2951 : : wal_sender_timeout);
2952 : :
2804 noah@leadboat.com 2953 [ + - - + ]: 1118758 : if (wal_sender_timeout > 0 && last_processing >= timeout)
2954 : : {
2955 : : /*
2956 : : * Since typically expiration of replication timeout means
2957 : : * communication problem, we don't send the error message to the
2958 : : * standby.
2959 : : */
4439 rhaas@postgresql.org 2960 [ # # ]:UBC 0 : ereport(COMMERROR,
2961 : : (errmsg("terminating walsender process due to replication timeout")));
2962 : :
2963 : 0 : WalSndShutdown();
2964 : : }
2965 : : }
2966 : :
/*
 * Check whether the walsender process should terminate due to the expiration
 * of wal_sender_shutdown_timeout after the receipt of a shutdown request.
 *
 * Called from the walsender main loops.  On the first call after a shutdown
 * request (got_STOPPING or got_SIGUSR2) it records the request time; on
 * later calls it terminates the walsender via WalSndDoneImmediate() once
 * wal_sender_shutdown_timeout has elapsed since that time.
 */
static void
WalSndCheckShutdownTimeout(void)
{
	TimestampTz now;

	/* Do nothing if shutdown has not been requested yet */
	if (!(got_STOPPING || got_SIGUSR2))
		return;

	/*
	 * Terminate immediately if the timeout is set to 0.
	 *
	 * NOTE(review): this assumes WalSndDoneImmediate() does not return when
	 * it actually terminates; if it can return (e.g. state not eligible),
	 * control falls through to the timestamp bookkeeping below — confirm
	 * that is intended.
	 */
	if (wal_sender_shutdown_timeout == 0)
		WalSndDoneImmediate();

	/*
	 * Record the shutdown request timestamp even if
	 * wal_sender_shutdown_timeout is disabled (-1), since the setting may
	 * change during shutdown and the timestamp will be needed in that case.
	 */
	if (shutdown_request_timestamp == 0)
	{
		shutdown_request_timestamp = GetCurrentTimestamp();
		return;
	}

	/* Do not check the timeout if it's disabled */
	if (wal_sender_shutdown_timeout == -1)
		return;

	/* Terminate immediately if the timeout expires */
	now = GetCurrentTimestamp();
	if (TimestampDifferenceExceeds(shutdown_request_timestamp, now,
								   wal_sender_shutdown_timeout))
		WalSndDoneImmediate();
}
3005 : :
/*
 * Main loop of walsender process that streams the WAL over Copy messages.
 *
 * send_data is the per-mode producer (XLogSendPhysical or XLogSendLogical);
 * it buffers outgoing WAL into the libpq output buffer and maintains
 * WalSndCaughtUp.  The loop alternates between processing client replies,
 * producing and flushing data, servicing timeouts/keepalives, and sleeping
 * on the socket/latch when there is nothing to do.  Returns when streaming
 * ends cleanly (CopyDone exchanged in both directions).
 */
static void
WalSndLoop(WalSndSendDataCallback send_data)
{
	TimestampTz last_flush = 0;

	/*
	 * Initialize the last reply timestamp. That enables timeout processing
	 * from hereon.
	 */
	last_reply_timestamp = GetCurrentTimestamp();
	waiting_for_ping_response = false;

	/*
	 * Loop until we reach the end of this timeline or the client requests to
	 * stop streaming.
	 */
	for (;;)
	{
		/* Clear any already-pending wakeups */
		ResetLatch(MyLatch);

		CHECK_FOR_INTERRUPTS();

		/* Process any requests or signals received recently */
		WalSndHandleConfigReload();

		/* Check for input from the client */
		ProcessRepliesIfAny();

		/*
		 * If we have received CopyDone from the client, sent CopyDone
		 * ourselves, and the output buffer is empty, it's time to exit
		 * streaming.
		 */
		if (streamingDoneReceiving && streamingDoneSending &&
			!pq_is_send_pending())
			break;

		/*
		 * If we don't have any pending data in the output buffer, try to send
		 * some more.  If there is some, we don't bother to call send_data
		 * again until we've flushed it ... but we'd better assume we are not
		 * caught up.
		 */
		if (!pq_is_send_pending())
			send_data();
		else
			WalSndCaughtUp = false;

		/* Try to flush pending output to the client */
		if (pq_flush_if_writable() != 0)
			WalSndShutdown();

		/* If nothing remains to be sent right now ... */
		if (WalSndCaughtUp && !pq_is_send_pending())
		{
			/*
			 * If we're in catchup state, move to streaming.  This is an
			 * important state change for users to know about, since before
			 * this point data loss might occur if the primary dies and we
			 * need to failover to the standby.  The state change is also
			 * important for synchronous replication, since commits that
			 * started to wait at that point might wait for some time.
			 */
			if (MyWalSnd->state == WALSNDSTATE_CATCHUP)
			{
				ereport(DEBUG1,
						(errmsg_internal("\"%s\" has now caught up with upstream server",
										 application_name)));
				WalSndSetState(WALSNDSTATE_STREAMING);
			}

			/*
			 * When SIGUSR2 arrives, we send any outstanding logs up to the
			 * shutdown checkpoint record (i.e., the latest record), wait for
			 * them to be replicated to the standby, and exit.  This may be a
			 * normal termination at shutdown, or a promotion, the walsender
			 * is not sure which.
			 */
			if (got_SIGUSR2)
				WalSndDone(send_data);
		}

		/* Check for replication timeout. */
		WalSndCheckTimeOut();

		/*
		 * During shutdown, die if the shutdown timeout expires. Call this
		 * before WalSndComputeSleeptime() so the timeout is considered when
		 * computing sleep time.
		 */
		WalSndCheckShutdownTimeout();

		/* Send keepalive if the time has come */
		WalSndKeepaliveIfNecessary();

		/*
		 * Block if we have unsent data.  XXX For logical replication, let
		 * WalSndWaitForWal() handle any other blocking; idle receivers need
		 * its additional actions.  For physical replication, also block if
		 * caught up; its send_data does not block.
		 *
		 * The IO statistics are reported in WalSndWaitForWal() for the
		 * logical WAL senders.
		 */
		if ((WalSndCaughtUp && send_data != XLogSendLogical &&
			 !streamingDoneSending) ||
			pq_is_send_pending())
		{
			long		sleeptime;
			int			wakeEvents;
			TimestampTz now;

			/* Stop watching for client input once the client sent CopyDone. */
			if (!streamingDoneReceiving)
				wakeEvents = WL_SOCKET_READABLE;
			else
				wakeEvents = 0;

			/*
			 * Use fresh timestamp, not last_processing, to reduce the chance
			 * of reaching wal_sender_timeout before sending a keepalive.
			 */
			now = GetCurrentTimestamp();
			sleeptime = WalSndComputeSleeptime(now);

			if (pq_is_send_pending())
				wakeEvents |= WL_SOCKET_WRITEABLE;

			/* Report IO statistics, if needed */
			if (TimestampDifferenceExceeds(last_flush, now,
										   WALSENDER_STATS_FLUSH_INTERVAL))
			{
				pgstat_flush_io(false);
				(void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
				last_flush = now;
			}

			/* Sleep until something happens or we time out */
			WalSndWait(wakeEvents, sleeptime, WAIT_EVENT_WAL_SENDER_MAIN);
		}
	}
}
3149 : :
3150 : : /* Initialize a per-walsender data structure for this walsender process */
3151 : : static void
4960 3152 : 1279 : InitWalSenderSlot(void)
3153 : : {
3154 : : int i;
3155 : :
3156 : : /*
3157 : : * WalSndCtl should be set up already (we inherit this by fork() or
3158 : : * EXEC_BACKEND mechanism from the postmaster).
3159 : : */
5954 3160 [ - + ]: 1279 : Assert(WalSndCtl != NULL);
3161 [ - + ]: 1279 : Assert(MyWalSnd == NULL);
3162 : :
3163 : : /*
3164 : : * Find a free walsender slot and reserve it. This must not fail due to
3165 : : * the prior check for free WAL senders in InitProcess().
3166 : : */
5878 rhaas@postgresql.org 3167 [ + - ]: 1888 : for (i = 0; i < max_wal_senders; i++)
3168 : : {
3617 3169 : 1888 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3170 : :
5954 heikki.linnakangas@i 3171 [ - + ]: 1888 : SpinLockAcquire(&walsnd->mutex);
3172 : :
3173 [ + + ]: 1888 : if (walsnd->pid != 0)
3174 : : {
3175 : 609 : SpinLockRelease(&walsnd->mutex);
3176 : 609 : continue;
3177 : : }
3178 : : else
3179 : : {
3180 : : /*
3181 : : * Found a free slot. Reserve it for us.
3182 : : */
3183 : 1279 : walsnd->pid = MyProcPid;
2208 tgl@sss.pgh.pa.us 3184 : 1279 : walsnd->state = WALSNDSTATE_STARTUP;
4877 alvherre@alvh.no-ip. 3185 : 1279 : walsnd->sentPtr = InvalidXLogRecPtr;
2208 tgl@sss.pgh.pa.us 3186 : 1279 : walsnd->needreload = false;
3796 magnus@hagander.net 3187 : 1279 : walsnd->write = InvalidXLogRecPtr;
3188 : 1279 : walsnd->flush = InvalidXLogRecPtr;
3189 : 1279 : walsnd->apply = InvalidXLogRecPtr;
3330 simon@2ndQuadrant.co 3190 : 1279 : walsnd->writeLag = -1;
3191 : 1279 : walsnd->flushLag = -1;
3192 : 1279 : walsnd->applyLag = -1;
2208 tgl@sss.pgh.pa.us 3193 : 1279 : walsnd->sync_standby_priority = 0;
2704 michael@paquier.xyz 3194 : 1279 : walsnd->replyTime = 0;
3195 : :
3196 : : /*
3197 : : * The kind assignment is done here and not in StartReplication()
3198 : : * and StartLogicalReplication(). Indeed, the logical walsender
3199 : : * needs to read WAL records (like snapshot of running
3200 : : * transactions) during the slot creation. So it needs to be woken
3201 : : * up based on its kind.
3202 : : *
3203 : : * The kind assignment could also be done in StartReplication(),
3204 : : * StartLogicalReplication() and CREATE_REPLICATION_SLOT but it
3205 : : * seems better to set it on one place.
3206 : : */
1123 andres@anarazel.de 3207 [ + + ]: 1279 : if (MyDatabaseId == InvalidOid)
3208 : 506 : walsnd->kind = REPLICATION_KIND_PHYSICAL;
3209 : : else
3210 : 773 : walsnd->kind = REPLICATION_KIND_LOGICAL;
3211 : :
5954 heikki.linnakangas@i 3212 : 1279 : SpinLockRelease(&walsnd->mutex);
3213 : : /* don't need the lock anymore */
154 peter@eisentraut.org 3214 :GNC 1279 : MyWalSnd = walsnd;
3215 : :
5954 heikki.linnakangas@i 3216 :CBC 1279 : break;
3217 : : }
3218 : : }
3219 : :
2639 michael@paquier.xyz 3220 [ - + ]: 1279 : Assert(MyWalSnd != NULL);
3221 : :
3222 : : /* Arrange to clean up at walsender exit */
5954 heikki.linnakangas@i 3223 : 1279 : on_shmem_exit(WalSndKill, 0);
3224 : 1279 : }
3225 : :
3226 : : /* Destroy the per-walsender data structure for this walsender process */
3227 : : static void
3228 : 1279 : WalSndKill(int code, Datum arg)
3229 : : {
4476 tgl@sss.pgh.pa.us 3230 : 1279 : WalSnd *walsnd = MyWalSnd;
3231 : :
3232 [ - + ]: 1279 : Assert(walsnd != NULL);
3233 : :
3234 : 1279 : MyWalSnd = NULL;
3235 : :
4126 andres@anarazel.de 3236 [ - + ]: 1279 : SpinLockAcquire(&walsnd->mutex);
3237 : : /* Mark WalSnd struct as no longer being in use. */
4476 tgl@sss.pgh.pa.us 3238 : 1279 : walsnd->pid = 0;
4126 andres@anarazel.de 3239 : 1279 : SpinLockRelease(&walsnd->mutex);
5954 heikki.linnakangas@i 3240 : 1279 : }
3241 : :
/*
 * XLogReaderRoutine->segment_open callback.
 *
 * Opens the WAL segment containing nextSegNo for reading, setting
 * state->seg.ws_file, and reports the timeline actually used via *tli_p.
 * Errors out (ereport ERROR) if the segment cannot be opened.
 */
static void
WalSndSegmentOpen(XLogReaderState *state, XLogSegNo nextSegNo,
				  TimeLineID *tli_p)
{
	char		path[MAXPGPATH];

	/*-------
	 * When reading from a historic timeline, and there is a timeline switch
	 * within this segment, read from the WAL segment belonging to the new
	 * timeline.
	 *
	 * For example, imagine that this server is currently on timeline 5, and
	 * we're streaming timeline 4. The switch from timeline 4 to 5 happened at
	 * 0/13002088. In pg_wal, we have these files:
	 *
	 * ...
	 * 000000040000000000000012
	 * 000000040000000000000013
	 * 000000050000000000000013
	 * 000000050000000000000014
	 * ...
	 *
	 * In this situation, when requested to send the WAL from segment 0x13, on
	 * timeline 4, we read the WAL from file 000000050000000000000013. Archive
	 * recovery prefers files from newer timelines, so if the segment was
	 * restored from the archive on this server, the file belonging to the old
	 * timeline, 000000040000000000000013, might not exist. Their contents are
	 * equal up to the switchpoint, because at a timeline switch, the used
	 * portion of the old segment is copied to the new file.
	 */
	*tli_p = sendTimeLine;
	if (sendTimeLineIsHistoric)
	{
		XLogSegNo	endSegNo;

		XLByteToSeg(sendTimeLineValidUpto, endSegNo, state->segcxt.ws_segsize);
		/* At the switch segment, read the new timeline's copy instead. */
		if (nextSegNo == endSegNo)
			*tli_p = sendTimeLineNextTLI;
	}

	XLogFilePath(path, *tli_p, nextSegNo, state->segcxt.ws_segsize);
	state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
	if (state->seg.ws_file >= 0)
		return;

	/*
	 * If the file is not found, assume it's because the standby asked for a
	 * too old WAL segment that has already been removed or recycled.
	 */
	if (errno == ENOENT)
	{
		char		xlogfname[MAXFNAMELEN];

		/* XLogFileName() may clobber errno; preserve it for the report. */
		int			save_errno = errno;

		XLogFileName(xlogfname, *tli_p, nextSegNo, wal_segment_size);
		errno = save_errno;
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("requested WAL segment %s has already been removed",
						xlogfname)));
	}
	else
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": %m",
						path)));
}
3310 : :
/*
 * Send out the WAL in its normal physical/stored form.
 *
 * Read up to MAX_SEND_SIZE bytes of WAL that's been flushed to disk,
 * but not yet sent to the client, and buffer it in the libpq output
 * buffer.
 *
 * If there is no unsent WAL remaining, WalSndCaughtUp is set to true,
 * otherwise WalSndCaughtUp is set to false.
 */
static void
XLogSendPhysical(void)
{
	XLogRecPtr	SendRqstPtr;	/* how far we may send */
	XLogRecPtr	startptr;		/* start of this message's payload */
	XLogRecPtr	endptr;			/* end of this message's payload */
	Size		nbytes;
	XLogSegNo	segno;
	WALReadError errinfo;
	Size		rbytes;			/* bytes satisfied from WAL buffers */

	/* If requested switch the WAL sender to the stopping state. */
	if (got_STOPPING)
		WalSndSetState(WALSNDSTATE_STOPPING);

	/* Nothing more to produce once we've sent CopyDone. */
	if (streamingDoneSending)
	{
		WalSndCaughtUp = true;
		return;
	}

	/* Figure out how far we can safely send the WAL. */
	if (sendTimeLineIsHistoric)
	{
		/*
		 * Streaming an old timeline that's in this server's history, but is
		 * not the one we're currently inserting or replaying. It can be
		 * streamed up to the point where we switched off that timeline.
		 */
		SendRqstPtr = sendTimeLineValidUpto;
	}
	else if (am_cascading_walsender)
	{
		TimeLineID	SendRqstTLI;

		/*
		 * Streaming the latest timeline on a standby.
		 *
		 * Attempt to send all WAL that has already been replayed, so that we
		 * know it's valid. If we're receiving WAL through streaming
		 * replication, it's also OK to send any WAL that has been received
		 * but not replayed.
		 *
		 * The timeline we're recovering from can change, or we can be
		 * promoted. In either case, the current timeline becomes historic. We
		 * need to detect that so that we don't try to stream past the point
		 * where we switched to another timeline. We check for promotion or
		 * timeline switch after calculating FlushPtr, to avoid a race
		 * condition: if the timeline becomes historic just after we checked
		 * that it was still current, it's still be OK to stream it up to the
		 * FlushPtr that was calculated before it became historic.
		 */
		bool		becameHistoric = false;

		SendRqstPtr = GetStandbyFlushRecPtr(&SendRqstTLI);

		if (!RecoveryInProgress())
		{
			/* We have been promoted. */
			SendRqstTLI = GetWALInsertionTimeLine();
			am_cascading_walsender = false;
			becameHistoric = true;
		}
		else
		{
			/*
			 * Still a cascading standby. But is the timeline we're sending
			 * still the one recovery is recovering from?
			 */
			if (sendTimeLine != SendRqstTLI)
				becameHistoric = true;
		}

		if (becameHistoric)
		{
			/*
			 * The timeline we were sending has become historic. Read the
			 * timeline history file of the new timeline to see where exactly
			 * we forked off from the timeline we were sending.
			 */
			List	   *history;

			history = readTimeLineHistory(SendRqstTLI);
			sendTimeLineValidUpto = tliSwitchPoint(sendTimeLine, history, &sendTimeLineNextTLI);

			Assert(sendTimeLine < sendTimeLineNextTLI);
			list_free_deep(history);

			sendTimeLineIsHistoric = true;

			SendRqstPtr = sendTimeLineValidUpto;
		}
	}
	else
	{
		/*
		 * Streaming the current timeline on a primary.
		 *
		 * Attempt to send all data that's already been written out and
		 * fsync'd to disk.  We cannot go further than what's been written out
		 * given the current implementation of WALRead().  And in any case
		 * it's unsafe to send WAL that is not securely down to disk on the
		 * primary: if the primary subsequently crashes and restarts, standbys
		 * must not have applied any WAL that got lost on the primary.
		 */
		SendRqstPtr = GetFlushRecPtr(NULL);
	}

	/*
	 * Record the current system time as an approximation of the time at which
	 * this WAL location was written for the purposes of lag tracking.
	 *
	 * In theory we could make XLogFlush() record a time in shmem whenever WAL
	 * is flushed and we could get that time as well as the LSN when we call
	 * GetFlushRecPtr() above (and likewise for the cascading standby
	 * equivalent), but rather than putting any new code into the hot WAL path
	 * it seems good enough to capture the time here.  We should reach this
	 * after XLogFlush() runs WalSndWakeupProcessRequests(), and although that
	 * may take some time, we read the WAL flush pointer and take the time
	 * very close to together here so that we'll get a later position if it is
	 * still moving.
	 *
	 * Because LagTrackerWrite ignores samples when the LSN hasn't advanced,
	 * this gives us a cheap approximation for the WAL flush time for this
	 * LSN.
	 *
	 * Note that the LSN is not necessarily the LSN for the data contained in
	 * the present message; it's the end of the WAL, which might be further
	 * ahead.  All the lag tracking machinery cares about is finding out when
	 * that arbitrary LSN is eventually reported as written, flushed and
	 * applied, so that it can measure the elapsed time.
	 */
	LagTrackerWrite(SendRqstPtr, GetCurrentTimestamp());

	/*
	 * If this is a historic timeline and we've reached the point where we
	 * forked to the next timeline, stop streaming.
	 *
	 * Note: We might already have sent WAL > sendTimeLineValidUpto. The
	 * startup process will normally replay all WAL that has been received
	 * from the primary, before promoting, but if the WAL streaming is
	 * terminated at a WAL page boundary, the valid portion of the timeline
	 * might end in the middle of a WAL record. We might've already sent the
	 * first half of that partial WAL record to the cascading standby, so that
	 * sentPtr > sendTimeLineValidUpto. That's OK; the cascading standby can't
	 * replay the partial WAL record either, so it can still follow our
	 * timeline switch.
	 */
	if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr)
	{
		/* close the current file. */
		if (xlogreader->seg.ws_file >= 0)
			wal_segment_close(xlogreader);

		/* Send CopyDone */
		pq_putmessage_noblock(PqMsg_CopyDone, NULL, 0);
		streamingDoneSending = true;

		WalSndCaughtUp = true;

		elog(DEBUG1, "walsender reached end of timeline at %X/%08X (sent up to %X/%08X)",
			 LSN_FORMAT_ARGS(sendTimeLineValidUpto),
			 LSN_FORMAT_ARGS(sentPtr));
		return;
	}

	/* Do we have any work to do? */
	Assert(sentPtr <= SendRqstPtr);
	if (SendRqstPtr <= sentPtr)
	{
		WalSndCaughtUp = true;
		return;
	}

	/*
	 * Figure out how much to send in one message. If there's no more than
	 * MAX_SEND_SIZE bytes to send, send everything. Otherwise send
	 * MAX_SEND_SIZE bytes, but round back to logfile or page boundary.
	 *
	 * The rounding is not only for performance reasons. Walreceiver relies on
	 * the fact that we never split a WAL record across two messages. Since a
	 * long WAL record is split at page boundary into continuation records,
	 * page boundary is always a safe cut-off point. We also assume that
	 * SendRqstPtr never points to the middle of a WAL record.
	 */
	startptr = sentPtr;
	endptr = startptr;
	endptr += MAX_SEND_SIZE;

	/* if we went beyond SendRqstPtr, back off */
	if (SendRqstPtr <= endptr)
	{
		endptr = SendRqstPtr;
		if (sendTimeLineIsHistoric)
			WalSndCaughtUp = false;
		else
			WalSndCaughtUp = true;
	}
	else
	{
		/* round down to page boundary. */
		endptr -= (endptr % XLOG_BLCKSZ);
		WalSndCaughtUp = false;
	}

	nbytes = endptr - startptr;
	Assert(nbytes <= MAX_SEND_SIZE);

	/*
	 * OK to read and send the slice.
	 */
	resetStringInfo(&output_message);
	pq_sendbyte(&output_message, PqReplMsg_WALData);

	pq_sendint64(&output_message, startptr);	/* dataStart */
	pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
	pq_sendint64(&output_message, 0);	/* sendtime, filled in last */

	/*
	 * Read the log directly into the output buffer to avoid extra memcpy
	 * calls.
	 */
	enlargeStringInfo(&output_message, nbytes);

retry:
	/* attempt to read WAL from WAL buffers first */
	rbytes = WALReadFromBuffers(&output_message.data[output_message.len],
								startptr, nbytes, xlogreader->seg.ws_tli);
	output_message.len += rbytes;
	startptr += rbytes;
	nbytes -= rbytes;

	/* now read the remaining WAL from WAL file */
	if (nbytes > 0 &&
		!WALRead(xlogreader,
				 &output_message.data[output_message.len],
				 startptr,
				 nbytes,
				 xlogreader->seg.ws_tli,	/* Pass the current TLI because
											 * only WalSndSegmentOpen controls
											 * whether new TLI is needed. */
				 &errinfo))
		WALReadRaiseError(&errinfo);

	/* See logical_read_xlog_page(). */
	XLByteToSeg(startptr, segno, xlogreader->segcxt.ws_segsize);
	CheckXLogRemoved(segno, xlogreader->seg.ws_tli);

	/*
	 * During recovery, the currently-open WAL file might be replaced with the
	 * file of the same name retrieved from archive. So we always need to
	 * check what we read was valid after reading into the buffer. If it's
	 * invalid, we try to open and read the file again.
	 */
	if (am_cascading_walsender)
	{
		WalSnd	   *walsnd = MyWalSnd;
		bool		reload;

		SpinLockAcquire(&walsnd->mutex);
		reload = walsnd->needreload;
		walsnd->needreload = false;
		SpinLockRelease(&walsnd->mutex);

		if (reload && xlogreader->seg.ws_file >= 0)
		{
			wal_segment_close(xlogreader);

			goto retry;
		}
	}

	output_message.len += nbytes;
	output_message.data[output_message.len] = '\0';

	/*
	 * Fill the send timestamp last, so that it is taken as late as possible.
	 * The offset skips the message type byte plus the dataStart and walEnd
	 * int64 fields written above.
	 */
	resetStringInfo(&tmpbuf);
	pq_sendint64(&tmpbuf, GetCurrentTimestamp());
	memcpy(&output_message.data[1 + sizeof(int64) + sizeof(int64)],
		   tmpbuf.data, sizeof(int64));

	pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);

	sentPtr = endptr;

	/* Update shared memory status */
	{
		WalSnd	   *walsnd = MyWalSnd;

		SpinLockAcquire(&walsnd->mutex);
		walsnd->sentPtr = sentPtr;
		SpinLockRelease(&walsnd->mutex);
	}

	/* Report progress of XLOG streaming in PS display */
	if (update_process_title)
	{
		char		activitymsg[50];

		snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%08X",
				 LSN_FORMAT_ARGS(sentPtr));
		set_ps_display(activitymsg);
	}
}
3627 : :
/*
 * Stream out logically decoded data.
 *
 * Reads one WAL record via the logical decoding context's reader, feeds it
 * to the decoding machinery, advances sentPtr, and maintains WalSndCaughtUp
 * based on whether the reader has reached the flush (or, on a cascading
 * standby, replay) position.
 */
static void
XLogSendLogical(void)
{
	XLogRecord *record;
	char	   *errm;

	/*
	 * We'll use the current flush point to determine whether we've caught up.
	 * This variable is static in order to cache it across calls. Caching is
	 * helpful because GetFlushRecPtr() needs to acquire a heavily-contended
	 * spinlock.
	 */
	static XLogRecPtr flushPtr = InvalidXLogRecPtr;

	/*
	 * Don't know whether we've caught up yet. We'll set WalSndCaughtUp to
	 * true in WalSndWaitForWal, if we're actually waiting. We also set to
	 * true if XLogReadRecord() had to stop reading but WalSndWaitForWal
	 * didn't wait - i.e. when we're shutting down.
	 */
	WalSndCaughtUp = false;

	record = XLogReadRecord(logical_decoding_ctx->reader, &errm);

	/* xlog record was invalid */
	if (errm != NULL)
		elog(ERROR, "could not find record while sending logically-decoded data: %s",
			 errm);

	if (record != NULL)
	{
		/*
		 * Note the lack of any call to LagTrackerWrite() which is handled by
		 * WalSndUpdateProgress which is called by output plugin through
		 * logical decoding write api.
		 */
		LogicalDecodingProcessRecord(logical_decoding_ctx, logical_decoding_ctx->reader);

		sentPtr = logical_decoding_ctx->reader->EndRecPtr;
	}

	/*
	 * If first time through in this session, initialize flushPtr. Otherwise,
	 * we only need to update flushPtr if EndRecPtr is past it.
	 */
	if (!XLogRecPtrIsValid(flushPtr) ||
		logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
	{
		/*
		 * For cascading logical WAL senders, we use the replay LSN instead of
		 * the flush LSN, since logical decoding on a standby only processes
		 * WAL that has been replayed. This distinction becomes particularly
		 * important during shutdown, as new WAL is no longer replayed and the
		 * last replayed LSN marks the furthest point up to which decoding can
		 * proceed.
		 */
		if (am_cascading_walsender)
			flushPtr = GetXLogReplayRecPtr(NULL);
		else
			flushPtr = GetFlushRecPtr(NULL);
	}

	/* If EndRecPtr is still past our flushPtr, it means we caught up. */
	if (logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
		WalSndCaughtUp = true;

	/*
	 * If we're caught up and have been requested to stop, have WalSndLoop()
	 * terminate the connection in an orderly manner, after writing out all
	 * the pending data.
	 */
	if (WalSndCaughtUp && got_STOPPING)
		got_SIGUSR2 = true;

	/* Update shared memory status */
	{
		WalSnd	   *walsnd = MyWalSnd;

		SpinLockAcquire(&walsnd->mutex);
		walsnd->sentPtr = sentPtr;
		SpinLockRelease(&walsnd->mutex);
	}
}
3714 : :
3715 : : /*
3716 : : * Forced shutdown of walsender if wal_sender_shutdown_timeout has expired.
3717 : : */
3718 : : static void
29 fujii@postgresql.org 3719 :GNC 4 : WalSndDoneImmediate(void)
3720 : : {
3721 : 4 : WalSndState state = MyWalSnd->state;
3722 : :
4 3723 [ + - + + ]: 4 : if ((state == WALSNDSTATE_CATCHUP ||
3724 [ + - ]: 1 : state == WALSNDSTATE_STREAMING ||
3725 : 4 : state == WALSNDSTATE_STOPPING) &&
3726 [ + - ]: 4 : !shutdown_stream_done_queued)
3727 : : {
3728 : : QueryCompletion qc;
3729 : :
3730 : : /* Try to inform receiver that XLOG streaming is done */
29 3731 : 4 : SetQueryCompletion(&qc, CMDTAG_COPY, 0);
4 3732 : 4 : EndCommandExtended(&qc, DestRemote, false, true);
3733 : 4 : shutdown_stream_done_queued = true;
3734 : :
3735 : : /*
3736 : : * Note that the output buffer may be full during the forced shutdown
3737 : : * of walsender. If pq_flush() is called at that time, the walsender
3738 : : * process will be stuck. Therefore, call pq_flush_if_writable()
3739 : : * instead. Successful reception of the done message with the
3740 : : * walsender forced into a shutdown is not guaranteed.
3741 : : */
29 3742 : 4 : pq_flush_if_writable();
3743 : : }
3744 : :
3745 : : /*
3746 : : * Prevent ereport from attempting to send any more messages to the
3747 : : * standby. Otherwise, it can cause the process to get stuck if the output
3748 : : * buffers are full.
3749 : : */
3750 [ + - ]: 4 : if (whereToSendOutput == DestRemote)
3751 : 4 : whereToSendOutput = DestNone;
3752 : :
3753 [ + - ]: 4 : ereport(WARNING,
3754 : : (errmsg("terminating walsender process due to replication shutdown timeout"),
3755 : : errdetail("Walsender process might have been terminated before all WAL data was replicated to the receiver.")));
3756 : :
3757 : 4 : proc_exit(0);
3758 : : }
3759 : :
/*
 * Shutdown if the sender is caught up.
 *
 * NB: This should only be called when the shutdown signal has been received
 * from postmaster.
 *
 * Note that if we determine that there's still more data to send, this
 * function will return control to the caller.
 */
static void
WalSndDone(WalSndSendDataCallback send_data)
{
	XLogRecPtr	replicatedPtr;

	/* ... let's just be real sure we're caught up ... */
	send_data();

	/*
	 * To figure out whether all WAL has successfully been replicated, check
	 * flush location if valid, write otherwise. Tools like pg_receivewal will
	 * usually (unless in synchronous mode) return an invalid flush location.
	 */
	replicatedPtr = XLogRecPtrIsValid(MyWalSnd->flush) ?
		MyWalSnd->flush : MyWalSnd->write;

	if (WalSndCaughtUp && sentPtr == replicatedPtr &&
		!pq_is_send_pending())
	{
		QueryCompletion qc;

		Assert(!shutdown_stream_done_queued);

		/* Inform the standby that XLOG streaming is done */
		SetQueryCompletion(&qc, CMDTAG_COPY, 0);
		EndCommandExtended(&qc, DestRemote, false, true);
		shutdown_stream_done_queued = true;

		/*
		 * Reset last_reply_timestamp so subsequent WalSndComputeSleeptime()
		 * calls ignore wal_sender_timeout during shutdown.
		 */
		last_reply_timestamp = 0;

		/*
		 * Do not call pq_flush() here, since it can block indefinitely while
		 * waiting for the socket to become writable, preventing
		 * wal_sender_shutdown_timeout from being enforced. Instead, use the
		 * walsender nonblocking flush path so the shutdown timeout continues
		 * to be checked while the send buffer drains.
		 */
		for (;;)
		{
			long		sleeptime;

			/*
			 * During shutdown, die if the shutdown timeout expires. Call this
			 * before WalSndComputeSleeptime() so the timeout is considered
			 * when computing sleep time.
			 */
			WalSndCheckShutdownTimeout();

			/* All pending output drained: we can exit cleanly */
			if (!pq_is_send_pending())
				break;

			sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());

			/* Sleep until something happens or we time out */
			WalSndWait(WL_SOCKET_WRITEABLE, sleeptime,
					   WAIT_EVENT_WAL_SENDER_WRITE_DATA);

			/* Clear any already-pending wakeups */
			ResetLatch(MyLatch);

			CHECK_FOR_INTERRUPTS();

			/* Try to flush pending output to the client */
			if (pq_flush_if_writable() != 0)
				WalSndShutdown();
		}

		proc_exit(0);
	}

	/*
	 * Not caught up yet; request a reply from the standby (unless one is
	 * already outstanding) so we learn its progress promptly.
	 */
	if (!waiting_for_ping_response)
		WalSndKeepalive(true, InvalidXLogRecPtr);
}
3845 : :
3846 : : /*
3847 : : * Returns the latest point in WAL that has been safely flushed to disk.
3848 : : * This should only be called when in recovery.
3849 : : *
3850 : : * This is called either by cascading walsender to find WAL position to be sent
3851 : : * to a cascaded standby or by slot synchronization operation to validate remote
3852 : : * slot's lsn before syncing it locally.
3853 : : *
3854 : : * As a side-effect, *tli is updated to the TLI of the last
3855 : : * replayed WAL record.
3856 : : */
3857 : : XLogRecPtr
1642 3858 : 1117 : GetStandbyFlushRecPtr(TimeLineID *tli)
3859 : : {
3860 : : XLogRecPtr replayPtr;
3861 : : TimeLineID replayTLI;
3862 : : XLogRecPtr receivePtr;
3863 : : TimeLineID receiveTLI;
3864 : : XLogRecPtr result;
3865 : :
811 akapila@postgresql.o 3866 [ + + - + ]: 1117 : Assert(am_cascading_walsender || IsSyncingReplicationSlots());
3867 : :
3868 : : /*
3869 : : * We can safely send what's already been replayed. Also, if walreceiver
3870 : : * is streaming WAL from the same timeline, we can send anything that it
3871 : : * has streamed, but hasn't been replayed yet.
3872 : : */
3873 : :
2218 tmunro@postgresql.or 3874 : 1117 : receivePtr = GetWalRcvFlushRecPtr(NULL, &receiveTLI);
4884 heikki.linnakangas@i 3875 : 1117 : replayPtr = GetXLogReplayRecPtr(&replayTLI);
3876 : :
1123 andres@anarazel.de 3877 [ + + ]: 1117 : if (tli)
3878 : 1069 : *tli = replayTLI;
3879 : :
4884 heikki.linnakangas@i 3880 : 1117 : result = replayPtr;
1642 rhaas@postgresql.org 3881 [ + - + + ]: 1117 : if (receiveTLI == replayTLI && receivePtr > replayPtr)
4884 heikki.linnakangas@i 3882 : 135 : result = receivePtr;
3883 : :
3884 : 1117 : return result;
3885 : : }
3886 : :
3887 : : /*
3888 : : * Request walsenders to reload the currently-open WAL file
3889 : : */
3890 : : void
5404 simon@2ndQuadrant.co 3891 : 29 : WalSndRqstFileReload(void)
3892 : : {
3893 : : int i;
3894 : :
3895 [ + + ]: 295 : for (i = 0; i < max_wal_senders; i++)
3896 : : {
3617 rhaas@postgresql.org 3897 : 266 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3898 : :
3231 alvherre@alvh.no-ip. 3899 [ - + ]: 266 : SpinLockAcquire(&walsnd->mutex);
5404 simon@2ndQuadrant.co 3900 [ + - ]: 266 : if (walsnd->pid == 0)
3901 : : {
3231 alvherre@alvh.no-ip. 3902 : 266 : SpinLockRelease(&walsnd->mutex);
5404 simon@2ndQuadrant.co 3903 : 266 : continue;
3904 : : }
5404 simon@2ndQuadrant.co 3905 :UBC 0 : walsnd->needreload = true;
3906 : 0 : SpinLockRelease(&walsnd->mutex);
3907 : : }
5404 simon@2ndQuadrant.co 3908 :CBC 29 : }
3909 : :
3910 : : /*
3911 : : * Handle PROCSIG_WALSND_INIT_STOPPING signal.
3912 : : */
3913 : : void
3256 andres@anarazel.de 3914 : 47 : HandleWalSndInitStopping(void)
3915 : : {
3916 [ - + ]: 47 : Assert(am_walsender);
3917 : :
3918 : : /*
3919 : : * If replication has not yet started, die like with SIGTERM. If
3920 : : * replication is active, only set a flag and wake up the main loop. It
3921 : : * will send any outstanding WAL, wait for it to be replicated to the
3922 : : * standby, and then exit gracefully.
3923 : : */
3924 [ - + ]: 47 : if (!replication_active)
3256 andres@anarazel.de 3925 :UBC 0 : kill(MyProcPid, SIGTERM);
3926 : : else
3256 andres@anarazel.de 3927 :CBC 47 : got_STOPPING = true;
3928 : :
3929 : : /* latch will be set by procsignal_sigusr1_handler */
3930 : 47 : }
3931 : :
/*
 * SIGUSR2: set flag to do a last cycle and shut down afterwards. The WAL
 * sender should already have been switched to WALSNDSTATE_STOPPING at
 * this point.
 */
static void
WalSndLastCycleHandler(SIGNAL_ARGS)
{
	got_SIGUSR2 = true;
	/* Wake the main loop so it notices the flag promptly */
	SetLatch(MyLatch);
}
3943 : :
/* Set up signal handlers */
void
WalSndSignals(void)
{
	/* Set up signal handlers */
	pqsignal(SIGHUP, SignalHandlerForConfigReload);
	pqsignal(SIGINT, StatementCancelHandler); /* query cancel */
	pqsignal(SIGTERM, die);		/* request shutdown */
	/* SIGQUIT handler was already set up by InitPostmasterChild */
	InitializeTimeouts();		/* establishes SIGALRM handler */
	/* Ignore SIGPIPE: failed writes to the client are handled explicitly */
	pqsignal(SIGPIPE, PG_SIG_IGN);
	pqsignal(SIGUSR1, procsignal_sigusr1_handler);
	pqsignal(SIGUSR2, WalSndLastCycleHandler); /* request a last cycle and
												* shutdown */

	/* Reset some signals that are accepted by postmaster but not here */
	pqsignal(SIGCHLD, PG_SIG_DFL);
}
3962 : :
3963 : : /* Register shared-memory space needed by walsender */
3964 : : static void
29 heikki.linnakangas@i 3965 :GNC 1244 : WalSndShmemRequest(void *arg)
3966 : : {
3967 : : Size size;
3968 : :
5954 heikki.linnakangas@i 3969 :CBC 1244 : size = offsetof(WalSndCtlData, walsnds);
5878 rhaas@postgresql.org 3970 : 1244 : size = add_size(size, mul_size(max_wal_senders, sizeof(WalSnd)));
29 heikki.linnakangas@i 3971 :GNC 1244 : ShmemRequestStruct(.name = "Wal Sender Ctl",
3972 : : .size = size,
3973 : : .ptr = (void **) &WalSndCtl,
3974 : : );
5954 heikki.linnakangas@i 3975 :GIC 1244 : }
3976 : :
3977 : : /* Initialize walsender-related shared memory */
3978 : : static void
29 heikki.linnakangas@i 3979 :GNC 1241 : WalSndShmemInit(void *arg)
3980 : : {
3981 [ + + ]: 4964 : for (int i = 0; i < NUM_SYNC_REP_WAIT_MODE; i++)
3982 : 3723 : dlist_init(&(WalSndCtl->SyncRepQueue[i]));
3983 : :
3984 [ + + ]: 9149 : for (int i = 0; i < max_wal_senders; i++)
3985 : : {
3986 : 7908 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3987 : :
3988 : 7908 : SpinLockInit(&walsnd->mutex);
3989 : : }
3990 : :
3991 : 1241 : ConditionVariableInit(&WalSndCtl->wal_flush_cv);
3992 : 1241 : ConditionVariableInit(&WalSndCtl->wal_replay_cv);
3993 : 1241 : ConditionVariableInit(&WalSndCtl->wal_confirm_rcv_cv);
5954 heikki.linnakangas@i 3994 :CBC 1241 : }
3995 : :
/*
 * Wake up physical, logical or both kinds of walsenders
 *
 * The distinction between physical and logical walsenders is done, because:
 * - physical walsenders can't send data until it's been flushed
 * - logical walsenders on standby can't decode and send data until it's been
 *   applied
 *
 * For cascading replication we need to wake up physical walsenders separately
 * from logical walsenders (see the comment before calling WalSndWakeup() in
 * ApplyWalRecord() for more details).
 *
 * This will be called inside critical sections, so throwing an error is not
 * advisable.
 */
void
WalSndWakeup(bool physical, bool logical)
{
	/*
	 * Wake up all the walsenders waiting on WAL being flushed or replayed
	 * respectively. Note that waiting walsender would have prepared to sleep
	 * on the CV (i.e., added itself to the CV's waitlist) in WalSndWait()
	 * before actually waiting.
	 */
	if (physical)
		ConditionVariableBroadcast(&WalSndCtl->wal_flush_cv);

	if (logical)
		ConditionVariableBroadcast(&WalSndCtl->wal_replay_cv);
}
4026 : :
/*
 * Wait for readiness on the FeBe socket, or a timeout. The mask should be
 * composed of optional WL_SOCKET_WRITEABLE and WL_SOCKET_READABLE flags. Exit
 * on postmaster death.
 */
static void
WalSndWait(uint32 socket_events, long timeout, uint32 wait_event)
{
	WaitEvent	event;

	ModifyWaitEvent(FeBeWaitSet, FeBeWaitSetSocketPos, socket_events, NULL);

	/*
	 * We use a condition variable to efficiently wake up walsenders in
	 * WalSndWakeup().
	 *
	 * Every walsender prepares to sleep on a shared memory CV. Note that it
	 * just prepares to sleep on the CV (i.e., adds itself to the CV's
	 * waitlist), but does not actually wait on the CV (IOW, it never calls
	 * ConditionVariableSleep()). It still uses WaitEventSetWait() for
	 * waiting, because we also need to wait for socket events. The processes
	 * (startup process, walreceiver etc.) wanting to wake up walsenders use
	 * ConditionVariableBroadcast(), which in turn calls SetLatch(), helping
	 * walsenders come out of WaitEventSetWait().
	 *
	 * This approach is simple and efficient because, one doesn't have to loop
	 * through all the walsenders slots, with a spinlock acquisition and
	 * release for every iteration, just to wake up only the waiting
	 * walsenders. It makes WalSndWakeup() callers' life easy.
	 *
	 * XXX: A desirable future improvement would be to add support for CVs
	 * into WaitEventSetWait().
	 *
	 * And, we use separate shared memory CVs for physical and logical
	 * walsenders for selective wake ups, see WalSndWakeup() for more details.
	 *
	 * If the wait event is WAIT_FOR_STANDBY_CONFIRMATION, wait on another CV
	 * until awakened by physical walsenders after the walreceiver confirms
	 * the receipt of the LSN.
	 */
	if (wait_event == WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION)
		ConditionVariablePrepareToSleep(&WalSndCtl->wal_confirm_rcv_cv);
	else if (MyWalSnd->kind == REPLICATION_KIND_PHYSICAL)
		ConditionVariablePrepareToSleep(&WalSndCtl->wal_flush_cv);
	else if (MyWalSnd->kind == REPLICATION_KIND_LOGICAL)
		ConditionVariablePrepareToSleep(&WalSndCtl->wal_replay_cv);

	if (WaitEventSetWait(FeBeWaitSet, timeout, &event, 1, wait_event) == 1 &&
		(event.events & WL_POSTMASTER_DEATH))
	{
		/* Leave the CV waitlist before dying on postmaster death */
		ConditionVariableCancelSleep();
		proc_exit(1);
	}

	/* Always remove ourselves from the CV waitlist before returning */
	ConditionVariableCancelSleep();
}
4083 : :
4084 : : /*
4085 : : * Signal all walsenders to move to stopping state.
4086 : : *
4087 : : * This will trigger walsenders to move to a state where no further WAL can be
4088 : : * generated. See this file's header for details.
4089 : : */
4090 : : void
3256 andres@anarazel.de 4091 : 768 : WalSndInitStopping(void)
4092 : : {
4093 : : int i;
4094 : :
4095 [ + + ]: 5700 : for (i = 0; i < max_wal_senders; i++)
4096 : : {
4097 : 4932 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
4098 : : pid_t pid;
4099 : :
4100 [ - + ]: 4932 : SpinLockAcquire(&walsnd->mutex);
4101 : 4932 : pid = walsnd->pid;
4102 : 4932 : SpinLockRelease(&walsnd->mutex);
4103 : :
4104 [ + + ]: 4932 : if (pid == 0)
4105 : 4885 : continue;
4106 : :
793 heikki.linnakangas@i 4107 : 47 : SendProcSignal(pid, PROCSIG_WALSND_INIT_STOPPING, INVALID_PROC_NUMBER);
4108 : : }
3256 andres@anarazel.de 4109 : 768 : }
4110 : :
4111 : : /*
4112 : : * Wait that all the WAL senders have quit or reached the stopping state. This
4113 : : * is used by the checkpointer to control when the shutdown checkpoint can
4114 : : * safely be performed.
4115 : : */
4116 : : void
4117 : 768 : WalSndWaitStopping(void)
4118 : : {
4119 : : for (;;)
4120 : 46 : {
4121 : : int i;
4122 : 814 : bool all_stopped = true;
4123 : :
4124 [ + + ]: 5751 : for (i = 0; i < max_wal_senders; i++)
4125 : : {
4126 : 4983 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
4127 : :
4128 [ - + ]: 4983 : SpinLockAcquire(&walsnd->mutex);
4129 : :
4130 [ + + ]: 4983 : if (walsnd->pid == 0)
4131 : : {
4132 : 4903 : SpinLockRelease(&walsnd->mutex);
4133 : 4903 : continue;
4134 : : }
4135 : :
3231 alvherre@alvh.no-ip. 4136 [ + + ]: 80 : if (walsnd->state != WALSNDSTATE_STOPPING)
4137 : : {
3256 andres@anarazel.de 4138 : 46 : all_stopped = false;
3231 alvherre@alvh.no-ip. 4139 : 46 : SpinLockRelease(&walsnd->mutex);
3256 andres@anarazel.de 4140 : 46 : break;
4141 : : }
3231 alvherre@alvh.no-ip. 4142 : 34 : SpinLockRelease(&walsnd->mutex);
4143 : : }
4144 : :
4145 : : /* safe to leave if confirmation is done for all WAL senders */
3256 andres@anarazel.de 4146 [ + + ]: 814 : if (all_stopped)
4147 : 768 : return;
4148 : :
4149 : 46 : pg_usleep(10000L); /* wait for 10 msec */
4150 : : }
4151 : : }
4152 : :
4153 : : /* Set state for current walsender (only called in walsender) */
4154 : : void
5593 magnus@hagander.net 4155 : 3526 : WalSndSetState(WalSndState state)
4156 : : {
3617 rhaas@postgresql.org 4157 : 3526 : WalSnd *walsnd = MyWalSnd;
4158 : :
5593 magnus@hagander.net 4159 [ - + ]: 3526 : Assert(am_walsender);
4160 : :
4161 [ + + ]: 3526 : if (walsnd->state == state)
4162 : 1470 : return;
4163 : :
4164 [ - + ]: 2056 : SpinLockAcquire(&walsnd->mutex);
4165 : 2056 : walsnd->state = state;
4166 : 2056 : SpinLockRelease(&walsnd->mutex);
4167 : : }
4168 : :
/*
 * Return a string constant representing the state. This is used
 * in system views, and should *not* be translated.
 */
static const char *
WalSndGetStateString(WalSndState state)
{
	/* No default case, so compilers can warn if a new state is added */
	switch (state)
	{
		case WALSNDSTATE_STARTUP:
			return "startup";
		case WALSNDSTATE_BACKUP:
			return "backup";
		case WALSNDSTATE_CATCHUP:
			return "catchup";
		case WALSNDSTATE_STREAMING:
			return "streaming";
		case WALSNDSTATE_STOPPING:
			return "stopping";
	}
	return "UNKNOWN";
}
4191 : :
4192 : : static Interval *
3330 simon@2ndQuadrant.co 4193 :CBC 1617 : offset_to_interval(TimeOffset offset)
4194 : : {
146 michael@paquier.xyz 4195 :GNC 1617 : Interval *result = palloc_object(Interval);
4196 : :
3330 simon@2ndQuadrant.co 4197 :CBC 1617 : result->month = 0;
4198 : 1617 : result->day = 0;
4199 : 1617 : result->time = offset;
4200 : :
4201 : 1617 : return result;
4202 : : }
4203 : :
/*
 * Returns activity of walsenders, including pids and xlog locations sent to
 * standby servers.
 */
Datum
pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_WAL_SENDERS_COLS	12
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	SyncRepStandbyData *sync_standbys;
	int			num_standbys;
	int			i;

	InitMaterializedSRF(fcinfo, 0);

	/*
	 * Get the currently active synchronous standbys.  This could be out of
	 * date before we're done, but we'll use the data anyway.
	 */
	num_standbys = SyncRepGetCandidateStandbys(&sync_standbys);

	for (i = 0; i < max_wal_senders; i++)
	{
		WalSnd	   *walsnd = &WalSndCtl->walsnds[i];
		XLogRecPtr	sent_ptr;
		XLogRecPtr	write;
		XLogRecPtr	flush;
		XLogRecPtr	apply;
		TimeOffset	writeLag;
		TimeOffset	flushLag;
		TimeOffset	applyLag;
		int			priority;
		int			pid;
		WalSndState state;
		TimestampTz replyTime;
		bool		is_sync_standby;
		Datum		values[PG_STAT_GET_WAL_SENDERS_COLS];
		bool		nulls[PG_STAT_GET_WAL_SENDERS_COLS] = {0};
		int			j;

		/*
		 * Collect data from shared memory.  Copy everything out under the
		 * slot's spinlock so we get a consistent snapshot, then release the
		 * lock before doing any further work.
		 */
		SpinLockAcquire(&walsnd->mutex);
		if (walsnd->pid == 0)
		{
			SpinLockRelease(&walsnd->mutex);
			continue;
		}
		pid = walsnd->pid;
		sent_ptr = walsnd->sentPtr;
		state = walsnd->state;
		write = walsnd->write;
		flush = walsnd->flush;
		apply = walsnd->apply;
		writeLag = walsnd->writeLag;
		flushLag = walsnd->flushLag;
		applyLag = walsnd->applyLag;
		priority = walsnd->sync_standby_priority;
		replyTime = walsnd->replyTime;
		SpinLockRelease(&walsnd->mutex);

		/*
		 * Detect whether walsender is/was considered synchronous.  We can
		 * provide some protection against stale data by checking the PID
		 * along with walsnd_index.
		 */
		is_sync_standby = false;
		for (j = 0; j < num_standbys; j++)
		{
			if (sync_standbys[j].walsnd_index == i &&
				sync_standbys[j].pid == pid)
			{
				is_sync_standby = true;
				break;
			}
		}

		values[0] = Int32GetDatum(pid);

		if (!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
		{
			/*
			 * Only superusers and roles with privileges of pg_read_all_stats
			 * can see details. Other users only get the pid value to know
			 * it's a walsender, but no details.
			 */
			MemSet(&nulls[1], true, PG_STAT_GET_WAL_SENDERS_COLS - 1);
		}
		else
		{
			values[1] = CStringGetTextDatum(WalSndGetStateString(state));

			/* Invalid LSNs are reported as NULL columns */
			if (!XLogRecPtrIsValid(sent_ptr))
				nulls[2] = true;
			values[2] = LSNGetDatum(sent_ptr);

			if (!XLogRecPtrIsValid(write))
				nulls[3] = true;
			values[3] = LSNGetDatum(write);

			if (!XLogRecPtrIsValid(flush))
				nulls[4] = true;
			values[4] = LSNGetDatum(flush);

			if (!XLogRecPtrIsValid(apply))
				nulls[5] = true;
			values[5] = LSNGetDatum(apply);

			/*
			 * Treat a standby such as a pg_basebackup background process
			 * which always returns an invalid flush location, as an
			 * asynchronous standby.
			 */
			priority = XLogRecPtrIsValid(flush) ? priority : 0;

			/* Negative lags mean "no sample available" and become NULLs */
			if (writeLag < 0)
				nulls[6] = true;
			else
				values[6] = IntervalPGetDatum(offset_to_interval(writeLag));

			if (flushLag < 0)
				nulls[7] = true;
			else
				values[7] = IntervalPGetDatum(offset_to_interval(flushLag));

			if (applyLag < 0)
				nulls[8] = true;
			else
				values[8] = IntervalPGetDatum(offset_to_interval(applyLag));

			values[9] = Int32GetDatum(priority);

			/*
			 * More easily understood version of standby state. This is purely
			 * informational.
			 *
			 * In quorum-based sync replication, the role of each standby
			 * listed in synchronous_standby_names can be changing very
			 * frequently. Any standbys considered as "sync" at one moment can
			 * be switched to "potential" ones at the next moment. So, it's
			 * basically useless to report "sync" or "potential" as their sync
			 * states. We report just "quorum" for them.
			 */
			if (priority == 0)
				values[10] = CStringGetTextDatum("async");
			else if (is_sync_standby)
				values[10] = SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY ?
					CStringGetTextDatum("sync") : CStringGetTextDatum("quorum");
			else
				values[10] = CStringGetTextDatum("potential");

			if (replyTime == 0)
				nulls[11] = true;
			else
				values[11] = TimestampTzGetDatum(replyTime);
		}

		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
							 values, nulls);
	}

	return (Datum) 0;
}
4366 : :
4367 : : /*
4368 : : * Send a keepalive message to standby.
4369 : : *
4370 : : * If requestReply is set, the message requests the other party to send
4371 : : * a message back to us, for heartbeat purposes. We also set a flag to
4372 : : * let nearby code know that we're waiting for that response, to avoid
4373 : : * repeated requests.
4374 : : *
4375 : : * writePtr is the location up to which the WAL is sent. It is essentially
4376 : : * the same as sentPtr but in some cases, we need to send keep alive before
4377 : : * sentPtr is updated like when skipping empty transactions.
4378 : : */
4379 : : static void
1497 akapila@postgresql.o 4380 : 2524 : WalSndKeepalive(bool requestReply, XLogRecPtr writePtr)
4381 : : {
5239 simon@2ndQuadrant.co 4382 [ + + ]: 2524 : elog(DEBUG2, "sending replication keepalive");
4383 : :
4384 : : /* construct the message... */
4927 heikki.linnakangas@i 4385 : 2524 : resetStringInfo(&output_message);
272 nathan@postgresql.or 4386 :GNC 2524 : pq_sendbyte(&output_message, PqReplMsg_Keepalive);
180 alvherre@kurilemu.de 4387 [ - + ]: 2524 : pq_sendint64(&output_message, XLogRecPtrIsValid(writePtr) ? writePtr : sentPtr);
3358 tgl@sss.pgh.pa.us 4388 :CBC 2524 : pq_sendint64(&output_message, GetCurrentTimestamp());
4927 heikki.linnakangas@i 4389 : 2524 : pq_sendbyte(&output_message, requestReply ? 1 : 0);
4390 : :
4391 : : /* ... and send it wrapped in CopyData */
286 nathan@postgresql.or 4392 :GNC 2524 : pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
4393 : :
4394 : : /* Set local flag */
2096 alvherre@alvh.no-ip. 4395 [ + + ]:CBC 2524 : if (requestReply)
4396 : 509 : waiting_for_ping_response = true;
5239 simon@2ndQuadrant.co 4397 : 2524 : }
4398 : :
4399 : : /*
4400 : : * Send keepalive message if too much time has elapsed.
4401 : : */
4402 : : static void
2804 noah@leadboat.com 4403 : 1118782 : WalSndKeepaliveIfNecessary(void)
4404 : : {
4405 : : TimestampTz ping_time;
4406 : :
4407 : : /*
4408 : : * Don't send keepalive messages if timeouts are globally disabled or
4409 : : * we're doing something not partaking in timeouts.
4410 : : */
4359 andres@anarazel.de 4411 [ + - + + ]: 1118782 : if (wal_sender_timeout <= 0 || last_reply_timestamp <= 0)
4439 rhaas@postgresql.org 4412 : 28 : return;
4413 : :
4414 [ + + ]: 1118754 : if (waiting_for_ping_response)
4415 : 2623 : return;
4416 : :
4417 : : /*
4418 : : * If half of wal_sender_timeout has lapsed without receiving any reply
4419 : : * from the standby, send a keep-alive message to the standby requesting
4420 : : * an immediate reply.
4421 : : */
4422 : 1116131 : ping_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
4423 : : wal_sender_timeout / 2);
2804 noah@leadboat.com 4424 [ - + ]: 1116131 : if (last_processing >= ping_time)
4425 : : {
1497 akapila@postgresql.o 4426 :UBC 0 : WalSndKeepalive(true, InvalidXLogRecPtr);
4427 : :
4428 : : /* Try to flush pending output to the client */
4439 rhaas@postgresql.org 4429 [ # # ]: 0 : if (pq_flush_if_writable() != 0)
4430 : 0 : WalSndShutdown();
4431 : : }
4432 : : }
4433 : :
4434 : : /*
4435 : : * Record the end of the WAL and the time it was flushed locally, so that
4436 : : * LagTrackerRead can compute the elapsed time (lag) when this WAL location is
4437 : : * eventually reported to have been written, flushed and applied by the
4438 : : * standby in a reply message.
4439 : : */
4440 : : static void
3330 simon@2ndQuadrant.co 4441 :CBC 144955 : LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time)
4442 : : {
4443 : : int new_write_head;
4444 : : int i;
4445 : :
4446 [ - + ]: 144955 : if (!am_walsender)
3330 simon@2ndQuadrant.co 4447 :UBC 0 : return;
4448 : :
4449 : : /*
4450 : : * If the lsn hasn't advanced since last time, then do nothing. This way
4451 : : * we only record a new sample when new WAL has been written.
4452 : : */
2758 tmunro@postgresql.or 4453 [ + + ]:CBC 144955 : if (lag_tracker->last_lsn == lsn)
3330 simon@2ndQuadrant.co 4454 : 119724 : return;
2758 tmunro@postgresql.or 4455 : 25231 : lag_tracker->last_lsn = lsn;
4456 : :
4457 : : /*
4458 : : * If advancing the write head of the circular buffer would crash into any
4459 : : * of the read heads, then the buffer is full. In other words, the
4460 : : * slowest reader (presumably apply) is the one that controls the release
4461 : : * of space.
4462 : : */
4463 : 25231 : new_write_head = (lag_tracker->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
3330 simon@2ndQuadrant.co 4464 [ + + ]: 100924 : for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; ++i)
4465 : : {
4466 : : /*
4467 : : * If the buffer is full, move the slowest reader to a separate
4468 : : * overflow entry and free its space in the buffer so the write head
4469 : : * can advance.
4470 : : */
2758 tmunro@postgresql.or 4471 [ - + ]: 75693 : if (new_write_head == lag_tracker->read_heads[i])
4472 : : {
195 fujii@postgresql.org 4473 :UBC 0 : lag_tracker->overflowed[i] =
4474 : 0 : lag_tracker->buffer[lag_tracker->read_heads[i]];
4475 : 0 : lag_tracker->read_heads[i] = -1;
4476 : : }
4477 : : }
4478 : :
4479 : : /* Store a sample at the current write head position. */
2758 tmunro@postgresql.or 4480 :CBC 25231 : lag_tracker->buffer[lag_tracker->write_head].lsn = lsn;
4481 : 25231 : lag_tracker->buffer[lag_tracker->write_head].time = local_flush_time;
4482 : 25231 : lag_tracker->write_head = new_write_head;
4483 : : }
4484 : :
4485 : : /*
4486 : : * Find out how much time has elapsed between the moment WAL location 'lsn'
4487 : : * (or the highest known earlier LSN) was flushed locally and the time 'now'.
4488 : : * We have a separate read head for each of the reported LSN locations we
4489 : : * receive in replies from standby; 'head' controls which read head is
4490 : : * used. Whenever a read head crosses an LSN which was written into the
4491 : : * lag buffer with LagTrackerWrite, we can use the associated timestamp to
4492 : : * find out the time this LSN (or an earlier one) was flushed locally, and
4493 : : * therefore compute the lag.
4494 : : *
4495 : : * Return -1 if no new sample data is available, and otherwise the elapsed
4496 : : * time in microseconds.
4497 : : */
4498 : : static TimeOffset
3330 simon@2ndQuadrant.co 4499 : 322089 : LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now)
4500 : : {
4501 : 322089 : TimestampTz time = 0;
4502 : :
4503 : : /*
4504 : : * If 'lsn' has not passed the WAL position stored in the overflow entry,
4505 : : * return the elapsed time (in microseconds) since the saved local flush
4506 : : * time. If the flush time is in the future (due to clock drift), return
4507 : : * -1 to treat as no valid sample.
4508 : : *
4509 : : * Otherwise, switch back to using the buffer to control the read head and
4510 : : * compute the elapsed time. The read head is then reset to point to the
4511 : : * oldest entry in the buffer.
4512 : : */
195 fujii@postgresql.org 4513 [ - + ]: 322089 : if (lag_tracker->read_heads[head] == -1)
4514 : : {
195 fujii@postgresql.org 4515 [ # # ]:UBC 0 : if (lag_tracker->overflowed[head].lsn > lsn)
4516 : 0 : return (now >= lag_tracker->overflowed[head].time) ?
4517 [ # # ]: 0 : now - lag_tracker->overflowed[head].time : -1;
4518 : :
4519 : 0 : time = lag_tracker->overflowed[head].time;
4520 : 0 : lag_tracker->last_read[head] = lag_tracker->overflowed[head];
4521 : 0 : lag_tracker->read_heads[head] =
4522 : 0 : (lag_tracker->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
4523 : : }
4524 : :
4525 : : /* Read all unread samples up to this LSN or end of buffer. */
2758 tmunro@postgresql.or 4526 [ + + ]:CBC 396486 : while (lag_tracker->read_heads[head] != lag_tracker->write_head &&
4527 [ + + ]: 270193 : lag_tracker->buffer[lag_tracker->read_heads[head]].lsn <= lsn)
4528 : : {
4529 : 74397 : time = lag_tracker->buffer[lag_tracker->read_heads[head]].time;
4530 : 74397 : lag_tracker->last_read[head] =
4531 : 74397 : lag_tracker->buffer[lag_tracker->read_heads[head]];
4532 : 74397 : lag_tracker->read_heads[head] =
4533 : 74397 : (lag_tracker->read_heads[head] + 1) % LAG_TRACKER_BUFFER_SIZE;
4534 : : }
4535 : :
4536 : : /*
4537 : : * If the lag tracker is empty, that means the standby has processed
4538 : : * everything we've ever sent so we should now clear 'last_read'. If we
4539 : : * didn't do that, we'd risk using a stale and irrelevant sample for
4540 : : * interpolation at the beginning of the next burst of WAL after a period
4541 : : * of idleness.
4542 : : */
4543 [ + + ]: 322089 : if (lag_tracker->read_heads[head] == lag_tracker->write_head)
4544 : 126293 : lag_tracker->last_read[head].time = 0;
4545 : :
3330 simon@2ndQuadrant.co 4546 [ - + ]: 322089 : if (time > now)
4547 : : {
4548 : : /* If the clock somehow went backwards, treat as not found. */
3330 simon@2ndQuadrant.co 4549 :UBC 0 : return -1;
4550 : : }
3330 simon@2ndQuadrant.co 4551 [ + + ]:CBC 322089 : else if (time == 0)
4552 : : {
4553 : : /*
4554 : : * We didn't cross a time. If there is a future sample that we
4555 : : * haven't reached yet, and we've already reached at least one sample,
4556 : : * let's interpolate the local flushed time. This is mainly useful
4557 : : * for reporting a completely stuck apply position as having
4558 : : * increasing lag, since otherwise we'd have to wait for it to
4559 : : * eventually start moving again and cross one of our samples before
4560 : : * we can show the lag increasing.
4561 : : */
2758 tmunro@postgresql.or 4562 [ + + ]: 260471 : if (lag_tracker->read_heads[head] == lag_tracker->write_head)
4563 : : {
4564 : : /* There are no future samples, so we can't interpolate. */
3238 simon@2ndQuadrant.co 4565 : 73666 : return -1;
4566 : : }
2758 tmunro@postgresql.or 4567 [ + + ]: 186805 : else if (lag_tracker->last_read[head].time != 0)
4568 : : {
4569 : : /* We can interpolate between last_read and the next sample. */
4570 : : double fraction;
4571 : 63331 : WalTimeSample prev = lag_tracker->last_read[head];
4572 : 63331 : WalTimeSample next = lag_tracker->buffer[lag_tracker->read_heads[head]];
4573 : :
3299 simon@2ndQuadrant.co 4574 [ - + ]: 63331 : if (lsn < prev.lsn)
4575 : : {
4576 : : /*
4577 : : * Reported LSNs shouldn't normally go backwards, but it's
4578 : : * possible when there is a timeline change. Treat as not
4579 : : * found.
4580 : : */
3299 simon@2ndQuadrant.co 4581 :UBC 0 : return -1;
4582 : : }
4583 : :
3330 simon@2ndQuadrant.co 4584 [ - + ]:CBC 63331 : Assert(prev.lsn < next.lsn);
4585 : :
4586 [ - + ]: 63331 : if (prev.time > next.time)
4587 : : {
4588 : : /* If the clock somehow went backwards, treat as not found. */
3330 simon@2ndQuadrant.co 4589 :UBC 0 : return -1;
4590 : : }
4591 : :
4592 : : /* See how far we are between the previous and next samples. */
3330 simon@2ndQuadrant.co 4593 :CBC 63331 : fraction =
4594 : 63331 : (double) (lsn - prev.lsn) / (double) (next.lsn - prev.lsn);
4595 : :
4596 : : /* Scale the local flush time proportionally. */
4597 : 63331 : time = (TimestampTz)
4598 : 63331 : ((double) prev.time + (next.time - prev.time) * fraction);
4599 : : }
4600 : : else
4601 : : {
4602 : : /*
4603 : : * We have only a future sample, implying that we were entirely
4604 : : * caught up but and now there is a new burst of WAL and the
4605 : : * standby hasn't processed the first sample yet. Until the
4606 : : * standby reaches the future sample the best we can do is report
4607 : : * the hypothetical lag if that sample were to be replayed now.
4608 : : */
2758 tmunro@postgresql.or 4609 : 123474 : time = lag_tracker->buffer[lag_tracker->read_heads[head]].time;
4610 : : }
4611 : : }
4612 : :
4613 : : /* Return the elapsed time since local flush time in microseconds. */
3330 simon@2ndQuadrant.co 4614 [ - + ]: 248423 : Assert(time != 0);
4615 : 248423 : return now - time;
4616 : : }
|