Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * walsender.c
4 : : *
5 : : * The WAL sender process (walsender) is new as of Postgres 9.0. It takes
6 : : * care of sending XLOG from the primary server to a single recipient.
7 : : * (Note that there can be more than one walsender process concurrently.)
8 : : * It is started by the postmaster when the walreceiver of a standby server
9 : : * connects to the primary server and requests XLOG streaming replication.
10 : : *
11 : : * A walsender is similar to a regular backend, i.e. there is a one-to-one
12 : : * relationship between a connection and a walsender process, but instead
13 : : * of processing SQL queries, it understands a small set of special
14 : : * replication-mode commands. The START_REPLICATION command begins streaming
15 : : * WAL to the client. While streaming, the walsender keeps reading XLOG
16 : : * records from the disk and sends them to the standby server over the
17 : : * COPY protocol, until either side ends the replication by exiting COPY
18 : : * mode (or until the connection is closed).
19 : : *
20 : : * Normal termination is by SIGTERM, which instructs the walsender to
21 : : * close the connection and exit(0) at the next convenient moment. Emergency
22 : : * termination is by SIGQUIT; like any backend, the walsender will simply
23 : : * abort and exit on SIGQUIT. A close of the connection and a FATAL error
24 : : * are treated as not a crash but approximately normal termination;
25 : : * the walsender will exit quickly without sending any more XLOG records.
26 : : *
27 : : * If the server is shut down, checkpointer sends us
28 : : * PROCSIG_WALSND_INIT_STOPPING after all regular backends have exited. If
29 : : * the walsender is idle or is running an SQL query, this causes it to shut
30 : : * down; if logical replication is in progress, all existing WAL records are
31 : : * processed and then the walsender shuts down. Otherwise this causes the walsender
32 : : * to switch to the "stopping" state. In this state, the walsender will reject
33 : : * any further replication commands. The checkpointer begins the shutdown
34 : : * checkpoint once all walsenders are confirmed as stopping. When the shutdown
35 : : * checkpoint finishes, the postmaster sends us SIGUSR2. This instructs
36 : : * walsender to send any outstanding WAL, including the shutdown checkpoint
37 : : * record, wait for it to be replicated to the standby, and then exit.
38 : : *
39 : : *
40 : : * Portions Copyright (c) 2010-2025, PostgreSQL Global Development Group
41 : : *
42 : : * IDENTIFICATION
43 : : * src/backend/replication/walsender.c
44 : : *
45 : : *-------------------------------------------------------------------------
46 : : */
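/*
 * Editorial illustration (not part of walsender.c): a minimal libpq client
 * sketch showing how the replication-mode commands described above are
 * reached.  "replication=true" in the connection string asks the server to
 * start a walsender for this connection; IDENTIFY_SYSTEM is one of the
 * commands handled in this file (see IdentifySystem() below).  The
 * connection parameters and the WALSENDER_CLIENT_EXAMPLE guard are
 * assumptions; compile this separately against libpq, not as part of the
 * backend.
 */
#ifdef WALSENDER_CLIENT_EXAMPLE
#include <stdio.h>
#include "libpq-fe.h"

static int
identify_system_example(void)
{
	PGconn	   *conn = PQconnectdb("host=localhost replication=true user=repluser");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	/* One row, four columns: systemid, timeline, xlogpos, dbname */
	res = PQexec(conn, "IDENTIFY_SYSTEM");
	if (PQresultStatus(res) == PGRES_TUPLES_OK)
		printf("systemid=%s timeline=%s xlogpos=%s\n",
			   PQgetvalue(res, 0, 0),
			   PQgetvalue(res, 0, 1),
			   PQgetvalue(res, 0, 2));
	else
		fprintf(stderr, "IDENTIFY_SYSTEM failed: %s", PQerrorMessage(conn));

	PQclear(res);
	PQfinish(conn);
	return 0;
}
#endif							/* WALSENDER_CLIENT_EXAMPLE */
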
47 : : #include "postgres.h"
48 : :
49 : : #include <signal.h>
50 : : #include <unistd.h>
51 : :
52 : : #include "access/timeline.h"
53 : : #include "access/transam.h"
54 : : #include "access/twophase.h"
55 : : #include "access/xact.h"
56 : : #include "access/xlog_internal.h"
57 : : #include "access/xlogreader.h"
58 : : #include "access/xlogrecovery.h"
59 : : #include "access/xlogutils.h"
60 : : #include "backup/basebackup.h"
61 : : #include "backup/basebackup_incremental.h"
62 : : #include "catalog/pg_authid.h"
63 : : #include "catalog/pg_type.h"
64 : : #include "commands/defrem.h"
65 : : #include "funcapi.h"
66 : : #include "libpq/libpq.h"
67 : : #include "libpq/pqformat.h"
68 : : #include "libpq/protocol.h"
69 : : #include "miscadmin.h"
70 : : #include "nodes/replnodes.h"
71 : : #include "pgstat.h"
72 : : #include "postmaster/interrupt.h"
73 : : #include "replication/decode.h"
74 : : #include "replication/logical.h"
75 : : #include "replication/slotsync.h"
76 : : #include "replication/slot.h"
77 : : #include "replication/snapbuild.h"
78 : : #include "replication/syncrep.h"
79 : : #include "replication/walreceiver.h"
80 : : #include "replication/walsender.h"
81 : : #include "replication/walsender_private.h"
82 : : #include "storage/condition_variable.h"
83 : : #include "storage/aio_subsys.h"
84 : : #include "storage/fd.h"
85 : : #include "storage/ipc.h"
86 : : #include "storage/pmsignal.h"
87 : : #include "storage/proc.h"
88 : : #include "storage/procarray.h"
89 : : #include "tcop/dest.h"
90 : : #include "tcop/tcopprot.h"
91 : : #include "utils/acl.h"
92 : : #include "utils/builtins.h"
93 : : #include "utils/guc.h"
94 : : #include "utils/lsyscache.h"
95 : : #include "utils/memutils.h"
96 : : #include "utils/pg_lsn.h"
97 : : #include "utils/pgstat_internal.h"
98 : : #include "utils/ps_status.h"
99 : : #include "utils/timeout.h"
100 : : #include "utils/timestamp.h"
101 : :
102 : : /* Minimum interval used by walsender for stats flushes, in ms */
103 : : #define WALSENDER_STATS_FLUSH_INTERVAL 1000
104 : :
105 : : /*
106 : : * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
107 : : *
108 : : * We don't have a good idea of what a good value would be; there's some
109 : : * overhead per message in both walsender and walreceiver, but on the other
110 : : * hand sending large batches makes walsender less responsive to signals
111 : : * because signals are checked only between messages. 128kB (with
112 : : * default 8k blocks) seems like a reasonable guess for now.
113 : : */
114 : : #define MAX_SEND_SIZE (XLOG_BLCKSZ * 16)
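/*
 * Editorial note (not in the original file): the "Must be >= XLOG_BLCKSZ"
 * requirement stated above could be checked at compile time with a static
 * assertion along these lines; the guard macro is hypothetical.
 */
#ifdef WALSENDER_EXAMPLE_CHECKS
StaticAssertDecl(MAX_SEND_SIZE >= XLOG_BLCKSZ,
				 "MAX_SEND_SIZE must be at least XLOG_BLCKSZ");
#endif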
115 : :
116 : : /* Array of WalSnds in shared memory */
117 : : WalSndCtlData *WalSndCtl = NULL;
118 : :
119 : : /* My slot in the shared memory array */
120 : : WalSnd *MyWalSnd = NULL;
121 : :
122 : : /* Global state */
123 : : bool am_walsender = false; /* Am I a walsender process? */
124 : : bool am_cascading_walsender = false; /* Am I cascading WAL to another
125 : : * standby? */
126 : : bool am_db_walsender = false; /* Connected to a database? */
127 : :
128 : : /* GUC variables */
129 : : int max_wal_senders = 10; /* the maximum number of concurrent
130 : : * walsenders */
131 : : int wal_sender_timeout = 60 * 1000; /* maximum time to send one WAL
132 : : * data message */
133 : : bool log_replication_commands = false;
134 : :
135 : : /*
136 : : * State for WalSndWakeupRequest
137 : : */
138 : : bool wake_wal_senders = false;
139 : :
140 : : /*
141 : : * xlogreader used for replication. Note that a WAL sender doing physical
142 : : * replication does not need xlogreader to read WAL, but it needs one to
143 : : * keep a state of its work.
144 : : */
145 : : static XLogReaderState *xlogreader = NULL;
146 : :
147 : : /*
148 : : * If the UPLOAD_MANIFEST command is used to provide a backup manifest in
149 : : * preparation for an incremental backup, uploaded_manifest will point
150 : : * to an object containing information about its contents, and
151 : : * uploaded_manifest_mcxt will point to the memory context that contains
152 : : * that object and all of its subordinate data. Otherwise, both values will
153 : : * be NULL.
154 : : */
155 : : static IncrementalBackupInfo *uploaded_manifest = NULL;
156 : : static MemoryContext uploaded_manifest_mcxt = NULL;
157 : :
158 : : /*
159 : : * These variables keep track of the state of the timeline we're currently
160 : : * sending. sendTimeLine identifies the timeline. If sendTimeLineIsHistoric,
161 : : * the timeline is not the latest timeline on this server, and the server's
162 : : * history forked off from that timeline at sendTimeLineValidUpto.
163 : : */
164 : : static TimeLineID sendTimeLine = 0;
165 : : static TimeLineID sendTimeLineNextTLI = 0;
166 : : static bool sendTimeLineIsHistoric = false;
167 : : static XLogRecPtr sendTimeLineValidUpto = InvalidXLogRecPtr;
168 : :
169 : : /*
170 : : * How far have we sent WAL already? This is also advertised in
171 : : * MyWalSnd->sentPtr. (Actually, this is the next WAL location to send.)
172 : : */
173 : : static XLogRecPtr sentPtr = InvalidXLogRecPtr;
174 : :
175 : : /* Buffers for constructing outgoing messages and processing reply messages. */
176 : : static StringInfoData output_message;
177 : : static StringInfoData reply_message;
178 : : static StringInfoData tmpbuf;
179 : :
180 : : /* Timestamp of last ProcessRepliesIfAny(). */
181 : : static TimestampTz last_processing = 0;
182 : :
183 : : /*
184 : : * Timestamp of last ProcessRepliesIfAny() that saw a reply from the
185 : : * standby. Set to 0 if wal_sender_timeout doesn't need to be active.
186 : : */
187 : : static TimestampTz last_reply_timestamp = 0;
188 : :
189 : : /* Have we sent a heartbeat message asking for reply, since last reply? */
190 : : static bool waiting_for_ping_response = false;
191 : :
192 : : /*
193 : : * While streaming WAL in Copy mode, streamingDoneSending is set to true
194 : : * after we have sent CopyDone. We should not send any more CopyData messages
195 : : * after that. streamingDoneReceiving is set to true when we receive CopyDone
196 : : * from the other end. When both become true, it's time to exit Copy mode.
197 : : */
198 : : static bool streamingDoneSending;
199 : : static bool streamingDoneReceiving;
200 : :
201 : : /* Are we there yet? */
202 : : static bool WalSndCaughtUp = false;
203 : :
204 : : /* Flags set by signal handlers for later service in main loop */
205 : : static volatile sig_atomic_t got_SIGUSR2 = false;
206 : : static volatile sig_atomic_t got_STOPPING = false;
207 : :
208 : : /*
209 : : * This is set while we are streaming. When not set, the
210 : : * PROCSIG_WALSND_INIT_STOPPING signal will be handled like SIGTERM. When set,
211 : : * the main loop is responsible for checking got_STOPPING and terminating when
212 : : * it's set (after streaming any remaining WAL).
213 : : */
214 : : static volatile sig_atomic_t replication_active = false;
215 : :
216 : : static LogicalDecodingContext *logical_decoding_ctx = NULL;
217 : :
218 : : /* A sample associating a WAL location with the time it was written. */
219 : : typedef struct
220 : : {
221 : : XLogRecPtr lsn;
222 : : TimestampTz time;
223 : : } WalTimeSample;
224 : :
225 : : /* The size of our buffer of time samples. */
226 : : #define LAG_TRACKER_BUFFER_SIZE 8192
227 : :
228 : : /* A mechanism for tracking replication lag. */
229 : : typedef struct
230 : : {
231 : : XLogRecPtr last_lsn;
232 : : WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE];
233 : : int write_head;
234 : : int read_heads[NUM_SYNC_REP_WAIT_MODE];
235 : : WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
236 : :
237 : : /*
238 : : * Overflow entries for read heads that collide with the write head.
239 : : *
240 : : * When the cyclic buffer fills (write head is about to collide with a
241 : : * read head), we save that read head's current sample here and mark it as
242 : : * using overflow (read_heads[i] = -1). This allows the write head to
243 : : * continue advancing while the overflowed mode continues lag computation
244 : : * using the saved sample.
245 : : *
246 : : * Once the standby's reported LSN advances past the overflow entry's LSN,
247 : : * we transition back to normal buffer-based tracking.
248 : : */
249 : : WalTimeSample overflowed[NUM_SYNC_REP_WAIT_MODE];
250 : : } LagTracker;
251 : :
252 : : static LagTracker *lag_tracker;
253 : :
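/*
 * Editorial sketch (not part of walsender.c): a simplified, single-reader
 * version of the overflow scheme described in the LagTracker comment above.
 * The names (DemoLagTracker, demo_write, demo_advance_reader) are
 * hypothetical; the real tracker keeps one read head per sync-rep wait mode.
 */
#ifdef LAG_TRACKER_EXAMPLE
typedef struct
{
	WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE];
	int			write_head;
	int			read_head;		/* -1 while the reader uses the overflow slot */
	WalTimeSample overflowed;	/* parked sample while read_head == -1 */
} DemoLagTracker;

/* Append a sample; if the writer would collide with the reader, park it. */
static void
demo_write(DemoLagTracker *t, XLogRecPtr lsn, TimestampTz now)
{
	int			next = (t->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;

	if (next == t->read_head)
	{
		t->overflowed = t->buffer[t->read_head];
		t->read_head = -1;		/* reader falls back to the parked sample */
	}

	t->buffer[t->write_head].lsn = lsn;
	t->buffer[t->write_head].time = now;
	t->write_head = next;
}

/*
 * Once the LSN reported by the standby passes the parked sample, switch the
 * reader back to normal ring-based tracking, starting at the oldest slot
 * still present in the (full) ring.
 */
static void
demo_advance_reader(DemoLagTracker *t, XLogRecPtr reported_lsn)
{
	if (t->read_head == -1 && reported_lsn >= t->overflowed.lsn)
		t->read_head = (t->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
}
#endif							/* LAG_TRACKER_EXAMPLE */
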
254 : : /* Signal handlers */
255 : : static void WalSndLastCycleHandler(SIGNAL_ARGS);
256 : :
257 : : /* Prototypes for private functions */
258 : : typedef void (*WalSndSendDataCallback) (void);
259 : : static void WalSndLoop(WalSndSendDataCallback send_data);
260 : : static void InitWalSenderSlot(void);
261 : : static void WalSndKill(int code, Datum arg);
262 : : pg_noreturn static void WalSndShutdown(void);
263 : : static void XLogSendPhysical(void);
264 : : static void XLogSendLogical(void);
265 : : static void WalSndDone(WalSndSendDataCallback send_data);
266 : : static void IdentifySystem(void);
267 : : static void UploadManifest(void);
268 : : static bool HandleUploadManifestPacket(StringInfo buf, off_t *offset,
269 : : IncrementalBackupInfo *ib);
270 : : static void ReadReplicationSlot(ReadReplicationSlotCmd *cmd);
271 : : static void CreateReplicationSlot(CreateReplicationSlotCmd *cmd);
272 : : static void DropReplicationSlot(DropReplicationSlotCmd *cmd);
273 : : static void StartReplication(StartReplicationCmd *cmd);
274 : : static void StartLogicalReplication(StartReplicationCmd *cmd);
275 : : static void ProcessStandbyMessage(void);
276 : : static void ProcessStandbyReplyMessage(void);
277 : : static void ProcessStandbyHSFeedbackMessage(void);
278 : : static void ProcessStandbyPSRequestMessage(void);
279 : : static void ProcessRepliesIfAny(void);
280 : : static void ProcessPendingWrites(void);
281 : : static void WalSndKeepalive(bool requestReply, XLogRecPtr writePtr);
282 : : static void WalSndKeepaliveIfNecessary(void);
283 : : static void WalSndCheckTimeOut(void);
284 : : static long WalSndComputeSleeptime(TimestampTz now);
285 : : static void WalSndWait(uint32 socket_events, long timeout, uint32 wait_event);
286 : : static void WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write);
287 : : static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write);
288 : : static void WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
289 : : bool skipped_xact);
290 : : static XLogRecPtr WalSndWaitForWal(XLogRecPtr loc);
291 : : static void LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time);
292 : : static TimeOffset LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now);
293 : : static bool TransactionIdInRecentPast(TransactionId xid, uint32 epoch);
294 : :
295 : : static void WalSndSegmentOpen(XLogReaderState *state, XLogSegNo nextSegNo,
296 : : TimeLineID *tli_p);
297 : :
298 : :
299 : : /* Initialize walsender process before entering the main command loop */
300 : : void
4770 heikki.linnakangas@i 301 :CBC 1112 : InitWalSender(void)
302 : : {
5214 simon@2ndQuadrant.co 303 : 1112 : am_cascading_walsender = RecoveryInProgress();
304 : :
305 : : /* Create a per-walsender data structure in shared memory */
4770 heikki.linnakangas@i 306 : 1112 : InitWalSenderSlot();
307 : :
308 : : /* need resource owner for e.g. basebackups */
384 andres@anarazel.de 309 : 1112 : CreateAuxProcessResourceOwner();
310 : :
311 : : /*
312 : : * Let postmaster know that we're a WAL sender. Once we've declared ourselves
313 : : * a WAL sender process, postmaster will let us outlive the bgwriter and
314 : : * kill us last in the shutdown sequence, so we get a chance to stream all
315 : : * remaining WAL at shutdown, including the shutdown checkpoint. Note that
316 : : * there's no going back, and we mustn't write any WAL records after this.
317 : : */
4701 heikki.linnakangas@i 318 : 1112 : MarkPostmasterChildWalSender();
319 : 1112 : SendPostmasterSignal(PMSIGNAL_ADVANCE_STATE_MACHINE);
320 : :
321 : : /*
322 : : * If the client didn't specify a database to connect to, show in PGPROC
323 : : * that our advertised xmin should affect vacuum horizons in all
324 : : * databases. This allows physical replication clients to send hot
325 : : * standby feedback that will delay vacuum cleanup in all databases.
326 : : */
1291 tgl@sss.pgh.pa.us 327 [ + + ]: 1112 : if (MyDatabaseId == InvalidOid)
328 : : {
329 [ - + ]: 453 : Assert(MyProc->xmin == InvalidTransactionId);
330 : 453 : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
331 : 453 : MyProc->statusFlags |= PROC_AFFECTS_ALL_HORIZONS;
332 : 453 : ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags;
333 : 453 : LWLockRelease(ProcArrayLock);
334 : : }
335 : :
336 : : /* Initialize empty timestamp buffer for lag tracking. */
2568 tmunro@postgresql.or 337 : 1112 : lag_tracker = MemoryContextAllocZero(TopMemoryContext, sizeof(LagTracker));
5764 heikki.linnakangas@i 338 : 1112 : }
339 : :
340 : : /*
341 : : * Clean up after an error.
342 : : *
343 : : * WAL sender processes don't use transactions like regular backends do.
344 : : * This function does any cleanup required after an error in a WAL sender
345 : : * process, similar to what transaction abort does in a regular backend.
346 : : */
347 : : void
3726 andres@anarazel.de 348 : 35 : WalSndErrorCleanup(void)
349 : : {
4287 rhaas@postgresql.org 350 : 35 : LWLockReleaseAll();
3261 351 : 35 : ConditionVariableCancelSleep();
3518 352 : 35 : pgstat_report_wait_end();
224 andres@anarazel.de 353 : 35 : pgaio_error_cleanup();
354 : :
1991 alvherre@alvh.no-ip. 355 [ + + + + ]: 35 : if (xlogreader != NULL && xlogreader->seg.ws_file >= 0)
1993 356 : 1 : wal_segment_close(xlogreader);
357 : :
4287 rhaas@postgresql.org 358 [ + + ]: 35 : if (MyReplicationSlot != NULL)
359 : 4 : ReplicationSlotRelease();
360 : :
550 akapila@postgresql.o 361 : 35 : ReplicationSlotCleanup(false);
362 : :
4701 heikki.linnakangas@i 363 : 35 : replication_active = false;
364 : :
365 : : /*
366 : : * If there is a transaction in progress, it will clean up our
367 : : * ResourceOwner, but if a replication command set up a resource owner
368 : : * without a transaction, we've got to clean that up now.
369 : : */
2033 rhaas@postgresql.org 370 [ + + ]: 35 : if (!IsTransactionOrTransactionBlock())
384 andres@anarazel.de 371 : 34 : ReleaseAuxProcessResources(false);
372 : :
3066 373 [ + - - + ]: 35 : if (got_STOPPING || got_SIGUSR2)
4770 heikki.linnakangas@i 374 :UBC 0 : proc_exit(0);
375 : :
376 : : /* Revert back to startup state */
4701 heikki.linnakangas@i 377 :CBC 35 : WalSndSetState(WALSNDSTATE_STARTUP);
5764 378 : 35 : }
379 : :
380 : : /*
381 : : * Handle a client's connection abort in an orderly manner.
382 : : */
383 : : static void
4249 rhaas@postgresql.org 384 : 1 : WalSndShutdown(void)
385 : : {
386 : : /*
387 : : * Reset whereToSendOutput to prevent ereport from attempting to send any
388 : : * more messages to the standby.
389 : : */
390 [ + - ]: 1 : if (whereToSendOutput == DestRemote)
391 : 1 : whereToSendOutput = DestNone;
392 : :
393 : 1 : proc_exit(0);
394 : : abort(); /* keep the compiler quiet */
395 : : }
396 : :
397 : : /*
398 : : * Handle the IDENTIFY_SYSTEM command.
399 : : */
400 : : static void
5400 magnus@hagander.net 401 : 676 : IdentifySystem(void)
402 : : {
403 : : char sysid[32];
404 : : char xloc[MAXFNAMELEN];
405 : : XLogRecPtr logptr;
4249 rhaas@postgresql.org 406 : 676 : char *dbname = NULL;
407 : : DestReceiver *dest;
408 : : TupOutputState *tstate;
409 : : TupleDesc tupdesc;
410 : : Datum values[4];
1199 peter@eisentraut.org 411 : 676 : bool nulls[4] = {0};
412 : : TimeLineID currTLI;
413 : :
414 : : /*
415 : : * Reply with a result set with one row, four columns. First col is system
416 : : * ID, second is timeline ID, third is current xlog location and the
417 : : * fourth contains the database name if we are connected to one.
418 : : */
419 : :
5400 magnus@hagander.net 420 : 676 : snprintf(sysid, sizeof(sysid), UINT64_FORMAT,
421 : : GetSystemIdentifier());
422 : :
4701 heikki.linnakangas@i 423 : 676 : am_cascading_walsender = RecoveryInProgress();
424 [ + + ]: 676 : if (am_cascading_walsender)
1452 rhaas@postgresql.org 425 : 16 : logptr = GetStandbyFlushRecPtr(&currTLI);
426 : : else
427 : 660 : logptr = GetFlushRecPtr(&currTLI);
428 : :
112 alvherre@kurilemu.de 429 :GNC 676 : snprintf(xloc, sizeof(xloc), "%X/%08X", LSN_FORMAT_ARGS(logptr));
430 : :
4249 rhaas@postgresql.org 431 [ + + ]:CBC 676 : if (MyDatabaseId != InvalidOid)
432 : : {
433 : 221 : MemoryContext cur = CurrentMemoryContext;
434 : :
435 : : /* syscache access needs a transaction env. */
436 : 221 : StartTransactionCommand();
437 : 221 : dbname = get_database_name(MyDatabaseId);
438 : : /* copy dbname out of TX context */
483 tgl@sss.pgh.pa.us 439 : 221 : dbname = MemoryContextStrdup(cur, dbname);
4249 rhaas@postgresql.org 440 : 221 : CommitTransactionCommand();
441 : : }
442 : :
3190 443 : 676 : dest = CreateDestReceiver(DestRemoteSimple);
444 : :
445 : : /* need a tuple descriptor representing four columns */
2533 andres@anarazel.de 446 : 676 : tupdesc = CreateTemplateTupleDesc(4);
3190 rhaas@postgresql.org 447 : 676 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "systemid",
448 : : TEXTOID, -1, 0);
449 : 676 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "timeline",
450 : : INT8OID, -1, 0);
451 : 676 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "xlogpos",
452 : : TEXTOID, -1, 0);
453 : 676 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 4, "dbname",
454 : : TEXTOID, -1, 0);
455 : :
456 : : /* prepare for projection of tuples */
2538 andres@anarazel.de 457 : 676 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
458 : :
459 : : /* column 1: system identifier */
3190 rhaas@postgresql.org 460 : 676 : values[0] = CStringGetTextDatum(sysid);
461 : :
462 : : /* column 2: timeline */
1211 peter@eisentraut.org 463 : 676 : values[1] = Int64GetDatum(currTLI);
464 : :
465 : : /* column 3: wal location */
3090 peter_e@gmx.net 466 : 676 : values[2] = CStringGetTextDatum(xloc);
467 : :
468 : : /* column 4: database name, or NULL if none */
4249 rhaas@postgresql.org 469 [ + + ]: 676 : if (dbname)
3190 470 : 221 : values[3] = CStringGetTextDatum(dbname);
471 : : else
472 : 455 : nulls[3] = true;
473 : :
474 : : /* send it to dest */
475 : 676 : do_tup_output(tstate, values, nulls);
476 : :
477 : 676 : end_tup_output(tstate);
5400 magnus@hagander.net 478 : 676 : }
479 : :
480 : : /* Handle READ_REPLICATION_SLOT command */
481 : : static void
1463 michael@paquier.xyz 482 : 6 : ReadReplicationSlot(ReadReplicationSlotCmd *cmd)
483 : : {
484 : : #define READ_REPLICATION_SLOT_COLS 3
485 : : ReplicationSlot *slot;
486 : : DestReceiver *dest;
487 : : TupOutputState *tstate;
488 : : TupleDesc tupdesc;
1199 peter@eisentraut.org 489 : 6 : Datum values[READ_REPLICATION_SLOT_COLS] = {0};
490 : : bool nulls[READ_REPLICATION_SLOT_COLS];
491 : :
1463 michael@paquier.xyz 492 : 6 : tupdesc = CreateTemplateTupleDesc(READ_REPLICATION_SLOT_COLS);
493 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "slot_type",
494 : : TEXTOID, -1, 0);
495 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "restart_lsn",
496 : : TEXTOID, -1, 0);
497 : : /* TimeLineID is unsigned, so int4 is not wide enough. */
498 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "restart_tli",
499 : : INT8OID, -1, 0);
500 : :
1199 peter@eisentraut.org 501 : 6 : memset(nulls, true, READ_REPLICATION_SLOT_COLS * sizeof(bool));
502 : :
1463 michael@paquier.xyz 503 : 6 : LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
504 : 6 : slot = SearchNamedReplicationSlot(cmd->slotname, false);
505 [ + + - + ]: 6 : if (slot == NULL || !slot->in_use)
506 : : {
507 : 2 : LWLockRelease(ReplicationSlotControlLock);
508 : : }
509 : : else
510 : : {
511 : : ReplicationSlot slot_contents;
512 : 4 : int i = 0;
513 : :
514 : : /* Copy slot contents while holding spinlock */
515 [ - + ]: 4 : SpinLockAcquire(&slot->mutex);
516 : 4 : slot_contents = *slot;
517 : 4 : SpinLockRelease(&slot->mutex);
518 : 4 : LWLockRelease(ReplicationSlotControlLock);
519 : :
520 [ + + ]: 4 : if (OidIsValid(slot_contents.data.database))
521 [ + - ]: 1 : ereport(ERROR,
522 : : errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
523 : : errmsg("cannot use %s with a logical replication slot",
524 : : "READ_REPLICATION_SLOT"));
525 : :
526 : : /* slot type */
527 : 3 : values[i] = CStringGetTextDatum("physical");
528 : 3 : nulls[i] = false;
529 : 3 : i++;
530 : :
531 : : /* start LSN */
532 [ + - ]: 3 : if (!XLogRecPtrIsInvalid(slot_contents.data.restart_lsn))
533 : : {
534 : : char xloc[64];
535 : :
112 alvherre@kurilemu.de 536 :GNC 3 : snprintf(xloc, sizeof(xloc), "%X/%08X",
1463 michael@paquier.xyz 537 :CBC 3 : LSN_FORMAT_ARGS(slot_contents.data.restart_lsn));
538 : 3 : values[i] = CStringGetTextDatum(xloc);
539 : 3 : nulls[i] = false;
540 : : }
541 : 3 : i++;
542 : :
543 : : /* timeline this WAL was produced on */
544 [ + - ]: 3 : if (!XLogRecPtrIsInvalid(slot_contents.data.restart_lsn))
545 : : {
546 : : TimeLineID slots_position_timeline;
547 : : TimeLineID current_timeline;
548 : 3 : List *timeline_history = NIL;
549 : :
550 : : /*
551 : : * While in recovery, use as timeline the currently-replaying one
552 : : * to get the LSN position's history.
553 : : */
554 [ - + ]: 3 : if (RecoveryInProgress())
1463 michael@paquier.xyz 555 :UBC 0 : (void) GetXLogReplayRecPtr(&current_timeline);
556 : : else
1452 rhaas@postgresql.org 557 :CBC 3 : current_timeline = GetWALInsertionTimeLine();
558 : :
1463 michael@paquier.xyz 559 : 3 : timeline_history = readTimeLineHistory(current_timeline);
560 : 3 : slots_position_timeline = tliOfPointInHistory(slot_contents.data.restart_lsn,
561 : : timeline_history);
562 : 3 : values[i] = Int64GetDatum((int64) slots_position_timeline);
563 : 3 : nulls[i] = false;
564 : : }
565 : 3 : i++;
566 : :
567 [ - + ]: 3 : Assert(i == READ_REPLICATION_SLOT_COLS);
568 : : }
569 : :
570 : 5 : dest = CreateDestReceiver(DestRemoteSimple);
571 : 5 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
572 : 5 : do_tup_output(tstate, values, nulls);
573 : 5 : end_tup_output(tstate);
574 : 5 : }
575 : :
576 : :
577 : : /*
578 : : * Handle TIMELINE_HISTORY command.
579 : : */
580 : : static void
4701 heikki.linnakangas@i 581 : 14 : SendTimeLineHistory(TimeLineHistoryCmd *cmd)
582 : : {
583 : : DestReceiver *dest;
584 : : TupleDesc tupdesc;
585 : : StringInfoData buf;
586 : : char histfname[MAXFNAMELEN];
587 : : char path[MAXPGPATH];
588 : : int fd;
589 : : off_t histfilelen;
590 : : off_t bytesleft;
591 : : Size len;
592 : :
1211 peter@eisentraut.org 593 : 14 : dest = CreateDestReceiver(DestRemoteSimple);
594 : :
595 : : /*
596 : : * Reply with a result set with one row, and two columns. The first col is
597 : : * the name of the history file, 2nd is the contents.
598 : : */
599 : 14 : tupdesc = CreateTemplateTupleDesc(2);
600 : 14 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "filename", TEXTOID, -1, 0);
601 : 14 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "content", TEXTOID, -1, 0);
602 : :
4701 heikki.linnakangas@i 603 : 14 : TLHistoryFileName(histfname, cmd->timeline);
604 : 14 : TLHistoryFilePath(path, cmd->timeline);
605 : :
606 : : /* Send a RowDescription message */
1211 peter@eisentraut.org 607 : 14 : dest->rStartup(dest, CMD_SELECT, tupdesc);
608 : :
609 : : /* Send a DataRow message */
797 nathan@postgresql.or 610 : 14 : pq_beginmessage(&buf, PqMsg_DataRow);
2938 andres@anarazel.de 611 : 14 : pq_sendint16(&buf, 2); /* # of columns */
3653 alvherre@alvh.no-ip. 612 : 14 : len = strlen(histfname);
2938 andres@anarazel.de 613 : 14 : pq_sendint32(&buf, len); /* col1 len */
3653 alvherre@alvh.no-ip. 614 : 14 : pq_sendbytes(&buf, histfname, len);
615 : :
2956 peter_e@gmx.net 616 : 14 : fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
4701 heikki.linnakangas@i 617 [ - + ]: 14 : if (fd < 0)
4701 heikki.linnakangas@i 618 [ # # ]:UBC 0 : ereport(ERROR,
619 : : (errcode_for_file_access(),
620 : : errmsg("could not open file \"%s\": %m", path)));
621 : :
622 : : /* Determine file length and send it to client */
4701 heikki.linnakangas@i 623 :CBC 14 : histfilelen = lseek(fd, 0, SEEK_END);
624 [ - + ]: 14 : if (histfilelen < 0)
4701 heikki.linnakangas@i 625 [ # # ]:UBC 0 : ereport(ERROR,
626 : : (errcode_for_file_access(),
627 : : errmsg("could not seek to end of file \"%s\": %m", path)));
4701 heikki.linnakangas@i 628 [ - + ]:CBC 14 : if (lseek(fd, 0, SEEK_SET) != 0)
4701 heikki.linnakangas@i 629 [ # # ]:UBC 0 : ereport(ERROR,
630 : : (errcode_for_file_access(),
631 : : errmsg("could not seek to beginning of file \"%s\": %m", path)));
632 : :
2938 andres@anarazel.de 633 :CBC 14 : pq_sendint32(&buf, histfilelen); /* col2 len */
634 : :
4701 heikki.linnakangas@i 635 : 14 : bytesleft = histfilelen;
636 [ + + ]: 28 : while (bytesleft > 0)
637 : : {
638 : : PGAlignedBlock rbuf;
639 : : int nread;
640 : :
3145 rhaas@postgresql.org 641 : 14 : pgstat_report_wait_start(WAIT_EVENT_WALSENDER_TIMELINE_HISTORY_READ);
2613 tgl@sss.pgh.pa.us 642 : 14 : nread = read(fd, rbuf.data, sizeof(rbuf));
3145 rhaas@postgresql.org 643 : 14 : pgstat_report_wait_end();
2658 michael@paquier.xyz 644 [ - + ]: 14 : if (nread < 0)
4701 heikki.linnakangas@i 645 [ # # ]:UBC 0 : ereport(ERROR,
646 : : (errcode_for_file_access(),
647 : : errmsg("could not read file \"%s\": %m",
648 : : path)));
2658 michael@paquier.xyz 649 [ - + ]:CBC 14 : else if (nread == 0)
2658 michael@paquier.xyz 650 [ # # ]:UBC 0 : ereport(ERROR,
651 : : (errcode(ERRCODE_DATA_CORRUPTED),
652 : : errmsg("could not read file \"%s\": read %d of %zu",
653 : : path, nread, (Size) bytesleft)));
654 : :
2613 tgl@sss.pgh.pa.us 655 :CBC 14 : pq_sendbytes(&buf, rbuf.data, nread);
4701 heikki.linnakangas@i 656 : 14 : bytesleft -= nread;
657 : : }
658 : :
2305 peter@eisentraut.org 659 [ - + ]: 14 : if (CloseTransientFile(fd) != 0)
2424 michael@paquier.xyz 660 [ # # ]:UBC 0 : ereport(ERROR,
661 : : (errcode_for_file_access(),
662 : : errmsg("could not close file \"%s\": %m", path)));
663 : :
4701 heikki.linnakangas@i 664 :CBC 14 : pq_endmessage(&buf);
665 : 14 : }
666 : :
667 : : /*
668 : : * Handle UPLOAD_MANIFEST command.
669 : : */
670 : : static void
677 rhaas@postgresql.org 671 : 11 : UploadManifest(void)
672 : : {
673 : : MemoryContext mcxt;
674 : : IncrementalBackupInfo *ib;
675 : 11 : off_t offset = 0;
676 : : StringInfoData buf;
677 : :
678 : : /*
679 : : * parsing the manifest will use the cryptohash stuff, which requires a
680 : : * resource owner
681 : : */
384 andres@anarazel.de 682 [ - + ]: 11 : Assert(AuxProcessResourceOwner != NULL);
683 [ + - - + ]: 11 : Assert(CurrentResourceOwner == AuxProcessResourceOwner ||
684 : : CurrentResourceOwner == NULL);
685 : 11 : CurrentResourceOwner = AuxProcessResourceOwner;
686 : :
687 : : /* Prepare to read manifest data into a temporary context. */
677 rhaas@postgresql.org 688 : 11 : mcxt = AllocSetContextCreate(CurrentMemoryContext,
689 : : "incremental backup information",
690 : : ALLOCSET_DEFAULT_SIZES);
691 : 11 : ib = CreateIncrementalBackupInfo(mcxt);
692 : :
693 : : /* Send a CopyInResponse message */
467 nathan@postgresql.or 694 : 11 : pq_beginmessage(&buf, PqMsg_CopyInResponse);
677 rhaas@postgresql.org 695 : 11 : pq_sendbyte(&buf, 0);
696 : 11 : pq_sendint16(&buf, 0);
697 : 11 : pq_endmessage_reuse(&buf);
698 : 11 : pq_flush();
699 : :
700 : : /* Receive packets from client until done. */
701 [ + + ]: 43 : while (HandleUploadManifestPacket(&buf, &offset, ib))
702 : : ;
703 : :
704 : : /* Finish up manifest processing. */
705 : 10 : FinalizeIncrementalManifest(ib);
706 : :
707 : : /*
708 : : * Discard any old manifest information and arrange to preserve the new
709 : : * information we just got.
710 : : *
711 : : * We assume that MemoryContextDelete and MemoryContextSetParent won't
712 : : * fail, and thus we shouldn't end up bailing out of here in such a way as
713 : : * to leave dangling pointers.
714 : : */
715 [ - + ]: 10 : if (uploaded_manifest_mcxt != NULL)
677 rhaas@postgresql.org 716 :UBC 0 : MemoryContextDelete(uploaded_manifest_mcxt);
677 rhaas@postgresql.org 717 :CBC 10 : MemoryContextSetParent(mcxt, CacheMemoryContext);
718 : 10 : uploaded_manifest = ib;
719 : 10 : uploaded_manifest_mcxt = mcxt;
720 : :
721 : : /* clean up the resource owner we created */
384 andres@anarazel.de 722 : 10 : ReleaseAuxProcessResources(true);
677 rhaas@postgresql.org 723 : 10 : }
724 : :
725 : : /*
726 : : * Process one packet received during the handling of an UPLOAD_MANIFEST
727 : : * operation.
728 : : *
729 : : * 'buf' is scratch space. This function expects it to be initialized, doesn't
730 : : * care what the current contents are, and may override them with completely
731 : : * new contents.
732 : : *
733 : : * The return value is true if the caller should continue processing
734 : : * additional packets and false if the UPLOAD_MANIFEST operation is complete.
735 : : */
736 : : static bool
737 : 43 : HandleUploadManifestPacket(StringInfo buf, off_t *offset,
738 : : IncrementalBackupInfo *ib)
739 : : {
740 : : int mtype;
741 : : int maxmsglen;
742 : :
743 : 43 : HOLD_CANCEL_INTERRUPTS();
744 : :
745 : 43 : pq_startmsgread();
746 : 43 : mtype = pq_getbyte();
747 [ - + ]: 43 : if (mtype == EOF)
677 rhaas@postgresql.org 748 [ # # ]:UBC 0 : ereport(ERROR,
749 : : (errcode(ERRCODE_CONNECTION_FAILURE),
750 : : errmsg("unexpected EOF on client connection with an open transaction")));
751 : :
677 rhaas@postgresql.org 752 [ + + - ]:CBC 43 : switch (mtype)
753 : : {
96 nathan@postgresql.or 754 :GNC 33 : case PqMsg_CopyData:
677 rhaas@postgresql.org 755 :CBC 33 : maxmsglen = PQ_LARGE_MESSAGE_LIMIT;
756 : 33 : break;
96 nathan@postgresql.or 757 :GNC 10 : case PqMsg_CopyDone:
758 : : case PqMsg_CopyFail:
759 : : case PqMsg_Flush:
760 : : case PqMsg_Sync:
677 rhaas@postgresql.org 761 :CBC 10 : maxmsglen = PQ_SMALL_MESSAGE_LIMIT;
762 : 10 : break;
677 rhaas@postgresql.org 763 :UBC 0 : default:
764 [ # # ]: 0 : ereport(ERROR,
765 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
766 : : errmsg("unexpected message type 0x%02X during COPY from stdin",
767 : : mtype)));
768 : : maxmsglen = 0; /* keep compiler quiet */
769 : : break;
770 : : }
771 : :
772 : : /* Now collect the message body */
677 rhaas@postgresql.org 773 [ - + ]:CBC 43 : if (pq_getmessage(buf, maxmsglen))
677 rhaas@postgresql.org 774 [ # # ]:UBC 0 : ereport(ERROR,
775 : : (errcode(ERRCODE_CONNECTION_FAILURE),
776 : : errmsg("unexpected EOF on client connection with an open transaction")));
677 rhaas@postgresql.org 777 [ - + ]:CBC 43 : RESUME_CANCEL_INTERRUPTS();
778 : :
779 : : /* Process the message */
780 [ + + - - : 43 : switch (mtype)
- ]
781 : : {
96 nathan@postgresql.or 782 :GNC 33 : case PqMsg_CopyData:
677 rhaas@postgresql.org 783 :CBC 33 : AppendIncrementalManifestData(ib, buf->data, buf->len);
784 : 32 : return true;
785 : :
96 nathan@postgresql.or 786 :GNC 10 : case PqMsg_CopyDone:
677 rhaas@postgresql.org 787 :CBC 10 : return false;
788 : :
96 nathan@postgresql.or 789 :UNC 0 : case PqMsg_Sync:
790 : : case PqMsg_Flush:
791 : : /* Ignore these while in CopyOut mode as we do elsewhere. */
677 rhaas@postgresql.org 792 :UBC 0 : return true;
793 : :
96 nathan@postgresql.or 794 :UNC 0 : case PqMsg_CopyFail:
677 rhaas@postgresql.org 795 [ # # ]:UBC 0 : ereport(ERROR,
796 : : (errcode(ERRCODE_QUERY_CANCELED),
797 : : errmsg("COPY from stdin failed: %s",
798 : : pq_getmsgstring(buf))));
799 : : }
800 : :
801 : : /* Not reached. */
802 : 0 : Assert(false);
803 : : return false;
804 : : }
805 : :
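/*
 * Editorial sketch (not part of walsender.c): the client side of the
 * UPLOAD_MANIFEST exchange handled above.  Because UploadManifest() replies
 * with CopyInResponse, libpq treats it as an ordinary COPY IN:
 * PQputCopyData() produces the PqMsg_CopyData packets consumed by
 * HandleUploadManifestPacket(), and PQputCopyEnd() produces PqMsg_CopyDone.
 * The connection setup and manifest buffer are assumptions.
 */
#ifdef UPLOAD_MANIFEST_CLIENT_EXAMPLE
#include "libpq-fe.h"

static int
upload_manifest_example(PGconn *conn, const char *manifest, int len)
{
	PGresult   *res;
	int			ok = 0;

	res = PQexec(conn, "UPLOAD_MANIFEST");
	if (PQresultStatus(res) != PGRES_COPY_IN)
	{
		PQclear(res);
		return 0;
	}
	PQclear(res);

	/* Each call becomes one CopyData message on the wire. */
	if (PQputCopyData(conn, manifest, len) == 1 &&
		PQputCopyEnd(conn, NULL) == 1)
	{
		/* Fetch the command result produced after CopyDone is processed. */
		res = PQgetResult(conn);
		ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
		PQclear(res);
	}

	return ok;
}
#endif							/* UPLOAD_MANIFEST_CLIENT_EXAMPLE */
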
806 : : /*
807 : : * Handle START_REPLICATION command.
808 : : *
809 : : * At the moment, this never returns, but an ereport(ERROR) will take us back
810 : : * to the main loop.
811 : : */
812 : : static void
4701 heikki.linnakangas@i 813 :CBC 256 : StartReplication(StartReplicationCmd *cmd)
814 : : {
815 : : StringInfoData buf;
816 : : XLogRecPtr FlushPtr;
817 : : TimeLineID FlushTLI;
818 : :
819 : : /* create xlogreader for physical replication */
1967 michael@paquier.xyz 820 : 256 : xlogreader =
1631 tmunro@postgresql.or 821 : 256 : XLogReaderAllocate(wal_segment_size, NULL,
822 : 256 : XL_ROUTINE(.segment_open = WalSndSegmentOpen,
823 : : .segment_close = wal_segment_close),
824 : : NULL);
825 : :
1967 michael@paquier.xyz 826 [ - + ]: 256 : if (!xlogreader)
1967 michael@paquier.xyz 827 [ # # ]:UBC 0 : ereport(ERROR,
828 : : (errcode(ERRCODE_OUT_OF_MEMORY),
829 : : errmsg("out of memory"),
830 : : errdetail("Failed while allocating a WAL reading processor.")));
831 : :
832 : : /*
833 : : * We assume here that we're logging enough information in the WAL for
834 : : * log-shipping, since this is checked in PostmasterMain().
835 : : *
836 : : * NOTE: wal_level can only change at shutdown, so in most cases it is
837 : : * difficult for there to be WAL data that we can still see that was
838 : : * written at wal_level='minimal'.
839 : : */
840 : :
4287 rhaas@postgresql.org 841 [ + + ]:CBC 256 : if (cmd->slotname)
842 : : {
269 akapila@postgresql.o 843 : 173 : ReplicationSlotAcquire(cmd->slotname, true, true);
3730 andres@anarazel.de 844 [ - + ]: 171 : if (SlotIsLogical(MyReplicationSlot))
4287 rhaas@postgresql.org 845 [ # # ]:UBC 0 : ereport(ERROR,
846 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
847 : : errmsg("cannot use a logical replication slot for physical replication")));
848 : :
849 : : /*
850 : : * We don't need to verify the slot's restart_lsn here; instead we
851 : : * rely on the caller requesting the starting point to use. If the
852 : : * WAL segment doesn't exist, we'll fail later.
853 : : */
854 : : }
855 : :
856 : : /*
857 : : * Select the timeline. If it was given explicitly by the client, use
858 : : * that. Otherwise use the timeline of the last replayed record.
859 : : */
1571 jdavis@postgresql.or 860 :CBC 254 : am_cascading_walsender = RecoveryInProgress();
4694 heikki.linnakangas@i 861 [ + + ]: 254 : if (am_cascading_walsender)
1452 rhaas@postgresql.org 862 : 10 : FlushPtr = GetStandbyFlushRecPtr(&FlushTLI);
863 : : else
864 : 244 : FlushPtr = GetFlushRecPtr(&FlushTLI);
865 : :
4701 heikki.linnakangas@i 866 [ + + ]: 254 : if (cmd->timeline != 0)
867 : : {
868 : : XLogRecPtr switchpoint;
869 : :
870 : 253 : sendTimeLine = cmd->timeline;
1452 rhaas@postgresql.org 871 [ + + ]: 253 : if (sendTimeLine == FlushTLI)
872 : : {
4701 heikki.linnakangas@i 873 : 240 : sendTimeLineIsHistoric = false;
874 : 240 : sendTimeLineValidUpto = InvalidXLogRecPtr;
875 : : }
876 : : else
877 : : {
878 : : List *timeLineHistory;
879 : :
880 : 13 : sendTimeLineIsHistoric = true;
881 : :
882 : : /*
883 : : * Check that the timeline the client requested exists, and the
884 : : * requested start location is on that timeline.
885 : : */
1452 rhaas@postgresql.org 886 : 13 : timeLineHistory = readTimeLineHistory(FlushTLI);
4666 heikki.linnakangas@i 887 : 13 : switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
888 : : &sendTimeLineNextTLI);
4701 889 : 13 : list_free_deep(timeLineHistory);
890 : :
891 : : /*
892 : : * Found the requested timeline in the history. Check that
893 : : * requested startpoint is on that timeline in our history.
894 : : *
895 : : * This is quite loose on purpose. We only check that we didn't
896 : : * fork off the requested timeline before the switchpoint. We
897 : : * don't check that we switched *to* it before the requested
898 : : * starting point. This is because the client can legitimately
899 : : * request to start replication from the beginning of the WAL
900 : : * segment that contains switchpoint, but on the new timeline, so
901 : : * that it doesn't end up with a partial segment. If you ask for
902 : : * too old a starting point, you'll get an error later when we
903 : : * fail to find the requested WAL segment in pg_wal.
904 : : *
905 : : * XXX: we could be more strict here and only allow a startpoint
906 : : * that's older than the switchpoint, if it's still in the same
907 : : * WAL segment.
908 : : */
909 [ + - ]: 13 : if (!XLogRecPtrIsInvalid(switchpoint) &&
4686 alvherre@alvh.no-ip. 910 [ - + ]: 13 : switchpoint < cmd->startpoint)
911 : : {
4701 heikki.linnakangas@i 912 [ # # ]:UBC 0 : ereport(ERROR,
913 : : errmsg("requested starting point %X/%08X on timeline %u is not in this server's history",
914 : : LSN_FORMAT_ARGS(cmd->startpoint),
915 : : cmd->timeline),
916 : : errdetail("This server's history forked from timeline %u at %X/%08X.",
917 : : cmd->timeline,
918 : : LSN_FORMAT_ARGS(switchpoint)));
919 : : }
4701 heikki.linnakangas@i 920 :CBC 13 : sendTimeLineValidUpto = switchpoint;
921 : : }
922 : : }
923 : : else
924 : : {
1452 rhaas@postgresql.org 925 : 1 : sendTimeLine = FlushTLI;
4701 heikki.linnakangas@i 926 : 1 : sendTimeLineValidUpto = InvalidXLogRecPtr;
927 : 1 : sendTimeLineIsHistoric = false;
928 : : }
929 : :
930 : 254 : streamingDoneSending = streamingDoneReceiving = false;
931 : :
932 : : /* If there is nothing to stream, don't even enter COPY mode */
4666 933 [ + + + - ]: 254 : if (!sendTimeLineIsHistoric || cmd->startpoint < sendTimeLineValidUpto)
934 : : {
935 : : /*
936 : : * When we first start replication the standby will be behind the
937 : : * primary. For some applications, for example synchronous
938 : : * replication, it is important to have a clear state for this initial
939 : : * catchup mode, so we can trigger actions when we change streaming
940 : : * state later. We may stay in this state for a long time, which is
941 : : * exactly why we want to be able to monitor whether or not we are
942 : : * still here.
943 : : */
4701 944 : 254 : WalSndSetState(WALSNDSTATE_CATCHUP);
945 : :
946 : : /* Send a CopyBothResponse message, and start streaming */
797 nathan@postgresql.or 947 : 254 : pq_beginmessage(&buf, PqMsg_CopyBothResponse);
4701 heikki.linnakangas@i 948 : 254 : pq_sendbyte(&buf, 0);
2938 andres@anarazel.de 949 : 254 : pq_sendint16(&buf, 0);
4701 heikki.linnakangas@i 950 : 254 : pq_endmessage(&buf);
951 : 254 : pq_flush();
952 : :
953 : : /*
954 : : * Don't allow a request to stream from a future point in WAL that
955 : : * hasn't been flushed to disk in this server yet.
956 : : */
4686 alvherre@alvh.no-ip. 957 [ - + ]: 254 : if (FlushPtr < cmd->startpoint)
958 : : {
4701 heikki.linnakangas@i 959 [ # # ]:UBC 0 : ereport(ERROR,
960 : : errmsg("requested starting point %X/%08X is ahead of the WAL flush position of this server %X/%08X",
961 : : LSN_FORMAT_ARGS(cmd->startpoint),
962 : : LSN_FORMAT_ARGS(FlushPtr)));
963 : : }
964 : :
965 : : /* Start streaming from the requested point */
4701 heikki.linnakangas@i 966 :CBC 254 : sentPtr = cmd->startpoint;
967 : :
968 : : /* Initialize shared memory status, too */
3041 alvherre@alvh.no-ip. 969 [ - + ]: 254 : SpinLockAcquire(&MyWalSnd->mutex);
970 : 254 : MyWalSnd->sentPtr = sentPtr;
971 : 254 : SpinLockRelease(&MyWalSnd->mutex);
972 : :
4701 heikki.linnakangas@i 973 : 254 : SyncRepInitConfig();
974 : :
975 : : /* Main loop of walsender */
976 : 254 : replication_active = true;
977 : :
4249 rhaas@postgresql.org 978 : 254 : WalSndLoop(XLogSendPhysical);
979 : :
4701 heikki.linnakangas@i 980 : 147 : replication_active = false;
3066 andres@anarazel.de 981 [ - + ]: 147 : if (got_STOPPING)
4701 heikki.linnakangas@i 982 :UBC 0 : proc_exit(0);
4701 heikki.linnakangas@i 983 :CBC 147 : WalSndSetState(WALSNDSTATE_STARTUP);
984 : :
4666 985 [ + - - + ]: 147 : Assert(streamingDoneSending && streamingDoneReceiving);
986 : : }
987 : :
4287 rhaas@postgresql.org 988 [ + + ]: 147 : if (cmd->slotname)
989 : 130 : ReplicationSlotRelease();
990 : :
991 : : /*
992 : : * Copy is finished now. Send a single-row result set indicating the next
993 : : * timeline.
994 : : */
4666 heikki.linnakangas@i 995 [ + + ]: 147 : if (sendTimeLineIsHistoric)
996 : : {
997 : : char startpos_str[8 + 1 + 8 + 1];
998 : : DestReceiver *dest;
999 : : TupOutputState *tstate;
1000 : : TupleDesc tupdesc;
1001 : : Datum values[2];
1199 peter@eisentraut.org 1002 : 13 : bool nulls[2] = {0};
1003 : :
112 alvherre@kurilemu.de 1004 :GNC 13 : snprintf(startpos_str, sizeof(startpos_str), "%X/%08X",
1707 peter@eisentraut.org 1005 :CBC 13 : LSN_FORMAT_ARGS(sendTimeLineValidUpto));
1006 : :
3190 rhaas@postgresql.org 1007 : 13 : dest = CreateDestReceiver(DestRemoteSimple);
1008 : :
1009 : : /*
1010 : : * Need a tuple descriptor representing two columns. int8 may seem
1011 : : * like a surprising data type for this, but in theory int4 would not
1012 : : * be wide enough for this, as TimeLineID is unsigned.
1013 : : */
2533 andres@anarazel.de 1014 : 13 : tupdesc = CreateTemplateTupleDesc(2);
3190 rhaas@postgresql.org 1015 : 13 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",
1016 : : INT8OID, -1, 0);
1017 : 13 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "next_tli_startpos",
1018 : : TEXTOID, -1, 0);
1019 : :
1020 : : /* prepare for projection of tuple */
2538 andres@anarazel.de 1021 : 13 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
1022 : :
3190 rhaas@postgresql.org 1023 : 13 : values[0] = Int64GetDatum((int64) sendTimeLineNextTLI);
1024 : 13 : values[1] = CStringGetTextDatum(startpos_str);
1025 : :
1026 : : /* send it to dest */
1027 : 13 : do_tup_output(tstate, values, nulls);
1028 : :
1029 : 13 : end_tup_output(tstate);
1030 : : }
1031 : :
1032 : : /* Send CommandComplete message */
1867 alvherre@alvh.no-ip. 1033 : 147 : EndReplicationCommand("START_STREAMING");
5400 magnus@hagander.net 1034 : 147 : }
1035 : :
1036 : : /*
1037 : : * XLogReaderRoutine->page_read callback for logical decoding contexts, as a
1038 : : * walsender process.
1039 : : *
1040 : : * Inside the walsender we can do better than read_local_xlog_page,
1041 : : * which has to do a plain sleep/busy loop, because the walsender's latch gets
1042 : : * set every time WAL is flushed.
1043 : : */
1044 : : static int
1631 tmunro@postgresql.or 1045 : 13031 : logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
1046 : : XLogRecPtr targetRecPtr, char *cur_page)
1047 : : {
1048 : : XLogRecPtr flushptr;
1049 : : int count;
1050 : : WALReadError errinfo;
1051 : : XLogSegNo segno;
1052 : : TimeLineID currTLI;
1053 : :
1054 : : /*
1055 : : * Make sure we have enough WAL available before retrieving the current
1056 : : * timeline.
1057 : : */
933 andres@anarazel.de 1058 : 13031 : flushptr = WalSndWaitForWal(targetPagePtr + reqLen);
1059 : :
1060 : : /* Fail if not enough (implies we are going to shut down) */
475 akapila@postgresql.o 1061 [ + + ]: 12833 : if (flushptr < targetPagePtr + reqLen)
1062 : 535 : return -1;
1063 : :
1064 : : /*
1065 : : * Since logical decoding is also permitted on a standby server, we need
1066 : : * to check if the server is in recovery to decide how to get the current
1067 : : * timeline ID (so that it also covers the promotion or timeline change
1068 : : * cases). We must determine am_cascading_walsender after waiting for the
1069 : : * required WAL so that it is correct when the walsender wakes up after a
1070 : : * promotion.
1071 : : */
933 andres@anarazel.de 1072 : 12298 : am_cascading_walsender = RecoveryInProgress();
1073 : :
1074 [ - + ]: 12298 : if (am_cascading_walsender)
933 andres@anarazel.de 1075 :UBC 0 : GetXLogReplayRecPtr(&currTLI);
1076 : : else
933 andres@anarazel.de 1077 :CBC 12298 : currTLI = GetWALInsertionTimeLine();
1078 : :
1452 rhaas@postgresql.org 1079 : 12298 : XLogReadDetermineTimeline(state, targetPagePtr, reqLen, currTLI);
1080 : 12298 : sendTimeLineIsHistoric = (state->currTLI != currTLI);
3141 simon@2ndQuadrant.co 1081 : 12298 : sendTimeLine = state->currTLI;
1082 : 12298 : sendTimeLineValidUpto = state->currTLIValidUntil;
1083 : 12298 : sendTimeLineNextTLI = state->nextTLI;
1084 : :
3041 tgl@sss.pgh.pa.us 1085 [ + + ]: 12298 : if (targetPagePtr + XLOG_BLCKSZ <= flushptr)
1086 : 10477 : count = XLOG_BLCKSZ; /* more than one block available */
1087 : : else
1088 : 1821 : count = flushptr - targetPagePtr; /* part of the page available */
1089 : :
1090 : : /* now actually read the data, we know it's there */
1631 tmunro@postgresql.or 1091 [ - + ]: 12298 : if (!WALRead(state,
1092 : : cur_page,
1093 : : targetPagePtr,
1094 : : count,
1095 : : currTLI, /* Pass the current TLI because only
1096 : : * WalSndSegmentOpen controls whether new TLI
1097 : : * is needed. */
1098 : : &errinfo))
2163 alvherre@alvh.no-ip. 1099 :UBC 0 : WALReadRaiseError(&errinfo);
1100 : :
1101 : : /*
1102 : : * After reading into the buffer, check that what we read was valid. We do
1103 : : * this after reading, because even though the segment was present when we
1104 : : * opened it, it might get recycled or removed while we read it. The
1105 : : * read() succeeds in that case, but the data we tried to read might
1106 : : * already have been overwritten with new WAL records.
1107 : : */
1993 alvherre@alvh.no-ip. 1108 :CBC 12298 : XLByteToSeg(targetPagePtr, segno, state->segcxt.ws_segsize);
1109 : 12298 : CheckXLogRemoved(segno, state->seg.ws_tli);
1110 : :
1631 tmunro@postgresql.or 1111 : 12298 : return count;
1112 : : }
1113 : :
1114 : : /*
1115 : : * Process extra options given to CREATE_REPLICATION_SLOT.
1116 : : */
1117 : : static void
3149 peter_e@gmx.net 1118 : 445 : parseCreateReplSlotOptions(CreateReplicationSlotCmd *cmd,
1119 : : bool *reserve_wal,
1120 : : CRSSnapshotAction *snapshot_action,
1121 : : bool *two_phase, bool *failover)
1122 : : {
1123 : : ListCell *lc;
1124 : 445 : bool snapshot_action_given = false;
1125 : 445 : bool reserve_wal_given = false;
1580 akapila@postgresql.o 1126 : 445 : bool two_phase_given = false;
637 1127 : 445 : bool failover_given = false;
1128 : :
1129 : : /* Parse options */
3085 bruce@momjian.us 1130 [ + + + + + + ]: 898 : foreach(lc, cmd->options)
1131 : : {
3149 peter_e@gmx.net 1132 : 453 : DefElem *defel = (DefElem *) lfirst(lc);
1133 : :
1483 rhaas@postgresql.org 1134 [ + + ]: 453 : if (strcmp(defel->defname, "snapshot") == 0)
1135 : : {
1136 : : char *action;
1137 : :
3149 peter_e@gmx.net 1138 [ + - - + ]: 311 : if (snapshot_action_given || cmd->kind != REPLICATION_KIND_LOGICAL)
3149 peter_e@gmx.net 1139 [ # # ]:UBC 0 : ereport(ERROR,
1140 : : (errcode(ERRCODE_SYNTAX_ERROR),
1141 : : errmsg("conflicting or redundant options")));
1142 : :
1483 rhaas@postgresql.org 1143 :CBC 311 : action = defGetString(defel);
3149 peter_e@gmx.net 1144 : 311 : snapshot_action_given = true;
1145 : :
1483 rhaas@postgresql.org 1146 [ + + ]: 311 : if (strcmp(action, "export") == 0)
1147 : 1 : *snapshot_action = CRS_EXPORT_SNAPSHOT;
1148 [ + + ]: 310 : else if (strcmp(action, "nothing") == 0)
1149 : 115 : *snapshot_action = CRS_NOEXPORT_SNAPSHOT;
1150 [ + - ]: 195 : else if (strcmp(action, "use") == 0)
1151 : 195 : *snapshot_action = CRS_USE_SNAPSHOT;
1152 : : else
3140 peter_e@gmx.net 1153 [ # # ]:UBC 0 : ereport(ERROR,
1154 : : (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1155 : : errmsg("unrecognized value for CREATE_REPLICATION_SLOT option \"%s\": \"%s\"",
1156 : : defel->defname, action)));
1157 : : }
3149 peter_e@gmx.net 1158 [ + + ]:CBC 142 : else if (strcmp(defel->defname, "reserve_wal") == 0)
1159 : : {
1160 [ + - - + ]: 133 : if (reserve_wal_given || cmd->kind != REPLICATION_KIND_PHYSICAL)
3149 peter_e@gmx.net 1161 [ # # ]:UBC 0 : ereport(ERROR,
1162 : : (errcode(ERRCODE_SYNTAX_ERROR),
1163 : : errmsg("conflicting or redundant options")));
1164 : :
3149 peter_e@gmx.net 1165 :CBC 133 : reserve_wal_given = true;
1483 rhaas@postgresql.org 1166 : 133 : *reserve_wal = defGetBoolean(defel);
1167 : : }
1580 akapila@postgresql.o 1168 [ + + ]: 9 : else if (strcmp(defel->defname, "two_phase") == 0)
1169 : : {
1170 [ + - - + ]: 2 : if (two_phase_given || cmd->kind != REPLICATION_KIND_LOGICAL)
1580 akapila@postgresql.o 1171 [ # # ]:UBC 0 : ereport(ERROR,
1172 : : (errcode(ERRCODE_SYNTAX_ERROR),
1173 : : errmsg("conflicting or redundant options")));
1580 akapila@postgresql.o 1174 :CBC 2 : two_phase_given = true;
1483 rhaas@postgresql.org 1175 : 2 : *two_phase = defGetBoolean(defel);
1176 : : }
637 akapila@postgresql.o 1177 [ + - ]: 7 : else if (strcmp(defel->defname, "failover") == 0)
1178 : : {
1179 [ + - - + ]: 7 : if (failover_given || cmd->kind != REPLICATION_KIND_LOGICAL)
637 akapila@postgresql.o 1180 [ # # ]:UBC 0 : ereport(ERROR,
1181 : : (errcode(ERRCODE_SYNTAX_ERROR),
1182 : : errmsg("conflicting or redundant options")));
637 akapila@postgresql.o 1183 :CBC 7 : failover_given = true;
1184 : 7 : *failover = defGetBoolean(defel);
1185 : : }
1186 : : else
3149 peter_e@gmx.net 1187 [ # # ]:UBC 0 : elog(ERROR, "unrecognized option: %s", defel->defname);
1188 : : }
3149 peter_e@gmx.net 1189 :CBC 445 : }
1190 : :
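/*
 * Editorial illustration (not part of walsender.c): replication commands
 * whose option lists are parsed by parseCreateReplSlotOptions() above.  They
 * would be sent over a replication connection (e.g. with PQexec()); the slot
 * and plugin names are placeholders.
 */
#ifdef CREATE_SLOT_EXAMPLE
static const char *const create_slot_examples[] = {
	/* physical slot, reserving WAL immediately (the reserve_wal option) */
	"CREATE_REPLICATION_SLOT \"phys_slot\" PHYSICAL (RESERVE_WAL)",
	/* logical slot: no exported snapshot, two_phase and failover enabled */
	"CREATE_REPLICATION_SLOT \"logical_slot\" LOGICAL \"pgoutput\" "
	"(SNAPSHOT 'nothing', TWO_PHASE, FAILOVER)",
};
#endif							/* CREATE_SLOT_EXAMPLE */
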
1191 : : /*
1192 : : * Create a new replication slot.
1193 : : */
1194 : : static void
4287 rhaas@postgresql.org 1195 : 445 : CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
1196 : : {
4249 1197 : 445 : const char *snapshot_name = NULL;
1198 : : char xloc[MAXFNAMELEN];
1199 : : char *slot_name;
3149 peter_e@gmx.net 1200 : 445 : bool reserve_wal = false;
1580 akapila@postgresql.o 1201 : 445 : bool two_phase = false;
637 1202 : 445 : bool failover = false;
3140 peter_e@gmx.net 1203 : 445 : CRSSnapshotAction snapshot_action = CRS_EXPORT_SNAPSHOT;
1204 : : DestReceiver *dest;
1205 : : TupOutputState *tstate;
1206 : : TupleDesc tupdesc;
1207 : : Datum values[4];
1199 peter@eisentraut.org 1208 : 445 : bool nulls[4] = {0};
1209 : :
4287 rhaas@postgresql.org 1210 [ - + ]: 445 : Assert(!MyReplicationSlot);
1211 : :
637 akapila@postgresql.o 1212 : 445 : parseCreateReplSlotOptions(cmd, &reserve_wal, &snapshot_action, &two_phase,
1213 : : &failover);
1214 : :
4249 rhaas@postgresql.org 1215 [ + + ]: 445 : if (cmd->kind == REPLICATION_KIND_PHYSICAL)
1216 : : {
3245 peter_e@gmx.net 1217 : 134 : ReplicationSlotCreate(cmd->slotname, false,
1699 akapila@postgresql.o 1218 [ + + ]: 134 : cmd->temporary ? RS_TEMPORARY : RS_PERSISTENT,
1219 : : false, false, false);
1220 : :
706 michael@paquier.xyz 1221 [ + + ]: 133 : if (reserve_wal)
1222 : : {
1223 : 132 : ReplicationSlotReserveWal();
1224 : :
1225 : 132 : ReplicationSlotMarkDirty();
1226 : :
1227 : : /* Write this slot to disk if it's a permanent one. */
1228 [ + + ]: 132 : if (!cmd->temporary)
1229 : 3 : ReplicationSlotSave();
1230 : : }
1231 : : }
1232 : : else
1233 : : {
1234 : : LogicalDecodingContext *ctx;
1235 : 311 : bool need_full_snapshot = false;
1236 : :
1237 [ - + ]: 311 : Assert(cmd->kind == REPLICATION_KIND_LOGICAL);
1238 : :
4249 rhaas@postgresql.org 1239 : 311 : CheckLogicalDecodingRequirements();
1240 : :
1241 : : /*
1242 : : * Initially create persistent slot as ephemeral - that allows us to
1243 : : * nicely handle errors during initialization because it'll get
1244 : : * dropped if this transaction fails. We'll make it persistent at the
1245 : : * end. Temporary slots can be created as temporary from beginning as
1246 : : * they get dropped on error as well.
1247 : : */
3245 peter_e@gmx.net 1248 : 311 : ReplicationSlotCreate(cmd->slotname, true,
1699 akapila@postgresql.o 1249 [ - + ]: 311 : cmd->temporary ? RS_TEMPORARY : RS_EPHEMERAL,
1250 : : two_phase, failover, false);
1251 : :
1252 : : /*
1253 : : * Do the options check early so that we can bail out before calling
1254 : : * DecodingContextFindStartpoint, which can take a long time.
1255 : : */
3140 peter_e@gmx.net 1256 [ + + ]: 311 : if (snapshot_action == CRS_EXPORT_SNAPSHOT)
1257 : : {
1258 [ - + ]: 1 : if (IsTransactionBlock())
3140 peter_e@gmx.net 1259 [ # # ]:UBC 0 : ereport(ERROR,
1260 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1261 : : (errmsg("%s must not be called inside a transaction",
1262 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'export')")));
1263 : :
3105 andres@anarazel.de 1264 :CBC 1 : need_full_snapshot = true;
1265 : : }
3140 peter_e@gmx.net 1266 [ + + ]: 310 : else if (snapshot_action == CRS_USE_SNAPSHOT)
1267 : : {
1268 [ - + ]: 195 : if (!IsTransactionBlock())
3140 peter_e@gmx.net 1269 [ # # ]:UBC 0 : ereport(ERROR,
1270 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1271 : : (errmsg("%s must be called inside a transaction",
1272 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1273 : :
3140 peter_e@gmx.net 1274 [ - + ]:CBC 195 : if (XactIsoLevel != XACT_REPEATABLE_READ)
3140 peter_e@gmx.net 1275 [ # # ]:UBC 0 : ereport(ERROR,
1276 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1277 : : (errmsg("%s must be called in REPEATABLE READ isolation mode transaction",
1278 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1071 akapila@postgresql.o 1279 [ - + ]:CBC 195 : if (!XactReadOnly)
1071 akapila@postgresql.o 1280 [ # # ]:UBC 0 : ereport(ERROR,
1281 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1282 : : (errmsg("%s must be called in a read-only transaction",
1283 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1284 : :
3140 peter_e@gmx.net 1285 [ - + ]:CBC 195 : if (FirstSnapshotSet)
3140 peter_e@gmx.net 1286 [ # # ]:UBC 0 : ereport(ERROR,
1287 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1288 : : (errmsg("%s must be called before any query",
1289 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1290 : :
3140 peter_e@gmx.net 1291 [ - + ]:CBC 195 : if (IsSubTransaction())
3140 peter_e@gmx.net 1292 [ # # ]:UBC 0 : ereport(ERROR,
1293 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1294 : : (errmsg("%s must not be called in a subtransaction",
1295 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1296 : :
3105 andres@anarazel.de 1297 :CBC 195 : need_full_snapshot = true;
1298 : : }
1299 : :
1300 : 311 : ctx = CreateInitDecodingContext(cmd->plugin, NIL, need_full_snapshot,
1301 : : InvalidXLogRecPtr,
1631 tmunro@postgresql.or 1302 : 311 : XL_ROUTINE(.page_read = logical_read_xlog_page,
1303 : : .segment_open = WalSndSegmentOpen,
1304 : : .segment_close = wal_segment_close),
1305 : : WalSndPrepareWrite, WalSndWriteData,
1306 : : WalSndUpdateProgress);
1307 : :
1308 : : /*
1309 : : * Signal that we don't need the timeout mechanism. We're just
1310 : : * creating the replication slot and don't yet accept feedback
1311 : :                  * messages or send keepalives. As we may need to wait for
1312 : :                  * further WAL, the walsender could otherwise be killed too
1313 : :                  * soon.
1314 : : */
4169 andres@anarazel.de 1315 : 311 : last_reply_timestamp = 0;
1316 : :
1317 : : /* build initial snapshot, might take a while */
4249 rhaas@postgresql.org 1318 : 311 : DecodingContextFindStartpoint(ctx);
1319 : :
1320 : : /*
1321 : : * Export or use the snapshot if we've been asked to do so.
1322 : : *
1323 : :                  * NB: we convert the snapbuild.c kind of snapshot into a normal
1324 : :                  * snapshot when doing this.
1325 : : */
3140 peter_e@gmx.net 1326 [ + + ]: 311 : if (snapshot_action == CRS_EXPORT_SNAPSHOT)
1327 : : {
3149 1328 : 1 : snapshot_name = SnapBuildExportSnapshot(ctx->snapshot_builder);
1329 : : }
3140 1330 [ + + ]: 310 : else if (snapshot_action == CRS_USE_SNAPSHOT)
1331 : : {
1332 : : Snapshot snap;
1333 : :
3137 tgl@sss.pgh.pa.us 1334 : 195 : snap = SnapBuildInitialSnapshot(ctx->snapshot_builder);
3140 peter_e@gmx.net 1335 : 195 : RestoreTransactionSnapshot(snap, MyProc);
1336 : : }
1337 : :
1338 : : /* don't need the decoding context anymore */
4249 rhaas@postgresql.org 1339 : 311 : FreeDecodingContext(ctx);
1340 : :
3245 peter_e@gmx.net 1341 [ + - ]: 311 : if (!cmd->temporary)
1342 : 311 : ReplicationSlotPersist();
1343 : : }
1344 : :
112 alvherre@kurilemu.de 1345 :GNC 444 : snprintf(xloc, sizeof(xloc), "%X/%08X",
1707 peter@eisentraut.org 1346 :CBC 444 : LSN_FORMAT_ARGS(MyReplicationSlot->data.confirmed_flush));
1347 : :
3190 rhaas@postgresql.org 1348 : 444 : dest = CreateDestReceiver(DestRemoteSimple);
1349 : :
1350 : : /*----------
1351 : : * Need a tuple descriptor representing four columns:
1352 : : * - first field: the slot name
1353 : : * - second field: LSN at which we became consistent
1354 : : * - third field: exported snapshot's name
1355 : : * - fourth field: output plugin
1356 : : */
2533 andres@anarazel.de 1357 : 444 : tupdesc = CreateTemplateTupleDesc(4);
3190 rhaas@postgresql.org 1358 : 444 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "slot_name",
1359 : : TEXTOID, -1, 0);
1360 : 444 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "consistent_point",
1361 : : TEXTOID, -1, 0);
1362 : 444 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "snapshot_name",
1363 : : TEXTOID, -1, 0);
1364 : 444 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 4, "output_plugin",
1365 : : TEXTOID, -1, 0);
1366 : :
1367 : : /* prepare for projection of tuples */
2538 andres@anarazel.de 1368 : 444 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
1369 : :
1370 : : /* slot_name */
3190 rhaas@postgresql.org 1371 : 444 : slot_name = NameStr(MyReplicationSlot->data.name);
1372 : 444 : values[0] = CStringGetTextDatum(slot_name);
1373 : :
1374 : : /* consistent wal location */
3090 peter_e@gmx.net 1375 : 444 : values[1] = CStringGetTextDatum(xloc);
1376 : :
1377 : : /* snapshot name, or NULL if none */
4249 rhaas@postgresql.org 1378 [ + + ]: 444 : if (snapshot_name != NULL)
3190 1379 : 1 : values[2] = CStringGetTextDatum(snapshot_name);
1380 : : else
1381 : 443 : nulls[2] = true;
1382 : :
1383 : : /* plugin, or NULL if none */
4249 1384 [ + + ]: 444 : if (cmd->plugin != NULL)
3190 1385 : 311 : values[3] = CStringGetTextDatum(cmd->plugin);
1386 : : else
1387 : 133 : nulls[3] = true;
1388 : :
1389 : : /* send it to dest */
1390 : 444 : do_tup_output(tstate, values, nulls);
1391 : 444 : end_tup_output(tstate);
1392 : :
4287 1393 : 444 : ReplicationSlotRelease();
1394 : 444 : }
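
/*
 * Illustrative client-side sketch (not part of walsender.c): reading the
 * four-column CREATE_REPLICATION_SLOT result projected above with libpq.
 * The connection string, slot name, and output plugin are assumptions made
 * up for the example; a real client would substitute its own.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	/* "replication=database" requests the logical replication protocol */
	PGconn	   *conn = PQconnectdb("dbname=postgres replication=database");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	res = PQexec(conn, "CREATE_REPLICATION_SLOT demo_slot LOGICAL pgoutput");
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "command failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return 1;
	}

	/* Columns, in order: slot_name, consistent_point, snapshot_name, output_plugin */
	printf("slot_name        = %s\n", PQgetvalue(res, 0, 0));
	printf("consistent_point = %s\n", PQgetvalue(res, 0, 1));
	printf("snapshot_name    = %s\n",
		   PQgetisnull(res, 0, 2) ? "(null)" : PQgetvalue(res, 0, 2));
	printf("output_plugin    = %s\n", PQgetvalue(res, 0, 3));

	PQclear(res);
	PQfinish(conn);
	return 0;
}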
1395 : :
1396 : : /*
1397 : : * Get rid of a replication slot that is no longer wanted.
1398 : : */
1399 : : static void
1400 : 268 : DropReplicationSlot(DropReplicationSlotCmd *cmd)
1401 : : {
2978 alvherre@alvh.no-ip. 1402 : 268 : ReplicationSlotDrop(cmd->slotname, !cmd->wait);
4287 rhaas@postgresql.org 1403 : 266 : }
1404 : :
1405 : : /*
1406 : : * Change the definition of a replication slot.
1407 : : */
1408 : : static void
460 akapila@postgresql.o 1409 : 6 : AlterReplicationSlot(AlterReplicationSlotCmd *cmd)
1410 : : {
637 1411 : 6 : bool failover_given = false;
460 1412 : 6 : bool two_phase_given = false;
1413 : : bool failover;
1414 : : bool two_phase;
1415 : :
1416 : : /* Parse options */
637 1417 [ + - + + : 18 : foreach_ptr(DefElem, defel, cmd->options)
+ + ]
1418 : : {
1419 [ + + ]: 6 : if (strcmp(defel->defname, "failover") == 0)
1420 : : {
1421 [ - + ]: 5 : if (failover_given)
637 akapila@postgresql.o 1422 [ # # ]:UBC 0 : ereport(ERROR,
1423 : : (errcode(ERRCODE_SYNTAX_ERROR),
1424 : : errmsg("conflicting or redundant options")));
637 akapila@postgresql.o 1425 :CBC 5 : failover_given = true;
460 1426 : 5 : failover = defGetBoolean(defel);
1427 : : }
1428 [ + - ]: 1 : else if (strcmp(defel->defname, "two_phase") == 0)
1429 : : {
1430 [ - + ]: 1 : if (two_phase_given)
460 akapila@postgresql.o 1431 [ # # ]:UBC 0 : ereport(ERROR,
1432 : : (errcode(ERRCODE_SYNTAX_ERROR),
1433 : : errmsg("conflicting or redundant options")));
460 akapila@postgresql.o 1434 :CBC 1 : two_phase_given = true;
1435 : 1 : two_phase = defGetBoolean(defel);
1436 : : }
1437 : : else
637 akapila@postgresql.o 1438 [ # # ]:UBC 0 : elog(ERROR, "unrecognized option: %s", defel->defname);
1439 : : }
1440 : :
460 akapila@postgresql.o 1441 [ + + + + ]:CBC 6 : ReplicationSlotAlter(cmd->slotname,
1442 : : failover_given ? &failover : NULL,
1443 : : two_phase_given ? &two_phase : NULL);
637 1444 : 5 : }
1445 : :
1446 : : /*
1447 : :  * Load a previously initiated logical slot and prepare for sending data (via
1448 : : * WalSndLoop).
1449 : : */
1450 : : static void
4249 rhaas@postgresql.org 1451 : 402 : StartLogicalReplication(StartReplicationCmd *cmd)
1452 : : {
1453 : : StringInfoData buf;
1454 : : QueryCompletion qc;
1455 : :
1456 : : /* make sure that our requirements are still fulfilled */
1457 : 402 : CheckLogicalDecodingRequirements();
1458 : :
1459 [ - + ]: 401 : Assert(!MyReplicationSlot);
1460 : :
269 akapila@postgresql.o 1461 : 401 : ReplicationSlotAcquire(cmd->slotname, true, true);
1462 : :
1463 : : /*
1464 : : * Force a disconnect, so that the decoding code doesn't need to care
1465 : :          * about an eventual switch from running in recovery to running in a
1466 : : * normal environment. Client code is expected to handle reconnects.
1467 : : */
4249 rhaas@postgresql.org 1468 [ - + - - ]: 401 : if (am_cascading_walsender && !RecoveryInProgress())
1469 : : {
4249 rhaas@postgresql.org 1470 [ # # ]:UBC 0 : ereport(LOG,
1471 : : (errmsg("terminating walsender process after promotion")));
3066 andres@anarazel.de 1472 : 0 : got_STOPPING = true;
1473 : : }
1474 : :
1475 : : /*
1476 : : * Create our decoding context, making it start at the previously ack'ed
1477 : : * position.
1478 : : *
1479 : : * Do this before sending a CopyBothResponse message, so that any errors
1480 : : * are reported early.
1481 : : */
2644 alvherre@alvh.no-ip. 1482 :CBC 400 : logical_decoding_ctx =
1483 : 401 : CreateDecodingContext(cmd->startpoint, cmd->options, false,
1631 tmunro@postgresql.or 1484 : 401 : XL_ROUTINE(.page_read = logical_read_xlog_page,
1485 : : .segment_open = WalSndSegmentOpen,
1486 : : .segment_close = wal_segment_close),
1487 : : WalSndPrepareWrite, WalSndWriteData,
1488 : : WalSndUpdateProgress);
1993 alvherre@alvh.no-ip. 1489 : 400 : xlogreader = logical_decoding_ctx->reader;
1490 : :
4249 rhaas@postgresql.org 1491 : 400 : WalSndSetState(WALSNDSTATE_CATCHUP);
1492 : :
1493 : : /* Send a CopyBothResponse message, and start streaming */
797 nathan@postgresql.or 1494 : 400 : pq_beginmessage(&buf, PqMsg_CopyBothResponse);
4249 rhaas@postgresql.org 1495 : 400 : pq_sendbyte(&buf, 0);
2938 andres@anarazel.de 1496 : 400 : pq_sendint16(&buf, 0);
4249 rhaas@postgresql.org 1497 : 400 : pq_endmessage(&buf);
1498 : 400 : pq_flush();
1499 : :
1500 : : /* Start reading WAL from the oldest required WAL. */
2101 heikki.linnakangas@i 1501 : 400 : XLogBeginRead(logical_decoding_ctx->reader,
1502 : 400 : MyReplicationSlot->data.restart_lsn);
1503 : :
1504 : : /*
1505 : : * Report the location after which we'll send out further commits as the
1506 : : * current sentPtr.
1507 : : */
4249 rhaas@postgresql.org 1508 : 400 : sentPtr = MyReplicationSlot->data.confirmed_flush;
1509 : :
1510 : : /* Also update the sent position status in shared memory */
3041 alvherre@alvh.no-ip. 1511 [ - + ]: 400 : SpinLockAcquire(&MyWalSnd->mutex);
1512 : 400 : MyWalSnd->sentPtr = MyReplicationSlot->data.restart_lsn;
1513 : 400 : SpinLockRelease(&MyWalSnd->mutex);
1514 : :
4249 rhaas@postgresql.org 1515 : 400 : replication_active = true;
1516 : :
1517 : 400 : SyncRepInitConfig();
1518 : :
1519 : : /* Main loop of walsender */
1520 : 400 : WalSndLoop(XLogSendLogical);
1521 : :
1522 : 188 : FreeDecodingContext(logical_decoding_ctx);
1523 : 188 : ReplicationSlotRelease();
1524 : :
1525 : 188 : replication_active = false;
3066 andres@anarazel.de 1526 [ - + ]: 188 : if (got_STOPPING)
4249 rhaas@postgresql.org 1527 :UBC 0 : proc_exit(0);
4249 rhaas@postgresql.org 1528 :CBC 188 : WalSndSetState(WALSNDSTATE_STARTUP);
1529 : :
1530 : : /* Get out of COPY mode (CommandComplete). */
2065 alvherre@alvh.no-ip. 1531 : 188 : SetQueryCompletion(&qc, CMDTAG_COPY, 0);
1532 : 188 : EndCommand(&qc, DestRemote, false);
4249 rhaas@postgresql.org 1533 : 188 : }
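
/*
 * Illustrative client-side sketch (an assumption, not backend code): issuing
 * START_REPLICATION on an existing logical slot and consuming the CopyData
 * stream that StartLogicalReplication() serves.  The slot name and start LSN
 * are placeholders, and error handling is minimal.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres replication=database");
	PGresult   *res;
	char	   *buf;
	int			len;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		PQfinish(conn);
		return 1;
	}

	/* The walsender answers with CopyBothResponse before streaming begins */
	res = PQexec(conn, "START_REPLICATION SLOT demo_slot LOGICAL 0/0");
	if (PQresultStatus(res) != PGRES_COPY_BOTH)
	{
		fprintf(stderr, "START_REPLICATION failed: %s", PQerrorMessage(conn));
		PQclear(res);
		PQfinish(conn);
		return 1;
	}
	PQclear(res);

	/* Each CopyData payload starts with 'w' (WAL data) or 'k' (keepalive) */
	while ((len = PQgetCopyData(conn, &buf, 0)) > 0)
	{
		printf("received %d-byte '%c' message\n", len, buf[0]);
		PQfreemem(buf);
	}

	PQfinish(conn);
	return 0;
}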
1534 : :
1535 : : /*
1536 : : * LogicalDecodingContext 'prepare_write' callback.
1537 : : *
1538 : : * Prepare a write into a StringInfo.
1539 : : *
1540 : :  * Don't do anything lasting in here; it's quite possible that nothing
1541 : :  * will be done with the data.
1542 : : */
1543 : : static void
1544 : 185071 : WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write)
1545 : : {
1546 : : /* can't have sync rep confused by sending the same LSN several times */
1547 [ + + ]: 185071 : if (!last_write)
1548 : 405 : lsn = InvalidXLogRecPtr;
1549 : :
1550 : 185071 : resetStringInfo(ctx->out);
1551 : :
82 nathan@postgresql.or 1552 :GNC 185071 : pq_sendbyte(ctx->out, PqReplMsg_WALData);
4249 rhaas@postgresql.org 1553 :CBC 185071 : pq_sendint64(ctx->out, lsn); /* dataStart */
1554 : 185071 : pq_sendint64(ctx->out, lsn); /* walEnd */
1555 : :
1556 : : /*
1557 : : * Fill out the sendtime later, just as it's done in XLogSendPhysical, but
1558 : : * reserve space here.
1559 : : */
4192 bruce@momjian.us 1560 : 185071 : pq_sendint64(ctx->out, 0); /* sendtime */
4249 rhaas@postgresql.org 1561 : 185071 : }
1562 : :
1563 : : /*
1564 : : * LogicalDecodingContext 'write' callback.
1565 : : *
1566 : : * Actually write out data previously prepared by WalSndPrepareWrite out to
1567 : : * the network. Take as long as needed, but process replies from the other
1568 : : * side and check timeouts during that.
1569 : : */
1570 : : static void
1571 : 185071 : WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
1572 : : bool last_write)
1573 : : {
1574 : : TimestampTz now;
1575 : :
1576 : : /*
1577 : : * Fill the send timestamp last, so that it is taken as late as possible.
1578 : :          * This is somewhat ugly, but the protocol is fixed, as it has already
1579 : :          * been used by streaming physical replication for several releases.
1580 : : */
1581 : 185071 : resetStringInfo(&tmpbuf);
2874 andrew@dunslane.net 1582 : 185071 : now = GetCurrentTimestamp();
1583 : 185071 : pq_sendint64(&tmpbuf, now);
4249 rhaas@postgresql.org 1584 : 185071 : memcpy(&ctx->out->data[1 + sizeof(int64) + sizeof(int64)],
1585 : 185071 : tmpbuf.data, sizeof(int64));
1586 : :
1587 : : /* output previously gathered data in a CopyData packet */
96 nathan@postgresql.or 1588 :GNC 185071 : pq_putmessage_noblock(PqMsg_CopyData, ctx->out->data, ctx->out->len);
1589 : :
2874 andrew@dunslane.net 1590 [ - + ]:CBC 185071 : CHECK_FOR_INTERRUPTS();
1591 : :
1592 : : /* Try to flush pending output to the client */
4249 rhaas@postgresql.org 1593 [ + + ]: 185071 : if (pq_flush_if_writable() != 0)
1594 : 1 : WalSndShutdown();
1595 : :
1596 : : /* Try taking fast path unless we get too close to walsender timeout. */
2874 andrew@dunslane.net 1597 [ + - ]: 185070 : if (now < TimestampTzPlusMilliseconds(last_reply_timestamp,
1598 : 185070 : wal_sender_timeout / 2) &&
1599 [ + + ]: 185070 : !pq_is_send_pending())
1600 : : {
4249 rhaas@postgresql.org 1601 : 184736 : return;
1602 : : }
1603 : :
1604 : : /* If we have pending write here, go to slow path */
1307 akapila@postgresql.o 1605 : 334 : ProcessPendingWrites();
1606 : : }
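
/*
 * Receiver-side sketch (hypothetical, for illustration only) of the fixed
 * header that WalSndPrepareWrite() lays out above: one type byte followed by
 * three big-endian int64 fields - dataStart, walEnd and sendtime.  The
 * sendtime bytes sit at offset 1 + 8 + 8, which is exactly where
 * WalSndWriteData() patches them in just before the message goes out.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct WalDataHeader
{
	char		type;			/* 'w' (PqReplMsg_WALData) */
	uint64_t	dataStart;		/* LSN of the start of the payload */
	uint64_t	walEnd;			/* sender's current end of WAL */
	int64_t		sendtime;		/* sender timestamp, filled in last */
} WalDataHeader;

static uint64_t
get_uint64_be(const char *p)
{
	uint64_t	v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | (uint8_t) p[i];
	return v;
}

/* Returns the payload offset (25) on success, or -1 on a malformed buffer. */
static int
parse_waldata_header(const char *buf, size_t len, WalDataHeader *hdr)
{
	if (len < 1 + 8 + 8 + 8 || buf[0] != 'w')
		return -1;

	hdr->type = buf[0];
	hdr->dataStart = get_uint64_be(buf + 1);
	hdr->walEnd = get_uint64_be(buf + 1 + 8);
	hdr->sendtime = (int64_t) get_uint64_be(buf + 1 + 8 + 8);
	return 1 + 8 + 8 + 8;
}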
1607 : :
1608 : : /*
1609 : : * Wait until there is no pending write. Also process replies from the other
1610 : : * side and check timeouts during that.
1611 : : */
1612 : : static void
1613 : 334 : ProcessPendingWrites(void)
1614 : : {
1615 : : for (;;)
4249 rhaas@postgresql.org 1616 : 434 : {
1617 : : long sleeptime;
1618 : :
1619 : : /* Check for input from the client */
2874 andrew@dunslane.net 1620 : 768 : ProcessRepliesIfAny();
1621 : :
1622 : : /* die if timeout was reached */
2614 noah@leadboat.com 1623 : 768 : WalSndCheckTimeOut();
1624 : :
1625 : : /* Send keepalive if the time has come */
1626 : 768 : WalSndKeepaliveIfNecessary();
1627 : :
2874 andrew@dunslane.net 1628 [ + + ]: 768 : if (!pq_is_send_pending())
1629 : 334 : break;
1630 : :
2614 noah@leadboat.com 1631 : 434 : sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
1632 : :
1633 : : /* Sleep until something happens or we time out */
1701 tmunro@postgresql.or 1634 : 434 : WalSndWait(WL_SOCKET_WRITEABLE | WL_SOCKET_READABLE, sleeptime,
1635 : : WAIT_EVENT_WAL_SENDER_WRITE_DATA);
1636 : :
1637 : : /* Clear any already-pending wakeups */
3936 andres@anarazel.de 1638 : 434 : ResetLatch(MyLatch);
1639 : :
1640 [ - + ]: 434 : CHECK_FOR_INTERRUPTS();
1641 : :
1642 : : /* Process any requests or signals received recently */
3066 1643 [ - + ]: 434 : if (ConfigReloadPending)
1644 : : {
3066 andres@anarazel.de 1645 :UBC 0 : ConfigReloadPending = false;
4249 rhaas@postgresql.org 1646 : 0 : ProcessConfigFile(PGC_SIGHUP);
1647 : 0 : SyncRepInitConfig();
1648 : : }
1649 : :
1650 : : /* Try to flush pending output to the client */
4249 rhaas@postgresql.org 1651 [ - + ]:CBC 434 : if (pq_flush_if_writable() != 0)
4249 rhaas@postgresql.org 1652 :UBC 0 : WalSndShutdown();
1653 : : }
1654 : :
1655 : : /* reactivate latch so WalSndLoop knows to continue */
3936 andres@anarazel.de 1656 :CBC 334 : SetLatch(MyLatch);
4249 rhaas@postgresql.org 1657 : 334 : }
1658 : :
1659 : : /*
1660 : : * LogicalDecodingContext 'update_progress' callback.
1661 : : *
1662 : : * Write the current position to the lag tracker (see XLogSendPhysical).
1663 : : *
1664 : : * When skipping empty transactions, send a keepalive message if necessary.
1665 : : */
1666 : : static void
1307 akapila@postgresql.o 1667 : 2646 : WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
1668 : : bool skipped_xact)
1669 : : {
1670 : : static TimestampTz sendTime = 0;
3090 simon@2ndQuadrant.co 1671 : 2646 : TimestampTz now = GetCurrentTimestamp();
1265 akapila@postgresql.o 1672 : 2646 : bool pending_writes = false;
1673 : 2646 : bool end_xact = ctx->end_xact;
1674 : :
1675 : : /*
1676 : : * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
1677 : : * avoid flooding the lag tracker when we commit frequently.
1678 : : *
1679 : :          * We don't have a mechanism to get an ack from the downstream for any
1680 : :          * LSN other than the end-of-transaction LSN, so we track lag only for
1681 : :          * the end-of-transaction LSN.
1682 : : */
1683 : : #define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000
1684 [ + + + + ]: 2646 : if (end_xact && TimestampDifferenceExceeds(sendTime, now,
1685 : : WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS))
1686 : : {
1307 1687 : 264 : LagTrackerWrite(lsn, now);
1688 : 264 : sendTime = now;
1689 : : }
1690 : :
1691 : : /*
1692 : : * When skipping empty transactions in synchronous replication, we send a
1693 : : * keepalive message to avoid delaying such transactions.
1694 : : *
1695 : :          * It is okay to check sync_standbys_status without a lock here: in the
1696 : :          * worst case we will just send an extra keepalive message when it is
1697 : :          * not really required.
1698 : : */
1699 [ + + ]: 2646 : if (skipped_xact &&
1700 [ + - + - ]: 549 : SyncRepRequested() &&
199 michael@paquier.xyz 1701 [ - + ]: 549 : (((volatile WalSndCtlData *) WalSndCtl)->sync_standbys_status & SYNC_STANDBY_DEFINED))
1702 : : {
1307 akapila@postgresql.o 1703 :UBC 0 : WalSndKeepalive(false, lsn);
1704 : :
1705 : : /* Try to flush pending output to the client */
1706 [ # # ]: 0 : if (pq_flush_if_writable() != 0)
1707 : 0 : WalSndShutdown();
1708 : :
1709 : : /* If we have pending write here, make sure it's actually flushed */
1710 [ # # ]: 0 : if (pq_is_send_pending())
1265 1711 : 0 : pending_writes = true;
1712 : : }
1713 : :
1714 : : /*
1715 : :          * Process pending writes, if any, or try to send a keepalive if required.
1716 : :          * We don't need to try sending keepalive messages at the transaction end,
1717 : :          * as that will be done at a later point in time. This is required only
1718 : :          * for large transactions where we don't send any changes to the
1719 : :          * downstream, so the receiver could otherwise time out.
1720 : : */
1265 akapila@postgresql.o 1721 [ + - + + ]:CBC 2646 : if (pending_writes || (!end_xact &&
1722 [ - + ]: 1538 : now >= TimestampTzPlusMilliseconds(last_reply_timestamp,
1723 : : wal_sender_timeout / 2)))
1265 akapila@postgresql.o 1724 :UBC 0 : ProcessPendingWrites();
3090 simon@2ndQuadrant.co 1725 :CBC 2646 : }
1726 : :
1727 : : /*
1728 : : * Wake up the logical walsender processes with logical failover slots if the
1729 : : * currently acquired physical slot is specified in synchronized_standby_slots GUC.
1730 : : */
1731 : : void
598 akapila@postgresql.o 1732 : 38755 : PhysicalWakeupLogicalWalSnd(void)
1733 : : {
1734 [ + - - + ]: 38755 : Assert(MyReplicationSlot && SlotIsPhysical(MyReplicationSlot));
1735 : :
1736 : : /*
1737 : : * If we are running in a standby, there is no need to wake up walsenders.
1738 : : * This is because we do not support syncing slots to cascading standbys,
1739 : : * so, there are no walsenders waiting for standbys to catch up.
1740 : : */
1741 [ + + ]: 38755 : if (RecoveryInProgress())
1742 : 55 : return;
1743 : :
483 1744 [ + + ]: 38700 : if (SlotExistsInSyncStandbySlots(NameStr(MyReplicationSlot->data.name)))
598 1745 : 6 : ConditionVariableBroadcast(&WalSndCtl->wal_confirm_rcv_cv);
1746 : : }
1747 : :
1748 : : /*
1749 : : * Returns true if not all standbys have caught up to the flushed position
1750 : :  * (flushed_lsn) when the currently acquired slot is a logical failover
1751 : : * slot and we are streaming; otherwise, returns false.
1752 : : *
1753 : : * If returning true, the function sets the appropriate wait event in
1754 : : * wait_event; otherwise, wait_event is set to 0.
1755 : : */
1756 : : static bool
1757 : 12691 : NeedToWaitForStandbys(XLogRecPtr flushed_lsn, uint32 *wait_event)
1758 : : {
1759 [ + + ]: 12691 : int elevel = got_STOPPING ? ERROR : WARNING;
1760 : : bool failover_slot;
1761 : :
1762 [ + + + + ]: 12691 : failover_slot = (replication_active && MyReplicationSlot->data.failover);
1763 : :
1764 : : /*
1765 : : * Note that after receiving the shutdown signal, an ERROR is reported if
1766 : : * any slots are dropped, invalidated, or inactive. This measure is taken
1767 : : * to prevent the walsender from waiting indefinitely.
1768 : : */
1769 [ + + + + ]: 12691 : if (failover_slot && !StandbySlotsHaveCaughtup(flushed_lsn, elevel))
1770 : : {
1771 : 10 : *wait_event = WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION;
1772 : 10 : return true;
1773 : : }
1774 : :
1775 : 12681 : *wait_event = 0;
1776 : 12681 : return false;
1777 : : }
1778 : :
1779 : : /*
1780 : : * Returns true if we need to wait for WALs to be flushed to disk, or if not
1781 : : * all standbys have caught up to the flushed position (flushed_lsn) when the
1782 : :  * currently acquired slot is a logical failover slot and we are
1783 : : * streaming; otherwise, returns false.
1784 : : *
1785 : : * If returning true, the function sets the appropriate wait event in
1786 : : * wait_event; otherwise, wait_event is set to 0.
1787 : : */
1788 : : static bool
1789 : 17335 : NeedToWaitForWal(XLogRecPtr target_lsn, XLogRecPtr flushed_lsn,
1790 : : uint32 *wait_event)
1791 : : {
1792 : : /* Check if we need to wait for WALs to be flushed to disk */
1793 [ + + ]: 17335 : if (target_lsn > flushed_lsn)
1794 : : {
1795 : 5028 : *wait_event = WAIT_EVENT_WAL_SENDER_WAIT_FOR_WAL;
1796 : 5028 : return true;
1797 : : }
1798 : :
1799 : : /* Check if the standby slots have caught up to the flushed position */
1800 : 12307 : return NeedToWaitForStandbys(flushed_lsn, wait_event);
1801 : : }
1802 : :
1803 : : /*
1804 : :  * Wait till WAL < loc is flushed to disk so it can be safely sent to the client.
1805 : : *
1806 : : * If the walsender holds a logical failover slot, we also wait for all the
1807 : : * specified streaming replication standby servers to confirm receipt of WAL
1808 : : * up to RecentFlushPtr. It is beneficial to wait here for the confirmation
1809 : : * up to RecentFlushPtr rather than waiting before transmitting each change
1810 : : * to logical subscribers, which is already covered by RecentFlushPtr.
1811 : : *
1812 : : * Returns end LSN of flushed WAL. Normally this will be >= loc, but if we
1813 : : * detect a shutdown request (either from postmaster or client) we will return
1814 : : * early, so caller must always check.
1815 : : */
1816 : : static XLogRecPtr
4249 rhaas@postgresql.org 1817 : 13031 : WalSndWaitForWal(XLogRecPtr loc)
1818 : : {
1819 : : int wakeEvents;
598 akapila@postgresql.o 1820 : 13031 : uint32 wait_event = 0;
1821 : : static XLogRecPtr RecentFlushPtr = InvalidXLogRecPtr;
202 michael@paquier.xyz 1822 : 13031 : TimestampTz last_flush = 0;
1823 : :
1824 : : /*
1825 : : * Fast path to avoid acquiring the spinlock in case we already know we
1826 : : * have enough WAL available and all the standby servers have confirmed
1827 : : * receipt of WAL up to RecentFlushPtr. This is particularly interesting
1828 : : * if we're far behind.
1829 : : */
598 akapila@postgresql.o 1830 [ + + ]: 13031 : if (!XLogRecPtrIsInvalid(RecentFlushPtr) &&
1831 [ + + ]: 12503 : !NeedToWaitForWal(loc, RecentFlushPtr, &wait_event))
4249 rhaas@postgresql.org 1832 : 10569 : return RecentFlushPtr;
1833 : :
1834 : : /*
1835 : : * Within the loop, we wait for the necessary WALs to be flushed to disk
1836 : : * first, followed by waiting for standbys to catch up if there are enough
1837 : : * WALs (see NeedToWaitForWal()) or upon receiving the shutdown signal.
1838 : : */
1839 : : for (;;)
1840 : 2952 : {
598 akapila@postgresql.o 1841 : 5414 : bool wait_for_standby_at_stop = false;
1842 : : long sleeptime;
1843 : : TimestampTz now;
1844 : :
1845 : : /* Clear any already-pending wakeups */
3936 andres@anarazel.de 1846 : 5414 : ResetLatch(MyLatch);
1847 : :
1848 [ - + ]: 5414 : CHECK_FOR_INTERRUPTS();
1849 : :
1850 : : /* Process any requests or signals received recently */
3066 1851 [ + + ]: 5414 : if (ConfigReloadPending)
1852 : : {
1853 : 8 : ConfigReloadPending = false;
4249 rhaas@postgresql.org 1854 : 8 : ProcessConfigFile(PGC_SIGHUP);
1855 : 8 : SyncRepInitConfig();
1856 : : }
1857 : :
1858 : : /* Check for input from the client */
1859 : 5414 : ProcessRepliesIfAny();
1860 : :
1861 : : /*
1862 : :                  * If we're shutting down, trigger pending WAL to be written out;
1863 : : * otherwise we'd possibly end up waiting for WAL that never gets
1864 : : * written, because walwriter has shut down already.
1865 : : */
3066 andres@anarazel.de 1866 [ + + ]: 5216 : if (got_STOPPING)
1867 : 384 : XLogBackgroundFlush();
1868 : :
1869 : : /*
1870 : : * To avoid the scenario where standbys need to catch up to a newer
1871 : : * WAL location in each iteration, we update our idea of the currently
1872 : : * flushed position only if we are not waiting for standbys to catch
1873 : : * up.
1874 : : */
598 akapila@postgresql.o 1875 [ + + ]: 5216 : if (wait_event != WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION)
1876 : : {
1877 [ + - ]: 5206 : if (!RecoveryInProgress())
1878 : 5206 : RecentFlushPtr = GetFlushRecPtr(NULL);
1879 : : else
598 akapila@postgresql.o 1880 :UBC 0 : RecentFlushPtr = GetXLogReplayRecPtr(NULL);
1881 : : }
1882 : :
1883 : : /*
1884 : : * If postmaster asked us to stop and the standby slots have caught up
1885 : : * to the flushed position, don't wait anymore.
1886 : : *
1887 : : * It's important to do this check after the recomputation of
1888 : : * RecentFlushPtr, so we can send all remaining data before shutting
1889 : : * down.
1890 : : */
3066 andres@anarazel.de 1891 [ + + ]:CBC 5216 : if (got_STOPPING)
1892 : : {
598 akapila@postgresql.o 1893 [ - + ]: 384 : if (NeedToWaitForStandbys(RecentFlushPtr, &wait_event))
598 akapila@postgresql.o 1894 :UBC 0 : wait_for_standby_at_stop = true;
1895 : : else
598 akapila@postgresql.o 1896 :CBC 384 : break;
1897 : : }
1898 : :
1899 : : /*
1900 : : * We only send regular messages to the client for full decoded
1901 : :                  * transactions, but synchronous replication and a walsender shutdown
1902 : :                  * may be waiting for a later location. So, before sleeping, we
1903 : : * send a ping containing the flush location. If the receiver is
1904 : : * otherwise idle, this keepalive will trigger a reply. Processing the
1905 : : * reply will update these MyWalSnd locations.
1906 : : */
4094 andres@anarazel.de 1907 [ + + ]: 4832 : if (MyWalSnd->flush < sentPtr &&
1908 [ + + ]: 2310 : MyWalSnd->write < sentPtr &&
1909 [ + - ]: 1748 : !waiting_for_ping_response)
1307 akapila@postgresql.o 1910 : 1748 : WalSndKeepalive(false, InvalidXLogRecPtr);
1911 : :
1912 : : /*
1913 : :                  * Exit the loop if we're already caught up and don't need to wait for
1914 : : * standby slots.
1915 : : */
598 1916 [ + - ]: 4832 : if (!wait_for_standby_at_stop &&
1917 [ + + ]: 4832 : !NeedToWaitForWal(loc, RecentFlushPtr, &wait_event))
4249 rhaas@postgresql.org 1918 : 1728 : break;
1919 : :
1920 : : /*
1921 : :                  * Wait for new WAL or for standbys to catch up. Since we need to
1922 : :                  * wait, we must have sent everything available, i.e. we're caught up.
1923 : : */
1924 : 3104 : WalSndCaughtUp = true;
1925 : :
1926 : : /*
1927 : : * Try to flush any pending output to the client.
1928 : : */
1929 [ - + ]: 3104 : if (pq_flush_if_writable() != 0)
4249 rhaas@postgresql.org 1930 :UBC 0 : WalSndShutdown();
1931 : :
1932 : : /*
1933 : : * If we have received CopyDone from the client, sent CopyDone
1934 : : * ourselves, and the output buffer is empty, it's time to exit
1935 : : * streaming, so fail the current WAL fetch request.
1936 : : */
3041 tgl@sss.pgh.pa.us 1937 [ + + + - ]:CBC 3104 : if (streamingDoneReceiving && streamingDoneSending &&
1938 [ + - ]: 152 : !pq_is_send_pending())
1939 : 152 : break;
1940 : :
1941 : : /* die if timeout was reached */
2614 noah@leadboat.com 1942 : 2952 : WalSndCheckTimeOut();
1943 : :
1944 : : /* Send keepalive if the time has come */
1945 : 2952 : WalSndKeepaliveIfNecessary();
1946 : :
1947 : : /*
1948 : : * Sleep until something happens or we time out. Also wait for the
1949 : : * socket becoming writable, if there's still pending output.
1950 : : * Otherwise we might sit on sendable output data while waiting for
1951 : : * new WAL to be generated. (But if we have nothing to send, we don't
1952 : : * want to wake on socket-writable.)
1953 : : */
202 michael@paquier.xyz 1954 : 2952 : now = GetCurrentTimestamp();
1955 : 2952 : sleeptime = WalSndComputeSleeptime(now);
1956 : :
1701 tmunro@postgresql.or 1957 : 2952 : wakeEvents = WL_SOCKET_READABLE;
1958 : :
4249 rhaas@postgresql.org 1959 [ - + ]: 2952 : if (pq_is_send_pending())
4249 rhaas@postgresql.org 1960 :UBC 0 : wakeEvents |= WL_SOCKET_WRITEABLE;
1961 : :
598 akapila@postgresql.o 1962 [ - + ]:CBC 2952 : Assert(wait_event != 0);
1963 : :
1964 : : /* Report IO statistics, if needed */
202 michael@paquier.xyz 1965 [ + + ]: 2952 : if (TimestampDifferenceExceeds(last_flush, now,
1966 : : WALSENDER_STATS_FLUSH_INTERVAL))
1967 : : {
1968 : 1488 : pgstat_flush_io(false);
1969 : 1488 : (void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
1970 : 1488 : last_flush = now;
1971 : : }
1972 : :
598 akapila@postgresql.o 1973 : 2952 : WalSndWait(wakeEvents, sleeptime, wait_event);
1974 : : }
1975 : :
1976 : : /* reactivate latch so WalSndLoop knows to continue */
3936 andres@anarazel.de 1977 : 2264 : SetLatch(MyLatch);
4249 rhaas@postgresql.org 1978 : 2264 : return RecentFlushPtr;
1979 : : }
1980 : :
1981 : : /*
1982 : : * Execute an incoming replication command.
1983 : : *
1984 : :  * Returns true if the cmd_string was recognized as a WalSender command,
1985 : :  * false if not.
1986 : : */
1987 : : bool
4770 heikki.linnakangas@i 1988 : 5111 : exec_replication_command(const char *cmd_string)
1989 : : {
1990 : : yyscan_t scanner;
1991 : : int parse_rc;
1992 : : Node *cmd_node;
1993 : : const char *cmdtag;
189 tgl@sss.pgh.pa.us 1994 : 5111 : MemoryContext old_context = CurrentMemoryContext;
1995 : :
1996 : : /* We save and re-use the cmd_context across calls */
1997 : : static MemoryContext cmd_context = NULL;
1998 : :
1999 : : /*
2000 : : * If WAL sender has been told that shutdown is getting close, switch its
2001 : : * status accordingly to handle the next replication commands correctly.
2002 : : */
3066 andres@anarazel.de 2003 [ - + ]: 5111 : if (got_STOPPING)
3066 andres@anarazel.de 2004 :UBC 0 : WalSndSetState(WALSNDSTATE_STOPPING);
2005 : :
2006 : : /*
2007 : :          * Throw an error if in stopping mode. We need to prevent commands that
2008 : :          * could generate WAL while the shutdown checkpoint is being written. To
2009 : :          * be safe, we just prohibit all new commands.
2010 : : */
3066 andres@anarazel.de 2011 [ - + ]:CBC 5111 : if (MyWalSnd->state == WALSNDSTATE_STOPPING)
3066 andres@anarazel.de 2012 [ # # ]:UBC 0 : ereport(ERROR,
2013 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2014 : : errmsg("cannot execute new commands while WAL sender is in stopping mode")));
2015 : :
2016 : : /*
2017 : : * CREATE_REPLICATION_SLOT ... LOGICAL exports a snapshot until the next
2018 : :          * command arrives. Clean up the old state, if there is any.
2019 : : */
4249 rhaas@postgresql.org 2020 :CBC 5111 : SnapBuildClearExportedSnapshot();
2021 : :
4770 heikki.linnakangas@i 2022 [ - + ]: 5111 : CHECK_FOR_INTERRUPTS();
2023 : :
2024 : : /*
2025 : : * Prepare to parse and execute the command.
2026 : : *
2027 : : * Because replication command execution can involve beginning or ending
2028 : : * transactions, we need a working context that will survive that, so we
2029 : : * make it a child of TopMemoryContext. That in turn creates a hazard of
2030 : : * long-lived memory leaks if we lose track of the working context. We
2031 : : * deal with that by creating it only once per walsender, and resetting it
2032 : : * for each new command. (Normally this reset is a no-op, but if the
2033 : : * prior exec_replication_command call failed with an error, it won't be.)
2034 : : *
2035 : : * This is subtler than it looks. The transactions we manage can extend
2036 : : * across replication commands, indeed SnapBuildClearExportedSnapshot
2037 : : * might have just ended one. Because transaction exit will revert to the
2038 : : * memory context that was current at transaction start, we need to be
2039 : : * sure that that context is still valid. That motivates re-using the
2040 : : * same cmd_context rather than making a new one each time.
2041 : : */
189 tgl@sss.pgh.pa.us 2042 [ + + ]: 5111 : if (cmd_context == NULL)
2043 : 1112 : cmd_context = AllocSetContextCreate(TopMemoryContext,
2044 : : "Replication command context",
2045 : : ALLOCSET_DEFAULT_SIZES);
2046 : : else
2047 : 3999 : MemoryContextReset(cmd_context);
2048 : :
2049 : 5111 : MemoryContextSwitchTo(cmd_context);
2050 : :
329 peter@eisentraut.org 2051 : 5111 : replication_scanner_init(cmd_string, &scanner);
2052 : :
2053 : : /*
2054 : : * Is it a WalSender command?
2055 : : */
2056 [ + + ]: 5111 : if (!replication_scanner_is_replication_command(scanner))
2057 : : {
2058 : : /* Nope; clean up and get out. */
2059 : 2308 : replication_scanner_finish(scanner);
2060 : :
1869 tgl@sss.pgh.pa.us 2061 : 2308 : MemoryContextSwitchTo(old_context);
189 2062 : 2308 : MemoryContextReset(cmd_context);
2063 : :
2064 : : /* XXX this is a pretty random place to make this check */
1372 2065 [ - + ]: 2308 : if (MyDatabaseId == InvalidOid)
1372 tgl@sss.pgh.pa.us 2066 [ # # ]:UBC 0 : ereport(ERROR,
2067 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2068 : : errmsg("cannot execute SQL commands in WAL sender for physical replication")));
2069 : :
2070 : : /* Tell the caller that this wasn't a WalSender command. */
1869 tgl@sss.pgh.pa.us 2071 :CBC 2308 : return false;
2072 : : }
2073 : :
2074 : : /*
2075 : : * Looks like a WalSender command, so parse it.
2076 : : */
276 peter@eisentraut.org 2077 : 2803 : parse_rc = replication_yyparse(&cmd_node, scanner);
1372 tgl@sss.pgh.pa.us 2078 [ - + ]: 2803 : if (parse_rc != 0)
1372 tgl@sss.pgh.pa.us 2079 [ # # ]:UBC 0 : ereport(ERROR,
2080 : : (errcode(ERRCODE_SYNTAX_ERROR),
2081 : : errmsg_internal("replication command parser returned %d",
2082 : : parse_rc)));
329 peter@eisentraut.org 2083 :CBC 2803 : replication_scanner_finish(scanner);
2084 : :
2085 : : /*
2086 : : * Report query to various monitoring facilities. For this purpose, we
2087 : : * report replication commands just like SQL commands.
2088 : : */
1869 tgl@sss.pgh.pa.us 2089 : 2803 : debug_query_string = cmd_string;
2090 : :
2091 : 2803 : pgstat_report_activity(STATE_RUNNING, cmd_string);
2092 : :
2093 : : /*
2094 : : * Log replication command if log_replication_commands is enabled. Even
2095 : : * when it's disabled, log the command with DEBUG1 level for backward
2096 : : * compatibility.
2097 : : */
2098 [ + - + - ]: 2803 : ereport(log_replication_commands ? LOG : DEBUG1,
2099 : : (errmsg("received replication command: %s", cmd_string)));
2100 : :
2101 : : /*
2102 : : * Disallow replication commands in aborted transaction blocks.
2103 : : */
2104 [ - + ]: 2803 : if (IsAbortedTransactionBlockState())
3140 peter_e@gmx.net 2105 [ # # ]:UBC 0 : ereport(ERROR,
2106 : : (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
2107 : : errmsg("current transaction is aborted, "
2108 : : "commands ignored until end of transaction block")));
2109 : :
3140 peter_e@gmx.net 2110 [ - + ]:CBC 2803 : CHECK_FOR_INTERRUPTS();
2111 : :
2112 : : /*
2113 : : * Allocate buffers that will be used for each outgoing and incoming
2114 : : * message. We do this just once per command to reduce palloc overhead.
2115 : : */
3169 fujii@postgresql.org 2116 : 2803 : initStringInfo(&output_message);
2117 : 2803 : initStringInfo(&reply_message);
2118 : 2803 : initStringInfo(&tmpbuf);
2119 : :
5400 magnus@hagander.net 2120 [ + + + + : 2803 : switch (cmd_node->type)
+ + + + +
+ - ]
2121 : : {
2122 : 676 : case T_IdentifySystemCmd:
1867 alvherre@alvh.no-ip. 2123 : 676 : cmdtag = "IDENTIFY_SYSTEM";
tgl@sss.pgh.pa.us 2124 : 676 : set_ps_display(cmdtag);
5400 magnus@hagander.net 2125 : 676 : IdentifySystem();
1867 alvherre@alvh.no-ip. 2126 : 676 : EndReplicationCommand(cmdtag);
5400 magnus@hagander.net 2127 : 676 : break;
2128 : :
1463 michael@paquier.xyz 2129 : 6 : case T_ReadReplicationSlotCmd:
2130 : 6 : cmdtag = "READ_REPLICATION_SLOT";
2131 : 6 : set_ps_display(cmdtag);
2132 : 6 : ReadReplicationSlot((ReadReplicationSlotCmd *) cmd_node);
2133 : 5 : EndReplicationCommand(cmdtag);
2134 : 5 : break;
2135 : :
5400 magnus@hagander.net 2136 : 184 : case T_BaseBackupCmd:
1867 alvherre@alvh.no-ip. 2137 : 184 : cmdtag = "BASE_BACKUP";
tgl@sss.pgh.pa.us 2138 : 184 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2139 : 184 : PreventInTransactionBlock(true, cmdtag);
677 rhaas@postgresql.org 2140 : 184 : SendBaseBackup((BaseBackupCmd *) cmd_node, uploaded_manifest);
1867 alvherre@alvh.no-ip. 2141 : 158 : EndReplicationCommand(cmdtag);
5391 magnus@hagander.net 2142 : 158 : break;
2143 : :
4287 rhaas@postgresql.org 2144 : 445 : case T_CreateReplicationSlotCmd:
1867 alvherre@alvh.no-ip. 2145 : 445 : cmdtag = "CREATE_REPLICATION_SLOT";
tgl@sss.pgh.pa.us 2146 : 445 : set_ps_display(cmdtag);
4287 rhaas@postgresql.org 2147 : 445 : CreateReplicationSlot((CreateReplicationSlotCmd *) cmd_node);
1867 alvherre@alvh.no-ip. 2148 : 444 : EndReplicationCommand(cmdtag);
4287 rhaas@postgresql.org 2149 : 444 : break;
2150 : :
2151 : 268 : case T_DropReplicationSlotCmd:
1867 alvherre@alvh.no-ip. 2152 : 268 : cmdtag = "DROP_REPLICATION_SLOT";
tgl@sss.pgh.pa.us 2153 : 268 : set_ps_display(cmdtag);
4287 rhaas@postgresql.org 2154 : 268 : DropReplicationSlot((DropReplicationSlotCmd *) cmd_node);
1867 alvherre@alvh.no-ip. 2155 : 266 : EndReplicationCommand(cmdtag);
4287 rhaas@postgresql.org 2156 : 266 : break;
2157 : :
637 akapila@postgresql.o 2158 : 6 : case T_AlterReplicationSlotCmd:
2159 : 6 : cmdtag = "ALTER_REPLICATION_SLOT";
2160 : 6 : set_ps_display(cmdtag);
2161 : 6 : AlterReplicationSlot((AlterReplicationSlotCmd *) cmd_node);
2162 : 5 : EndReplicationCommand(cmdtag);
2163 : 5 : break;
2164 : :
4287 rhaas@postgresql.org 2165 : 658 : case T_StartReplicationCmd:
2166 : : {
2167 : 658 : StartReplicationCmd *cmd = (StartReplicationCmd *) cmd_node;
2168 : :
1867 alvherre@alvh.no-ip. 2169 : 658 : cmdtag = "START_REPLICATION";
tgl@sss.pgh.pa.us 2170 : 658 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2171 : 658 : PreventInTransactionBlock(true, cmdtag);
2172 : :
4287 rhaas@postgresql.org 2173 [ + + ]: 658 : if (cmd->kind == REPLICATION_KIND_PHYSICAL)
2174 : 256 : StartReplication(cmd);
2175 : : else
4249 2176 : 402 : StartLogicalReplication(cmd);
2177 : :
2178 : : /* dupe, but necessary per libpqrcv_endstreaming */
1839 alvherre@alvh.no-ip. 2179 : 335 : EndReplicationCommand(cmdtag);
2180 : :
1967 michael@paquier.xyz 2181 [ - + ]: 335 : Assert(xlogreader != NULL);
4287 rhaas@postgresql.org 2182 : 335 : break;
2183 : : }
2184 : :
4701 heikki.linnakangas@i 2185 : 14 : case T_TimeLineHistoryCmd:
1867 alvherre@alvh.no-ip. 2186 : 14 : cmdtag = "TIMELINE_HISTORY";
tgl@sss.pgh.pa.us 2187 : 14 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2188 : 14 : PreventInTransactionBlock(true, cmdtag);
4701 heikki.linnakangas@i 2189 : 14 : SendTimeLineHistory((TimeLineHistoryCmd *) cmd_node);
1867 alvherre@alvh.no-ip. 2190 : 14 : EndReplicationCommand(cmdtag);
4701 heikki.linnakangas@i 2191 : 14 : break;
2192 : :
3198 rhaas@postgresql.org 2193 : 535 : case T_VariableShowStmt:
2194 : : {
2195 : 535 : DestReceiver *dest = CreateDestReceiver(DestRemoteSimple);
2196 : 535 : VariableShowStmt *n = (VariableShowStmt *) cmd_node;
2197 : :
1867 alvherre@alvh.no-ip. 2198 : 535 : cmdtag = "SHOW";
tgl@sss.pgh.pa.us 2199 : 535 : set_ps_display(cmdtag);
2200 : :
2201 : : /* syscache access needs a transaction environment */
2387 michael@paquier.xyz 2202 : 535 : StartTransactionCommand();
3198 rhaas@postgresql.org 2203 : 535 : GetPGVariable(n->name, dest);
2387 michael@paquier.xyz 2204 : 535 : CommitTransactionCommand();
1867 alvherre@alvh.no-ip. 2205 : 535 : EndReplicationCommand(cmdtag);
2206 : : }
3198 rhaas@postgresql.org 2207 : 535 : break;
2208 : :
677 2209 : 11 : case T_UploadManifestCmd:
2210 : 11 : cmdtag = "UPLOAD_MANIFEST";
2211 : 11 : set_ps_display(cmdtag);
2212 : 11 : PreventInTransactionBlock(true, cmdtag);
2213 : 11 : UploadManifest();
2214 : 10 : EndReplicationCommand(cmdtag);
2215 : 10 : break;
2216 : :
5400 magnus@hagander.net 2217 :UBC 0 : default:
4701 heikki.linnakangas@i 2218 [ # # ]: 0 : elog(ERROR, "unrecognized replication command node tag: %u",
2219 : : cmd_node->type);
2220 : : }
2221 : :
2222 : : /*
2223 : : * Done. Revert to caller's memory context, and clean out the cmd_context
2224 : : * to recover memory right away.
2225 : : */
5400 magnus@hagander.net 2226 :CBC 2448 : MemoryContextSwitchTo(old_context);
189 tgl@sss.pgh.pa.us 2227 : 2448 : MemoryContextReset(cmd_context);
2228 : :
2229 : : /*
2230 : : * We need not update ps display or pg_stat_activity, because PostgresMain
2231 : : * will reset those to "idle". But we must reset debug_query_string to
2232 : : * ensure it doesn't become a dangling pointer.
2233 : : */
1869 2234 : 2448 : debug_query_string = NULL;
2235 : :
3140 peter_e@gmx.net 2236 : 2448 : return true;
2237 : : }
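
/*
 * Illustrative client-side sketch (not backend code) of two of the commands
 * dispatched above.  Over a replication connection, IDENTIFY_SYSTEM and SHOW
 * both come back as ordinary tuple results; the connection parameters and
 * the chosen GUC are assumptions for the example.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres replication=database");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		PQfinish(conn);
		return 1;
	}

	res = PQexec(conn, "IDENTIFY_SYSTEM");
	if (PQresultStatus(res) == PGRES_TUPLES_OK)
		printf("systemid=%s timeline=%s xlogpos=%s\n",
			   PQgetvalue(res, 0, 0),
			   PQgetvalue(res, 0, 1),
			   PQgetvalue(res, 0, 2));
	PQclear(res);

	res = PQexec(conn, "SHOW wal_level");
	if (PQresultStatus(res) == PGRES_TUPLES_OK)
		printf("wal_level=%s\n", PQgetvalue(res, 0, 0));
	PQclear(res);

	PQfinish(conn);
	return 0;
}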
2238 : :
2239 : : /*
2240 : :  * Process any incoming messages while streaming. Also check if the remote
2241 : : * end has closed the connection.
2242 : : */
2243 : : static void
5373 heikki.linnakangas@i 2244 : 1043067 : ProcessRepliesIfAny(void)
2245 : : {
2246 : : unsigned char firstchar;
2247 : : int maxmsglen;
2248 : : int r;
5192 tgl@sss.pgh.pa.us 2249 : 1043067 : bool received = false;
2250 : :
2614 noah@leadboat.com 2251 : 1043067 : last_processing = GetCurrentTimestamp();
2252 : :
2253 : : /*
2254 : : * If we already received a CopyDone from the frontend, any subsequent
2255 : : * message is the beginning of a new command, and should be processed in
2256 : : * the main processing loop.
2257 : : */
1778 jdavis@postgresql.or 2258 [ + + ]: 1142470 : while (!streamingDoneReceiving)
2259 : : {
3920 heikki.linnakangas@i 2260 : 1141796 : pq_startmsgread();
5365 simon@2ndQuadrant.co 2261 : 1141796 : r = pq_getbyte_if_available(&firstchar);
2262 [ + + ]: 1141796 : if (r < 0)
2263 : : {
2264 : : /* unexpected error or EOF */
2265 [ + - ]: 17 : ereport(COMMERROR,
2266 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
2267 : : errmsg("unexpected EOF on standby connection")));
2268 : 17 : proc_exit(0);
2269 : : }
2270 [ + + ]: 1141779 : if (r == 0)
2271 : : {
2272 : : /* no data available without blocking */
3920 heikki.linnakangas@i 2273 : 1042112 : pq_endmsgread();
5325 2274 : 1042112 : break;
2275 : : }
2276 : :
2277 : : /* Validate message type and set packet size limit */
1643 tgl@sss.pgh.pa.us 2278 [ + + - ]: 99667 : switch (firstchar)
2279 : : {
797 nathan@postgresql.or 2280 : 99068 : case PqMsg_CopyData:
1643 tgl@sss.pgh.pa.us 2281 : 99068 : maxmsglen = PQ_LARGE_MESSAGE_LIMIT;
2282 : 99068 : break;
797 nathan@postgresql.or 2283 : 599 : case PqMsg_CopyDone:
2284 : : case PqMsg_Terminate:
1643 tgl@sss.pgh.pa.us 2285 : 599 : maxmsglen = PQ_SMALL_MESSAGE_LIMIT;
2286 : 599 : break;
1643 tgl@sss.pgh.pa.us 2287 :UBC 0 : default:
2288 [ # # ]: 0 : ereport(FATAL,
2289 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
2290 : : errmsg("invalid standby message type \"%c\"",
2291 : : firstchar)));
2292 : : maxmsglen = 0; /* keep compiler quiet */
2293 : : break;
2294 : : }
2295 : :
2296 : : /* Read the message contents */
3920 heikki.linnakangas@i 2297 :CBC 99667 : resetStringInfo(&reply_message);
1643 tgl@sss.pgh.pa.us 2298 [ - + ]: 99667 : if (pq_getmessage(&reply_message, maxmsglen))
2299 : : {
3920 heikki.linnakangas@i 2300 [ # # ]:UBC 0 : ereport(COMMERROR,
2301 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
2302 : : errmsg("unexpected EOF on standby connection")));
2303 : 0 : proc_exit(0);
2304 : : }
2305 : :
2306 : : /* ... and process it */
5365 simon@2ndQuadrant.co 2307 [ + + + - ]:CBC 99667 : switch (firstchar)
2308 : : {
2309 : : /*
2310 : : * PqMsg_CopyData means a standby reply wrapped in a CopyData
2311 : : * packet.
2312 : : */
797 nathan@postgresql.or 2313 : 99068 : case PqMsg_CopyData:
5365 simon@2ndQuadrant.co 2314 : 99068 : ProcessStandbyMessage();
5325 heikki.linnakangas@i 2315 : 99068 : received = true;
5365 simon@2ndQuadrant.co 2316 : 99068 : break;
2317 : :
2318 : : /*
2319 : : * PqMsg_CopyDone means the standby requested to finish
2320 : : * streaming. Reply with CopyDone, if we had not sent that
2321 : : * already.
2322 : : */
797 nathan@postgresql.or 2323 : 335 : case PqMsg_CopyDone:
4701 heikki.linnakangas@i 2324 [ + + ]: 335 : if (!streamingDoneSending)
2325 : : {
96 nathan@postgresql.or 2326 :GNC 322 : pq_putmessage_noblock(PqMsg_CopyDone, NULL, 0);
4701 heikki.linnakangas@i 2327 :CBC 322 : streamingDoneSending = true;
2328 : : }
2329 : :
2330 : 335 : streamingDoneReceiving = true;
2331 : 335 : received = true;
2332 : 335 : break;
2333 : :
2334 : : /*
2335 : : * PqMsg_Terminate means that the standby is closing down the
2336 : : * socket.
2337 : : */
797 nathan@postgresql.or 2338 : 264 : case PqMsg_Terminate:
5365 simon@2ndQuadrant.co 2339 : 264 : proc_exit(0);
2340 : :
5365 simon@2ndQuadrant.co 2341 :UBC 0 : default:
1643 tgl@sss.pgh.pa.us 2342 : 0 : Assert(false); /* NOT REACHED */
2343 : : }
2344 : : }
2345 : :
2346 : : /*
2347 : : * Save the last reply timestamp if we've received at least one reply.
2348 : : */
5325 heikki.linnakangas@i 2349 [ + + ]:CBC 1042786 : if (received)
2350 : : {
2614 noah@leadboat.com 2351 : 52144 : last_reply_timestamp = last_processing;
4249 rhaas@postgresql.org 2352 : 52144 : waiting_for_ping_response = false;
2353 : : }
5764 heikki.linnakangas@i 2354 : 1042786 : }
2355 : :
2356 : : /*
2357 : : * Process a status update message received from standby.
2358 : : */
2359 : : static void
5365 simon@2ndQuadrant.co 2360 : 99068 : ProcessStandbyMessage(void)
2361 : : {
2362 : : char msgtype;
2363 : :
2364 : : /*
2365 : : * Check message type from the first byte.
2366 : : */
5368 rhaas@postgresql.org 2367 : 99068 : msgtype = pq_getmsgbyte(&reply_message);
2368 : :
5365 simon@2ndQuadrant.co 2369 [ + + + - ]: 99068 : switch (msgtype)
2370 : : {
82 nathan@postgresql.or 2371 :GNC 98825 : case PqReplMsg_StandbyStatusUpdate:
5365 simon@2ndQuadrant.co 2372 :CBC 98825 : ProcessStandbyReplyMessage();
2373 : 98825 : break;
2374 : :
82 nathan@postgresql.or 2375 :GNC 128 : case PqReplMsg_HotStandbyFeedback:
5365 simon@2ndQuadrant.co 2376 :CBC 128 : ProcessStandbyHSFeedbackMessage();
2377 : 128 : break;
2378 : :
82 nathan@postgresql.or 2379 :GNC 115 : case PqReplMsg_PrimaryStatusRequest:
96 akapila@postgresql.o 2380 : 115 : ProcessStandbyPSRequestMessage();
2381 : 115 : break;
2382 : :
5365 simon@2ndQuadrant.co 2383 :UBC 0 : default:
2384 [ # # ]: 0 : ereport(COMMERROR,
2385 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
2386 : : errmsg("unexpected message type \"%c\"", msgtype)));
2387 : 0 : proc_exit(0);
2388 : : }
5365 simon@2ndQuadrant.co 2389 :CBC 99068 : }
2390 : :
2391 : : /*
2392 : : * Remember that a walreceiver just confirmed receipt of lsn `lsn`.
2393 : : */
2394 : : static void
4287 rhaas@postgresql.org 2395 : 85351 : PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
2396 : : {
4192 bruce@momjian.us 2397 : 85351 : bool changed = false;
3674 rhaas@postgresql.org 2398 : 85351 : ReplicationSlot *slot = MyReplicationSlot;
2399 : :
4287 2400 [ - + ]: 85351 : Assert(lsn != InvalidXLogRecPtr);
2401 [ - + ]: 85351 : SpinLockAcquire(&slot->mutex);
2402 [ + + ]: 85351 : if (slot->data.restart_lsn != lsn)
2403 : : {
2404 : 38753 : changed = true;
2405 : 38753 : slot->data.restart_lsn = lsn;
2406 : : }
2407 : 85351 : SpinLockRelease(&slot->mutex);
2408 : :
2409 [ + + ]: 85351 : if (changed)
2410 : : {
2411 : 38753 : ReplicationSlotMarkDirty();
2412 : 38753 : ReplicationSlotsComputeRequiredLSN();
598 akapila@postgresql.o 2413 : 38753 : PhysicalWakeupLogicalWalSnd();
2414 : : }
2415 : :
2416 : : /*
2417 : : * One could argue that the slot should be saved to disk now, but that'd
2418 : : * be energy wasted - the worst thing lost information could cause here is
2419 : : * to give wrong information in a statistics view - we'll just potentially
2420 : : * be more conservative in removing files.
2421 : : */
4287 rhaas@postgresql.org 2422 : 85351 : }
2423 : :
2424 : : /*
2425 : : * Regular reply from standby advising of WAL locations on standby server.
2426 : : */
2427 : : static void
5365 simon@2ndQuadrant.co 2428 : 98825 : ProcessStandbyReplyMessage(void)
2429 : : {
2430 : : XLogRecPtr writePtr,
2431 : : flushPtr,
2432 : : applyPtr;
2433 : : bool replyRequested;
2434 : : TimeOffset writeLag,
2435 : : flushLag,
2436 : : applyLag;
2437 : : bool clearLagTimes;
2438 : : TimestampTz now;
2439 : : TimestampTz replyTime;
2440 : :
2441 : : static bool fullyAppliedLastTime = false;
2442 : :
2443 : : /* the caller already consumed the msgtype byte */
4737 heikki.linnakangas@i 2444 : 98825 : writePtr = pq_getmsgint64(&reply_message);
2445 : 98825 : flushPtr = pq_getmsgint64(&reply_message);
2446 : 98825 : applyPtr = pq_getmsgint64(&reply_message);
2514 michael@paquier.xyz 2447 : 98825 : replyTime = pq_getmsgint64(&reply_message);
4737 heikki.linnakangas@i 2448 : 98825 : replyRequested = pq_getmsgbyte(&reply_message);
2449 : :
1799 tgl@sss.pgh.pa.us 2450 [ + + ]: 98825 : if (message_level_is_interesting(DEBUG2))
2451 : : {
2452 : : char *replyTimeStr;
2453 : :
2454 : : /* Copy because timestamptz_to_str returns a static buffer */
2514 michael@paquier.xyz 2455 : 564 : replyTimeStr = pstrdup(timestamptz_to_str(replyTime));
2456 : :
112 alvherre@kurilemu.de 2457 [ + - - + ]:GNC 564 : elog(DEBUG2, "write %X/%08X flush %X/%08X apply %X/%08X%s reply_time %s",
2458 : : LSN_FORMAT_ARGS(writePtr),
2459 : : LSN_FORMAT_ARGS(flushPtr),
2460 : : LSN_FORMAT_ARGS(applyPtr),
2461 : : replyRequested ? " (reply requested)" : "",
2462 : : replyTimeStr);
2463 : :
2514 michael@paquier.xyz 2464 :CBC 564 : pfree(replyTimeStr);
2465 : : }
2466 : :
2467 : : /* See if we can compute the round-trip lag for these positions. */
3140 simon@2ndQuadrant.co 2468 : 98825 : now = GetCurrentTimestamp();
2469 : 98825 : writeLag = LagTrackerRead(SYNC_REP_WAIT_WRITE, writePtr, now);
2470 : 98825 : flushLag = LagTrackerRead(SYNC_REP_WAIT_FLUSH, flushPtr, now);
2471 : 98825 : applyLag = LagTrackerRead(SYNC_REP_WAIT_APPLY, applyPtr, now);
2472 : :
2473 : : /*
2474 : : * If the standby reports that it has fully replayed the WAL in two
2475 : : * consecutive reply messages, then the second such message must result
2476 : : * from wal_receiver_status_interval expiring on the standby. This is a
2477 : : * convenient time to forget the lag times measured when it last
2478 : : * wrote/flushed/applied a WAL record, to avoid displaying stale lag data
2479 : : * until more WAL traffic arrives.
2480 : : */
2481 : 98825 : clearLagTimes = false;
2482 [ + + ]: 98825 : if (applyPtr == sentPtr)
2483 : : {
2484 [ + + ]: 8749 : if (fullyAppliedLastTime)
2485 : 1582 : clearLagTimes = true;
2486 : 8749 : fullyAppliedLastTime = true;
2487 : : }
2488 : : else
2489 : 90076 : fullyAppliedLastTime = false;
2490 : :
2491 : : /* Send a reply if the standby requested one. */
4737 heikki.linnakangas@i 2492 [ - + ]: 98825 : if (replyRequested)
1307 akapila@postgresql.o 2493 :UBC 0 : WalSndKeepalive(false, InvalidXLogRecPtr);
2494 : :
2495 : : /*
2496 : : * Update shared state for this WalSender process based on reply data from
2497 : : * standby.
2498 : : */
2499 : : {
3427 rhaas@postgresql.org 2500 :CBC 98825 : WalSnd *walsnd = MyWalSnd;
2501 : :
5373 heikki.linnakangas@i 2502 [ - + ]: 98825 : SpinLockAcquire(&walsnd->mutex);
4737 2503 : 98825 : walsnd->write = writePtr;
2504 : 98825 : walsnd->flush = flushPtr;
2505 : 98825 : walsnd->apply = applyPtr;
3140 simon@2ndQuadrant.co 2506 [ + + + + ]: 98825 : if (writeLag != -1 || clearLagTimes)
2507 : 65579 : walsnd->writeLag = writeLag;
2508 [ + + + + ]: 98825 : if (flushLag != -1 || clearLagTimes)
2509 : 81566 : walsnd->flushLag = flushLag;
2510 [ + + + + ]: 98825 : if (applyLag != -1 || clearLagTimes)
2511 : 88488 : walsnd->applyLag = applyLag;
2514 michael@paquier.xyz 2512 : 98825 : walsnd->replyTime = replyTime;
5373 heikki.linnakangas@i 2513 : 98825 : SpinLockRelease(&walsnd->mutex);
2514 : : }
2515 : :
5214 simon@2ndQuadrant.co 2516 [ + + ]: 98825 : if (!am_cascading_walsender)
2517 : 98535 : SyncRepReleaseWaiters();
2518 : :
2519 : : /*
2520 : : * Advance our local xmin horizon when the client confirmed a flush.
2521 : : */
4287 rhaas@postgresql.org 2522 [ + + + + ]: 98825 : if (MyReplicationSlot && flushPtr != InvalidXLogRecPtr)
2523 : : {
3730 andres@anarazel.de 2524 [ + + ]: 96132 : if (SlotIsLogical(MyReplicationSlot))
4249 rhaas@postgresql.org 2525 : 10781 : LogicalConfirmReceivedLocation(flushPtr);
2526 : : else
4287 2527 : 85351 : PhysicalConfirmReceivedLocation(flushPtr);
2528 : : }
2529 : 98825 : }
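
/*
 * Sender-side sketch (hypothetical, for illustration) of the standby status
 * update decoded above: an 'r' byte followed by three big-endian LSNs, a
 * timestamp, and a reply-request flag, in exactly the order the pq_getmsg*
 * calls consume them.  A real standby builds this message in
 * XLogWalRcvSendReply() in walreceiver.c; the helper below is simplified and
 * assumes the caller supplies the values.
 */
#include <stddef.h>
#include <stdint.h>

static char *
put_uint64_be(char *p, uint64_t v)
{
	for (int i = 7; i >= 0; i--)
		*p++ = (char) ((v >> (i * 8)) & 0xFF);
	return p;
}

/* Fills buf (at least 34 bytes) and returns the message length. */
static size_t
build_status_update(char *buf, uint64_t writePtr, uint64_t flushPtr,
					uint64_t applyPtr, int64_t replyTime, int replyRequested)
{
	char	   *p = buf;

	*p++ = 'r';					/* PqReplMsg_StandbyStatusUpdate */
	p = put_uint64_be(p, writePtr);
	p = put_uint64_be(p, flushPtr);
	p = put_uint64_be(p, applyPtr);
	p = put_uint64_be(p, (uint64_t) replyTime);
	*p++ = replyRequested ? 1 : 0;

	return (size_t) (p - buf);	/* 1 + 4 * 8 + 1 = 34 bytes */
}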
2530 : :
2531 : : /* compute new replication slot xmin horizon if needed */
2532 : : static void
3138 simon@2ndQuadrant.co 2533 : 52 : PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin, TransactionId feedbackCatalogXmin)
2534 : : {
4192 bruce@momjian.us 2535 : 52 : bool changed = false;
3674 rhaas@postgresql.org 2536 : 52 : ReplicationSlot *slot = MyReplicationSlot;
2537 : :
4287 2538 [ - + ]: 52 : SpinLockAcquire(&slot->mutex);
1901 andres@anarazel.de 2539 : 52 : MyProc->xmin = InvalidTransactionId;
2540 : :
2541 : : /*
2542 : : * For physical replication we don't need the interlock provided by xmin
2543 : : * and effective_xmin since the consequences of a missed increase are
2544 : : * limited to query cancellations, so set both at once.
2545 : : */
4287 rhaas@postgresql.org 2546 [ + + + + ]: 52 : if (!TransactionIdIsNormal(slot->data.xmin) ||
2547 [ + + ]: 27 : !TransactionIdIsNormal(feedbackXmin) ||
2548 : 27 : TransactionIdPrecedes(slot->data.xmin, feedbackXmin))
2549 : : {
2550 : 33 : changed = true;
2551 : 33 : slot->data.xmin = feedbackXmin;
2552 : 33 : slot->effective_xmin = feedbackXmin;
2553 : : }
3138 simon@2ndQuadrant.co 2554 [ + + + + ]: 52 : if (!TransactionIdIsNormal(slot->data.catalog_xmin) ||
2555 [ + + ]: 15 : !TransactionIdIsNormal(feedbackCatalogXmin) ||
2556 : 15 : TransactionIdPrecedes(slot->data.catalog_xmin, feedbackCatalogXmin))
2557 : : {
2558 : 38 : changed = true;
2559 : 38 : slot->data.catalog_xmin = feedbackCatalogXmin;
2560 : 38 : slot->effective_catalog_xmin = feedbackCatalogXmin;
2561 : : }
4287 rhaas@postgresql.org 2562 : 52 : SpinLockRelease(&slot->mutex);
2563 : :
2564 [ + + ]: 52 : if (changed)
2565 : : {
2566 : 41 : ReplicationSlotMarkDirty();
4256 2567 : 41 : ReplicationSlotsComputeRequiredXmin(false);
2568 : : }
5365 simon@2ndQuadrant.co 2569 : 52 : }
2570 : :
2571 : : /*
2572 : : * Check that the provided xmin/epoch are sane, that is, not in the future
2573 : : * and not so far back as to be already wrapped around.
2574 : : *
2575 : :  * The epoch of nextXid should be the same as the standby's, or, if the
2576 : :  * counter has wrapped, one greater than the standby's.
2577 : : *
2578 : : * This check doesn't care about whether clog exists for these xids
2579 : : * at all.
2580 : : */
2581 : : static bool
3138 2582 : 56 : TransactionIdInRecentPast(TransactionId xid, uint32 epoch)
2583 : : {
2584 : : FullTransactionId nextFullXid;
2585 : : TransactionId nextXid;
2586 : : uint32 nextEpoch;
2587 : :
2405 tmunro@postgresql.or 2588 : 56 : nextFullXid = ReadNextFullTransactionId();
2589 : 56 : nextXid = XidFromFullTransactionId(nextFullXid);
2590 : 56 : nextEpoch = EpochFromFullTransactionId(nextFullXid);
2591 : :
3138 simon@2ndQuadrant.co 2592 [ + - ]: 56 : if (xid <= nextXid)
2593 : : {
2594 [ - + ]: 56 : if (epoch != nextEpoch)
3138 simon@2ndQuadrant.co 2595 :UBC 0 : return false;
2596 : : }
2597 : : else
2598 : : {
2599 [ # # ]: 0 : if (epoch + 1 != nextEpoch)
2600 : 0 : return false;
2601 : : }
2602 : :
3138 simon@2ndQuadrant.co 2603 [ - + ]:CBC 56 : if (!TransactionIdPrecedesOrEquals(xid, nextXid))
3085 bruce@momjian.us 2604 :UBC 0 : return false; /* epoch OK, but it's wrapped around */
2605 : :
3138 simon@2ndQuadrant.co 2606 :CBC 56 : return true;
2607 : : }
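
/*
 * Standalone sketch (an illustration, not backend code) of the epoch test
 * above, using plain fixed-width integers instead of the backend's
 * TransactionId/FullTransactionId machinery.  Special (non-normal) xids are
 * not handled, and the final comparison only approximates
 * TransactionIdPrecedesOrEquals() for ordinary xids.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
xid_in_recent_past(uint32_t xid, uint32_t epoch,
				   uint32_t nextXid, uint32_t nextEpoch)
{
	if (xid <= nextXid)
	{
		/* numerically smaller xid must come from the same epoch */
		if (epoch != nextEpoch)
			return false;
	}
	else
	{
		/* numerically larger xid can only come from the previous epoch */
		if (epoch + 1 != nextEpoch)
			return false;
	}

	/*
	 * Ordinary xids compare modulo 2^32, so an xid more than ~2 billion
	 * behind nextXid still fails here ("epoch OK, but wrapped around").
	 *
	 * Example: with nextXid = 100 and nextEpoch = 5, (90, 5) passes and
	 * (4000000000, 4) passes, while (90, 4) fails the epoch check.
	 */
	return (int32_t) (xid - nextXid) <= 0;
}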
2608 : :
2609 : : /*
2610 : : * Hot Standby feedback
2611 : : */
2612 : : static void
5365 2613 : 128 : ProcessStandbyHSFeedbackMessage(void)
2614 : : {
2615 : : TransactionId feedbackXmin;
2616 : : uint32 feedbackEpoch;
2617 : : TransactionId feedbackCatalogXmin;
2618 : : uint32 feedbackCatalogEpoch;
2619 : : TimestampTz replyTime;
2620 : :
2621 : : /*
2622 : : * Decipher the reply message. The caller already consumed the msgtype
2623 : : * byte. See XLogWalRcvSendHSFeedback() in walreceiver.c for the creation
2624 : : * of this message.
2625 : : */
2514 michael@paquier.xyz 2626 : 128 : replyTime = pq_getmsgint64(&reply_message);
4737 heikki.linnakangas@i 2627 : 128 : feedbackXmin = pq_getmsgint(&reply_message, 4);
2628 : 128 : feedbackEpoch = pq_getmsgint(&reply_message, 4);
3138 simon@2ndQuadrant.co 2629 : 128 : feedbackCatalogXmin = pq_getmsgint(&reply_message, 4);
2630 : 128 : feedbackCatalogEpoch = pq_getmsgint(&reply_message, 4);
2631 : :
1799 tgl@sss.pgh.pa.us 2632 [ + + ]: 128 : if (message_level_is_interesting(DEBUG2))
2633 : : {
2634 : : char *replyTimeStr;
2635 : :
2636 : : /* Copy because timestamptz_to_str returns a static buffer */
2514 michael@paquier.xyz 2637 : 4 : replyTimeStr = pstrdup(timestamptz_to_str(replyTime));
2638 : :
2639 [ + - ]: 4 : elog(DEBUG2, "hot standby feedback xmin %u epoch %u, catalog_xmin %u epoch %u reply_time %s",
2640 : : feedbackXmin,
2641 : : feedbackEpoch,
2642 : : feedbackCatalogXmin,
2643 : : feedbackCatalogEpoch,
2644 : : replyTimeStr);
2645 : :
2646 : 4 : pfree(replyTimeStr);
2647 : : }
2648 : :
2649 : : /*
2650 : : * Update shared state for this WalSender process based on reply data from
2651 : : * standby.
2652 : : */
2653 : : {
2654 : 128 : WalSnd *walsnd = MyWalSnd;
2655 : :
2656 [ - + ]: 128 : SpinLockAcquire(&walsnd->mutex);
2657 : 128 : walsnd->replyTime = replyTime;
2658 : 128 : SpinLockRelease(&walsnd->mutex);
2659 : : }
2660 : :
2661 : : /*
2662 : : * Unset WalSender's xmins if the feedback message values are invalid.
 2663                 :                :      * This happens when the standby turns hot_standby_feedback off.
2664 : : */
3138 simon@2ndQuadrant.co 2665 [ + + ]: 128 : if (!TransactionIdIsNormal(feedbackXmin)
2666 [ + - ]: 91 : && !TransactionIdIsNormal(feedbackCatalogXmin))
2667 : : {
1901 andres@anarazel.de 2668 : 91 : MyProc->xmin = InvalidTransactionId;
4287 rhaas@postgresql.org 2669 [ + + ]: 91 : if (MyReplicationSlot != NULL)
3138 simon@2ndQuadrant.co 2670 : 19 : PhysicalReplicationSlotNewXmin(feedbackXmin, feedbackCatalogXmin);
5121 tgl@sss.pgh.pa.us 2671 : 91 : return;
2672 : : }
2673 : :
2674 : : /*
2675 : : * Check that the provided xmin/epoch are sane, that is, not in the future
2676 : : * and not so far back as to be already wrapped around. Ignore if not.
2677 : : */
3138 simon@2ndQuadrant.co 2678 [ + - ]: 37 : if (TransactionIdIsNormal(feedbackXmin) &&
2679 [ - + ]: 37 : !TransactionIdInRecentPast(feedbackXmin, feedbackEpoch))
3138 simon@2ndQuadrant.co 2680 :UBC 0 : return;
2681 : :
3138 simon@2ndQuadrant.co 2682 [ + + ]:CBC 37 : if (TransactionIdIsNormal(feedbackCatalogXmin) &&
2683 [ - + ]: 19 : !TransactionIdInRecentPast(feedbackCatalogXmin, feedbackCatalogEpoch))
3138 simon@2ndQuadrant.co 2684 :UBC 0 : return;
2685 : :
2686 : : /*
2687 : : * Set the WalSender's xmin equal to the standby's requested xmin, so that
2688 : : * the xmin will be taken into account by GetSnapshotData() /
2689 : : * ComputeXidHorizons(). This will hold back the removal of dead rows and
2690 : : * thereby prevent the generation of cleanup conflicts on the standby
2691 : : * server.
2692 : : *
2693 : : * There is a small window for a race condition here: although we just
2694 : : * checked that feedbackXmin precedes nextXid, the nextXid could have
2695 : : * gotten advanced between our fetching it and applying the xmin below,
2696 : : * perhaps far enough to make feedbackXmin wrap around. In that case the
2697 : : * xmin we set here would be "in the future" and have no effect. No point
2698 : : * in worrying about this since it's too late to save the desired data
2699 : : * anyway. Assuming that the standby sends us an increasing sequence of
2700 : : * xmins, this could only happen during the first reply cycle, else our
2701 : : * own xmin would prevent nextXid from advancing so far.
2702 : : *
2703 : : * We don't bother taking the ProcArrayLock here. Setting the xmin field
2704 : : * is assumed atomic, and there's no real need to prevent concurrent
2705 : : * horizon determinations. (If we're moving our xmin forward, this is
2706 : : * obviously safe, and if we're moving it backwards, well, the data is at
2707 : : * risk already since a VACUUM could already have determined the horizon.)
2708 : : *
2709 : : * If we're using a replication slot we reserve the xmin via that,
2710 : : * otherwise via the walsender's PGPROC entry. We can only track the
2711 : : * catalog xmin separately when using a slot, so we store the least of the
2712 : : * two provided when not using a slot.
2713 : : *
2714 : : * XXX: It might make sense to generalize the ephemeral slot concept and
2715 : : * always use the slot mechanism to handle the feedback xmin.
2716 : : */
3050 tgl@sss.pgh.pa.us 2717 [ + + ]:CBC 37 : if (MyReplicationSlot != NULL) /* XXX: persistency configurable? */
3138 simon@2ndQuadrant.co 2718 : 33 : PhysicalReplicationSlotNewXmin(feedbackXmin, feedbackCatalogXmin);
2719 : : else
2720 : : {
2721 [ - + ]: 4 : if (TransactionIdIsNormal(feedbackCatalogXmin)
3138 simon@2ndQuadrant.co 2722 [ # # ]:UBC 0 : && TransactionIdPrecedes(feedbackCatalogXmin, feedbackXmin))
1901 andres@anarazel.de 2723 : 0 : MyProc->xmin = feedbackCatalogXmin;
2724 : : else
1901 andres@anarazel.de 2725 :CBC 4 : MyProc->xmin = feedbackXmin;
2726 : : }
2727 : : }
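
/*
 * Illustrative sketch, not part of walsender.c: after the 'h' message-type
 * byte consumed by the caller, the feedback body decoded above is five
 * network-byte-order fields (an int64 timestamp followed by four uint32
 * values), 24 bytes in total. The real encoder is
 * XLogWalRcvSendHSFeedback() in walreceiver.c; the struct and helper below
 * are invented for this example.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct sketch_hs_feedback
{
	int64_t		reply_time;			/* microseconds since the PG epoch */
	uint32_t	xmin;
	uint32_t	xmin_epoch;
	uint32_t	catalog_xmin;
	uint32_t	catalog_xmin_epoch;
} sketch_hs_feedback;

static size_t
sketch_encode_hs_feedback(const sketch_hs_feedback *msg, uint8_t *buf)
{
	const uint32_t fields[4] = {msg->xmin, msg->xmin_epoch,
								msg->catalog_xmin, msg->catalog_xmin_epoch};
	size_t		off = 0;

	/* every field goes out most-significant byte first */
	for (int i = 7; i >= 0; i--)
		buf[off++] = (uint8_t) ((uint64_t) msg->reply_time >> (8 * i));
	for (int f = 0; f < 4; f++)
		for (int i = 3; i >= 0; i--)
			buf[off++] = (uint8_t) (fields[f] >> (8 * i));

	return off;					/* 24 bytes */
}
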
2728 : :
2729 : : /*
2730 : : * Process the request for a primary status update message.
2731 : : */
2732 : : static void
96 akapila@postgresql.o 2733 :GNC 115 : ProcessStandbyPSRequestMessage(void)
2734 : : {
2735 : 115 : XLogRecPtr lsn = InvalidXLogRecPtr;
2736 : : TransactionId oldestXidInCommit;
2737 : : TransactionId oldestGXidInCommit;
2738 : : FullTransactionId nextFullXid;
2739 : : FullTransactionId fullOldestXidInCommit;
2740 : 115 : WalSnd *walsnd = MyWalSnd;
2741 : : TimestampTz replyTime;
2742 : :
2743 : : /*
 2744                 :                :      * This shouldn't happen, because we don't support requesting the
 2745                 :                :      * primary status message from a standby.
2746 : : */
2747 [ - + ]: 115 : if (RecoveryInProgress())
96 akapila@postgresql.o 2748 [ # # ]:UNC 0 : elog(ERROR, "the primary status is unavailable during recovery");
2749 : :
96 akapila@postgresql.o 2750 :GNC 115 : replyTime = pq_getmsgint64(&reply_message);
2751 : :
2752 : : /*
2753 : : * Update shared state for this WalSender process based on reply data from
2754 : : * standby.
2755 : : */
2756 [ - + ]: 115 : SpinLockAcquire(&walsnd->mutex);
2757 : 115 : walsnd->replyTime = replyTime;
2758 : 115 : SpinLockRelease(&walsnd->mutex);
2759 : :
2760 : : /*
 2761                 :                :      * Consider only transactions in the current database, as only these
 2762                 :                :      * are replicated.
2763 : : */
2764 : 115 : oldestXidInCommit = GetOldestActiveTransactionId(true, false);
49 2765 : 115 : oldestGXidInCommit = TwoPhaseGetOldestXidInCommit();
2766 : :
2767 : : /*
2768 : : * Update the oldest xid for standby transmission if an older prepared
2769 : : * transaction exists and is currently in commit phase.
2770 : : */
2771 [ - + - - ]: 115 : if (TransactionIdIsValid(oldestGXidInCommit) &&
49 akapila@postgresql.o 2772 :UNC 0 : TransactionIdPrecedes(oldestGXidInCommit, oldestXidInCommit))
2773 : 0 : oldestXidInCommit = oldestGXidInCommit;
2774 : :
96 akapila@postgresql.o 2775 :GNC 115 : nextFullXid = ReadNextFullTransactionId();
2776 : 115 : fullOldestXidInCommit = FullTransactionIdFromAllowableAt(nextFullXid,
2777 : : oldestXidInCommit);
2778 : 115 : lsn = GetXLogWriteRecPtr();
2779 : :
2780 [ + + ]: 115 : elog(DEBUG2, "sending primary status");
2781 : :
2782 : : /* construct the message... */
2783 : 115 : resetStringInfo(&output_message);
82 nathan@postgresql.or 2784 : 115 : pq_sendbyte(&output_message, PqReplMsg_PrimaryStatusUpdate);
96 akapila@postgresql.o 2785 : 115 : pq_sendint64(&output_message, lsn);
2786 : 115 : pq_sendint64(&output_message, (int64) U64FromFullTransactionId(fullOldestXidInCommit));
2787 : 115 : pq_sendint64(&output_message, (int64) U64FromFullTransactionId(nextFullXid));
2788 : 115 : pq_sendint64(&output_message, GetCurrentTimestamp());
2789 : :
2790 : : /* ... and send it wrapped in CopyData */
nathan@postgresql.or 2791 : 115 : pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
akapila@postgresql.o 2792 : 115 : }
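
/*
 * Illustrative sketch, not part of walsender.c: the reply constructed above
 * is a CopyData payload consisting of the PqReplMsg_PrimaryStatusUpdate
 * byte followed by four big-endian 64-bit fields (write LSN, oldest full
 * xid in commit, next full xid, send timestamp). A receiver could decode
 * the body roughly as below; the struct and helper names are invented.
 */
#include <stdint.h>

typedef struct sketch_primary_status
{
	uint64_t	write_lsn;
	uint64_t	oldest_full_xid_in_commit;
	uint64_t	next_full_xid;
	int64_t		sent_at;
} sketch_primary_status;

static uint64_t
sketch_read_be64(const uint8_t *p)
{
	uint64_t	v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* "body" points just past the message-type byte; at least 32 bytes long */
static sketch_primary_status
sketch_decode_primary_status(const uint8_t *body)
{
	sketch_primary_status st;

	st.write_lsn = sketch_read_be64(body);
	st.oldest_full_xid_in_commit = sketch_read_be64(body + 8);
	st.next_full_xid = sketch_read_be64(body + 16);
	st.sent_at = (int64_t) sketch_read_be64(body + 24);
	return st;
}
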
2793 : :
2794 : : /*
2795 : : * Compute how long send/receive loops should sleep.
2796 : : *
2797 : : * If wal_sender_timeout is enabled we want to wake up in time to send
2798 : : * keepalives and to abort the connection if wal_sender_timeout has been
2799 : : * reached.
2800 : : */
2801 : : static long
4249 rhaas@postgresql.org 2802 :CBC 84387 : WalSndComputeSleeptime(TimestampTz now)
2803 : : {
3050 tgl@sss.pgh.pa.us 2804 : 84387 : long sleeptime = 10000; /* 10 s */
2805 : :
4169 andres@anarazel.de 2806 [ + - + + ]: 84387 : if (wal_sender_timeout > 0 && last_reply_timestamp > 0)
2807 : : {
2808 : : TimestampTz wakeup_time;
2809 : :
2810 : : /*
 2811                 :                :          * Stop sleeping, at the latest, once wal_sender_timeout has been
 2812                 :                :          * reached.
2813 : : */
4249 rhaas@postgresql.org 2814 : 84386 : wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
2815 : : wal_sender_timeout);
2816 : :
2817 : : /*
 2818                 :                :          * If no ping has been sent yet, wake up when it's time to do so.
2819 : : * WalSndKeepaliveIfNecessary() wants to send a keepalive once half of
2820 : : * the timeout passed without a response.
2821 : : */
2822 [ + + ]: 84386 : if (!waiting_for_ping_response)
2823 : 84186 : wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
2824 : : wal_sender_timeout / 2);
2825 : :
2826 : : /* Compute relative time until wakeup. */
1812 tgl@sss.pgh.pa.us 2827 : 84386 : sleeptime = TimestampDifferenceMilliseconds(now, wakeup_time);
2828 : : }
2829 : :
4249 rhaas@postgresql.org 2830 : 84387 : return sleeptime;
2831 : : }
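
/*
 * Illustrative sketch, not part of walsender.c: with the default
 * wal_sender_timeout of 60s and no keepalive outstanding, the wakeup target
 * is last_reply_timestamp + 30s, so a walsender whose last reply arrived
 * 12s ago sleeps at most about 18s. The standalone helper below restates
 * the arithmetic in plain milliseconds; all names are invented.
 */
static long
sketch_compute_sleeptime_ms(long now_ms, long last_reply_ms,
							long wal_sender_timeout_ms,
							int waiting_for_ping)
{
	long		wakeup_ms;

	if (wal_sender_timeout_ms <= 0 || last_reply_ms <= 0)
		return 10000;			/* default: 10 s */

	/* full timeout if a ping is already outstanding, half otherwise */
	wakeup_ms = last_reply_ms + (waiting_for_ping ?
								 wal_sender_timeout_ms :
								 wal_sender_timeout_ms / 2);

	return wakeup_ms > now_ms ? wakeup_ms - now_ms : 0;
}
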
2832 : :
2833 : : /*
2834 : : * Check whether there have been responses by the client within
 2835                 :                :  * wal_sender_timeout, and shut down if not. Using last_processing as the
2836 : : * reference point avoids counting server-side stalls against the client.
2837 : : * However, a long server-side stall can make WalSndKeepaliveIfNecessary()
2838 : : * postdate last_processing by more than wal_sender_timeout. If that happens,
2839 : : * the client must reply almost immediately to avoid a timeout. This rarely
2840 : : * affects the default configuration, under which clients spontaneously send a
2841 : : * message every standby_message_timeout = wal_sender_timeout/6 = 10s. We
2842 : : * could eliminate that problem by recognizing timeout expiration at
2843 : : * wal_sender_timeout/2 after the keepalive.
2844 : : */
2845 : : static void
2614 noah@leadboat.com 2846 : 1039954 : WalSndCheckTimeOut(void)
2847 : : {
2848 : : TimestampTz timeout;
2849 : :
2850 : : /* don't bail out if we're doing something that doesn't require timeouts */
4169 andres@anarazel.de 2851 [ + + ]: 1039954 : if (last_reply_timestamp <= 0)
2852 : 1 : return;
2853 : :
4249 rhaas@postgresql.org 2854 : 1039953 : timeout = TimestampTzPlusMilliseconds(last_reply_timestamp,
2855 : : wal_sender_timeout);
2856 : :
2614 noah@leadboat.com 2857 [ + - - + ]: 1039953 : if (wal_sender_timeout > 0 && last_processing >= timeout)
2858 : : {
2859 : : /*
2860 : : * Since typically expiration of replication timeout means
2861 : : * communication problem, we don't send the error message to the
2862 : : * standby.
2863 : : */
4249 rhaas@postgresql.org 2864 [ # # ]:UBC 0 : ereport(COMMERROR,
2865 : : (errmsg("terminating walsender process due to replication timeout")));
2866 : :
2867 : 0 : WalSndShutdown();
2868 : : }
2869 : : }
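
/*
 * Illustrative sketch, not part of walsender.c: the timeout test above in
 * isolation. The reference clock is last_processing (when replies were last
 * looked for), not the current time, so server-side stalls are not counted
 * against the client. All names are invented.
 */
#include <stdbool.h>

static bool
sketch_timed_out(long last_processing_ms, long last_reply_ms,
				 long wal_sender_timeout_ms)
{
	return wal_sender_timeout_ms > 0 &&
		last_processing_ms >= last_reply_ms + wal_sender_timeout_ms;
}
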
2870 : :
2871 : : /* Main loop of walsender process that streams the WAL over Copy messages. */
2872 : : static void
4249 rhaas@postgresql.org 2873 :CBC 654 : WalSndLoop(WalSndSendDataCallback send_data)
2874 : : {
202 michael@paquier.xyz 2875 : 654 : TimestampTz last_flush = 0;
2876 : :
2877 : : /*
2878 : : * Initialize the last reply timestamp. That enables timeout processing
2879 : : * from hereon.
2880 : : */
5325 heikki.linnakangas@i 2881 : 654 : last_reply_timestamp = GetCurrentTimestamp();
4249 rhaas@postgresql.org 2882 : 654 : waiting_for_ping_response = false;
2883 : :
2884 : : /*
2885 : : * Loop until we reach the end of this timeline or the client requests to
2886 : : * stop streaming.
2887 : : */
2888 : : for (;;)
2889 : : {
2890 : : /* Clear any already-pending wakeups */
3936 andres@anarazel.de 2891 : 1036888 : ResetLatch(MyLatch);
2892 : :
2893 [ + + ]: 1036888 : CHECK_FOR_INTERRUPTS();
2894 : :
2895 : : /* Process any requests or signals received recently */
3066 2896 [ + + ]: 1036885 : if (ConfigReloadPending)
2897 : : {
2898 : 17 : ConfigReloadPending = false;
5764 heikki.linnakangas@i 2899 : 17 : ProcessConfigFile(PGC_SIGHUP);
5349 simon@2ndQuadrant.co 2900 : 17 : SyncRepInitConfig();
2901 : : }
2902 : :
2903 : : /* Check for input from the client */
5192 tgl@sss.pgh.pa.us 2904 : 1036885 : ProcessRepliesIfAny();
2905 : :
2906 : : /*
2907 : : * If we have received CopyDone from the client, sent CopyDone
2908 : : * ourselves, and the output buffer is empty, it's time to exit
2909 : : * streaming.
2910 : : */
3041 2911 [ + + + - ]: 1036802 : if (streamingDoneReceiving && streamingDoneSending &&
2912 [ + + ]: 522 : !pq_is_send_pending())
4701 heikki.linnakangas@i 2913 : 335 : break;
2914 : :
2915 : : /*
2916 : : * If we don't have any pending data in the output buffer, try to send
2917 : : * some more. If there is some, we don't bother to call send_data
2918 : : * again until we've flushed it ... but we'd better assume we are not
2919 : : * caught up.
2920 : : */
5325 2921 [ + + ]: 1036467 : if (!pq_is_send_pending())
4249 rhaas@postgresql.org 2922 : 997776 : send_data();
2923 : : else
2924 : 38691 : WalSndCaughtUp = false;
2925 : :
2926 : : /* Try to flush pending output to the client */
5192 tgl@sss.pgh.pa.us 2927 [ - + ]: 1036266 : if (pq_flush_if_writable() != 0)
4249 rhaas@postgresql.org 2928 :UBC 0 : WalSndShutdown();
2929 : :
2930 : : /* If nothing remains to be sent right now ... */
4249 rhaas@postgresql.org 2931 [ + + + + ]:CBC 1036266 : if (WalSndCaughtUp && !pq_is_send_pending())
2932 : : {
2933 : : /*
2934 : : * If we're in catchup state, move to streaming. This is an
2935 : : * important state change for users to know about, since before
2936 : : * this point data loss might occur if the primary dies and we
2937 : : * need to failover to the standby. The state change is also
2938 : : * important for synchronous replication, since commits that
2939 : : * started to wait at that point might wait for some time.
2940 : : */
5192 tgl@sss.pgh.pa.us 2941 [ + + ]: 75959 : if (MyWalSnd->state == WALSNDSTATE_CATCHUP)
2942 : : {
2943 [ + + ]: 616 : ereport(DEBUG1,
2944 : : (errmsg_internal("\"%s\" has now caught up with upstream server",
2945 : : application_name)));
2946 : 616 : WalSndSetState(WALSNDSTATE_STREAMING);
2947 : : }
2948 : :
2949 : : /*
2950 : : * When SIGUSR2 arrives, we send any outstanding logs up to the
2951 : : * shutdown checkpoint record (i.e., the latest record), wait for
2952 : : * them to be replicated to the standby, and exit. This may be a
 2953                 :                :              * normal termination at shutdown, or a promotion; the walsender
 2954                 :                :              * is not sure which.
2955 : : */
3066 andres@anarazel.de 2956 [ + + ]: 75959 : if (got_SIGUSR2)
4249 rhaas@postgresql.org 2957 : 414 : WalSndDone(send_data);
2958 : : }
2959 : :
2960 : : /* Check for replication timeout. */
2614 noah@leadboat.com 2961 : 1036234 : WalSndCheckTimeOut();
2962 : :
2963 : : /* Send keepalive if the time has come */
2964 : 1036234 : WalSndKeepaliveIfNecessary();
2965 : :
2966 : : /*
2967 : : * Block if we have unsent data. XXX For logical replication, let
2968 : : * WalSndWaitForWal() handle any other blocking; idle receivers need
2969 : : * its additional actions. For physical replication, also block if
2970 : : * caught up; its send_data does not block.
2971 : : *
2972 : : * The IO statistics are reported in WalSndWaitForWal() for the
2973 : : * logical WAL senders.
2974 : : */
2011 2975 [ + + + + ]: 1036234 : if ((WalSndCaughtUp && send_data != XLogSendLogical &&
2976 [ + + + + ]: 1067061 : !streamingDoneSending) ||
2977 : 992233 : pq_is_send_pending())
2978 : : {
2979 : : long sleeptime;
2980 : : int wakeEvents;
2981 : : TimestampTz now;
2982 : :
1778 jdavis@postgresql.or 2983 [ + + ]: 81001 : if (!streamingDoneReceiving)
1701 tmunro@postgresql.or 2984 : 80984 : wakeEvents = WL_SOCKET_READABLE;
2985 : : else
2986 : 17 : wakeEvents = 0;
2987 : :
2988 : : /*
2989 : : * Use fresh timestamp, not last_processing, to reduce the chance
2990 : : * of reaching wal_sender_timeout before sending a keepalive.
2991 : : */
202 michael@paquier.xyz 2992 : 81001 : now = GetCurrentTimestamp();
2993 : 81001 : sleeptime = WalSndComputeSleeptime(now);
2994 : :
2011 noah@leadboat.com 2995 [ + + ]: 81001 : if (pq_is_send_pending())
2996 : 38638 : wakeEvents |= WL_SOCKET_WRITEABLE;
2997 : :
2998 : : /* Report IO statistics, if needed */
202 michael@paquier.xyz 2999 [ + + ]: 81001 : if (TimestampDifferenceExceeds(last_flush, now,
3000 : : WALSENDER_STATS_FLUSH_INTERVAL))
3001 : : {
3002 : 489 : pgstat_flush_io(false);
3003 : 489 : (void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
3004 : 489 : last_flush = now;
3005 : : }
3006 : :
3007 : : /* Sleep until something happens or we time out */
1701 tmunro@postgresql.or 3008 : 81001 : WalSndWait(wakeEvents, sleeptime, WAIT_EVENT_WAL_SENDER_MAIN);
3009 : : }
3010 : : }
5764 heikki.linnakangas@i 3011 : 335 : }
3012 : :
3013 : : /* Initialize a per-walsender data structure for this walsender process */
3014 : : static void
4770 3015 : 1112 : InitWalSenderSlot(void)
3016 : : {
3017 : : int i;
3018 : :
3019 : : /*
3020 : : * WalSndCtl should be set up already (we inherit this by fork() or
3021 : : * EXEC_BACKEND mechanism from the postmaster).
3022 : : */
5764 3023 [ - + ]: 1112 : Assert(WalSndCtl != NULL);
3024 [ - + ]: 1112 : Assert(MyWalSnd == NULL);
3025 : :
3026 : : /*
3027 : : * Find a free walsender slot and reserve it. This must not fail due to
3028 : : * the prior check for free WAL senders in InitProcess().
3029 : : */
5688 rhaas@postgresql.org 3030 [ + - ]: 1651 : for (i = 0; i < max_wal_senders; i++)
3031 : : {
3427 3032 : 1651 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3033 : :
5764 heikki.linnakangas@i 3034 [ - + ]: 1651 : SpinLockAcquire(&walsnd->mutex);
3035 : :
3036 [ + + ]: 1651 : if (walsnd->pid != 0)
3037 : : {
3038 : 539 : SpinLockRelease(&walsnd->mutex);
3039 : 539 : continue;
3040 : : }
3041 : : else
3042 : : {
3043 : : /*
3044 : : * Found a free slot. Reserve it for us.
3045 : : */
3046 : 1112 : walsnd->pid = MyProcPid;
2018 tgl@sss.pgh.pa.us 3047 : 1112 : walsnd->state = WALSNDSTATE_STARTUP;
4687 alvherre@alvh.no-ip. 3048 : 1112 : walsnd->sentPtr = InvalidXLogRecPtr;
2018 tgl@sss.pgh.pa.us 3049 : 1112 : walsnd->needreload = false;
3606 magnus@hagander.net 3050 : 1112 : walsnd->write = InvalidXLogRecPtr;
3051 : 1112 : walsnd->flush = InvalidXLogRecPtr;
3052 : 1112 : walsnd->apply = InvalidXLogRecPtr;
3140 simon@2ndQuadrant.co 3053 : 1112 : walsnd->writeLag = -1;
3054 : 1112 : walsnd->flushLag = -1;
3055 : 1112 : walsnd->applyLag = -1;
2018 tgl@sss.pgh.pa.us 3056 : 1112 : walsnd->sync_standby_priority = 0;
2514 michael@paquier.xyz 3057 : 1112 : walsnd->replyTime = 0;
3058 : :
3059 : : /*
3060 : : * The kind assignment is done here and not in StartReplication()
3061 : : * and StartLogicalReplication(). Indeed, the logical walsender
 3062                 :                :              * needs to read WAL records (such as the snapshot of running
3063 : : * transactions) during the slot creation. So it needs to be woken
3064 : : * up based on its kind.
3065 : : *
3066 : : * The kind assignment could also be done in StartReplication(),
 3067                 :                :              * StartLogicalReplication() and CREATE_REPLICATION_SLOT, but it
 3068                 :                :              * seems better to set it in one place.
3069 : : */
933 andres@anarazel.de 3070 [ + + ]: 1112 : if (MyDatabaseId == InvalidOid)
3071 : 453 : walsnd->kind = REPLICATION_KIND_PHYSICAL;
3072 : : else
3073 : 659 : walsnd->kind = REPLICATION_KIND_LOGICAL;
3074 : :
5764 heikki.linnakangas@i 3075 : 1112 : SpinLockRelease(&walsnd->mutex);
3076 : : /* don't need the lock anymore */
5521 3077 : 1112 : MyWalSnd = (WalSnd *) walsnd;
3078 : :
5764 3079 : 1112 : break;
3080 : : }
3081 : : }
3082 : :
2449 michael@paquier.xyz 3083 [ - + ]: 1112 : Assert(MyWalSnd != NULL);
3084 : :
3085 : : /* Arrange to clean up at walsender exit */
5764 heikki.linnakangas@i 3086 : 1112 : on_shmem_exit(WalSndKill, 0);
3087 : 1112 : }
3088 : :
3089 : : /* Destroy the per-walsender data structure for this walsender process */
3090 : : static void
3091 : 1112 : WalSndKill(int code, Datum arg)
3092 : : {
4286 tgl@sss.pgh.pa.us 3093 : 1112 : WalSnd *walsnd = MyWalSnd;
3094 : :
3095 [ - + ]: 1112 : Assert(walsnd != NULL);
3096 : :
3097 : 1112 : MyWalSnd = NULL;
3098 : :
3936 andres@anarazel.de 3099 [ - + ]: 1112 : SpinLockAcquire(&walsnd->mutex);
3100 : : /* Mark WalSnd struct as no longer being in use. */
4286 tgl@sss.pgh.pa.us 3101 : 1112 : walsnd->pid = 0;
3936 andres@anarazel.de 3102 : 1112 : SpinLockRelease(&walsnd->mutex);
5764 heikki.linnakangas@i 3103 : 1112 : }
3104 : :
3105 : : /* XLogReaderRoutine->segment_open callback */
3106 : : static void
1993 alvherre@alvh.no-ip. 3107 : 1996 : WalSndSegmentOpen(XLogReaderState *state, XLogSegNo nextSegNo,
3108 : : TimeLineID *tli_p)
3109 : : {
3110 : : char path[MAXPGPATH];
3111 : :
3112 : : /*-------
3113 : : * When reading from a historic timeline, and there is a timeline switch
3114 : : * within this segment, read from the WAL segment belonging to the new
3115 : : * timeline.
3116 : : *
3117 : : * For example, imagine that this server is currently on timeline 5, and
3118 : : * we're streaming timeline 4. The switch from timeline 4 to 5 happened at
3119 : : * 0/13002088. In pg_wal, we have these files:
3120 : : *
3121 : : * ...
3122 : : * 000000040000000000000012
3123 : : * 000000040000000000000013
3124 : : * 000000050000000000000013
3125 : : * 000000050000000000000014
3126 : : * ...
3127 : : *
3128 : : * In this situation, when requested to send the WAL from segment 0x13, on
3129 : : * timeline 4, we read the WAL from file 000000050000000000000013. Archive
3130 : : * recovery prefers files from newer timelines, so if the segment was
3131 : : * restored from the archive on this server, the file belonging to the old
3132 : : * timeline, 000000040000000000000013, might not exist. Their contents are
3133 : : * equal up to the switchpoint, because at a timeline switch, the used
3134 : : * portion of the old segment is copied to the new file.
3135 : : */
2163 3136 : 1996 : *tli_p = sendTimeLine;
3137 [ + + ]: 1996 : if (sendTimeLineIsHistoric)
3138 : : {
3139 : : XLogSegNo endSegNo;
3140 : :
1993 3141 : 13 : XLByteToSeg(sendTimeLineValidUpto, endSegNo, state->segcxt.ws_segsize);
1747 fujii@postgresql.org 3142 [ + + ]: 13 : if (nextSegNo == endSegNo)
2163 alvherre@alvh.no-ip. 3143 : 11 : *tli_p = sendTimeLineNextTLI;
3144 : : }
3145 : :
1993 3146 : 1996 : XLogFilePath(path, *tli_p, nextSegNo, state->segcxt.ws_segsize);
3147 : 1996 : state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
3148 [ + + ]: 1996 : if (state->seg.ws_file >= 0)
3149 : 1995 : return;
3150 : :
3151 : : /*
3152 : : * If the file is not found, assume it's because the standby asked for a
3153 : : * too old WAL segment that has already been removed or recycled.
3154 : : */
2163 alvherre@alvh.no-ip. 3155 [ + - ]:GBC 1 : if (errno == ENOENT)
3156 : : {
3157 : : char xlogfname[MAXFNAMELEN];
2155 michael@paquier.xyz 3158 : 1 : int save_errno = errno;
3159 : :
3160 : 1 : XLogFileName(xlogfname, *tli_p, nextSegNo, wal_segment_size);
3161 : 1 : errno = save_errno;
2163 alvherre@alvh.no-ip. 3162 [ + - ]: 1 : ereport(ERROR,
3163 : : (errcode_for_file_access(),
3164 : : errmsg("requested WAL segment %s has already been removed",
3165 : : xlogfname)));
3166 : : }
3167 : : else
2163 alvherre@alvh.no-ip. 3168 [ # # ]:UBC 0 : ereport(ERROR,
3169 : : (errcode_for_file_access(),
3170 : : errmsg("could not open file \"%s\": %m",
3171 : : path)));
3172 : : }
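
/*
 * Illustrative sketch, not part of walsender.c: the timeline choice above
 * amounts to "open the next timeline's copy of the segment that contains
 * the switch point". With 16MB segments and the switch at 0/13002088 from
 * the example comment, segment 0x13 is opened under timeline 5. The helper
 * below is standalone and simplified; all names are invented.
 */
#include <stdint.h>

static uint32_t
sketch_choose_segment_tli(uint64_t seg_no, uint32_t send_tli,
						  int tli_is_historic, uint64_t switch_lsn,
						  uint32_t next_tli, uint64_t wal_segment_size)
{
	if (tli_is_historic && seg_no == switch_lsn / wal_segment_size)
		return next_tli;
	return send_tli;
}
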
3173 : :
3174 : : /*
3175 : : * Send out the WAL in its normal physical/stored form.
3176 : : *
3177 : : * Read up to MAX_SEND_SIZE bytes of WAL that's been flushed to disk,
3178 : : * but not yet sent to the client, and buffer it in the libpq output
3179 : : * buffer.
3180 : : *
3181 : : * If there is no unsent WAL remaining, WalSndCaughtUp is set to true,
3182 : : * otherwise WalSndCaughtUp is set to false.
3183 : : */
3184 : : static void
4249 rhaas@postgresql.org 3185 :CBC 157946 : XLogSendPhysical(void)
3186 : : {
3187 : : XLogRecPtr SendRqstPtr;
3188 : : XLogRecPtr startptr;
3189 : : XLogRecPtr endptr;
3190 : : Size nbytes;
3191 : : XLogSegNo segno;
3192 : : WALReadError errinfo;
3193 : : Size rbytes;
3194 : :
3195 : : /* If requested switch the WAL sender to the stopping state. */
3066 andres@anarazel.de 3196 [ + + ]: 157946 : if (got_STOPPING)
3197 : 505 : WalSndSetState(WALSNDSTATE_STOPPING);
3198 : :
4701 heikki.linnakangas@i 3199 [ + + ]: 157946 : if (streamingDoneSending)
3200 : : {
4249 rhaas@postgresql.org 3201 : 30814 : WalSndCaughtUp = true;
4701 heikki.linnakangas@i 3202 : 57428 : return;
3203 : : }
3204 : :
3205 : : /* Figure out how far we can safely send the WAL. */
4693 3206 [ + + ]: 127132 : if (sendTimeLineIsHistoric)
3207 : : {
3208 : : /*
3209 : : * Streaming an old timeline that's in this server's history, but is
3210 : : * not the one we're currently inserting or replaying. It can be
3211 : : * streamed up to the point where we switched off that timeline.
3212 : : */
3213 : 167 : SendRqstPtr = sendTimeLineValidUpto;
3214 : : }
3215 [ + + ]: 126965 : else if (am_cascading_walsender)
3216 : : {
3217 : : TimeLineID SendRqstTLI;
3218 : :
3219 : : /*
3220 : : * Streaming the latest timeline on a standby.
3221 : : *
3222 : : * Attempt to send all WAL that has already been replayed, so that we
3223 : : * know it's valid. If we're receiving WAL through streaming
3224 : : * replication, it's also OK to send any WAL that has been received
3225 : : * but not replayed.
3226 : : *
3227 : : * The timeline we're recovering from can change, or we can be
3228 : : * promoted. In either case, the current timeline becomes historic. We
3229 : : * need to detect that so that we don't try to stream past the point
3230 : : * where we switched to another timeline. We check for promotion or
3231 : : * timeline switch after calculating FlushPtr, to avoid a race
3232 : : * condition: if the timeline becomes historic just after we checked
 3233                 :                :          * that it was still current, it's still OK to stream it up to the
3234 : : * FlushPtr that was calculated before it became historic.
3235 : : */
4701 3236 : 688 : bool becameHistoric = false;
3237 : :
1452 rhaas@postgresql.org 3238 : 688 : SendRqstPtr = GetStandbyFlushRecPtr(&SendRqstTLI);
3239 : :
4701 heikki.linnakangas@i 3240 [ - + ]: 688 : if (!RecoveryInProgress())
3241 : : {
3242 : : /* We have been promoted. */
1452 rhaas@postgresql.org 3243 :UBC 0 : SendRqstTLI = GetWALInsertionTimeLine();
4701 heikki.linnakangas@i 3244 : 0 : am_cascading_walsender = false;
3245 : 0 : becameHistoric = true;
3246 : : }
3247 : : else
3248 : : {
3249 : : /*
3250 : : * Still a cascading standby. But is the timeline we're sending
3251 : : * still the one recovery is recovering from?
3252 : : */
1452 rhaas@postgresql.org 3253 [ - + ]:CBC 688 : if (sendTimeLine != SendRqstTLI)
4701 heikki.linnakangas@i 3254 :UBC 0 : becameHistoric = true;
3255 : : }
3256 : :
4701 heikki.linnakangas@i 3257 [ - + ]:CBC 688 : if (becameHistoric)
3258 : : {
3259 : : /*
3260 : : * The timeline we were sending has become historic. Read the
3261 : : * timeline history file of the new timeline to see where exactly
3262 : : * we forked off from the timeline we were sending.
3263 : : */
3264 : : List *history;
3265 : :
1452 rhaas@postgresql.org 3266 :UBC 0 : history = readTimeLineHistory(SendRqstTLI);
4666 heikki.linnakangas@i 3267 : 0 : sendTimeLineValidUpto = tliSwitchPoint(sendTimeLine, history, &sendTimeLineNextTLI);
3268 : :
3269 [ # # ]: 0 : Assert(sendTimeLine < sendTimeLineNextTLI);
4701 3270 : 0 : list_free_deep(history);
3271 : :
3272 : 0 : sendTimeLineIsHistoric = true;
3273 : :
4693 3274 : 0 : SendRqstPtr = sendTimeLineValidUpto;
3275 : : }
3276 : : }
3277 : : else
3278 : : {
3279 : : /*
3280 : : * Streaming the current timeline on a primary.
3281 : : *
3282 : : * Attempt to send all data that's already been written out and
3283 : : * fsync'd to disk. We cannot go further than what's been written out
3284 : : * given the current implementation of WALRead(). And in any case
3285 : : * it's unsafe to send WAL that is not securely down to disk on the
3286 : : * primary: if the primary subsequently crashes and restarts, standbys
3287 : : * must not have applied any WAL that got lost on the primary.
3288 : : */
1452 rhaas@postgresql.org 3289 :CBC 126277 : SendRqstPtr = GetFlushRecPtr(NULL);
3290 : : }
3291 : :
3292 : : /*
3293 : : * Record the current system time as an approximation of the time at which
3294 : : * this WAL location was written for the purposes of lag tracking.
3295 : : *
3296 : : * In theory we could make XLogFlush() record a time in shmem whenever WAL
3297 : : * is flushed and we could get that time as well as the LSN when we call
3298 : : * GetFlushRecPtr() above (and likewise for the cascading standby
3299 : : * equivalent), but rather than putting any new code into the hot WAL path
3300 : : * it seems good enough to capture the time here. We should reach this
3301 : : * after XLogFlush() runs WalSndWakeupProcessRequests(), and although that
3302 : : * may take some time, we read the WAL flush pointer and take the time
 3303                 :                :      * very close together here so that we'll get a later position if it is
3304 : : * still moving.
3305 : : *
3306 : : * Because LagTrackerWrite ignores samples when the LSN hasn't advanced,
3307 : : * this gives us a cheap approximation for the WAL flush time for this
3308 : : * LSN.
3309 : : *
3310 : : * Note that the LSN is not necessarily the LSN for the data contained in
3311 : : * the present message; it's the end of the WAL, which might be further
3312 : : * ahead. All the lag tracking machinery cares about is finding out when
3313 : : * that arbitrary LSN is eventually reported as written, flushed and
3314 : : * applied, so that it can measure the elapsed time.
3315 : : */
3140 simon@2ndQuadrant.co 3316 : 127132 : LagTrackerWrite(SendRqstPtr, GetCurrentTimestamp());
3317 : :
3318 : : /*
3319 : : * If this is a historic timeline and we've reached the point where we
3320 : : * forked to the next timeline, stop streaming.
3321 : : *
3322 : : * Note: We might already have sent WAL > sendTimeLineValidUpto. The
3323 : : * startup process will normally replay all WAL that has been received
3324 : : * from the primary, before promoting, but if the WAL streaming is
3325 : : * terminated at a WAL page boundary, the valid portion of the timeline
3326 : : * might end in the middle of a WAL record. We might've already sent the
3327 : : * first half of that partial WAL record to the cascading standby, so that
3328 : : * sentPtr > sendTimeLineValidUpto. That's OK; the cascading standby can't
3329 : : * replay the partial WAL record either, so it can still follow our
3330 : : * timeline switch.
3331 : : */
4686 alvherre@alvh.no-ip. 3332 [ + + + + ]: 127132 : if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr)
3333 : : {
3334 : : /* close the current file. */
1993 3335 [ + - ]: 13 : if (xlogreader->seg.ws_file >= 0)
3336 : 13 : wal_segment_close(xlogreader);
3337 : :
3338 : : /* Send CopyDone */
96 nathan@postgresql.or 3339 :GNC 13 : pq_putmessage_noblock(PqMsg_CopyDone, NULL, 0);
4701 heikki.linnakangas@i 3340 :CBC 13 : streamingDoneSending = true;
3341 : :
4249 rhaas@postgresql.org 3342 : 13 : WalSndCaughtUp = true;
3343 : :
112 alvherre@kurilemu.de 3344 [ - + ]:GNC 13 : elog(DEBUG1, "walsender reached end of timeline at %X/%08X (sent up to %X/%08X)",
3345 : : LSN_FORMAT_ARGS(sendTimeLineValidUpto),
3346 : : LSN_FORMAT_ARGS(sentPtr));
4701 heikki.linnakangas@i 3347 :CBC 13 : return;
3348 : : }
3349 : :
3350 : : /* Do we have any work to do? */
4686 alvherre@alvh.no-ip. 3351 [ - + ]: 127119 : Assert(sentPtr <= SendRqstPtr);
3352 [ + + ]: 127119 : if (SendRqstPtr <= sentPtr)
3353 : : {
4249 rhaas@postgresql.org 3354 : 26601 : WalSndCaughtUp = true;
5325 heikki.linnakangas@i 3355 : 26601 : return;
3356 : : }
3357 : :
3358 : : /*
3359 : : * Figure out how much to send in one message. If there's no more than
3360 : : * MAX_SEND_SIZE bytes to send, send everything. Otherwise send
 3361                 :                :      * MAX_SEND_SIZE bytes, but round back to a logfile or page boundary.
3362 : : *
3363 : : * The rounding is not only for performance reasons. Walreceiver relies on
3364 : : * the fact that we never split a WAL record across two messages. Since a
3365 : : * long WAL record is split at page boundary into continuation records,
3366 : : * page boundary is always a safe cut-off point. We also assume that
3367 : : * SendRqstPtr never points to the middle of a WAL record.
3368 : : */
5633 3369 : 100518 : startptr = sentPtr;
3370 : 100518 : endptr = startptr;
4686 alvherre@alvh.no-ip. 3371 : 100518 : endptr += MAX_SEND_SIZE;
3372 : :
3373 : : /* if we went beyond SendRqstPtr, back off */
3374 [ + + ]: 100518 : if (SendRqstPtr <= endptr)
3375 : : {
5625 tgl@sss.pgh.pa.us 3376 : 17657 : endptr = SendRqstPtr;
4701 heikki.linnakangas@i 3377 [ + + ]: 17657 : if (sendTimeLineIsHistoric)
4249 rhaas@postgresql.org 3378 : 13 : WalSndCaughtUp = false;
3379 : : else
3380 : 17644 : WalSndCaughtUp = true;
3381 : : }
3382 : : else
3383 : : {
3384 : : /* round down to page boundary. */
4873 heikki.linnakangas@i 3385 : 82861 : endptr -= (endptr % XLOG_BLCKSZ);
4249 rhaas@postgresql.org 3386 : 82861 : WalSndCaughtUp = false;
3387 : : }
3388 : :
4873 heikki.linnakangas@i 3389 : 100518 : nbytes = endptr - startptr;
5625 tgl@sss.pgh.pa.us 3390 [ - + ]: 100518 : Assert(nbytes <= MAX_SEND_SIZE);
3391 : :
3392 : : /*
3393 : : * OK to read and send the slice.
3394 : : */
4737 heikki.linnakangas@i 3395 : 100518 : resetStringInfo(&output_message);
82 nathan@postgresql.or 3396 :GNC 100518 : pq_sendbyte(&output_message, PqReplMsg_WALData);
3397 : :
4737 heikki.linnakangas@i 3398 :CBC 100518 : pq_sendint64(&output_message, startptr); /* dataStart */
4534 bruce@momjian.us 3399 : 100518 : pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
3400 : 100518 : pq_sendint64(&output_message, 0); /* sendtime, filled in last */
3401 : :
3402 : : /*
3403 : : * Read the log directly into the output buffer to avoid extra memcpy
3404 : : * calls.
3405 : : */
4737 heikki.linnakangas@i 3406 : 100518 : enlargeStringInfo(&output_message, nbytes);
3407 : :
2163 alvherre@alvh.no-ip. 3408 : 100518 : retry:
3409 : : /* attempt to read WAL from WAL buffers first */
623 jdavis@postgresql.or 3410 : 100518 : rbytes = WALReadFromBuffers(&output_message.data[output_message.len],
3411 : 100518 : startptr, nbytes, xlogreader->seg.ws_tli);
3412 : 100518 : output_message.len += rbytes;
3413 : 100518 : startptr += rbytes;
3414 : 100518 : nbytes -= rbytes;
3415 : :
3416 : : /* now read the remaining WAL from WAL file */
3417 [ + + ]: 100518 : if (nbytes > 0 &&
3418 [ - + ]: 93320 : !WALRead(xlogreader,
1998 alvherre@alvh.no-ip. 3419 : 93321 : &output_message.data[output_message.len],
3420 : : startptr,
3421 : : nbytes,
1993 3422 : 93321 : xlogreader->seg.ws_tli, /* Pass the current TLI because
3423 : : * only WalSndSegmentOpen controls
3424 : : * whether new TLI is needed. */
3425 : : &errinfo))
2163 alvherre@alvh.no-ip. 3426 :UBC 0 : WALReadRaiseError(&errinfo);
3427 : :
3428 : : /* See logical_read_xlog_page(). */
1993 alvherre@alvh.no-ip. 3429 :CBC 100517 : XLByteToSeg(startptr, segno, xlogreader->segcxt.ws_segsize);
3430 : 100517 : CheckXLogRemoved(segno, xlogreader->seg.ws_tli);
3431 : :
3432 : : /*
3433 : : * During recovery, the currently-open WAL file might be replaced with the
3434 : : * file of the same name retrieved from archive. So we always need to
3435 : : * check what we read was valid after reading into the buffer. If it's
3436 : : * invalid, we try to open and read the file again.
3437 : : */
2163 3438 [ + + ]: 100517 : if (am_cascading_walsender)
3439 : : {
3440 : 484 : WalSnd *walsnd = MyWalSnd;
3441 : : bool reload;
3442 : :
3443 [ - + ]: 484 : SpinLockAcquire(&walsnd->mutex);
3444 : 484 : reload = walsnd->needreload;
3445 : 484 : walsnd->needreload = false;
3446 : 484 : SpinLockRelease(&walsnd->mutex);
3447 : :
1993 3448 [ - + - - ]: 484 : if (reload && xlogreader->seg.ws_file >= 0)
3449 : : {
1993 alvherre@alvh.no-ip. 3450 :UBC 0 : wal_segment_close(xlogreader);
3451 : :
2163 3452 : 0 : goto retry;
3453 : : }
3454 : : }
3455 : :
4737 heikki.linnakangas@i 3456 :CBC 100517 : output_message.len += nbytes;
3457 : 100517 : output_message.data[output_message.len] = '\0';
3458 : :
3459 : : /*
3460 : : * Fill the send timestamp last, so that it is taken as late as possible.
3461 : : */
3462 : 100517 : resetStringInfo(&tmpbuf);
3168 tgl@sss.pgh.pa.us 3463 : 100517 : pq_sendint64(&tmpbuf, GetCurrentTimestamp());
4737 heikki.linnakangas@i 3464 : 100517 : memcpy(&output_message.data[1 + sizeof(int64) + sizeof(int64)],
3465 : 100517 : tmpbuf.data, sizeof(int64));
3466 : :
96 nathan@postgresql.or 3467 :GNC 100517 : pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
3468 : :
5625 tgl@sss.pgh.pa.us 3469 :CBC 100517 : sentPtr = endptr;
3470 : :
3471 : : /* Update shared memory status */
3472 : : {
3427 rhaas@postgresql.org 3473 : 100517 : WalSnd *walsnd = MyWalSnd;
3474 : :
5625 tgl@sss.pgh.pa.us 3475 [ - + ]: 100517 : SpinLockAcquire(&walsnd->mutex);
3476 : 100517 : walsnd->sentPtr = sentPtr;
3477 : 100517 : SpinLockRelease(&walsnd->mutex);
3478 : : }
3479 : :
3480 : : /* Report progress of XLOG streaming in PS display */
3481 [ + - ]: 100517 : if (update_process_title)
3482 : : {
3483 : : char activitymsg[50];
3484 : :
112 alvherre@kurilemu.de 3485 :GNC 100517 : snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%08X",
1707 peter@eisentraut.org 3486 :CBC 100517 : LSN_FORMAT_ARGS(sentPtr));
2056 3487 : 100517 : set_ps_display(activitymsg);
3488 : : }
3489 : : }
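
/*
 * Illustrative sketch, not part of walsender.c: the chunking rule used
 * above, factored out. Up to MAX_SEND_SIZE bytes are sent per message; if
 * that would overshoot the send target, the chunk ends exactly there,
 * otherwise it is rounded down to a page boundary so that a WAL record is
 * never split across messages. Constants are passed as parameters only to
 * keep the helper standalone; all names are invented.
 */
#include <stdint.h>

static uint64_t
sketch_next_chunk_end(uint64_t sent_ptr, uint64_t send_rqst_ptr,
					  uint64_t max_send_size, uint64_t wal_block_size)
{
	uint64_t	endptr = sent_ptr + max_send_size;

	if (send_rqst_ptr <= endptr)
		return send_rqst_ptr;	/* final (possibly partial) chunk */

	return endptr - (endptr % wal_block_size);	/* page-aligned cut */
}
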
3490 : :
3491 : : /*
3492 : : * Stream out logically decoded data.
3493 : : */
3494 : : static void
4249 rhaas@postgresql.org 3495 : 840244 : XLogSendLogical(void)
3496 : : {
3497 : : XLogRecord *record;
3498 : : char *errm;
3499 : :
3500 : : /*
3501 : : * We'll use the current flush point to determine whether we've caught up.
3502 : : * This variable is static in order to cache it across calls. Caching is
3503 : : * helpful because GetFlushRecPtr() needs to acquire a heavily-contended
3504 : : * spinlock.
3505 : : */
3506 : : static XLogRecPtr flushPtr = InvalidXLogRecPtr;
3507 : :
3508 : : /*
3509 : : * Don't know whether we've caught up yet. We'll set WalSndCaughtUp to
3510 : : * true in WalSndWaitForWal, if we're actually waiting. We also set to
3511 : : * true if XLogReadRecord() had to stop reading but WalSndWaitForWal
3512 : : * didn't wait - i.e. when we're shutting down.
3513 : : */
3514 : 840244 : WalSndCaughtUp = false;
3515 : :
1631 tmunro@postgresql.or 3516 : 840244 : record = XLogReadRecord(logical_decoding_ctx->reader, &errm);
3517 : :
3518 : : /* xlog record was invalid */
4249 rhaas@postgresql.org 3519 [ - + ]: 840046 : if (errm != NULL)
1447 michael@paquier.xyz 3520 [ # # ]:UBC 0 : elog(ERROR, "could not find record while sending logically-decoded data: %s",
3521 : : errm);
3522 : :
4249 rhaas@postgresql.org 3523 [ + + ]:CBC 840046 : if (record != NULL)
3524 : : {
3525 : : /*
3526 : : * Note the lack of any call to LagTrackerWrite() which is handled by
3527 : : * WalSndUpdateProgress which is called by output plugin through
3528 : : * logical decoding write api.
3529 : : */
3994 heikki.linnakangas@i 3530 : 839511 : LogicalDecodingProcessRecord(logical_decoding_ctx, logical_decoding_ctx->reader);
3531 : :
4249 rhaas@postgresql.org 3532 : 839509 : sentPtr = logical_decoding_ctx->reader->EndRecPtr;
3533 : : }
3534 : :
3535 : : /*
3536 : : * If first time through in this session, initialize flushPtr. Otherwise,
3537 : : * we only need to update flushPtr if EndRecPtr is past it.
3538 : : */
933 andres@anarazel.de 3539 [ + + ]: 840044 : if (flushPtr == InvalidXLogRecPtr ||
3540 [ + + ]: 839677 : logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
3541 : : {
3542 : : /*
3543 : : * For cascading logical WAL senders, we use the replay LSN instead of
3544 : : * the flush LSN, since logical decoding on a standby only processes
3545 : : * WAL that has been replayed. This distinction becomes particularly
3546 : : * important during shutdown, as new WAL is no longer replayed and the
3547 : : * last replayed LSN marks the furthest point up to which decoding can
3548 : : * proceed.
3549 : : */
3550 [ - + ]: 3020 : if (am_cascading_walsender)
147 michael@paquier.xyz 3551 :UBC 0 : flushPtr = GetXLogReplayRecPtr(NULL);
3552 : : else
933 andres@anarazel.de 3553 :CBC 3020 : flushPtr = GetFlushRecPtr(NULL);
3554 : : }
3555 : :
                                 3556                 :                :     /* If EndRecPtr has reached or passed flushPtr, we have caught up. */
2202 alvherre@alvh.no-ip. 3557 [ + + ]: 840044 : if (logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
3558 : 1974 : WalSndCaughtUp = true;
3559 : :
3560 : : /*
3561 : : * If we're caught up and have been requested to stop, have WalSndLoop()
3562 : : * terminate the connection in an orderly manner, after writing out all
3563 : : * the pending data.
3564 : : */
3565 [ + + + + ]: 840044 : if (WalSndCaughtUp && got_STOPPING)
3566 : 385 : got_SIGUSR2 = true;
3567 : :
3568 : : /* Update shared memory status */
3569 : : {
3427 rhaas@postgresql.org 3570 : 840044 : WalSnd *walsnd = MyWalSnd;
3571 : :
4249 3572 [ - + ]: 840044 : SpinLockAcquire(&walsnd->mutex);
3573 : 840044 : walsnd->sentPtr = sentPtr;
3574 : 840044 : SpinLockRelease(&walsnd->mutex);
3575 : : }
3576 : 840044 : }
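
/*
 * Illustrative sketch, not part of walsender.c: the flush-pointer caching
 * above in miniature. The expensive lookup (a heavily-contended spinlock in
 * the real code) is repeated only once the decoded position has reached the
 * cached value; all names are invented and the provider is passed in as a
 * callback to keep the sketch self-contained.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
sketch_caught_up(uint64_t end_rec_ptr, uint64_t (*get_flush_ptr) (void))
{
	static uint64_t cached_flush_ptr = 0;

	if (cached_flush_ptr == 0 || end_rec_ptr >= cached_flush_ptr)
		cached_flush_ptr = get_flush_ptr();

	return end_rec_ptr >= cached_flush_ptr;
}
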
3577 : :
3578 : : /*
3579 : : * Shutdown if the sender is caught up.
3580 : : *
3581 : : * NB: This should only be called when the shutdown signal has been received
3582 : : * from postmaster.
3583 : : *
3584 : : * Note that if we determine that there's still more data to send, this
3585 : : * function will return control to the caller.
3586 : : */
3587 : : static void
3588 : 414 : WalSndDone(WalSndSendDataCallback send_data)
3589 : : {
3590 : : XLogRecPtr replicatedPtr;
3591 : :
3592 : : /* ... let's just be real sure we're caught up ... */
3593 : 414 : send_data();
3594 : :
3595 : : /*
3596 : : * To figure out whether all WAL has successfully been replicated, check
3597 : : * flush location if valid, write otherwise. Tools like pg_receivewal will
3598 : : * usually (unless in synchronous mode) return an invalid flush location.
3599 : : */
4242 fujii@postgresql.org 3600 : 828 : replicatedPtr = XLogRecPtrIsInvalid(MyWalSnd->flush) ?
3601 [ - + ]: 414 : MyWalSnd->write : MyWalSnd->flush;
3602 : :
3603 [ + + + + ]: 414 : if (WalSndCaughtUp && sentPtr == replicatedPtr &&
4249 rhaas@postgresql.org 3604 [ + - ]: 32 : !pq_is_send_pending())
3605 : : {
3606 : : QueryCompletion qc;
3607 : :
3608 : : /* Inform the standby that XLOG streaming is done */
2065 alvherre@alvh.no-ip. 3609 : 32 : SetQueryCompletion(&qc, CMDTAG_COPY, 0);
3610 : 32 : EndCommand(&qc, DestRemote, false);
4249 rhaas@postgresql.org 3611 : 32 : pq_flush();
3612 : :
3613 : 32 : proc_exit(0);
3614 : : }
3615 [ + + ]: 382 : if (!waiting_for_ping_response)
1307 akapila@postgresql.o 3616 : 103 : WalSndKeepalive(true, InvalidXLogRecPtr);
4249 rhaas@postgresql.org 3617 : 382 : }
3618 : :
3619 : : /*
3620 : : * Returns the latest point in WAL that has been safely flushed to disk.
3621 : : * This should only be called when in recovery.
3622 : : *
 3623                 :                :  * This is called either by a cascading walsender, to find the WAL position
 3624                 :                :  * to send to a cascaded standby, or by the slot synchronization operation,
 3625                 :                :  * to validate a remote slot's LSN before syncing it locally.
3626 : : *
3627 : : * As a side-effect, *tli is updated to the TLI of the last
3628 : : * replayed WAL record.
3629 : : */
3630 : : XLogRecPtr
1452 3631 : 757 : GetStandbyFlushRecPtr(TimeLineID *tli)
3632 : : {
3633 : : XLogRecPtr replayPtr;
3634 : : TimeLineID replayTLI;
3635 : : XLogRecPtr receivePtr;
3636 : : TimeLineID receiveTLI;
3637 : : XLogRecPtr result;
3638 : :
621 akapila@postgresql.o 3639 [ + + - + ]: 757 : Assert(am_cascading_walsender || IsSyncingReplicationSlots());
3640 : :
3641 : : /*
3642 : : * We can safely send what's already been replayed. Also, if walreceiver
3643 : : * is streaming WAL from the same timeline, we can send anything that it
3644 : : * has streamed, but hasn't been replayed yet.
3645 : : */
3646 : :
2028 tmunro@postgresql.or 3647 : 757 : receivePtr = GetWalRcvFlushRecPtr(NULL, &receiveTLI);
4694 heikki.linnakangas@i 3648 : 757 : replayPtr = GetXLogReplayRecPtr(&replayTLI);
3649 : :
933 andres@anarazel.de 3650 [ + + ]: 757 : if (tli)
3651 : 714 : *tli = replayTLI;
3652 : :
4694 heikki.linnakangas@i 3653 : 757 : result = replayPtr;
1452 rhaas@postgresql.org 3654 [ + - + + ]: 757 : if (receiveTLI == replayTLI && receivePtr > replayPtr)
4694 heikki.linnakangas@i 3655 : 138 : result = receivePtr;
3656 : :
3657 : 757 : return result;
3658 : : }
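
/*
 * Illustrative sketch, not part of walsender.c: the rule above as one
 * standalone helper. WAL received on the same timeline but not yet replayed
 * may be sent onward; anything received on a different timeline may not.
 * All names are invented.
 */
#include <stdint.h>

static uint64_t
sketch_standby_flush_ptr(uint64_t replay_ptr, uint32_t replay_tli,
						 uint64_t receive_ptr, uint32_t receive_tli)
{
	if (receive_tli == replay_tli && receive_ptr > replay_ptr)
		return receive_ptr;
	return replay_ptr;
}
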
3659 : :
3660 : : /*
3661 : : * Request walsenders to reload the currently-open WAL file
3662 : : */
3663 : : void
5214 simon@2ndQuadrant.co 3664 : 23 : WalSndRqstFileReload(void)
3665 : : {
3666 : : int i;
3667 : :
3668 [ + + ]: 253 : for (i = 0; i < max_wal_senders; i++)
3669 : : {
3427 rhaas@postgresql.org 3670 : 230 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3671 : :
3041 alvherre@alvh.no-ip. 3672 [ - + ]: 230 : SpinLockAcquire(&walsnd->mutex);
5214 simon@2ndQuadrant.co 3673 [ + - ]: 230 : if (walsnd->pid == 0)
3674 : : {
3041 alvherre@alvh.no-ip. 3675 : 230 : SpinLockRelease(&walsnd->mutex);
5214 simon@2ndQuadrant.co 3676 : 230 : continue;
3677 : : }
5214 simon@2ndQuadrant.co 3678 :UBC 0 : walsnd->needreload = true;
3679 : 0 : SpinLockRelease(&walsnd->mutex);
3680 : : }
5214 simon@2ndQuadrant.co 3681 :CBC 23 : }
3682 : :
3683 : : /*
3684 : : * Handle PROCSIG_WALSND_INIT_STOPPING signal.
3685 : : */
3686 : : void
3066 andres@anarazel.de 3687 : 32 : HandleWalSndInitStopping(void)
3688 : : {
3689 [ - + ]: 32 : Assert(am_walsender);
3690 : :
3691 : : /*
3692 : : * If replication has not yet started, die like with SIGTERM. If
3693 : : * replication is active, only set a flag and wake up the main loop. It
3694 : : * will send any outstanding WAL, wait for it to be replicated to the
3695 : : * standby, and then exit gracefully.
3696 : : */
3697 [ - + ]: 32 : if (!replication_active)
3066 andres@anarazel.de 3698 :UBC 0 : kill(MyProcPid, SIGTERM);
3699 : : else
3066 andres@anarazel.de 3700 :CBC 32 : got_STOPPING = true;
3701 : 32 : }
3702 : :
3703 : : /*
3704 : : * SIGUSR2: set flag to do a last cycle and shut down afterwards. The WAL
3705 : : * sender should already have been switched to WALSNDSTATE_STOPPING at
3706 : : * this point.
3707 : : */
3708 : : static void
5764 heikki.linnakangas@i 3709 : 32 : WalSndLastCycleHandler(SIGNAL_ARGS)
3710 : : {
3066 andres@anarazel.de 3711 : 32 : got_SIGUSR2 = true;
3936 3712 : 32 : SetLatch(MyLatch);
5764 heikki.linnakangas@i 3713 : 32 : }
3714 : :
3715 : : /* Set up signal handlers */
3716 : : void
3717 : 1112 : WalSndSignals(void)
3718 : : {
3719 : : /* Set up signal handlers */
2141 rhaas@postgresql.org 3720 : 1112 : pqsignal(SIGHUP, SignalHandlerForConfigReload);
3066 andres@anarazel.de 3721 : 1112 : pqsignal(SIGINT, StatementCancelHandler); /* query cancel */
4534 bruce@momjian.us 3722 : 1112 : pqsignal(SIGTERM, die); /* request shutdown */
3723 : : /* SIGQUIT handler was already set up by InitPostmasterChild */
4851 alvherre@alvh.no-ip. 3724 : 1112 : InitializeTimeouts(); /* establishes SIGALRM handler */
5764 heikki.linnakangas@i 3725 : 1112 : pqsignal(SIGPIPE, SIG_IGN);
3066 andres@anarazel.de 3726 : 1112 : pqsignal(SIGUSR1, procsignal_sigusr1_handler);
3727 : 1112 : pqsignal(SIGUSR2, WalSndLastCycleHandler); /* request a last cycle and
3728 : : * shutdown */
3729 : :
3730 : : /* Reset some signals that are accepted by postmaster but not here */
5764 heikki.linnakangas@i 3731 : 1112 : pqsignal(SIGCHLD, SIG_DFL);
3732 : 1112 : }
3733 : :
3734 : : /* Report shared-memory space needed by WalSndShmemInit */
3735 : : Size
3736 : 4047 : WalSndShmemSize(void)
3737 : : {
5722 bruce@momjian.us 3738 : 4047 : Size size = 0;
3739 : :
5764 heikki.linnakangas@i 3740 : 4047 : size = offsetof(WalSndCtlData, walsnds);
5688 rhaas@postgresql.org 3741 : 4047 : size = add_size(size, mul_size(max_wal_senders, sizeof(WalSnd)));
3742 : :
5764 heikki.linnakangas@i 3743 : 4047 : return size;
3744 : : }
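
/*
 * Illustrative sketch, not part of walsender.c: the shared-memory size is
 * simply the fixed control-struct header plus one per-walsender slot for
 * each allowed sender. The stand-in types below are invented; the real ones
 * are WalSndCtlData and WalSnd.
 */
#include <stddef.h>

typedef struct sketch_walsnd
{
	int			pid;
	/* ... per-walsender fields ... */
} sketch_walsnd;

typedef struct sketch_walsnd_ctl
{
	int			header_fields;	/* stands in for the real control fields */
	sketch_walsnd walsnds[];	/* flexible array, one entry per sender */
} sketch_walsnd_ctl;

static size_t
sketch_walsnd_shmem_size(int max_wal_senders)
{
	return offsetof(sketch_walsnd_ctl, walsnds) +
		(size_t) max_wal_senders * sizeof(sketch_walsnd);
}
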
3745 : :
3746 : : /* Allocate and initialize walsender-related shared memory */
3747 : : void
3748 : 1049 : WalSndShmemInit(void)
3749 : : {
3750 : : bool found;
3751 : : int i;
3752 : :
3753 : 1049 : WalSndCtl = (WalSndCtlData *)
3754 : 1049 : ShmemInitStruct("Wal Sender Ctl", WalSndShmemSize(), &found);
3755 : :
5661 tgl@sss.pgh.pa.us 3756 [ + - ]: 1049 : if (!found)
3757 : : {
3758 : : /* First time through, so initialize */
3759 [ + - + - : 6855 : MemSet(WalSndCtl, 0, WalSndShmemSize());
+ - + + +
+ ]
3760 : :
5025 simon@2ndQuadrant.co 3761 [ + + ]: 4196 : for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; i++)
1013 andres@anarazel.de 3762 : 3147 : dlist_init(&(WalSndCtl->SyncRepQueue[i]));
3763 : :
5661 tgl@sss.pgh.pa.us 3764 [ + + ]: 8081 : for (i = 0; i < max_wal_senders; i++)
3765 : : {
3766 : 7032 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3767 : :
3768 : 7032 : SpinLockInit(&walsnd->mutex);
3769 : : }
3770 : :
890 andres@anarazel.de 3771 : 1049 : ConditionVariableInit(&WalSndCtl->wal_flush_cv);
3772 : 1049 : ConditionVariableInit(&WalSndCtl->wal_replay_cv);
598 akapila@postgresql.o 3773 : 1049 : ConditionVariableInit(&WalSndCtl->wal_confirm_rcv_cv);
3774 : : }
5764 heikki.linnakangas@i 3775 : 1049 : }
3776 : :
3777 : : /*
3778 : : * Wake up physical, logical or both kinds of walsenders
3779 : : *
3780 : : * The distinction between physical and logical walsenders is done, because:
3781 : : * - physical walsenders can't send data until it's been flushed
3782 : : * - logical walsenders on standby can't decode and send data until it's been
3783 : : * applied
3784 : : *
3785 : : * For cascading replication we need to wake up physical walsenders separately
3786 : : * from logical walsenders (see the comment before calling WalSndWakeup() in
3787 : : * ApplyWalRecord() for more details).
3788 : : *
3789 : : * This will be called inside critical sections, so throwing an error is not
3790 : : * advisable.
3791 : : */
3792 : : void
933 andres@anarazel.de 3793 : 2692924 : WalSndWakeup(bool physical, bool logical)
3794 : : {
3795 : : /*
3796 : : * Wake up all the walsenders waiting on WAL being flushed or replayed
3797 : : * respectively. Note that waiting walsender would have prepared to sleep
3798 : : * on the CV (i.e., added itself to the CV's waitlist) in WalSndWait()
3799 : : * before actually waiting.
3800 : : */
890 3801 [ + + ]: 2692924 : if (physical)
3802 : 142453 : ConditionVariableBroadcast(&WalSndCtl->wal_flush_cv);
3803 : :
3804 [ + + ]: 2692924 : if (logical)
3805 : 2652258 : ConditionVariableBroadcast(&WalSndCtl->wal_replay_cv);
5525 heikki.linnakangas@i 3806 : 2692924 : }
3807 : :
3808 : : /*
3809 : : * Wait for readiness on the FeBe socket, or a timeout. The mask should be
3810 : : * composed of optional WL_SOCKET_WRITEABLE and WL_SOCKET_READABLE flags. Exit
3811 : : * on postmaster death.
3812 : : */
3813 : : static void
1701 tmunro@postgresql.or 3814 : 84387 : WalSndWait(uint32 socket_events, long timeout, uint32 wait_event)
3815 : : {
3816 : : WaitEvent event;
3817 : :
3818 : 84387 : ModifyWaitEvent(FeBeWaitSet, FeBeWaitSetSocketPos, socket_events, NULL);
3819 : :
3820 : : /*
3821 : : * We use a condition variable to efficiently wake up walsenders in
3822 : : * WalSndWakeup().
3823 : : *
3824 : : * Every walsender prepares to sleep on a shared memory CV. Note that it
3825 : : * just prepares to sleep on the CV (i.e., adds itself to the CV's
3826 : : * waitlist), but does not actually wait on the CV (IOW, it never calls
3827 : : * ConditionVariableSleep()). It still uses WaitEventSetWait() for
3828 : : * waiting, because we also need to wait for socket events. The processes
3829 : : * (startup process, walreceiver etc.) wanting to wake up walsenders use
3830 : : * ConditionVariableBroadcast(), which in turn calls SetLatch(), helping
3831 : : * walsenders come out of WaitEventSetWait().
3832 : : *
3833 : : * This approach is simple and efficient because, one doesn't have to loop
3834 : : * through all the walsenders slots, with a spinlock acquisition and
3835 : : * release for every iteration, just to wake up only the waiting
 3836                 :                :      * walsenders. It makes life easier for WalSndWakeup() callers.
3837 : : *
3838 : : * XXX: A desirable future improvement would be to add support for CVs
3839 : : * into WaitEventSetWait().
3840 : : *
3841 : : * And, we use separate shared memory CVs for physical and logical
3842 : : * walsenders for selective wake ups, see WalSndWakeup() for more details.
3843 : : *
3844 : : * If the wait event is WAIT_FOR_STANDBY_CONFIRMATION, wait on another CV
3845 : : * until awakened by physical walsenders after the walreceiver confirms
3846 : : * the receipt of the LSN.
3847 : : */
598 akapila@postgresql.o 3848 [ + + ]: 84387 : if (wait_event == WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION)
3849 : 10 : ConditionVariablePrepareToSleep(&WalSndCtl->wal_confirm_rcv_cv);
3850 [ + + ]: 84377 : else if (MyWalSnd->kind == REPLICATION_KIND_PHYSICAL)
890 andres@anarazel.de 3851 : 80999 : ConditionVariablePrepareToSleep(&WalSndCtl->wal_flush_cv);
3852 [ + - ]: 3378 : else if (MyWalSnd->kind == REPLICATION_KIND_LOGICAL)
3853 : 3378 : ConditionVariablePrepareToSleep(&WalSndCtl->wal_replay_cv);
3854 : :
1701 tmunro@postgresql.or 3855 [ + - ]: 84387 : if (WaitEventSetWait(FeBeWaitSet, timeout, &event, 1, wait_event) == 1 &&
3856 [ - + ]: 84387 : (event.events & WL_POSTMASTER_DEATH))
3857 : : {
890 andres@anarazel.de 3858 :UBC 0 : ConditionVariableCancelSleep();
1701 tmunro@postgresql.or 3859 : 0 : proc_exit(1);
3860 : : }
3861 : :
890 andres@anarazel.de 3862 :CBC 84387 : ConditionVariableCancelSleep();
1701 tmunro@postgresql.or 3863 : 84387 : }
3864 : :
3865 : : /*
3866 : : * Signal all walsenders to move to stopping state.
3867 : : *
3868 : : * This will trigger walsenders to move to a state where no further WAL can be
3869 : : * generated. See this file's header for details.
3870 : : */
3871 : : void
3066 andres@anarazel.de 3872 : 625 : WalSndInitStopping(void)
3873 : : {
3874 : : int i;
3875 : :
3876 [ + + ]: 4887 : for (i = 0; i < max_wal_senders; i++)
3877 : : {
3878 : 4262 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3879 : : pid_t pid;
3880 : :
3881 [ - + ]: 4262 : SpinLockAcquire(&walsnd->mutex);
3882 : 4262 : pid = walsnd->pid;
3883 : 4262 : SpinLockRelease(&walsnd->mutex);
3884 : :
3885 [ + + ]: 4262 : if (pid == 0)
3886 : 4230 : continue;
3887 : :
603 heikki.linnakangas@i 3888 : 32 : SendProcSignal(pid, PROCSIG_WALSND_INIT_STOPPING, INVALID_PROC_NUMBER);
3889 : : }
3066 andres@anarazel.de 3890 : 625 : }
3891 : :
3892 : : /*
 3893                 :                :  * Wait until all the WAL senders have quit or reached the stopping state. This
3894 : : * is used by the checkpointer to control when the shutdown checkpoint can
3895 : : * safely be performed.
3896 : : */
3897 : : void
3898 : 625 : WalSndWaitStopping(void)
3899 : : {
3900 : : for (;;)
3901 : 23 : {
3902 : : int i;
3903 : 648 : bool all_stopped = true;
3904 : :
3905 [ + + ]: 4911 : for (i = 0; i < max_wal_senders; i++)
3906 : : {
3907 : 4286 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3908 : :
3909 [ - + ]: 4286 : SpinLockAcquire(&walsnd->mutex);
3910 : :
3911 [ + + ]: 4286 : if (walsnd->pid == 0)
3912 : : {
3913 : 4240 : SpinLockRelease(&walsnd->mutex);
3914 : 4240 : continue;
3915 : : }
3916 : :
3041 alvherre@alvh.no-ip. 3917 [ + + ]: 46 : if (walsnd->state != WALSNDSTATE_STOPPING)
3918 : : {
3066 andres@anarazel.de 3919 : 23 : all_stopped = false;
3041 alvherre@alvh.no-ip. 3920 : 23 : SpinLockRelease(&walsnd->mutex);
3066 andres@anarazel.de 3921 : 23 : break;
3922 : : }
3041 alvherre@alvh.no-ip. 3923 : 23 : SpinLockRelease(&walsnd->mutex);
3924 : : }
3925 : :
3926 : : /* safe to leave if confirmation is done for all WAL senders */
3066 andres@anarazel.de 3927 [ + + ]: 648 : if (all_stopped)
3928 : 625 : return;
3929 : :
3930 : 23 : pg_usleep(10000L); /* wait for 10 msec */
3931 : : }
3932 : : }
3933 : :
3934 : : /* Set state for current walsender (only called in walsender) */
3935 : : void
5403 magnus@hagander.net 3936 : 2314 : WalSndSetState(WalSndState state)
3937 : : {
3427 rhaas@postgresql.org 3938 : 2314 : WalSnd *walsnd = MyWalSnd;
3939 : :
5403 magnus@hagander.net 3940 [ - + ]: 2314 : Assert(am_walsender);
3941 : :
3942 [ + + ]: 2314 : if (walsnd->state == state)
3943 : 508 : return;
3944 : :
3945 [ - + ]: 1806 : SpinLockAcquire(&walsnd->mutex);
3946 : 1806 : walsnd->state = state;
3947 : 1806 : SpinLockRelease(&walsnd->mutex);
3948 : : }
3949 : :
3950 : : /*
3951 : : * Return a string constant representing the state. This is used
3952 : : * in system views, and should *not* be translated.
3953 : : */
3954 : : static const char *
3955 : 809 : WalSndGetStateString(WalSndState state)
3956 : : {
3957 [ - - + + : 809 : switch (state)
- - ]
3958 : : {
5403 magnus@hagander.net 3959 :LBC (2) : case WALSNDSTATE_STARTUP:
5295 bruce@momjian.us 3960 : (2) : return "startup";
5403 magnus@hagander.net 3961 :UBC 0 : case WALSNDSTATE_BACKUP:
5295 bruce@momjian.us 3962 : 0 : return "backup";
5403 magnus@hagander.net 3963 :CBC 11 : case WALSNDSTATE_CATCHUP:
5295 bruce@momjian.us 3964 : 11 : return "catchup";
5403 magnus@hagander.net 3965 : 798 : case WALSNDSTATE_STREAMING:
5295 bruce@momjian.us 3966 : 798 : return "streaming";
3066 andres@anarazel.de 3967 :UBC 0 : case WALSNDSTATE_STOPPING:
3968 : 0 : return "stopping";
3969 : : }
5403 magnus@hagander.net 3970 : 0 : return "UNKNOWN";
3971 : : }
3972 : :
3973 : : static Interval *
3140 simon@2ndQuadrant.co 3974 :CBC 1041 : offset_to_interval(TimeOffset offset)
3975 : : {
3085 bruce@momjian.us 3976 : 1041 : Interval *result = palloc(sizeof(Interval));
3977 : :
3140 simon@2ndQuadrant.co 3978 : 1041 : result->month = 0;
3979 : 1041 : result->day = 0;
3980 : 1041 : result->time = offset;
3981 : :
3982 : 1041 : return result;
3983 : : }
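/*
 * Worked example, added for clarity: TimeOffset counts microseconds and
 * Interval->time uses the same unit, so the conversion above only has to
 * zero the month and day fields.  A lag of 1500000 (1.5 seconds) thus
 * becomes an Interval displayed as "00:00:01.5" in the lag columns of
 * pg_stat_replication.
 */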
3984 : :
3985 : : /*
3986 : : * Returns activity of walsenders, including pids and xlog locations sent to
3987 : : * standby servers.
3988 : : */
3989 : : Datum
5407 itagaki.takahiro@gma 3990 : 673 : pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
3991 : : {
3992 : : #define PG_STAT_GET_WAL_SENDERS_COLS 12
5314 bruce@momjian.us 3993 : 673 : ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
3994 : : SyncRepStandbyData *sync_standbys;
3995 : : int num_standbys;
3996 : : int i;
3997 : :
1105 michael@paquier.xyz 3998 : 673 : InitMaterializedSRF(fcinfo, 0);
3999 : :
4000 : : /*
4001 : : * Get the currently active synchronous standbys. This could be out of
4002 : : * date before we're done, but we'll use the data anyway.
4003 : : */
2018 tgl@sss.pgh.pa.us 4004 : 673 : num_standbys = SyncRepGetCandidateStandbys(&sync_standbys);
4005 : :
5407 itagaki.takahiro@gma 4006 [ + + ]: 7395 : for (i = 0; i < max_wal_senders; i++)
4007 : : {
3427 rhaas@postgresql.org 4008 : 6722 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
4009 : : XLogRecPtr sent_ptr;
4010 : : XLogRecPtr write;
4011 : : XLogRecPtr flush;
4012 : : XLogRecPtr apply;
4013 : : TimeOffset writeLag;
4014 : : TimeOffset flushLag;
4015 : : TimeOffset applyLag;
4016 : : int priority;
4017 : : int pid;
4018 : : WalSndState state;
4019 : : TimestampTz replyTime;
4020 : : bool is_sync_standby;
4021 : : Datum values[PG_STAT_GET_WAL_SENDERS_COLS];
1199 peter@eisentraut.org 4022 : 6722 : bool nulls[PG_STAT_GET_WAL_SENDERS_COLS] = {0};
4023 : : int j;
4024 : :
4025 : : /* Collect data from shared memory */
3041 alvherre@alvh.no-ip. 4026 [ - + ]: 6722 : SpinLockAcquire(&walsnd->mutex);
5407 itagaki.takahiro@gma 4027 [ + + ]: 6722 : if (walsnd->pid == 0)
4028 : : {
3041 alvherre@alvh.no-ip. 4029 : 5913 : SpinLockRelease(&walsnd->mutex);
5407 itagaki.takahiro@gma 4030 : 5913 : continue;
4031 : : }
3041 alvherre@alvh.no-ip. 4032 : 809 : pid = walsnd->pid;
788 michael@paquier.xyz 4033 : 809 : sent_ptr = walsnd->sentPtr;
5401 magnus@hagander.net 4034 : 809 : state = walsnd->state;
5373 heikki.linnakangas@i 4035 : 809 : write = walsnd->write;
4036 : 809 : flush = walsnd->flush;
4037 : 809 : apply = walsnd->apply;
3140 simon@2ndQuadrant.co 4038 : 809 : writeLag = walsnd->writeLag;
4039 : 809 : flushLag = walsnd->flushLag;
4040 : 809 : applyLag = walsnd->applyLag;
3972 heikki.linnakangas@i 4041 : 809 : priority = walsnd->sync_standby_priority;
2514 michael@paquier.xyz 4042 : 809 : replyTime = walsnd->replyTime;
5407 itagaki.takahiro@gma 4043 : 809 : SpinLockRelease(&walsnd->mutex);
4044 : :
4045 : : /*
4046 : : * Detect whether this walsender is/was considered synchronous. We can
4047 : : * provide some protection against stale data by checking the PID
4048 : : * along with walsnd_index.
4049 : : */
2018 tgl@sss.pgh.pa.us 4050 : 809 : is_sync_standby = false;
4051 [ + + ]: 850 : for (j = 0; j < num_standbys; j++)
4052 : : {
4053 [ + + ]: 68 : if (sync_standbys[j].walsnd_index == i &&
4054 [ + - ]: 27 : sync_standbys[j].pid == pid)
4055 : : {
4056 : 27 : is_sync_standby = true;
4057 : 27 : break;
4058 : : }
4059 : : }
4060 : :
3041 alvherre@alvh.no-ip. 4061 : 809 : values[0] = Int32GetDatum(pid);
4062 : :
1309 mail@joeconway.com 4063 [ - + ]: 809 : if (!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
4064 : : {
4065 : : /*
4066 : : * Only superusers and roles with privileges of pg_read_all_stats
4067 : : * can see details. Other users only get the pid value to know
4068 : : * it's a walsender, but no details.
4069 : : */
5349 simon@2ndQuadrant.co 4070 [ # # # # :UBC 0 : MemSet(&nulls[1], true, PG_STAT_GET_WAL_SENDERS_COLS - 1);
# # # # #
# ]
4071 : : }
4072 : : else
4073 : : {
5391 magnus@hagander.net 4074 :CBC 809 : values[1] = CStringGetTextDatum(WalSndGetStateString(state));
4075 : :
788 michael@paquier.xyz 4076 [ - + ]: 809 : if (XLogRecPtrIsInvalid(sent_ptr))
3606 magnus@hagander.net 4077 :LBC (2) : nulls[2] = true;
788 michael@paquier.xyz 4078 :CBC 809 : values[2] = LSNGetDatum(sent_ptr);
4079 : :
3606 magnus@hagander.net 4080 [ - + ]: 809 : if (XLogRecPtrIsInvalid(write))
5373 heikki.linnakangas@i 4081 :LBC (2) : nulls[3] = true;
4263 rhaas@postgresql.org 4082 :CBC 809 : values[3] = LSNGetDatum(write);
4083 : :
3606 magnus@hagander.net 4084 [ - + ]: 809 : if (XLogRecPtrIsInvalid(flush))
5373 heikki.linnakangas@i 4085 :LBC (2) : nulls[4] = true;
4263 rhaas@postgresql.org 4086 :CBC 809 : values[4] = LSNGetDatum(flush);
4087 : :
3606 magnus@hagander.net 4088 [ - + ]: 809 : if (XLogRecPtrIsInvalid(apply))
5373 heikki.linnakangas@i 4089 :LBC (2) : nulls[5] = true;
4263 rhaas@postgresql.org 4090 :CBC 809 : values[5] = LSNGetDatum(apply);
4091 : :
4092 : : /*
4093 : : * Treat a standby, such as a pg_basebackup background process,
4094 : : * that always returns an invalid flush location as an
4095 : : * asynchronous standby.
4096 : : */
3041 alvherre@alvh.no-ip. 4097 [ + - ]: 809 : priority = XLogRecPtrIsInvalid(flush) ? 0 : priority;
4098 : :
3140 simon@2ndQuadrant.co 4099 [ + + ]: 809 : if (writeLag < 0)
4100 : 493 : nulls[6] = true;
4101 : : else
4102 : 316 : values[6] = IntervalPGetDatum(offset_to_interval(writeLag));
4103 : :
4104 [ + + ]: 809 : if (flushLag < 0)
4105 : 399 : nulls[7] = true;
4106 : : else
4107 : 410 : values[7] = IntervalPGetDatum(offset_to_interval(flushLag));
4108 : :
4109 [ + + ]: 809 : if (applyLag < 0)
4110 : 494 : nulls[8] = true;
4111 : : else
4112 : 315 : values[8] = IntervalPGetDatum(offset_to_interval(applyLag));
4113 : :
4114 : 809 : values[9] = Int32GetDatum(priority);
4115 : :
4116 : : /*
4117 : : * More easily understood version of standby state. This is purely
4118 : : * informational.
4119 : : *
4120 : : * In quorum-based sync replication, the role of each standby
4121 : : * listed in synchronous_standby_names can change very
4122 : : * frequently. A standby considered "sync" at one moment can
4123 : : * become merely "potential" at the next, so it's basically
4124 : : * useless to report "sync" or "potential" as their sync
4125 : : * states. We report just "quorum" for them.
4126 : : */
3972 heikki.linnakangas@i 4127 [ + + ]: 809 : if (priority == 0)
3140 simon@2ndQuadrant.co 4128 : 771 : values[10] = CStringGetTextDatum("async");
2018 tgl@sss.pgh.pa.us 4129 [ + + ]: 38 : else if (is_sync_standby)
3140 simon@2ndQuadrant.co 4130 : 27 : values[10] = SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY ?
3234 fujii@postgresql.org 4131 [ + + ]: 27 : CStringGetTextDatum("sync") : CStringGetTextDatum("quorum");
4132 : : else
3140 simon@2ndQuadrant.co 4133 : 11 : values[10] = CStringGetTextDatum("potential");
4134 : :
2514 michael@paquier.xyz 4135 [ - + ]: 809 : if (replyTime == 0)
2514 michael@paquier.xyz 4136 :LBC (2) : nulls[11] = true;
4137 : : else
2514 michael@paquier.xyz 4138 :CBC 809 : values[11] = TimestampTzGetDatum(replyTime);
4139 : : }
4140 : :
1330 4141 : 809 : tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
4142 : : values, nulls);
4143 : : }
4144 : :
5407 itagaki.takahiro@gma 4145 : 673 : return (Datum) 0;
4146 : : }
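/*
 * Usage sketch, added for clarity: the twelve columns produced above are
 * normally consumed through the pg_stat_replication system view, which
 * joins this function's result with the backend-activity data on pid.
 * Assuming the standard view definition, a monitoring query looks
 * roughly like:
 *
 *		SELECT pid, state, sent_lsn, write_lsn, flush_lsn, replay_lsn,
 *			   write_lag, flush_lag, replay_lag, sync_priority, sync_state
 *		FROM pg_stat_replication;
 *
 * Callers without pg_read_all_stats privileges get only the pid column
 * populated, per the has_privs_of_role() check above.
 */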
4147 : :
4148 : : /*
4149 : : * Send a keepalive message to standby.
4150 : : *
4151 : : * If requestReply is set, the message requests the other party to send
4152 : : * a message back to us, for heartbeat purposes. We also set a flag to
4153 : : * let nearby code know that we're waiting for that response, to avoid
4154 : : * repeated requests.
4155 : : *
4156 : : * writePtr is the location up to which WAL has been sent. It is
4157 : : * essentially the same as sentPtr, but in some cases (e.g. when skipping
4158 : : * empty transactions) we need to send a keepalive before sentPtr is updated.
4159 : : */
4160 : : static void
1307 akapila@postgresql.o 4161 : 1851 : WalSndKeepalive(bool requestReply, XLogRecPtr writePtr)
4162 : : {
5049 simon@2ndQuadrant.co 4163 [ + + ]: 1851 : elog(DEBUG2, "sending replication keepalive");
4164 : :
4165 : : /* construct the message... */
4737 heikki.linnakangas@i 4166 : 1851 : resetStringInfo(&output_message);
82 nathan@postgresql.or 4167 :GNC 1851 : pq_sendbyte(&output_message, PqReplMsg_Keepalive);
1307 akapila@postgresql.o 4168 [ + - ]:CBC 1851 : pq_sendint64(&output_message, XLogRecPtrIsInvalid(writePtr) ? sentPtr : writePtr);
3168 tgl@sss.pgh.pa.us 4169 : 1851 : pq_sendint64(&output_message, GetCurrentTimestamp());
4737 heikki.linnakangas@i 4170 : 1851 : pq_sendbyte(&output_message, requestReply ? 1 : 0);
4171 : :
4172 : : /* ... and send it wrapped in CopyData */
96 nathan@postgresql.or 4173 :GNC 1851 : pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
4174 : :
4175 : : /* Set local flag */
1906 alvherre@alvh.no-ip. 4176 [ + + ]:CBC 1851 : if (requestReply)
4177 : 103 : waiting_for_ping_response = true;
5049 simon@2ndQuadrant.co 4178 : 1851 : }
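/*
 * Wire-format note, added for clarity: as constructed above, the
 * keepalive is sent as a CopyData ('d') message whose payload is the
 * primary keepalive message of the streaming-replication protocol: one
 * identifier byte ('k', PqReplMsg_Keepalive), an int64 holding the LSN
 * up to which WAL has been sent, an int64 timestamp in microseconds
 * since the PostgreSQL epoch, and one byte that is 1 if an immediate
 * reply is requested -- 18 payload bytes in total.
 */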
4179 : :
4180 : : /*
4181 : : * Send keepalive message if too much time has elapsed.
4182 : : */
4183 : : static void
2614 noah@leadboat.com 4184 : 1039954 : WalSndKeepaliveIfNecessary(void)
4185 : : {
4186 : : TimestampTz ping_time;
4187 : :
4188 : : /*
4189 : : * Don't send keepalive messages if timeouts are globally disabled or
4190 : : * we're doing something that is not subject to timeouts.
4191 : : */
4169 andres@anarazel.de 4192 [ + - + + ]: 1039954 : if (wal_sender_timeout <= 0 || last_reply_timestamp <= 0)
4249 rhaas@postgresql.org 4193 : 1 : return;
4194 : :
4195 [ + + ]: 1039953 : if (waiting_for_ping_response)
4196 : 481 : return;
4197 : :
4198 : : /*
4199 : : * If half of wal_sender_timeout has elapsed without receiving any reply
4200 : : * from the standby, send a keep-alive message to the standby requesting
4201 : : * an immediate reply.
4202 : : */
4203 : 1039472 : ping_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
4204 : : wal_sender_timeout / 2);
2614 noah@leadboat.com 4205 [ - + ]: 1039472 : if (last_processing >= ping_time)
4206 : : {
1307 akapila@postgresql.o 4207 :UBC 0 : WalSndKeepalive(true, InvalidXLogRecPtr);
4208 : :
4209 : : /* Try to flush pending output to the client */
4249 rhaas@postgresql.org 4210 [ # # ]: 0 : if (pq_flush_if_writable() != 0)
4211 : 0 : WalSndShutdown();
4212 : : }
4213 : : }
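/*
 * Worked example, added for clarity: with the default wal_sender_timeout
 * of 60 seconds, ping_time is last_reply_timestamp plus 30 seconds.  If
 * the standby has been silent that long as of last_processing, a
 * keepalive with requestReply set is sent; if the silence continues for
 * the remaining half of the timeout, the separate timeout check
 * elsewhere in this file can terminate the connection.
 */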
4214 : :
4215 : : /*
4216 : : * Record the end of the WAL and the time it was flushed locally, so that
4217 : : * LagTrackerRead can compute the elapsed time (lag) when this WAL location is
4218 : : * eventually reported to have been written, flushed and applied by the
4219 : : * standby in a reply message.
4220 : : */
4221 : : static void
3140 simon@2ndQuadrant.co 4222 :CBC 127396 : LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time)
4223 : : {
4224 : : int new_write_head;
4225 : : int i;
4226 : :
4227 [ - + ]: 127396 : if (!am_walsender)
3140 simon@2ndQuadrant.co 4228 :UBC 0 : return;
4229 : :
4230 : : /*
4231 : : * If the lsn hasn't advanced since last time, then do nothing. This way
4232 : : * we only record a new sample when new WAL has been written.
4233 : : */
2568 tmunro@postgresql.or 4234 [ + + ]:CBC 127396 : if (lag_tracker->last_lsn == lsn)
3140 simon@2ndQuadrant.co 4235 : 106879 : return;
2568 tmunro@postgresql.or 4236 : 20517 : lag_tracker->last_lsn = lsn;
4237 : :
4238 : : /*
4239 : : * If advancing the write head of the circular buffer would crash into any
4240 : : * of the read heads, then the buffer is full. In other words, the
4241 : : * slowest reader (presumably apply) is the one that controls the release
4242 : : * of space.
4243 : : */
4244 : 20517 : new_write_head = (lag_tracker->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
3140 simon@2ndQuadrant.co 4245 [ + + ]: 82068 : for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; ++i)
4246 : : {
4247 : : /*
4248 : : * If the buffer is full, move the slowest reader to a separate
4249 : : * overflow entry and free its space in the buffer so the write head
4250 : : * can advance.
4251 : : */
2568 tmunro@postgresql.or 4252 [ - + ]: 61551 : if (new_write_head == lag_tracker->read_heads[i])
4253 : : {
5 fujii@postgresql.org 4254 :UBC 0 : lag_tracker->overflowed[i] =
4255 : 0 : lag_tracker->buffer[lag_tracker->read_heads[i]];
4256 : 0 : lag_tracker->read_heads[i] = -1;
4257 : : }
4258 : : }
4259 : :
4260 : : /* Store a sample at the current write head position. */
2568 tmunro@postgresql.or 4261 :CBC 20517 : lag_tracker->buffer[lag_tracker->write_head].lsn = lsn;
4262 : 20517 : lag_tracker->buffer[lag_tracker->write_head].time = local_flush_time;
4263 : 20517 : lag_tracker->write_head = new_write_head;
4264 : : }
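/*
 * Structural note, added for clarity: the lag tracker is a fixed-size
 * circular buffer of (lsn, local flush time) samples with one write
 * head, advanced here, and one read head per reported position (write,
 * flush and apply -- NUM_SYNC_REP_WAIT_MODE of them), advanced in
 * LagTrackerRead().  When a reader falls so far behind that the write
 * head would overrun it, its pending sample is parked in a per-reader
 * overflow entry and its read head is set to -1, so writing can continue
 * without losing that reader's reference point.
 */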
4265 : :
4266 : : /*
4267 : : * Find out how much time has elapsed between the moment WAL location 'lsn'
4268 : : * (or the highest known earlier LSN) was flushed locally and the time 'now'.
4269 : : * We have a separate read head for each of the reported LSN locations we
4270 : : * receive in replies from standby; 'head' controls which read head is
4271 : : * used. Whenever a read head crosses an LSN which was written into the
4272 : : * lag buffer with LagTrackerWrite, we can use the associated timestamp to
4273 : : * find out the time this LSN (or an earlier one) was flushed locally, and
4274 : : * therefore compute the lag.
4275 : : *
4276 : : * Return -1 if no new sample data is available, and otherwise the elapsed
4277 : : * time in microseconds.
4278 : : */
4279 : : static TimeOffset
3140 simon@2ndQuadrant.co 4280 : 296475 : LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now)
4281 : : {
4282 : 296475 : TimestampTz time = 0;
4283 : :
4284 : : /*
4285 : : * If 'lsn' has not passed the WAL position stored in the overflow entry,
4286 : : * return the elapsed time (in microseconds) since the saved local flush
4287 : : * time. If the flush time is in the future (due to clock drift), return
4288 : : * -1 to treat as no valid sample.
4289 : : *
4290 : : * Otherwise, switch back to using the buffer to control the read head and
4291 : : * compute the elapsed time. The read head is then reset to point to the
4292 : : * oldest entry in the buffer.
4293 : : */
5 fujii@postgresql.org 4294 [ - + ]: 296475 : if (lag_tracker->read_heads[head] == -1)
4295 : : {
5 fujii@postgresql.org 4296 [ # # ]:UBC 0 : if (lag_tracker->overflowed[head].lsn > lsn)
4297 : 0 : return (now >= lag_tracker->overflowed[head].time) ?
4298 [ # # ]: 0 : now - lag_tracker->overflowed[head].time : -1;
4299 : :
4300 : 0 : time = lag_tracker->overflowed[head].time;
4301 : 0 : lag_tracker->last_read[head] = lag_tracker->overflowed[head];
4302 : 0 : lag_tracker->read_heads[head] =
4303 : 0 : (lag_tracker->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
4304 : : }
4305 : :
4306 : : /* Read all unread samples up to this LSN or end of buffer. */
2568 tmunro@postgresql.or 4307 [ + + ]:CBC 357030 : while (lag_tracker->read_heads[head] != lag_tracker->write_head &&
4308 [ + + ]: 252482 : lag_tracker->buffer[lag_tracker->read_heads[head]].lsn <= lsn)
4309 : : {
4310 : 60555 : time = lag_tracker->buffer[lag_tracker->read_heads[head]].time;
4311 : 60555 : lag_tracker->last_read[head] =
4312 : 60555 : lag_tracker->buffer[lag_tracker->read_heads[head]];
4313 : 60555 : lag_tracker->read_heads[head] =
4314 : 60555 : (lag_tracker->read_heads[head] + 1) % LAG_TRACKER_BUFFER_SIZE;
4315 : : }
4316 : :
4317 : : /*
4318 : : * If the lag tracker is empty, that means the standby has processed
4319 : : * everything we've ever sent, so we should now clear 'last_read'. If we
4320 : : * didn't do that, we'd risk using a stale and irrelevant sample for
4321 : : * interpolation at the beginning of the next burst of WAL after a period
4322 : : * of idleness.
4323 : : */
4324 [ + + ]: 296475 : if (lag_tracker->read_heads[head] == lag_tracker->write_head)
4325 : 104548 : lag_tracker->last_read[head].time = 0;
4326 : :
3140 simon@2ndQuadrant.co 4327 [ - + ]: 296475 : if (time > now)
4328 : : {
4329 : : /* If the clock somehow went backwards, treat as not found. */
3140 simon@2ndQuadrant.co 4330 :UBC 0 : return -1;
4331 : : }
3140 simon@2ndQuadrant.co 4332 [ + + ]:CBC 296475 : else if (time == 0)
4333 : : {
4334 : : /*
4335 : : * We didn't cross any sample's timestamp. If there is a future sample
4336 : : * that we haven't reached yet, and we've already reached at least one
4337 : : * sample, let's interpolate the local flush time. This is mainly useful
4338 : : * for reporting a completely stuck apply position as having
4339 : : * increasing lag, since otherwise we'd have to wait for it to
4340 : : * eventually start moving again and cross one of our samples before
4341 : : * we can show the lag increasing.
4342 : : */
2568 tmunro@postgresql.or 4343 [ + + ]: 248311 : if (lag_tracker->read_heads[head] == lag_tracker->write_head)
4344 : : {
4345 : : /* There are no future samples, so we can't interpolate. */
3048 simon@2ndQuadrant.co 4346 : 64877 : return -1;
4347 : : }
2568 tmunro@postgresql.or 4348 [ + + ]: 183434 : else if (lag_tracker->last_read[head].time != 0)
4349 : : {
4350 : : /* We can interpolate between last_read and the next sample. */
4351 : : double fraction;
4352 : 85308 : WalTimeSample prev = lag_tracker->last_read[head];
4353 : 85308 : WalTimeSample next = lag_tracker->buffer[lag_tracker->read_heads[head]];
4354 : :
3109 simon@2ndQuadrant.co 4355 [ - + ]: 85308 : if (lsn < prev.lsn)
4356 : : {
4357 : : /*
4358 : : * Reported LSNs shouldn't normally go backwards, but it's
4359 : : * possible when there is a timeline change. Treat as not
4360 : : * found.
4361 : : */
3109 simon@2ndQuadrant.co 4362 :UBC 0 : return -1;
4363 : : }
4364 : :
3140 simon@2ndQuadrant.co 4365 [ - + ]:CBC 85308 : Assert(prev.lsn < next.lsn);
4366 : :
4367 [ - + ]: 85308 : if (prev.time > next.time)
4368 : : {
4369 : : /* If the clock somehow went backwards, treat as not found. */
3140 simon@2ndQuadrant.co 4370 :UBC 0 : return -1;
4371 : : }
4372 : :
4373 : : /* See how far we are between the previous and next samples. */
3140 simon@2ndQuadrant.co 4374 :CBC 85308 : fraction =
4375 : 85308 : (double) (lsn - prev.lsn) / (double) (next.lsn - prev.lsn);
4376 : :
4377 : : /* Scale the local flush time proportionally. */
4378 : 85308 : time = (TimestampTz)
4379 : 85308 : ((double) prev.time + (next.time - prev.time) * fraction);
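            /*
             * Worked example, added for clarity (numbers invented purely
             * for illustration): if prev is (lsn 0/1000, flushed at
             * t = 100ms) and next is (lsn 0/3000, flushed at t = 300ms),
             * a reported lsn of 0/2000 yields fraction = 0.5 and an
             * interpolated local flush time of 200ms, so the value
             * returned below is now - 200ms.
             */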
4380 : : }
4381 : : else
4382 : : {
4383 : : /*
4384 : : * We have only a future sample, implying that we were entirely
4385 : : * caught up, but now there is a new burst of WAL and the
4386 : : * standby hasn't processed the first sample yet. Until the
4387 : : * standby reaches the future sample the best we can do is report
4388 : : * the hypothetical lag if that sample were to be replayed now.
4389 : : */
2568 tmunro@postgresql.or 4390 : 98126 : time = lag_tracker->buffer[lag_tracker->read_heads[head]].time;
4391 : : }
4392 : : }
4393 : :
4394 : : /* Return the elapsed time since local flush time in microseconds. */
3140 simon@2ndQuadrant.co 4395 [ - + ]: 231598 : Assert(time != 0);
4396 : 231598 : return now - time;
4397 : : }
|