Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * walsender.c
4 : : *
5 : : * The WAL sender process (walsender) is new as of Postgres 9.0. It takes
6 : : * care of sending XLOG from the primary server to a single recipient.
7 : : * (Note that there can be more than one walsender process concurrently.)
8 : : * It is started by the postmaster when the walreceiver of a standby server
9 : : * connects to the primary server and requests XLOG streaming replication.
10 : : *
11 : : * A walsender is similar to a regular backend, ie. there is a one-to-one
12 : : * relationship between a connection and a walsender process, but instead
13 : : * of processing SQL queries, it understands a small set of special
14 : : * replication-mode commands. The START_REPLICATION command begins streaming
15 : : * WAL to the client. While streaming, the walsender keeps reading XLOG
16 : : * records from the disk and sends them to the standby server over the
17 : : * COPY protocol, until either side ends the replication by exiting COPY
18 : : * mode (or until the connection is closed).
19 : : *
20 : : * Normal termination is by SIGTERM, which instructs the walsender to
21 : : * close the connection and exit(0) at the next convenient moment. Emergency
22 : : * termination is by SIGQUIT; like any backend, the walsender will simply
23 : : * abort and exit on SIGQUIT. A close of the connection and a FATAL error
24 : : * are treated as not a crash but approximately normal termination;
25 : : * the walsender will exit quickly without sending any more XLOG records.
26 : : *
27 : : * If the server is shut down, checkpointer sends us
28 : : * PROCSIG_WALSND_INIT_STOPPING after all regular backends have exited. If
29 : : * the backend is idle or runs an SQL query this causes the backend to
30 : : * shutdown, if logical replication is in progress all existing WAL records
31 : : * are processed followed by a shutdown. Otherwise this causes the walsender
32 : : * to switch to the "stopping" state. In this state, the walsender will reject
33 : : * any further replication commands. The checkpointer begins the shutdown
34 : : * checkpoint once all walsenders are confirmed as stopping. When the shutdown
35 : : * checkpoint finishes, the postmaster sends us SIGUSR2. This instructs
36 : : * walsender to send any outstanding WAL, including the shutdown checkpoint
37 : : * record, wait for it to be replicated to the standby, and then exit.
38 : : *
39 : : *
40 : : * Portions Copyright (c) 2010-2026, PostgreSQL Global Development Group
41 : : *
42 : : * IDENTIFICATION
43 : : * src/backend/replication/walsender.c
44 : : *
45 : : *-------------------------------------------------------------------------
46 : : */
47 : : #include "postgres.h"
48 : :
49 : : #include <signal.h>
50 : : #include <unistd.h>
51 : :
52 : : #include "access/timeline.h"
53 : : #include "access/transam.h"
54 : : #include "access/twophase.h"
55 : : #include "access/xact.h"
56 : : #include "access/xlog_internal.h"
57 : : #include "access/xlogreader.h"
58 : : #include "access/xlogrecovery.h"
59 : : #include "access/xlogutils.h"
60 : : #include "backup/basebackup.h"
61 : : #include "backup/basebackup_incremental.h"
62 : : #include "catalog/pg_authid.h"
63 : : #include "catalog/pg_type.h"
64 : : #include "commands/defrem.h"
65 : : #include "funcapi.h"
66 : : #include "libpq/libpq.h"
67 : : #include "libpq/pqformat.h"
68 : : #include "libpq/protocol.h"
69 : : #include "miscadmin.h"
70 : : #include "nodes/replnodes.h"
71 : : #include "pgstat.h"
72 : : #include "postmaster/interrupt.h"
73 : : #include "replication/decode.h"
74 : : #include "replication/logical.h"
75 : : #include "replication/slotsync.h"
76 : : #include "replication/slot.h"
77 : : #include "replication/snapbuild.h"
78 : : #include "replication/syncrep.h"
79 : : #include "replication/walreceiver.h"
80 : : #include "replication/walsender.h"
81 : : #include "replication/walsender_private.h"
82 : : #include "storage/condition_variable.h"
83 : : #include "storage/aio_subsys.h"
84 : : #include "storage/fd.h"
85 : : #include "storage/ipc.h"
86 : : #include "storage/pmsignal.h"
87 : : #include "storage/proc.h"
88 : : #include "storage/procarray.h"
89 : : #include "tcop/dest.h"
90 : : #include "tcop/tcopprot.h"
91 : : #include "utils/acl.h"
92 : : #include "utils/builtins.h"
93 : : #include "utils/guc.h"
94 : : #include "utils/lsyscache.h"
95 : : #include "utils/memutils.h"
96 : : #include "utils/pg_lsn.h"
97 : : #include "utils/pgstat_internal.h"
98 : : #include "utils/ps_status.h"
99 : : #include "utils/timeout.h"
100 : : #include "utils/timestamp.h"
101 : : #include "utils/wait_event.h"
102 : :
103 : : /* Minimum interval used by walsender for stats flushes, in ms */
104 : : #define WALSENDER_STATS_FLUSH_INTERVAL 1000
105 : :
106 : : /*
107 : : * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
108 : : *
109 : : * We don't have a good idea of what a good value would be; there's some
110 : : * overhead per message in both walsender and walreceiver, but on the other
111 : : * hand sending large batches makes walsender less responsive to signals
112 : : * because signals are checked only between messages. 128kB (with
113 : : * default 8k blocks) seems like a reasonable guess for now.
114 : : */
115 : : #define MAX_SEND_SIZE (XLOG_BLCKSZ * 16)
116 : :
117 : : /* Array of WalSnds in shared memory */
118 : : WalSndCtlData *WalSndCtl = NULL;
119 : :
120 : : /* My slot in the shared memory array */
121 : : WalSnd *MyWalSnd = NULL;
122 : :
123 : : /* Global state */
124 : : bool am_walsender = false; /* Am I a walsender process? */
125 : : bool am_cascading_walsender = false; /* Am I cascading WAL to another
126 : : * standby? */
127 : : bool am_db_walsender = false; /* Connected to a database? */
128 : :
129 : : /* GUC variables */
130 : : int max_wal_senders = 10; /* the maximum number of concurrent
131 : : * walsenders */
132 : : int wal_sender_timeout = 60 * 1000; /* maximum time to send one WAL
133 : : * data message */
134 : : bool log_replication_commands = false;
135 : :
136 : : /*
137 : : * State for WalSndWakeupRequest
138 : : */
139 : : bool wake_wal_senders = false;
140 : :
141 : : /*
142 : : * xlogreader used for replication. Note that a WAL sender doing physical
143 : : * replication does not need xlogreader to read WAL, but it needs one to
144 : : * keep a state of its work.
145 : : */
146 : : static XLogReaderState *xlogreader = NULL;
147 : :
148 : : /*
149 : : * If the UPLOAD_MANIFEST command is used to provide a backup manifest in
150 : : * preparation for an incremental backup, uploaded_manifest will be point
151 : : * to an object containing information about its contexts, and
152 : : * uploaded_manifest_mcxt will point to the memory context that contains
153 : : * that object and all of its subordinate data. Otherwise, both values will
154 : : * be NULL.
155 : : */
156 : : static IncrementalBackupInfo *uploaded_manifest = NULL;
157 : : static MemoryContext uploaded_manifest_mcxt = NULL;
158 : :
159 : : /*
160 : : * These variables keep track of the state of the timeline we're currently
161 : : * sending. sendTimeLine identifies the timeline. If sendTimeLineIsHistoric,
162 : : * the timeline is not the latest timeline on this server, and the server's
163 : : * history forked off from that timeline at sendTimeLineValidUpto.
164 : : */
165 : : static TimeLineID sendTimeLine = 0;
166 : : static TimeLineID sendTimeLineNextTLI = 0;
167 : : static bool sendTimeLineIsHistoric = false;
168 : : static XLogRecPtr sendTimeLineValidUpto = InvalidXLogRecPtr;
169 : :
170 : : /*
171 : : * How far have we sent WAL already? This is also advertised in
172 : : * MyWalSnd->sentPtr. (Actually, this is the next WAL location to send.)
173 : : */
174 : : static XLogRecPtr sentPtr = InvalidXLogRecPtr;
175 : :
176 : : /* Buffers for constructing outgoing messages and processing reply messages. */
177 : : static StringInfoData output_message;
178 : : static StringInfoData reply_message;
179 : : static StringInfoData tmpbuf;
180 : :
181 : : /* Timestamp of last ProcessRepliesIfAny(). */
182 : : static TimestampTz last_processing = 0;
183 : :
184 : : /*
185 : : * Timestamp of last ProcessRepliesIfAny() that saw a reply from the
186 : : * standby. Set to 0 if wal_sender_timeout doesn't need to be active.
187 : : */
188 : : static TimestampTz last_reply_timestamp = 0;
189 : :
190 : : /* Have we sent a heartbeat message asking for reply, since last reply? */
191 : : static bool waiting_for_ping_response = false;
192 : :
193 : : /*
194 : : * While streaming WAL in Copy mode, streamingDoneSending is set to true
195 : : * after we have sent CopyDone. We should not send any more CopyData messages
196 : : * after that. streamingDoneReceiving is set to true when we receive CopyDone
197 : : * from the other end. When both become true, it's time to exit Copy mode.
198 : : */
199 : : static bool streamingDoneSending;
200 : : static bool streamingDoneReceiving;
201 : :
202 : : /* Are we there yet? */
203 : : static bool WalSndCaughtUp = false;
204 : :
205 : : /* Flags set by signal handlers for later service in main loop */
206 : : static volatile sig_atomic_t got_SIGUSR2 = false;
207 : : static volatile sig_atomic_t got_STOPPING = false;
208 : :
209 : : /*
210 : : * This is set while we are streaming. When not set
211 : : * PROCSIG_WALSND_INIT_STOPPING signal will be handled like SIGTERM. When set,
212 : : * the main loop is responsible for checking got_STOPPING and terminating when
213 : : * it's set (after streaming any remaining WAL).
214 : : */
215 : : static volatile sig_atomic_t replication_active = false;
216 : :
217 : : static LogicalDecodingContext *logical_decoding_ctx = NULL;
218 : :
219 : : /* A sample associating a WAL location with the time it was written. */
220 : : typedef struct
221 : : {
222 : : XLogRecPtr lsn;
223 : : TimestampTz time;
224 : : } WalTimeSample;
225 : :
226 : : /* The size of our buffer of time samples. */
227 : : #define LAG_TRACKER_BUFFER_SIZE 8192
228 : :
229 : : /* A mechanism for tracking replication lag. */
230 : : typedef struct
231 : : {
232 : : XLogRecPtr last_lsn;
233 : : WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE];
234 : : int write_head;
235 : : int read_heads[NUM_SYNC_REP_WAIT_MODE];
236 : : WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
237 : :
238 : : /*
239 : : * Overflow entries for read heads that collide with the write head.
240 : : *
241 : : * When the cyclic buffer fills (write head is about to collide with a
242 : : * read head), we save that read head's current sample here and mark it as
243 : : * using overflow (read_heads[i] = -1). This allows the write head to
244 : : * continue advancing while the overflowed mode continues lag computation
245 : : * using the saved sample.
246 : : *
247 : : * Once the standby's reported LSN advances past the overflow entry's LSN,
248 : : * we transition back to normal buffer-based tracking.
249 : : */
250 : : WalTimeSample overflowed[NUM_SYNC_REP_WAIT_MODE];
251 : : } LagTracker;
252 : :
253 : : static LagTracker *lag_tracker;
254 : :
255 : : /* Signal handlers */
256 : : static void WalSndLastCycleHandler(SIGNAL_ARGS);
257 : :
258 : : /* Prototypes for private functions */
259 : : typedef void (*WalSndSendDataCallback) (void);
260 : : static void WalSndLoop(WalSndSendDataCallback send_data);
261 : : static void InitWalSenderSlot(void);
262 : : static void WalSndKill(int code, Datum arg);
263 : : pg_noreturn static void WalSndShutdown(void);
264 : : static void XLogSendPhysical(void);
265 : : static void XLogSendLogical(void);
266 : : static void WalSndDone(WalSndSendDataCallback send_data);
267 : : static void IdentifySystem(void);
268 : : static void UploadManifest(void);
269 : : static bool HandleUploadManifestPacket(StringInfo buf, off_t *offset,
270 : : IncrementalBackupInfo *ib);
271 : : static void ReadReplicationSlot(ReadReplicationSlotCmd *cmd);
272 : : static void CreateReplicationSlot(CreateReplicationSlotCmd *cmd);
273 : : static void DropReplicationSlot(DropReplicationSlotCmd *cmd);
274 : : static void StartReplication(StartReplicationCmd *cmd);
275 : : static void StartLogicalReplication(StartReplicationCmd *cmd);
276 : : static void ProcessStandbyMessage(void);
277 : : static void ProcessStandbyReplyMessage(void);
278 : : static void ProcessStandbyHSFeedbackMessage(void);
279 : : static void ProcessStandbyPSRequestMessage(void);
280 : : static void ProcessRepliesIfAny(void);
281 : : static void ProcessPendingWrites(void);
282 : : static void WalSndKeepalive(bool requestReply, XLogRecPtr writePtr);
283 : : static void WalSndKeepaliveIfNecessary(void);
284 : : static void WalSndCheckTimeOut(void);
285 : : static long WalSndComputeSleeptime(TimestampTz now);
286 : : static void WalSndWait(uint32 socket_events, long timeout, uint32 wait_event);
287 : : static void WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write);
288 : : static void WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write);
289 : : static void WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
290 : : bool skipped_xact);
291 : : static XLogRecPtr WalSndWaitForWal(XLogRecPtr loc);
292 : : static void LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time);
293 : : static TimeOffset LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now);
294 : : static bool TransactionIdInRecentPast(TransactionId xid, uint32 epoch);
295 : :
296 : : static void WalSndSegmentOpen(XLogReaderState *state, XLogSegNo nextSegNo,
297 : : TimeLineID *tli_p);
298 : :
299 : :
300 : : /* Initialize walsender process before entering the main command loop */
301 : : void
4909 heikki.linnakangas@i 302 :CBC 1244 : InitWalSender(void)
303 : : {
5353 simon@2ndQuadrant.co 304 : 1244 : am_cascading_walsender = RecoveryInProgress();
305 : :
306 : : /* Create a per-walsender data structure in shared memory */
4909 heikki.linnakangas@i 307 : 1244 : InitWalSenderSlot();
308 : :
309 : : /* need resource owner for e.g. basebackups */
523 andres@anarazel.de 310 : 1244 : CreateAuxProcessResourceOwner();
311 : :
312 : : /*
313 : : * Let postmaster know that we're a WAL sender. Once we've declared us as
314 : : * a WAL sender process, postmaster will let us outlive the bgwriter and
315 : : * kill us last in the shutdown sequence, so we get a chance to stream all
316 : : * remaining WAL at shutdown, including the shutdown checkpoint. Note that
317 : : * there's no going back, and we mustn't write any WAL records after this.
318 : : */
4840 heikki.linnakangas@i 319 : 1244 : MarkPostmasterChildWalSender();
320 : 1244 : SendPostmasterSignal(PMSIGNAL_ADVANCE_STATE_MACHINE);
321 : :
322 : : /*
323 : : * If the client didn't specify a database to connect to, show in PGPROC
324 : : * that our advertised xmin should affect vacuum horizons in all
325 : : * databases. This allows physical replication clients to send hot
326 : : * standby feedback that will delay vacuum cleanup in all databases.
327 : : */
1430 tgl@sss.pgh.pa.us 328 [ + + ]: 1244 : if (MyDatabaseId == InvalidOid)
329 : : {
330 [ - + ]: 485 : Assert(MyProc->xmin == InvalidTransactionId);
331 : 485 : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
332 : 485 : MyProc->statusFlags |= PROC_AFFECTS_ALL_HORIZONS;
333 : 485 : ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags;
334 : 485 : LWLockRelease(ProcArrayLock);
335 : : }
336 : :
337 : : /* Initialize empty timestamp buffer for lag tracking. */
2707 tmunro@postgresql.or 338 : 1244 : lag_tracker = MemoryContextAllocZero(TopMemoryContext, sizeof(LagTracker));
5903 heikki.linnakangas@i 339 : 1244 : }
340 : :
341 : : /*
342 : : * Clean up after an error.
343 : : *
344 : : * WAL sender processes don't use transactions like regular backends do.
345 : : * This function does any cleanup required after an error in a WAL sender
346 : : * process, similar to what transaction abort does in a regular backend.
347 : : */
348 : : void
3865 andres@anarazel.de 349 : 48 : WalSndErrorCleanup(void)
350 : : {
4426 rhaas@postgresql.org 351 : 48 : LWLockReleaseAll();
3400 352 : 48 : ConditionVariableCancelSleep();
3657 353 : 48 : pgstat_report_wait_end();
363 andres@anarazel.de 354 : 48 : pgaio_error_cleanup();
355 : :
2130 alvherre@alvh.no-ip. 356 [ + + + + ]: 48 : if (xlogreader != NULL && xlogreader->seg.ws_file >= 0)
2132 357 : 6 : wal_segment_close(xlogreader);
358 : :
4426 rhaas@postgresql.org 359 [ + + ]: 48 : if (MyReplicationSlot != NULL)
360 : 15 : ReplicationSlotRelease();
361 : :
689 akapila@postgresql.o 362 : 48 : ReplicationSlotCleanup(false);
363 : :
4840 heikki.linnakangas@i 364 : 48 : replication_active = false;
365 : :
366 : : /*
367 : : * If there is a transaction in progress, it will clean up our
368 : : * ResourceOwner, but if a replication command set up a resource owner
369 : : * without a transaction, we've got to clean that up now.
370 : : */
2172 rhaas@postgresql.org 371 [ + + ]: 48 : if (!IsTransactionOrTransactionBlock())
523 andres@anarazel.de 372 : 47 : ReleaseAuxProcessResources(false);
373 : :
3205 374 [ + - - + ]: 48 : if (got_STOPPING || got_SIGUSR2)
4909 heikki.linnakangas@i 375 :UBC 0 : proc_exit(0);
376 : :
377 : : /* Revert back to startup state */
4840 heikki.linnakangas@i 378 :CBC 48 : WalSndSetState(WALSNDSTATE_STARTUP);
5903 379 : 48 : }
380 : :
381 : : /*
382 : : * Handle a client's connection abort in an orderly manner.
383 : : */
384 : : static void
4388 rhaas@postgresql.org 385 : 4 : WalSndShutdown(void)
386 : : {
387 : : /*
388 : : * Reset whereToSendOutput to prevent ereport from attempting to send any
389 : : * more messages to the standby.
390 : : */
391 [ + - ]: 4 : if (whereToSendOutput == DestRemote)
392 : 4 : whereToSendOutput = DestNone;
393 : :
394 : 4 : proc_exit(0);
395 : : }
396 : :
397 : : /*
398 : : * Handle the IDENTIFY_SYSTEM command.
399 : : */
400 : : static void
5539 magnus@hagander.net 401 : 775 : IdentifySystem(void)
402 : : {
403 : : char sysid[32];
404 : : char xloc[MAXFNAMELEN];
405 : : XLogRecPtr logptr;
4388 rhaas@postgresql.org 406 : 775 : char *dbname = NULL;
407 : : DestReceiver *dest;
408 : : TupOutputState *tstate;
409 : : TupleDesc tupdesc;
410 : : Datum values[4];
1338 peter@eisentraut.org 411 : 775 : bool nulls[4] = {0};
412 : : TimeLineID currTLI;
413 : :
414 : : /*
415 : : * Reply with a result set with one row, four columns. First col is system
416 : : * ID, second is timeline ID, third is current xlog location and the
417 : : * fourth contains the database name if we are connected to one.
418 : : */
419 : :
5539 magnus@hagander.net 420 : 775 : snprintf(sysid, sizeof(sysid), UINT64_FORMAT,
421 : : GetSystemIdentifier());
422 : :
4840 heikki.linnakangas@i 423 : 775 : am_cascading_walsender = RecoveryInProgress();
424 [ + + ]: 775 : if (am_cascading_walsender)
1591 rhaas@postgresql.org 425 : 62 : logptr = GetStandbyFlushRecPtr(&currTLI);
426 : : else
427 : 713 : logptr = GetFlushRecPtr(&currTLI);
428 : :
251 alvherre@kurilemu.de 429 :GNC 775 : snprintf(xloc, sizeof(xloc), "%X/%08X", LSN_FORMAT_ARGS(logptr));
430 : :
4388 rhaas@postgresql.org 431 [ + + ]:CBC 775 : if (MyDatabaseId != InvalidOid)
432 : : {
433 : 278 : MemoryContext cur = CurrentMemoryContext;
434 : :
435 : : /* syscache access needs a transaction env. */
436 : 278 : StartTransactionCommand();
437 : 278 : dbname = get_database_name(MyDatabaseId);
438 : : /* copy dbname out of TX context */
622 tgl@sss.pgh.pa.us 439 : 278 : dbname = MemoryContextStrdup(cur, dbname);
4388 rhaas@postgresql.org 440 : 278 : CommitTransactionCommand();
441 : : }
442 : :
3329 443 : 775 : dest = CreateDestReceiver(DestRemoteSimple);
444 : :
445 : : /* need a tuple descriptor representing four columns */
2672 andres@anarazel.de 446 : 775 : tupdesc = CreateTemplateTupleDesc(4);
3329 rhaas@postgresql.org 447 : 775 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "systemid",
448 : : TEXTOID, -1, 0);
449 : 775 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "timeline",
450 : : INT8OID, -1, 0);
451 : 775 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "xlogpos",
452 : : TEXTOID, -1, 0);
453 : 775 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 4, "dbname",
454 : : TEXTOID, -1, 0);
455 : :
456 : : /* prepare for projection of tuples */
2677 andres@anarazel.de 457 : 775 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
458 : :
459 : : /* column 1: system identifier */
3329 rhaas@postgresql.org 460 : 775 : values[0] = CStringGetTextDatum(sysid);
461 : :
462 : : /* column 2: timeline */
1350 peter@eisentraut.org 463 : 775 : values[1] = Int64GetDatum(currTLI);
464 : :
465 : : /* column 3: wal location */
3229 peter_e@gmx.net 466 : 775 : values[2] = CStringGetTextDatum(xloc);
467 : :
468 : : /* column 4: database name, or NULL if none */
4388 rhaas@postgresql.org 469 [ + + ]: 775 : if (dbname)
3329 470 : 278 : values[3] = CStringGetTextDatum(dbname);
471 : : else
472 : 497 : nulls[3] = true;
473 : :
474 : : /* send it to dest */
475 : 775 : do_tup_output(tstate, values, nulls);
476 : :
477 : 775 : end_tup_output(tstate);
5539 magnus@hagander.net 478 : 775 : }
479 : :
480 : : /* Handle READ_REPLICATION_SLOT command */
481 : : static void
1602 michael@paquier.xyz 482 : 6 : ReadReplicationSlot(ReadReplicationSlotCmd *cmd)
483 : : {
484 : : #define READ_REPLICATION_SLOT_COLS 3
485 : : ReplicationSlot *slot;
486 : : DestReceiver *dest;
487 : : TupOutputState *tstate;
488 : : TupleDesc tupdesc;
1338 peter@eisentraut.org 489 : 6 : Datum values[READ_REPLICATION_SLOT_COLS] = {0};
490 : : bool nulls[READ_REPLICATION_SLOT_COLS];
491 : :
1602 michael@paquier.xyz 492 : 6 : tupdesc = CreateTemplateTupleDesc(READ_REPLICATION_SLOT_COLS);
493 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "slot_type",
494 : : TEXTOID, -1, 0);
495 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "restart_lsn",
496 : : TEXTOID, -1, 0);
497 : : /* TimeLineID is unsigned, so int4 is not wide enough. */
498 : 6 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "restart_tli",
499 : : INT8OID, -1, 0);
500 : :
1338 peter@eisentraut.org 501 : 6 : memset(nulls, true, READ_REPLICATION_SLOT_COLS * sizeof(bool));
502 : :
1602 michael@paquier.xyz 503 : 6 : LWLockAcquire(ReplicationSlotControlLock, LW_SHARED);
504 : 6 : slot = SearchNamedReplicationSlot(cmd->slotname, false);
505 [ + + - + ]: 6 : if (slot == NULL || !slot->in_use)
506 : : {
507 : 2 : LWLockRelease(ReplicationSlotControlLock);
508 : : }
509 : : else
510 : : {
511 : : ReplicationSlot slot_contents;
512 : 4 : int i = 0;
513 : :
514 : : /* Copy slot contents while holding spinlock */
515 [ - + ]: 4 : SpinLockAcquire(&slot->mutex);
516 : 4 : slot_contents = *slot;
517 : 4 : SpinLockRelease(&slot->mutex);
518 : 4 : LWLockRelease(ReplicationSlotControlLock);
519 : :
520 [ + + ]: 4 : if (OidIsValid(slot_contents.data.database))
521 [ + - ]: 1 : ereport(ERROR,
522 : : errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
523 : : errmsg("cannot use %s with a logical replication slot",
524 : : "READ_REPLICATION_SLOT"));
525 : :
526 : : /* slot type */
527 : 3 : values[i] = CStringGetTextDatum("physical");
528 : 3 : nulls[i] = false;
529 : 3 : i++;
530 : :
531 : : /* start LSN */
129 alvherre@kurilemu.de 532 [ + - ]:GNC 3 : if (XLogRecPtrIsValid(slot_contents.data.restart_lsn))
533 : : {
534 : : char xloc[64];
535 : :
251 536 : 3 : snprintf(xloc, sizeof(xloc), "%X/%08X",
1602 michael@paquier.xyz 537 :CBC 3 : LSN_FORMAT_ARGS(slot_contents.data.restart_lsn));
538 : 3 : values[i] = CStringGetTextDatum(xloc);
539 : 3 : nulls[i] = false;
540 : : }
541 : 3 : i++;
542 : :
543 : : /* timeline this WAL was produced on */
129 alvherre@kurilemu.de 544 [ + - ]:GNC 3 : if (XLogRecPtrIsValid(slot_contents.data.restart_lsn))
545 : : {
546 : : TimeLineID slots_position_timeline;
547 : : TimeLineID current_timeline;
1602 michael@paquier.xyz 548 :CBC 3 : List *timeline_history = NIL;
549 : :
550 : : /*
551 : : * While in recovery, use as timeline the currently-replaying one
552 : : * to get the LSN position's history.
553 : : */
554 [ - + ]: 3 : if (RecoveryInProgress())
1602 michael@paquier.xyz 555 :UBC 0 : (void) GetXLogReplayRecPtr(¤t_timeline);
556 : : else
1591 rhaas@postgresql.org 557 :CBC 3 : current_timeline = GetWALInsertionTimeLine();
558 : :
1602 michael@paquier.xyz 559 : 3 : timeline_history = readTimeLineHistory(current_timeline);
560 : 3 : slots_position_timeline = tliOfPointInHistory(slot_contents.data.restart_lsn,
561 : : timeline_history);
562 : 3 : values[i] = Int64GetDatum((int64) slots_position_timeline);
563 : 3 : nulls[i] = false;
564 : : }
565 : 3 : i++;
566 : :
567 [ - + ]: 3 : Assert(i == READ_REPLICATION_SLOT_COLS);
568 : : }
569 : :
570 : 5 : dest = CreateDestReceiver(DestRemoteSimple);
571 : 5 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
572 : 5 : do_tup_output(tstate, values, nulls);
573 : 5 : end_tup_output(tstate);
574 : 5 : }
575 : :
576 : :
577 : : /*
578 : : * Handle TIMELINE_HISTORY command.
579 : : */
580 : : static void
4840 heikki.linnakangas@i 581 : 13 : SendTimeLineHistory(TimeLineHistoryCmd *cmd)
582 : : {
583 : : DestReceiver *dest;
584 : : TupleDesc tupdesc;
585 : : StringInfoData buf;
586 : : char histfname[MAXFNAMELEN];
587 : : char path[MAXPGPATH];
588 : : int fd;
589 : : off_t histfilelen;
590 : : off_t bytesleft;
591 : : Size len;
592 : :
1350 peter@eisentraut.org 593 : 13 : dest = CreateDestReceiver(DestRemoteSimple);
594 : :
595 : : /*
596 : : * Reply with a result set with one row, and two columns. The first col is
597 : : * the name of the history file, 2nd is the contents.
598 : : */
599 : 13 : tupdesc = CreateTemplateTupleDesc(2);
600 : 13 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "filename", TEXTOID, -1, 0);
601 : 13 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "content", TEXTOID, -1, 0);
602 : :
4840 heikki.linnakangas@i 603 : 13 : TLHistoryFileName(histfname, cmd->timeline);
604 : 13 : TLHistoryFilePath(path, cmd->timeline);
605 : :
606 : : /* Send a RowDescription message */
1350 peter@eisentraut.org 607 : 13 : dest->rStartup(dest, CMD_SELECT, tupdesc);
608 : :
609 : : /* Send a DataRow message */
936 nathan@postgresql.or 610 : 13 : pq_beginmessage(&buf, PqMsg_DataRow);
3077 andres@anarazel.de 611 : 13 : pq_sendint16(&buf, 2); /* # of columns */
3792 alvherre@alvh.no-ip. 612 : 13 : len = strlen(histfname);
3077 andres@anarazel.de 613 : 13 : pq_sendint32(&buf, len); /* col1 len */
3792 alvherre@alvh.no-ip. 614 : 13 : pq_sendbytes(&buf, histfname, len);
615 : :
3095 peter_e@gmx.net 616 : 13 : fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
4840 heikki.linnakangas@i 617 [ - + ]: 13 : if (fd < 0)
4840 heikki.linnakangas@i 618 [ # # ]:UBC 0 : ereport(ERROR,
619 : : (errcode_for_file_access(),
620 : : errmsg("could not open file \"%s\": %m", path)));
621 : :
622 : : /* Determine file length and send it to client */
4840 heikki.linnakangas@i 623 :CBC 13 : histfilelen = lseek(fd, 0, SEEK_END);
624 [ - + ]: 13 : if (histfilelen < 0)
4840 heikki.linnakangas@i 625 [ # # ]:UBC 0 : ereport(ERROR,
626 : : (errcode_for_file_access(),
627 : : errmsg("could not seek to end of file \"%s\": %m", path)));
4840 heikki.linnakangas@i 628 [ - + ]:CBC 13 : if (lseek(fd, 0, SEEK_SET) != 0)
4840 heikki.linnakangas@i 629 [ # # ]:UBC 0 : ereport(ERROR,
630 : : (errcode_for_file_access(),
631 : : errmsg("could not seek to beginning of file \"%s\": %m", path)));
632 : :
3077 andres@anarazel.de 633 :CBC 13 : pq_sendint32(&buf, histfilelen); /* col2 len */
634 : :
4840 heikki.linnakangas@i 635 : 13 : bytesleft = histfilelen;
636 [ + + ]: 26 : while (bytesleft > 0)
637 : : {
638 : : PGAlignedBlock rbuf;
639 : : int nread;
640 : :
3284 rhaas@postgresql.org 641 : 13 : pgstat_report_wait_start(WAIT_EVENT_WALSENDER_TIMELINE_HISTORY_READ);
2752 tgl@sss.pgh.pa.us 642 : 13 : nread = read(fd, rbuf.data, sizeof(rbuf));
3284 rhaas@postgresql.org 643 : 13 : pgstat_report_wait_end();
2797 michael@paquier.xyz 644 [ - + ]: 13 : if (nread < 0)
4840 heikki.linnakangas@i 645 [ # # ]:UBC 0 : ereport(ERROR,
646 : : (errcode_for_file_access(),
647 : : errmsg("could not read file \"%s\": %m",
648 : : path)));
2797 michael@paquier.xyz 649 [ - + ]:CBC 13 : else if (nread == 0)
2797 michael@paquier.xyz 650 [ # # ]:UBC 0 : ereport(ERROR,
651 : : (errcode(ERRCODE_DATA_CORRUPTED),
652 : : errmsg("could not read file \"%s\": read %d of %zu",
653 : : path, nread, (Size) bytesleft)));
654 : :
2752 tgl@sss.pgh.pa.us 655 :CBC 13 : pq_sendbytes(&buf, rbuf.data, nread);
4840 heikki.linnakangas@i 656 : 13 : bytesleft -= nread;
657 : : }
658 : :
2444 peter@eisentraut.org 659 [ - + ]: 13 : if (CloseTransientFile(fd) != 0)
2563 michael@paquier.xyz 660 [ # # ]:UBC 0 : ereport(ERROR,
661 : : (errcode_for_file_access(),
662 : : errmsg("could not close file \"%s\": %m", path)));
663 : :
4840 heikki.linnakangas@i 664 :CBC 13 : pq_endmessage(&buf);
665 : 13 : }
666 : :
667 : : /*
668 : : * Handle UPLOAD_MANIFEST command.
669 : : */
670 : : static void
816 rhaas@postgresql.org 671 : 12 : UploadManifest(void)
672 : : {
673 : : MemoryContext mcxt;
674 : : IncrementalBackupInfo *ib;
675 : 12 : off_t offset = 0;
676 : : StringInfoData buf;
677 : :
678 : : /*
679 : : * parsing the manifest will use the cryptohash stuff, which requires a
680 : : * resource owner
681 : : */
523 andres@anarazel.de 682 [ - + ]: 12 : Assert(AuxProcessResourceOwner != NULL);
683 [ + - - + ]: 12 : Assert(CurrentResourceOwner == AuxProcessResourceOwner ||
684 : : CurrentResourceOwner == NULL);
685 : 12 : CurrentResourceOwner = AuxProcessResourceOwner;
686 : :
687 : : /* Prepare to read manifest data into a temporary context. */
816 rhaas@postgresql.org 688 : 12 : mcxt = AllocSetContextCreate(CurrentMemoryContext,
689 : : "incremental backup information",
690 : : ALLOCSET_DEFAULT_SIZES);
691 : 12 : ib = CreateIncrementalBackupInfo(mcxt);
692 : :
693 : : /* Send a CopyInResponse message */
606 nathan@postgresql.or 694 : 12 : pq_beginmessage(&buf, PqMsg_CopyInResponse);
816 rhaas@postgresql.org 695 : 12 : pq_sendbyte(&buf, 0);
696 : 12 : pq_sendint16(&buf, 0);
697 : 12 : pq_endmessage_reuse(&buf);
698 : 12 : pq_flush();
699 : :
700 : : /* Receive packets from client until done. */
701 [ + + ]: 47 : while (HandleUploadManifestPacket(&buf, &offset, ib))
702 : : ;
703 : :
704 : : /* Finish up manifest processing. */
705 : 11 : FinalizeIncrementalManifest(ib);
706 : :
707 : : /*
708 : : * Discard any old manifest information and arrange to preserve the new
709 : : * information we just got.
710 : : *
711 : : * We assume that MemoryContextDelete and MemoryContextSetParent won't
712 : : * fail, and thus we shouldn't end up bailing out of here in such a way as
713 : : * to leave dangling pointers.
714 : : */
715 [ - + ]: 11 : if (uploaded_manifest_mcxt != NULL)
816 rhaas@postgresql.org 716 :UBC 0 : MemoryContextDelete(uploaded_manifest_mcxt);
816 rhaas@postgresql.org 717 :CBC 11 : MemoryContextSetParent(mcxt, CacheMemoryContext);
718 : 11 : uploaded_manifest = ib;
719 : 11 : uploaded_manifest_mcxt = mcxt;
720 : :
721 : : /* clean up the resource owner we created */
523 andres@anarazel.de 722 : 11 : ReleaseAuxProcessResources(true);
816 rhaas@postgresql.org 723 : 11 : }
724 : :
725 : : /*
726 : : * Process one packet received during the handling of an UPLOAD_MANIFEST
727 : : * operation.
728 : : *
729 : : * 'buf' is scratch space. This function expects it to be initialized, doesn't
730 : : * care what the current contents are, and may override them with completely
731 : : * new contents.
732 : : *
733 : : * The return value is true if the caller should continue processing
734 : : * additional packets and false if the UPLOAD_MANIFEST operation is complete.
735 : : *
 : : * NOTE(review): 'offset' is accepted but not referenced anywhere in this
 : : * function's body; presumably retained for interface symmetry -- confirm.
 : : */
736 : : static bool
737 : 47 : HandleUploadManifestPacket(StringInfo buf, off_t *offset,
738 : : IncrementalBackupInfo *ib)
739 : : {
740 : : int mtype;
741 : : int maxmsglen;
742 : :
 : : /* Don't let a query cancel abort us mid-message; resumed further below. */
743 : 47 : HOLD_CANCEL_INTERRUPTS();
744 : :
745 : 47 : pq_startmsgread();
746 : 47 : mtype = pq_getbyte();
747 [ - + ]: 47 : if (mtype == EOF)
816 rhaas@postgresql.org 748 [ # # ]:UBC 0 : ereport(ERROR,
749 : : (errcode(ERRCODE_CONNECTION_FAILURE),
750 : : errmsg("unexpected EOF on client connection with an open transaction")));
751 : :
 : : /* Pick a message-length sanity limit appropriate to the message type. */
816 rhaas@postgresql.org 752 [ + + - ]:CBC 47 : switch (mtype)
753 : : {
235 nathan@postgresql.or 754 :GNC 36 : case PqMsg_CopyData:
816 rhaas@postgresql.org 755 :CBC 36 : maxmsglen = PQ_LARGE_MESSAGE_LIMIT;
756 : 36 : break;
235 nathan@postgresql.or 757 :GNC 11 : case PqMsg_CopyDone:
758 : : case PqMsg_CopyFail:
759 : : case PqMsg_Flush:
760 : : case PqMsg_Sync:
816 rhaas@postgresql.org 761 :CBC 11 : maxmsglen = PQ_SMALL_MESSAGE_LIMIT;
762 : 11 : break;
816 rhaas@postgresql.org 763 :UBC 0 : default:
764 [ # # ]: 0 : ereport(ERROR,
765 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
766 : : errmsg("unexpected message type 0x%02X during COPY from stdin",
767 : : mtype)));
768 : : maxmsglen = 0; /* keep compiler quiet */
769 : : break;
770 : : }
771 : :
772 : : /* Now collect the message body */
816 rhaas@postgresql.org 773 [ - + ]:CBC 47 : if (pq_getmessage(buf, maxmsglen))
816 rhaas@postgresql.org 774 [ # # ]:UBC 0 : ereport(ERROR,
775 : : (errcode(ERRCODE_CONNECTION_FAILURE),
776 : : errmsg("unexpected EOF on client connection with an open transaction")));
816 rhaas@postgresql.org 777 [ - + ]:CBC 47 : RESUME_CANCEL_INTERRUPTS();
778 : :
779 : : /* Process the message */
780 [ + + - - : 47 : switch (mtype)
- ]
781 : : {
235 nathan@postgresql.or 782 :GNC 36 : case PqMsg_CopyData:
816 rhaas@postgresql.org 783 :CBC 36 : AppendIncrementalManifestData(ib, buf->data, buf->len);
784 : 35 : return true;
785 : :
235 nathan@postgresql.or 786 :GNC 11 : case PqMsg_CopyDone:
816 rhaas@postgresql.org 787 :CBC 11 : return false;
788 : :
235 nathan@postgresql.or 789 :UNC 0 : case PqMsg_Sync:
790 : : case PqMsg_Flush:
791 : : /* Ignore these while in CopyOut mode as we do elsewhere. */
816 rhaas@postgresql.org 792 :UBC 0 : return true;
793 : :
235 nathan@postgresql.or 794 :UNC 0 : case PqMsg_CopyFail:
816 rhaas@postgresql.org 795 [ # # ]:UBC 0 : ereport(ERROR,
796 : : (errcode(ERRCODE_QUERY_CANCELED),
797 : : errmsg("COPY from stdin failed: %s",
798 : : pq_getmsgstring(buf))));
799 : : }
800 : :
801 : : /* Not reached. */
802 : 0 : Assert(false);
803 : : return false;
804 : : }
805 : :
806 : : /*
807 : : * Handle START_REPLICATION command.
808 : : *
809 : : * At the moment, this never returns, but an ereport(ERROR) will take us back
810 : : * to the main loop.
 : : *
 : : * NOTE(review): the statement above looks stale -- the code below does
 : : * return normally once streaming ends (it sends the next-timeline result
 : : * set and a CommandComplete before falling off the end). Confirm intent.
811 : : */
812 : : static void
4840 heikki.linnakangas@i 813 :CBC 287 : StartReplication(StartReplicationCmd *cmd)
814 : : {
815 : : StringInfoData buf;
816 : : XLogRecPtr FlushPtr;
817 : : TimeLineID FlushTLI;
818 : :
819 : : /* create xlogreader for physical replication */
2106 michael@paquier.xyz 820 : 287 : xlogreader =
1770 tmunro@postgresql.or 821 : 287 : XLogReaderAllocate(wal_segment_size, NULL,
822 : 287 : XL_ROUTINE(.segment_open = WalSndSegmentOpen,
823 : : .segment_close = wal_segment_close),
824 : : NULL);
825 : :
2106 michael@paquier.xyz 826 [ - + ]: 287 : if (!xlogreader)
2106 michael@paquier.xyz 827 [ # # ]:UBC 0 : ereport(ERROR,
828 : : (errcode(ERRCODE_OUT_OF_MEMORY),
829 : : errmsg("out of memory"),
830 : : errdetail("Failed while allocating a WAL reading processor.")));
831 : :
832 : : /*
833 : : * We assume here that we're logging enough information in the WAL for
834 : : * log-shipping, since this is checked in PostmasterMain().
835 : : *
836 : : * NOTE: wal_level can only change at shutdown, so in most cases it is
837 : : * difficult for there to be WAL data that we can still see that was
838 : : * written at wal_level='minimal'.
839 : : */
840 : :
 : : /* A named slot must exist already and must be a physical slot. */
4426 rhaas@postgresql.org 841 [ + + ]:CBC 287 : if (cmd->slotname)
842 : : {
408 akapila@postgresql.o 843 : 190 : ReplicationSlotAcquire(cmd->slotname, true, true);
3869 andres@anarazel.de 844 [ - + ]: 188 : if (SlotIsLogical(MyReplicationSlot))
4426 rhaas@postgresql.org 845 [ # # ]:UBC 0 : ereport(ERROR,
846 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
847 : : errmsg("cannot use a logical replication slot for physical replication")));
848 : :
849 : : /*
850 : : * We don't need to verify the slot's restart_lsn here; instead we
851 : : * rely on the caller requesting the starting point to use. If the
852 : : * WAL segment doesn't exist, we'll fail later.
853 : : */
854 : : }
855 : :
856 : : /*
857 : : * Select the timeline. If it was given explicitly by the client, use
858 : : * that. Otherwise use the timeline of the last replayed record.
859 : : */
1710 jdavis@postgresql.or 860 :CBC 285 : am_cascading_walsender = RecoveryInProgress();
4833 heikki.linnakangas@i 861 [ + + ]: 285 : if (am_cascading_walsender)
1591 rhaas@postgresql.org 862 : 13 : FlushPtr = GetStandbyFlushRecPtr(&FlushTLI);
863 : : else
864 : 272 : FlushPtr = GetFlushRecPtr(&FlushTLI);
865 : :
4840 heikki.linnakangas@i 866 [ + + ]: 285 : if (cmd->timeline != 0)
867 : : {
868 : : XLogRecPtr switchpoint;
869 : :
870 : 284 : sendTimeLine = cmd->timeline;
1591 rhaas@postgresql.org 871 [ + + ]: 284 : if (sendTimeLine == FlushTLI)
872 : : {
 : : /* Client asked for the current timeline: stream indefinitely. */
4840 heikki.linnakangas@i 873 : 275 : sendTimeLineIsHistoric = false;
874 : 275 : sendTimeLineValidUpto = InvalidXLogRecPtr;
875 : : }
876 : : else
877 : : {
878 : : List *timeLineHistory;
879 : :
880 : 9 : sendTimeLineIsHistoric = true;
881 : :
882 : : /*
883 : : * Check that the timeline the client requested exists, and the
884 : : * requested start location is on that timeline.
885 : : */
1591 rhaas@postgresql.org 886 : 9 : timeLineHistory = readTimeLineHistory(FlushTLI);
4805 heikki.linnakangas@i 887 : 9 : switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
888 : : &sendTimeLineNextTLI);
4840 889 : 9 : list_free_deep(timeLineHistory);
890 : :
891 : : /*
892 : : * Found the requested timeline in the history. Check that
893 : : * requested startpoint is on that timeline in our history.
894 : : *
895 : : * This is quite loose on purpose. We only check that we didn't
896 : : * fork off the requested timeline before the switchpoint. We
897 : : * don't check that we switched *to* it before the requested
898 : : * starting point. This is because the client can legitimately
899 : : * request to start replication from the beginning of the WAL
900 : : * segment that contains switchpoint, but on the new timeline, so
901 : : * that it doesn't end up with a partial segment. If you ask for
902 : : * too old a starting point, you'll get an error later when we
903 : : * fail to find the requested WAL segment in pg_wal.
904 : : *
905 : : * XXX: we could be more strict here and only allow a startpoint
906 : : * that's older than the switchpoint, if it's still in the same
907 : : * WAL segment.
908 : : */
129 alvherre@kurilemu.de 909 [ + - ]:GNC 9 : if (XLogRecPtrIsValid(switchpoint) &&
4825 alvherre@alvh.no-ip. 910 [ - + ]:CBC 9 : switchpoint < cmd->startpoint)
911 : : {
4840 heikki.linnakangas@i 912 [ # # ]:UBC 0 : ereport(ERROR,
913 : : errmsg("requested starting point %X/%08X on timeline %u is not in this server's history",
914 : : LSN_FORMAT_ARGS(cmd->startpoint),
915 : : cmd->timeline),
916 : : errdetail("This server's history forked from timeline %u at %X/%08X.",
917 : : cmd->timeline,
918 : : LSN_FORMAT_ARGS(switchpoint)));
919 : : }
4840 heikki.linnakangas@i 920 :CBC 9 : sendTimeLineValidUpto = switchpoint;
921 : : }
922 : : }
923 : : else
924 : : {
 : : /* No explicit timeline requested: use the flush-position timeline. */
1591 rhaas@postgresql.org 925 : 1 : sendTimeLine = FlushTLI;
4840 heikki.linnakangas@i 926 : 1 : sendTimeLineValidUpto = InvalidXLogRecPtr;
927 : 1 : sendTimeLineIsHistoric = false;
928 : : }
929 : :
930 : 285 : streamingDoneSending = streamingDoneReceiving = false;
931 : :
932 : : /* If there is nothing to stream, don't even enter COPY mode */
4805 933 [ + + + - ]: 285 : if (!sendTimeLineIsHistoric || cmd->startpoint < sendTimeLineValidUpto)
934 : : {
935 : : /*
936 : : * When we first start replication the standby will be behind the
937 : : * primary. For some applications, for example synchronous
938 : : * replication, it is important to have a clear state for this initial
939 : : * catchup mode, so we can trigger actions when we change streaming
940 : : * state later. We may stay in this state for a long time, which is
941 : : * exactly why we want to be able to monitor whether or not we are
942 : : * still here.
943 : : */
4840 944 : 285 : WalSndSetState(WALSNDSTATE_CATCHUP);
945 : :
946 : : /* Send a CopyBothResponse message, and start streaming */
936 nathan@postgresql.or 947 : 285 : pq_beginmessage(&buf, PqMsg_CopyBothResponse);
4840 heikki.linnakangas@i 948 : 285 : pq_sendbyte(&buf, 0);
3077 andres@anarazel.de 949 : 285 : pq_sendint16(&buf, 0);
4840 heikki.linnakangas@i 950 : 285 : pq_endmessage(&buf);
951 : 285 : pq_flush();
952 : :
953 : : /*
954 : : * Don't allow a request to stream from a future point in WAL that
955 : : * hasn't been flushed to disk in this server yet.
956 : : */
4825 alvherre@alvh.no-ip. 957 [ - + ]: 285 : if (FlushPtr < cmd->startpoint)
958 : : {
4840 heikki.linnakangas@i 959 [ # # ]:UBC 0 : ereport(ERROR,
960 : : errmsg("requested starting point %X/%08X is ahead of the WAL flush position of this server %X/%08X",
961 : : LSN_FORMAT_ARGS(cmd->startpoint),
962 : : LSN_FORMAT_ARGS(FlushPtr)));
963 : : }
964 : :
965 : : /* Start streaming from the requested point */
4840 heikki.linnakangas@i 966 :CBC 285 : sentPtr = cmd->startpoint;
967 : :
968 : : /* Initialize shared memory status, too */
3180 alvherre@alvh.no-ip. 969 [ - + ]: 285 : SpinLockAcquire(&MyWalSnd->mutex);
970 : 285 : MyWalSnd->sentPtr = sentPtr;
971 : 285 : SpinLockRelease(&MyWalSnd->mutex);
972 : :
4840 heikki.linnakangas@i 973 : 285 : SyncRepInitConfig();
974 : :
975 : : /* Main loop of walsender */
976 : 285 : replication_active = true;
977 : :
4388 rhaas@postgresql.org 978 : 285 : WalSndLoop(XLogSendPhysical);
979 : :
4840 heikki.linnakangas@i 980 : 154 : replication_active = false;
3205 andres@anarazel.de 981 [ - + ]: 154 : if (got_STOPPING)
4840 heikki.linnakangas@i 982 :UBC 0 : proc_exit(0);
4840 heikki.linnakangas@i 983 :CBC 154 : WalSndSetState(WALSNDSTATE_STARTUP);
984 : :
4805 985 [ + - - + ]: 154 : Assert(streamingDoneSending && streamingDoneReceiving);
986 : : }
987 : :
4426 rhaas@postgresql.org 988 [ + + ]: 154 : if (cmd->slotname)
989 : 140 : ReplicationSlotRelease();
990 : :
991 : : /*
992 : : * Copy is finished now. Send a single-row result set indicating the next
993 : : * timeline.
994 : : */
4805 heikki.linnakangas@i 995 [ + + ]: 154 : if (sendTimeLineIsHistoric)
996 : : {
 : : /* Buffer sized for "%X/%08X": 8 hex digits, '/', 8 hex digits, NUL. */
997 : : char startpos_str[8 + 1 + 8 + 1];
998 : : DestReceiver *dest;
999 : : TupOutputState *tstate;
1000 : : TupleDesc tupdesc;
1001 : : Datum values[2];
1338 peter@eisentraut.org 1002 : 10 : bool nulls[2] = {0};
1003 : :
251 alvherre@kurilemu.de 1004 :GNC 10 : snprintf(startpos_str, sizeof(startpos_str), "%X/%08X",
1846 peter@eisentraut.org 1005 :CBC 10 : LSN_FORMAT_ARGS(sendTimeLineValidUpto));
1006 : :
3329 rhaas@postgresql.org 1007 : 10 : dest = CreateDestReceiver(DestRemoteSimple);
1008 : :
1009 : : /*
1010 : : * Need a tuple descriptor representing two columns. int8 may seem
1011 : : * like a surprising data type for this, but in theory int4 would not
1012 : : * be wide enough for this, as TimeLineID is unsigned.
1013 : : */
2672 andres@anarazel.de 1014 : 10 : tupdesc = CreateTemplateTupleDesc(2);
3329 rhaas@postgresql.org 1015 : 10 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "next_tli",
1016 : : INT8OID, -1, 0);
1017 : 10 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "next_tli_startpos",
1018 : : TEXTOID, -1, 0);
1019 : :
1020 : : /* prepare for projection of tuple */
2677 andres@anarazel.de 1021 : 10 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
1022 : :
3329 rhaas@postgresql.org 1023 : 10 : values[0] = Int64GetDatum((int64) sendTimeLineNextTLI);
1024 : 10 : values[1] = CStringGetTextDatum(startpos_str);
1025 : :
1026 : : /* send it to dest */
1027 : 10 : do_tup_output(tstate, values, nulls);
1028 : :
1029 : 10 : end_tup_output(tstate);
1030 : : }
1031 : :
1032 : : /* Send CommandComplete message */
2006 alvherre@alvh.no-ip. 1033 : 154 : EndReplicationCommand("START_STREAMING");
5539 magnus@hagander.net 1034 : 154 : }
1035 : :
1036 : : /*
1037 : : * XLogReaderRoutine->page_read callback for logical decoding contexts, as a
1038 : : * walsender process.
1039 : : *
1040 : : * Inside the walsender we can do better than read_local_xlog_page,
1041 : : * which has to do a plain sleep/busy loop, because the walsender's latch gets
1042 : : * set every time WAL is flushed.
 : : *
 : : * Returns the number of bytes read into cur_page -- a full XLOG_BLCKSZ page
 : : * when more than one block is flushed, else the flushed part of the page --
 : : * or -1 when enough WAL is not available (which implies we are shutting
 : : * down).
1043 : : */
1044 : : static int
1770 tmunro@postgresql.or 1045 : 22882 : logical_read_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
1046 : : XLogRecPtr targetRecPtr, char *cur_page)
1047 : : {
1048 : : XLogRecPtr flushptr;
1049 : : int count;
1050 : : WALReadError errinfo;
1051 : : XLogSegNo segno;
1052 : : TimeLineID currTLI;
1053 : :
1054 : : /*
1055 : : * Make sure we have enough WAL available before retrieving the current
1056 : : * timeline.
1057 : : */
1072 andres@anarazel.de 1058 : 22882 : flushptr = WalSndWaitForWal(targetPagePtr + reqLen);
1059 : :
1060 : : /* Fail if not enough (implies we are going to shut down) */
614 akapila@postgresql.o 1061 [ + + ]: 22674 : if (flushptr < targetPagePtr + reqLen)
1062 : 6383 : return -1;
1063 : :
1064 : : /*
1065 : : * Since logical decoding is also permitted on a standby server, we need
1066 : : * to check if the server is in recovery to decide how to get the current
1067 : : * timeline ID (so that it also covers the promotion or timeline change
1068 : : * cases). We must determine am_cascading_walsender after waiting for the
1069 : : * required WAL so that it is correct when the walsender wakes up after a
1070 : : * promotion.
1071 : : */
1072 andres@anarazel.de 1072 : 16291 : am_cascading_walsender = RecoveryInProgress();
1073 : :
1074 [ + + ]: 16291 : if (am_cascading_walsender)
1075 : 147 : GetXLogReplayRecPtr(&currTLI);
1076 : : else
1077 : 16144 : currTLI = GetWALInsertionTimeLine();
1078 : :
 : : /* Refresh the send-timeline state from what the reader determined. */
1591 rhaas@postgresql.org 1079 : 16291 : XLogReadDetermineTimeline(state, targetPagePtr, reqLen, currTLI);
1080 : 16291 : sendTimeLineIsHistoric = (state->currTLI != currTLI);
3280 simon@2ndQuadrant.co 1081 : 16291 : sendTimeLine = state->currTLI;
1082 : 16291 : sendTimeLineValidUpto = state->currTLIValidUntil;
1083 : 16291 : sendTimeLineNextTLI = state->nextTLI;
1084 : :
3180 tgl@sss.pgh.pa.us 1085 [ + + ]: 16291 : if (targetPagePtr + XLOG_BLCKSZ <= flushptr)
1086 : 14337 : count = XLOG_BLCKSZ; /* more than one block available */
1087 : : else
1088 : 1954 : count = flushptr - targetPagePtr; /* part of the page available */
1089 : :
1090 : : /* now actually read the data, we know it's there */
1770 tmunro@postgresql.or 1091 [ - + ]: 16291 : if (!WALRead(state,
1092 : : cur_page,
1093 : : targetPagePtr,
1094 : : count,
1095 : : currTLI, /* Pass the current TLI because only
1096 : : * WalSndSegmentOpen controls whether new TLI
1097 : : * is needed. */
1098 : : &errinfo))
2302 alvherre@alvh.no-ip. 1099 :UBC 0 : WALReadRaiseError(&errinfo);
1100 : :
1101 : : /*
1102 : : * After reading into the buffer, check that what we read was valid. We do
1103 : : * this after reading, because even though the segment was present when we
1104 : : * opened it, it might get recycled or removed while we read it. The
1105 : : * read() succeeds in that case, but the data we tried to read might
1106 : : * already have been overwritten with new WAL records.
1107 : : */
2132 alvherre@alvh.no-ip. 1108 :CBC 16291 : XLByteToSeg(targetPagePtr, segno, state->segcxt.ws_segsize);
1109 : 16291 : CheckXLogRemoved(segno, state->seg.ws_tli);
1110 : :
1770 tmunro@postgresql.or 1111 : 16291 : return count;
1112 : : }
1113 : :
1114 : : /*
1115 : : * Process extra options given to CREATE_REPLICATION_SLOT.
 : : *
 : : * Output parameters are written only when the corresponding option appears
 : : * in cmd->options (callers are expected to pre-set defaults):
 : : * *snapshot_action - SNAPSHOT 'export'|'nothing'|'use' (logical slots only)
 : : * *reserve_wal - RESERVE_WAL boolean (physical slots only)
 : : * *two_phase - TWO_PHASE boolean (logical slots only)
 : : * *failover - FAILOVER boolean (logical slots only)
 : : *
 : : * Raises an error on a duplicated option, an option invalid for the slot
 : : * kind, an unrecognized SNAPSHOT value, or an unknown option name.
1116 : : */
1117 : : static void
3288 peter_e@gmx.net 1118 : 500 : parseCreateReplSlotOptions(CreateReplicationSlotCmd *cmd,
1119 : : bool *reserve_wal,
1120 : : CRSSnapshotAction *snapshot_action,
1121 : : bool *two_phase, bool *failover)
1122 : : {
1123 : : ListCell *lc;
1124 : 500 : bool snapshot_action_given = false;
1125 : 500 : bool reserve_wal_given = false;
1719 akapila@postgresql.o 1126 : 500 : bool two_phase_given = false;
776 1127 : 500 : bool failover_given = false;
1128 : :
1129 : : /* Parse options */
3224 bruce@momjian.us 1130 [ + + + + : 1011 : foreach(lc, cmd->options)
+ + ]
1131 : :
1132 : : {
3288 peter_e@gmx.net 1132 : 511 : DefElem *defel = (DefElem *) lfirst(lc);
1133 : :
1622 rhaas@postgresql.org 1134 [ + + ]: 511 : if (strcmp(defel->defname, "snapshot") == 0)
1135 : : {
1136 : : char *action;
1137 : :
3288 peter_e@gmx.net 1138 [ + - - + ]: 356 : if (snapshot_action_given || cmd->kind != REPLICATION_KIND_LOGICAL)
3288 peter_e@gmx.net 1139 [ # # ]:UBC 0 : ereport(ERROR,
1140 : : (errcode(ERRCODE_SYNTAX_ERROR),
1141 : : errmsg("conflicting or redundant options")));
1142 : :
1622 rhaas@postgresql.org 1143 :CBC 356 : action = defGetString(defel);
3288 peter_e@gmx.net 1144 : 356 : snapshot_action_given = true;
1145 : :
1622 rhaas@postgresql.org 1146 [ + + ]: 356 : if (strcmp(action, "export") == 0)
1147 : 1 : *snapshot_action = CRS_EXPORT_SNAPSHOT;
1148 [ + + ]: 355 : else if (strcmp(action, "nothing") == 0)
1149 : 148 : *snapshot_action = CRS_NOEXPORT_SNAPSHOT;
1150 [ + - ]: 207 : else if (strcmp(action, "use") == 0)
1151 : 207 : *snapshot_action = CRS_USE_SNAPSHOT;
1152 : : else
3279 peter_e@gmx.net 1153 [ # # ]:UBC 0 : ereport(ERROR,
1154 : : (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
1155 : : errmsg("unrecognized value for %s option \"%s\": \"%s\"",
1156 : : "CREATE_REPLICATION_SLOT", defel->defname, action)));
1157 : : }
3288 peter_e@gmx.net 1158 [ + + ]:CBC 155 : else if (strcmp(defel->defname, "reserve_wal") == 0)
1159 : : {
1160 [ + - - + ]: 143 : if (reserve_wal_given || cmd->kind != REPLICATION_KIND_PHYSICAL)
3288 peter_e@gmx.net 1161 [ # # ]:UBC 0 : ereport(ERROR,
1162 : : (errcode(ERRCODE_SYNTAX_ERROR),
1163 : : errmsg("conflicting or redundant options")));
1164 : :
3288 peter_e@gmx.net 1165 :CBC 143 : reserve_wal_given = true;
1622 rhaas@postgresql.org 1166 : 143 : *reserve_wal = defGetBoolean(defel);
1167 : : }
1719 akapila@postgresql.o 1168 [ + + ]: 12 : else if (strcmp(defel->defname, "two_phase") == 0)
1169 : : {
1170 [ + - - + ]: 2 : if (two_phase_given || cmd->kind != REPLICATION_KIND_LOGICAL)
1719 akapila@postgresql.o 1171 [ # # ]:UBC 0 : ereport(ERROR,
1172 : : (errcode(ERRCODE_SYNTAX_ERROR),
1173 : : errmsg("conflicting or redundant options")));
1719 akapila@postgresql.o 1174 :CBC 2 : two_phase_given = true;
1622 rhaas@postgresql.org 1175 : 2 : *two_phase = defGetBoolean(defel);
1176 : : }
776 akapila@postgresql.o 1177 [ + - ]: 10 : else if (strcmp(defel->defname, "failover") == 0)
1178 : : {
1179 [ + - - + ]: 10 : if (failover_given || cmd->kind != REPLICATION_KIND_LOGICAL)
776 akapila@postgresql.o 1180 [ # # ]:UBC 0 : ereport(ERROR,
1181 : : (errcode(ERRCODE_SYNTAX_ERROR),
1182 : : errmsg("conflicting or redundant options")));
776 akapila@postgresql.o 1183 :CBC 10 : failover_given = true;
1184 : 10 : *failover = defGetBoolean(defel);
1185 : : }
1186 : : else
3288 peter_e@gmx.net 1187 [ # # ]:UBC 0 : elog(ERROR, "unrecognized option: %s", defel->defname);
1188 : : }
3288 peter_e@gmx.net 1189 :CBC 500 : }
1190 : :
1191 : : /*
1192 : : * Create a new replication slot.
 : : *
 : : * Handles the CREATE_REPLICATION_SLOT replication command for both physical
 : : * and logical slots. On success, sends the client a single-row result set
 : : * with columns (slot_name, consistent_point, snapshot_name, output_plugin);
 : : * snapshot_name is NULL unless SNAPSHOT 'export' was requested, and
 : : * output_plugin is NULL for physical slots.
1193 : : */
1194 : : static void
4426 rhaas@postgresql.org 1195 : 500 : CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
1196 : : {
4388 1197 : 500 : const char *snapshot_name = NULL;
1198 : : char xloc[MAXFNAMELEN];
1199 : : char *slot_name;
3288 peter_e@gmx.net 1200 : 500 : bool reserve_wal = false;
1719 akapila@postgresql.o 1201 : 500 : bool two_phase = false;
776 1202 : 500 : bool failover = false;
3279 peter_e@gmx.net 1203 : 500 : CRSSnapshotAction snapshot_action = CRS_EXPORT_SNAPSHOT;
1204 : : DestReceiver *dest;
1205 : : TupOutputState *tstate;
1206 : : TupleDesc tupdesc;
1207 : : Datum values[4];
1338 peter@eisentraut.org 1208 : 500 : bool nulls[4] = {0};
1209 : :
4426 rhaas@postgresql.org 1210 [ - + ]: 500 : Assert(!MyReplicationSlot);
1211 : :
776 akapila@postgresql.o 1212 : 500 : parseCreateReplSlotOptions(cmd, &reserve_wal, &snapshot_action, &two_phase,
1213 : : &failover);
1214 : :
4388 rhaas@postgresql.org 1215 [ + + ]: 500 : if (cmd->kind == REPLICATION_KIND_PHYSICAL)
1216 : : {
3384 peter_e@gmx.net 1217 : 144 : ReplicationSlotCreate(cmd->slotname, false,
1838 akapila@postgresql.o 1218 [ + + ]: 144 : cmd->temporary ? RS_TEMPORARY : RS_PERSISTENT,
1219 : : false, false, false);
1220 : :
845 michael@paquier.xyz 1221 [ + + ]: 143 : if (reserve_wal)
1222 : : {
1223 : 142 : ReplicationSlotReserveWal();
1224 : :
1225 : 142 : ReplicationSlotMarkDirty();
1226 : :
1227 : : /* Write this slot to disk if it's a permanent one. */
1228 [ + + ]: 142 : if (!cmd->temporary)
1229 : 3 : ReplicationSlotSave();
1230 : : }
1231 : : }
1232 : : else
1233 : : {
1234 : : LogicalDecodingContext *ctx;
1235 : 356 : bool need_full_snapshot = false;
1236 : :
1237 [ - + ]: 356 : Assert(cmd->kind == REPLICATION_KIND_LOGICAL);
1238 : :
4388 rhaas@postgresql.org 1239 : 356 : CheckLogicalDecodingRequirements();
1240 : :
1241 : : /*
1242 : : * Initially create persistent slot as ephemeral - that allows us to
1243 : : * nicely handle errors during initialization because it'll get
1244 : : * dropped if this transaction fails. We'll make it persistent at the
1245 : : * end. Temporary slots can be created as temporary from beginning as
1246 : : * they get dropped on error as well.
1247 : : */
3384 peter_e@gmx.net 1248 : 356 : ReplicationSlotCreate(cmd->slotname, true,
1838 akapila@postgresql.o 1249 [ - + ]: 356 : cmd->temporary ? RS_TEMPORARY : RS_EPHEMERAL,
1250 : : two_phase, failover, false);
1251 : :
1252 : : /*
1253 : : * Do options check early so that we can bail before calling the
1254 : : * DecodingContextFindStartpoint which can take long time.
1255 : : */
3279 peter_e@gmx.net 1256 [ + + ]: 356 : if (snapshot_action == CRS_EXPORT_SNAPSHOT)
1257 : : {
1258 [ - + ]: 1 : if (IsTransactionBlock())
3279 peter_e@gmx.net 1259 [ # # ]:UBC 0 : ereport(ERROR,
1260 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1261 : : (errmsg("%s must not be called inside a transaction",
1262 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'export')")));
1263 : :
3244 andres@anarazel.de 1264 :CBC 1 : need_full_snapshot = true;
1265 : : }
3279 peter_e@gmx.net 1266 [ + + ]: 355 : else if (snapshot_action == CRS_USE_SNAPSHOT)
1267 : : {
 : : /*
 : : * SNAPSHOT 'use' installs the slot's snapshot as the current
 : : * transaction snapshot, so the surrounding transaction must be in
 : : * a state where that is safe: a read-only REPEATABLE READ block
 : : * that has not yet taken a snapshot, and not a subtransaction.
 : : */
1268 [ - + ]: 207 : if (!IsTransactionBlock())
3279 peter_e@gmx.net 1269 [ # # ]:UBC 0 : ereport(ERROR,
1270 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1271 : : (errmsg("%s must be called inside a transaction",
1272 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1273 : :
3279 peter_e@gmx.net 1274 [ - + ]:CBC 207 : if (XactIsoLevel != XACT_REPEATABLE_READ)
3279 peter_e@gmx.net 1275 [ # # ]:UBC 0 : ereport(ERROR,
1276 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1277 : : (errmsg("%s must be called in REPEATABLE READ isolation mode transaction",
1278 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1210 akapila@postgresql.o 1279 [ - + ]:CBC 207 : if (!XactReadOnly)
1210 akapila@postgresql.o 1280 [ # # ]:UBC 0 : ereport(ERROR,
1281 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1282 : : (errmsg("%s must be called in a read-only transaction",
1283 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1284 : :
3279 peter_e@gmx.net 1285 [ - + ]:CBC 207 : if (FirstSnapshotSet)
3279 peter_e@gmx.net 1286 [ # # ]:UBC 0 : ereport(ERROR,
1287 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1288 : : (errmsg("%s must be called before any query",
1289 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1290 : :
3279 peter_e@gmx.net 1291 [ - + ]:CBC 207 : if (IsSubTransaction())
3279 peter_e@gmx.net 1292 [ # # ]:UBC 0 : ereport(ERROR,
1293 : : /*- translator: %s is a CREATE_REPLICATION_SLOT statement */
1294 : : (errmsg("%s must not be called in a subtransaction",
1295 : : "CREATE_REPLICATION_SLOT ... (SNAPSHOT 'use')")));
1296 : :
3244 andres@anarazel.de 1297 :CBC 207 : need_full_snapshot = true;
1298 : : }
1299 : :
1300 : : /*
1301 : : * Ensure the logical decoding is enabled before initializing the
1302 : : * logical decoding context.
1303 : : */
82 msawada@postgresql.o 1304 :GNC 356 : EnsureLogicalDecodingEnabled();
1305 [ - + ]: 356 : Assert(IsLogicalDecodingEnabled());
1306 : :
3244 andres@anarazel.de 1307 :CBC 356 : ctx = CreateInitDecodingContext(cmd->plugin, NIL, need_full_snapshot,
1308 : : InvalidXLogRecPtr,
1770 tmunro@postgresql.or 1309 : 356 : XL_ROUTINE(.page_read = logical_read_xlog_page,
1310 : : .segment_open = WalSndSegmentOpen,
1311 : : .segment_close = wal_segment_close),
1312 : : WalSndPrepareWrite, WalSndWriteData,
1313 : : WalSndUpdateProgress);
1314 : :
1315 : : /*
1316 : : * Signal that we don't need the timeout mechanism. We're just
1317 : : * creating the replication slot and don't yet accept feedback
1318 : : * messages or send keepalives. As we possibly need to wait for
1319 : : * further WAL the walsender would otherwise possibly be killed too
1320 : : * soon.
1321 : : */
4308 andres@anarazel.de 1322 : 356 : last_reply_timestamp = 0;
1323 : :
1324 : : /* build initial snapshot, might take a while */
4388 rhaas@postgresql.org 1325 : 356 : DecodingContextFindStartpoint(ctx);
1326 : :
1327 : : /*
1328 : : * Export or use the snapshot if we've been asked to do so.
1329 : : *
1330 : : * NB. We will convert the snapbuild.c kind of snapshot to normal
1331 : : * snapshot when doing this.
1332 : : */
3279 peter_e@gmx.net 1333 [ + + ]: 356 : if (snapshot_action == CRS_EXPORT_SNAPSHOT)
1334 : : {
3288 1335 : 1 : snapshot_name = SnapBuildExportSnapshot(ctx->snapshot_builder);
1336 : : }
3279 1337 [ + + ]: 355 : else if (snapshot_action == CRS_USE_SNAPSHOT)
1338 : : {
1339 : : Snapshot snap;
1340 : :
3276 tgl@sss.pgh.pa.us 1341 : 207 : snap = SnapBuildInitialSnapshot(ctx->snapshot_builder);
3279 peter_e@gmx.net 1342 : 207 : RestoreTransactionSnapshot(snap, MyProc);
1343 : : }
1344 : :
1345 : : /* don't need the decoding context anymore */
4388 rhaas@postgresql.org 1346 : 356 : FreeDecodingContext(ctx);
1347 : :
3384 peter_e@gmx.net 1348 [ + - ]: 356 : if (!cmd->temporary)
1349 : 356 : ReplicationSlotPersist();
1350 : : }
1351 : :
 : : /* Format the slot's confirmed-flush LSN for the result set. */
251 alvherre@kurilemu.de 1352 :GNC 499 : snprintf(xloc, sizeof(xloc), "%X/%08X",
1846 peter@eisentraut.org 1353 :CBC 499 : LSN_FORMAT_ARGS(MyReplicationSlot->data.confirmed_flush));
1354 : :
3329 rhaas@postgresql.org 1355 : 499 : dest = CreateDestReceiver(DestRemoteSimple);
1356 : :
1357 : : /*----------
1358 : : * Need a tuple descriptor representing four columns:
1359 : : * - first field: the slot name
1360 : : * - second field: LSN at which we became consistent
1361 : : * - third field: exported snapshot's name
1362 : : * - fourth field: output plugin
1363 : : */
2672 andres@anarazel.de 1364 : 499 : tupdesc = CreateTemplateTupleDesc(4);
3329 rhaas@postgresql.org 1365 : 499 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "slot_name",
1366 : : TEXTOID, -1, 0);
1367 : 499 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 2, "consistent_point",
1368 : : TEXTOID, -1, 0);
1369 : 499 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 3, "snapshot_name",
1370 : : TEXTOID, -1, 0);
1371 : 499 : TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 4, "output_plugin",
1372 : : TEXTOID, -1, 0);
1373 : :
1374 : : /* prepare for projection of tuples */
2677 andres@anarazel.de 1375 : 499 : tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
1376 : :
1377 : : /* slot_name */
3329 rhaas@postgresql.org 1378 : 499 : slot_name = NameStr(MyReplicationSlot->data.name);
1379 : 499 : values[0] = CStringGetTextDatum(slot_name);
1380 : :
1381 : : /* consistent wal location */
3229 peter_e@gmx.net 1382 : 499 : values[1] = CStringGetTextDatum(xloc);
1383 : :
1384 : : /* snapshot name, or NULL if none */
4388 rhaas@postgresql.org 1385 [ + + ]: 499 : if (snapshot_name != NULL)
3329 1386 : 1 : values[2] = CStringGetTextDatum(snapshot_name);
1387 : : else
1388 : 498 : nulls[2] = true;
1389 : :
1390 : : /* plugin, or NULL if none */
4388 1391 [ + + ]: 499 : if (cmd->plugin != NULL)
3329 1392 : 356 : values[3] = CStringGetTextDatum(cmd->plugin);
1393 : : else
1394 : 143 : nulls[3] = true;
1395 : :
1396 : : /* send it to dest */
1397 : 499 : do_tup_output(tstate, values, nulls);
1398 : 499 : end_tup_output(tstate);
1399 : :
4426 1400 : 499 : ReplicationSlotRelease();
1401 : 499 : }
1402 : :
1403 : : /*
1404 : : * Get rid of a replication slot that is no longer wanted.
 : : *
 : : * Handles the DROP_REPLICATION_SLOT replication command. When the client
 : : * specified WAIT, we block until an in-use slot becomes free instead of
 : : * erroring out immediately (hence the inverted "nowait" argument below).
1405 : : */
1406 : : static void
1407 : 289 : DropReplicationSlot(DropReplicationSlotCmd *cmd)
1408 : : {
 : : /* cmd->wait == true means "block if the slot is active", i.e. !nowait */
3117 alvherre@alvh.no-ip. 1409 : 289 : ReplicationSlotDrop(cmd->slotname, !cmd->wait);
4426 rhaas@postgresql.org 1410 : 287 : }
1411 : :
1412 : : /*
1413 : : * Change the definition of a replication slot.
 : : *
 : : * Handles the ALTER_REPLICATION_SLOT replication command. Recognized
 : : * options are FAILOVER and TWO_PHASE; each may be given at most once, and
 : : * only the options actually given are passed through to
 : : * ReplicationSlotAlter() (NULL means "leave unchanged").
1414 : : */
1415 : : static void
599 akapila@postgresql.o 1416 : 7 : AlterReplicationSlot(AlterReplicationSlotCmd *cmd)
1417 : : {
776 1418 : 7 : bool failover_given = false;
599 1419 : 7 : bool two_phase_given = false;
 : : /* only meaningful when the corresponding *_given flag is set */
1420 : : bool failover;
1421 : : bool two_phase;
1422 : :
1423 : : /* Parse options */
776 1424 [ + - + + : 21 : foreach_ptr(DefElem, defel, cmd->options)
+ + ]
1425 : : {
1426 [ + + ]: 7 : if (strcmp(defel->defname, "failover") == 0)
1427 : : {
1428 [ - + ]: 6 : if (failover_given)
776 akapila@postgresql.o 1429 [ # # ]:UBC 0 : ereport(ERROR,
1430 : : (errcode(ERRCODE_SYNTAX_ERROR),
1431 : : errmsg("conflicting or redundant options")));
776 akapila@postgresql.o 1432 :CBC 6 : failover_given = true;
599 1433 : 6 : failover = defGetBoolean(defel);
1434 : : }
1435 [ + - ]: 1 : else if (strcmp(defel->defname, "two_phase") == 0)
1436 : : {
1437 [ - + ]: 1 : if (two_phase_given)
599 akapila@postgresql.o 1438 [ # # ]:UBC 0 : ereport(ERROR,
1439 : : (errcode(ERRCODE_SYNTAX_ERROR),
1440 : : errmsg("conflicting or redundant options")));
599 akapila@postgresql.o 1441 :CBC 1 : two_phase_given = true;
1442 : 1 : two_phase = defGetBoolean(defel);
1443 : : }
1444 : : else
776 akapila@postgresql.o 1445 [ # # ]:UBC 0 : elog(ERROR, "unrecognized option: %s", defel->defname);
1446 : : }
1447 : :
 : : /* Pass along only the settings that were explicitly given. */
599 akapila@postgresql.o 1448 [ + + + + ]:CBC 7 : ReplicationSlotAlter(cmd->slotname,
1449 : : failover_given ? &failover : NULL,
1450 : : two_phase_given ? &two_phase : NULL);
776 1451 : 5 : }
1452 : :
1453 : : /*
1454 : : * Load previously initiated logical slot and prepare for sending data (via
1455 : : * WalSndLoop).
1456 : : */
1457 : : static void
4388 rhaas@postgresql.org 1458 : 444 : StartLogicalReplication(StartReplicationCmd *cmd)
1459 : : {
1460 : : StringInfoData buf;
1461 : : QueryCompletion qc;
1462 : :
1463 : : /* make sure that our requirements are still fulfilled */
1464 : 444 : CheckLogicalDecodingRequirements();
1465 : :
1466 [ - + ]: 442 : Assert(!MyReplicationSlot);
1467 : :
408 akapila@postgresql.o 1468 : 442 : ReplicationSlotAcquire(cmd->slotname, true, true);
1469 : :
1470 : : /*
1471 : : * Force a disconnect, so that the decoding code doesn't need to care
1472 : : * about an eventual switch from running in recovery, to running in a
1473 : : * normal environment. Client code is expected to handle reconnects.
1474 : : */
4388 rhaas@postgresql.org 1475 [ + + - + ]: 437 : if (am_cascading_walsender && !RecoveryInProgress())
1476 : : {
4388 rhaas@postgresql.org 1477 [ # # ]:UBC 0 : ereport(LOG,
1478 : : (errmsg("terminating walsender process after promotion")));
3205 andres@anarazel.de 1479 : 0 : got_STOPPING = true;
1480 : : }
1481 : :
1482 : : /*
1483 : : * Create our decoding context, making it start at the previously ack'ed
1484 : : * position.
1485 : : *
1486 : : * Do this before sending a CopyBothResponse message, so that any errors
1487 : : * are reported early.
1488 : : */
2783 alvherre@alvh.no-ip. 1489 :CBC 436 : logical_decoding_ctx =
1490 : 437 : CreateDecodingContext(cmd->startpoint, cmd->options, false,
1770 tmunro@postgresql.or 1491 : 437 : XL_ROUTINE(.page_read = logical_read_xlog_page,
1492 : : .segment_open = WalSndSegmentOpen,
1493 : : .segment_close = wal_segment_close),
1494 : : WalSndPrepareWrite, WalSndWriteData,
1495 : : WalSndUpdateProgress);
2132 alvherre@alvh.no-ip. 1496 : 436 : xlogreader = logical_decoding_ctx->reader;
1497 : :
4388 rhaas@postgresql.org 1498 : 436 : WalSndSetState(WALSNDSTATE_CATCHUP);
1499 : :
1500 : : /* Send a CopyBothResponse message, and start streaming */
936 nathan@postgresql.or 1501 : 436 : pq_beginmessage(&buf, PqMsg_CopyBothResponse);
4388 rhaas@postgresql.org 1502 : 436 : pq_sendbyte(&buf, 0);
3077 andres@anarazel.de 1503 : 436 : pq_sendint16(&buf, 0);
4388 rhaas@postgresql.org 1504 : 436 : pq_endmessage(&buf);
1505 : 436 : pq_flush();
1506 : :
1507 : : /* Start reading WAL from the oldest required WAL. */
2240 heikki.linnakangas@i 1508 : 436 : XLogBeginRead(logical_decoding_ctx->reader,
1509 : 436 : MyReplicationSlot->data.restart_lsn);
1510 : :
1511 : : /*
1512 : : * Report the location after which we'll send out further commits as the
1513 : : * current sentPtr.
1514 : : */
4388 rhaas@postgresql.org 1515 : 436 : sentPtr = MyReplicationSlot->data.confirmed_flush;
 : : /*
 : : * NOTE(review): the local sentPtr starts at confirmed_flush while the
 : : * shared MyWalSnd->sentPtr below starts at restart_lsn — presumably
 : : * because decoding re-reads WAL from restart_lsn but only transactions
 : : * committing after confirmed_flush are transmitted; confirm against
 : : * XLogSendLogical before relying on this.
 : : */
1516 : :
1517 : : /* Also update the sent position status in shared memory */
3180 alvherre@alvh.no-ip. 1518 [ - + ]: 436 : SpinLockAcquire(&MyWalSnd->mutex);
1519 : 436 : MyWalSnd->sentPtr = MyReplicationSlot->data.restart_lsn;
1520 : 436 : SpinLockRelease(&MyWalSnd->mutex);
1521 : :
4388 rhaas@postgresql.org 1522 : 436 : replication_active = true;
1523 : :
1524 : 436 : SyncRepInitConfig();
1525 : :
1526 : : /* Main loop of walsender */
1527 : 436 : WalSndLoop(XLogSendLogical);
1528 : :
 : : /* Streaming ended (client left COPY mode); release decoding resources. */
1529 : 201 : FreeDecodingContext(logical_decoding_ctx);
1530 : 201 : ReplicationSlotRelease();
1531 : :
1532 : 201 : replication_active = false;
3205 andres@anarazel.de 1533 [ - + ]: 201 : if (got_STOPPING)
4388 rhaas@postgresql.org 1534 :UBC 0 : proc_exit(0);
4388 rhaas@postgresql.org 1535 :CBC 201 : WalSndSetState(WALSNDSTATE_STARTUP);
1536 : :
1537 : : /* Get out of COPY mode (CommandComplete). */
2204 alvherre@alvh.no-ip. 1538 : 201 : SetQueryCompletion(&qc, CMDTAG_COPY, 0);
1539 : 201 : EndCommand(&qc, DestRemote, false);
4388 rhaas@postgresql.org 1540 : 201 : }
1541 : :
1542 : : /*
1543 : : * LogicalDecodingContext 'prepare_write' callback.
1544 : : *
1545 : : * Prepare a write into a StringInfo.
1546 : : *
1547 : : * Don't do anything lasting in here, it's quite possible that nothing will be done
1548 : : * with the data.
1549 : : */
1550 : : static void
1551 : 185235 : WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid, bool last_write)
1552 : : {
1553 : : /* can't have sync rep confused by sending the same LSN several times */
1554 [ + + ]: 185235 : if (!last_write)
1555 : 427 : lsn = InvalidXLogRecPtr;
1556 : :
1557 : 185235 : resetStringInfo(ctx->out);
1558 : :
 : : /*
 : : * Message layout: 1-byte msgtype followed by dataStart, walEnd and
 : : * sendtime as int64s. WalSndWriteData later patches sendtime in place
 : : * at offset 1 + sizeof(int64) + sizeof(int64), so these field sizes
 : : * must stay in sync with that memcpy.
 : : */
221 nathan@postgresql.or 1559 :GNC 185235 : pq_sendbyte(ctx->out, PqReplMsg_WALData);
4388 rhaas@postgresql.org 1560 :CBC 185235 : pq_sendint64(ctx->out, lsn); /* dataStart */
1561 : 185235 : pq_sendint64(ctx->out, lsn); /* walEnd */
1562 : :
1563 : : /*
1564 : : * Fill out the sendtime later, just as it's done in XLogSendPhysical, but
1565 : : * reserve space here.
1566 : : */
4331 bruce@momjian.us 1567 : 185235 : pq_sendint64(ctx->out, 0); /* sendtime */
4388 rhaas@postgresql.org 1568 : 185235 : }
1569 : :
1570 : : /*
1571 : : * LogicalDecodingContext 'write' callback.
1572 : : *
1573 : : * Actually write out data previously prepared by WalSndPrepareWrite out to
1574 : : * the network. Take as long as needed, but process replies from the other
1575 : : * side and check timeouts during that.
1576 : : */
1577 : : static void
1578 : 185235 : WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
1579 : : bool last_write)
1580 : : {
1581 : : TimestampTz now;
1582 : :
1583 : : /*
1584 : : * Fill the send timestamp last, so that it is taken as late as possible.
1585 : : * This is somewhat ugly, but the protocol is set as it's already used for
1586 : : * several releases by streaming physical replication.
1587 : : */
1588 : 185235 : resetStringInfo(&tmpbuf);
3013 andrew@dunslane.net 1589 : 185235 : now = GetCurrentTimestamp();
1590 : 185235 : pq_sendint64(&tmpbuf, now);
 : : /*
 : : * The offset skips the 1-byte msgtype plus the dataStart and walEnd
 : : * int64s written by WalSndPrepareWrite; keep in sync with that layout.
 : : */
4388 rhaas@postgresql.org 1591 : 185235 : memcpy(&ctx->out->data[1 + sizeof(int64) + sizeof(int64)],
1592 : 185235 : tmpbuf.data, sizeof(int64));
1593 : :
1594 : : /* output previously gathered data in a CopyData packet */
235 nathan@postgresql.or 1595 :GNC 185235 : pq_putmessage_noblock(PqMsg_CopyData, ctx->out->data, ctx->out->len);
1596 : :
3013 andrew@dunslane.net 1597 [ - + ]:CBC 185235 : CHECK_FOR_INTERRUPTS();
1598 : :
1599 : : /* Try to flush pending output to the client */
4388 rhaas@postgresql.org 1600 [ + + ]: 185235 : if (pq_flush_if_writable() != 0)
1601 : 4 : WalSndShutdown();
1602 : :
1603 : : /* Try taking fast path unless we get too close to walsender timeout. */
3013 andrew@dunslane.net 1604 [ + - ]: 185231 : if (now < TimestampTzPlusMilliseconds(last_reply_timestamp,
1605 : 185231 : wal_sender_timeout / 2) &&
1606 [ + + ]: 185231 : !pq_is_send_pending())
1607 : : {
1608 : 184998 : return;
1609 : : }
1610 : :
1611 : : /* If we have pending write here, go to slow path */
1446 akapila@postgresql.o 1612 : 233 : ProcessPendingWrites();
1613 : : }
1614 : :
1615 : : /*
1616 : : * Handle configuration reload.
1617 : : *
1618 : : * Process the pending configuration file reload and reinitializes synchronous
1619 : : * replication settings. Also releases any waiters that may now be satisfied due
1620 : : * to changes in synchronous replication requirements.
1621 : : */
1622 : : static void
40 fujii@postgresql.org 1623 :GNC 822057 : WalSndHandleConfigReload(void)
1624 : : {
 : : /* Fast exit unless the SIGHUP handler has flagged a pending reload. */
1625 [ + + ]: 822057 : if (!ConfigReloadPending)
1626 : 822021 : return;
1627 : :
1628 : 36 : ConfigReloadPending = false;
1629 : 36 : ProcessConfigFile(PGC_SIGHUP);
1630 : 36 : SyncRepInitConfig();
1631 : :
1632 : : /*
1633 : : * Recheck and release any now-satisfied waiters after config reload
1634 : : * changes synchronous replication requirements (e.g., reducing the number
1635 : : * of sync standbys or changing the standby names).
1636 : : */
1637 [ + + ]: 36 : if (!am_cascading_walsender)
1638 : 33 : SyncRepReleaseWaiters();
1639 : : }
1640 : :
1641 : : /*
1642 : : * Wait until there is no pending write. Also process replies from the other
1643 : : * side and check timeouts during that.
1644 : : */
1645 : : static void
1446 akapila@postgresql.o 1646 :CBC 233 : ProcessPendingWrites(void)
1647 : : {
 : : /* Loop until libpq's send buffer has been fully flushed to the client. */
1648 : : for (;;)
4388 rhaas@postgresql.org 1649 : 291 : {
1650 : : long sleeptime;
1651 : :
1652 : : /* Check for input from the client */
3013 andrew@dunslane.net 1653 : 524 : ProcessRepliesIfAny();
1654 : :
1655 : : /* die if timeout was reached */
2753 noah@leadboat.com 1656 : 524 : WalSndCheckTimeOut();
1657 : :
1658 : : /* Send keepalive if the time has come */
1659 : 524 : WalSndKeepaliveIfNecessary();
1660 : :
3013 andrew@dunslane.net 1661 [ + + ]: 524 : if (!pq_is_send_pending())
1662 : 233 : break;
1663 : :
2753 noah@leadboat.com 1664 : 291 : sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
1665 : :
1666 : : /* Sleep until something happens or we time out */
1840 tmunro@postgresql.or 1667 : 291 : WalSndWait(WL_SOCKET_WRITEABLE | WL_SOCKET_READABLE, sleeptime,
1668 : : WAIT_EVENT_WAL_SENDER_WRITE_DATA);
1669 : :
1670 : : /* Clear any already-pending wakeups */
4075 andres@anarazel.de 1671 : 291 : ResetLatch(MyLatch);
1672 : :
1673 [ - + ]: 291 : CHECK_FOR_INTERRUPTS();
1674 : :
1675 : : /* Process any requests or signals received recently */
40 fujii@postgresql.org 1676 :GNC 291 : WalSndHandleConfigReload();
1677 : :
1678 : : /* Try to flush pending output to the client */
4388 rhaas@postgresql.org 1679 [ - + ]:CBC 291 : if (pq_flush_if_writable() != 0)
4388 rhaas@postgresql.org 1680 :UBC 0 : WalSndShutdown();
1681 : : }
1682 : :
1683 : : /* reactivate latch so WalSndLoop knows to continue */
4075 andres@anarazel.de 1684 :CBC 233 : SetLatch(MyLatch);
4388 rhaas@postgresql.org 1685 : 233 : }
1686 : :
1687 : : /*
1688 : : * LogicalDecodingContext 'update_progress' callback.
1689 : : *
1690 : : * Write the current position to the lag tracker (see XLogSendPhysical).
1691 : : *
1692 : : * When skipping empty transactions, send a keepalive message if necessary.
1693 : : */
1694 : : static void
1446 akapila@postgresql.o 1695 : 2774 : WalSndUpdateProgress(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
1696 : : bool skipped_xact)
1697 : : {
 : : /* Static: the lag-tracking rate limit persists across calls. */
1698 : : static TimestampTz sendTime = 0;
3229 simon@2ndQuadrant.co 1699 : 2774 : TimestampTz now = GetCurrentTimestamp();
1404 akapila@postgresql.o 1700 : 2774 : bool pending_writes = false;
1701 : 2774 : bool end_xact = ctx->end_xact;
1702 : :
1703 : : /*
1704 : : * Track lag no more than once per WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS to
1705 : : * avoid flooding the lag tracker when we commit frequently.
1706 : : *
1707 : : * We don't have a mechanism to get the ack for any LSN other than end
1708 : : * xact LSN from the downstream. So, we track lag only for end of
1709 : : * transaction LSN.
1710 : : */
1711 : : #define WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS 1000
1712 [ + + + + ]: 2774 : if (end_xact && TimestampDifferenceExceeds(sendTime, now,
1713 : : WALSND_LOGICAL_LAG_TRACK_INTERVAL_MS))
1714 : : {
1446 1715 : 287 : LagTrackerWrite(lsn, now);
1716 : 287 : sendTime = now;
1717 : : }
1718 : :
1719 : : /*
1720 : : * When skipping empty transactions in synchronous replication, we send a
1721 : : * keepalive message to avoid delaying such transactions.
1722 : : *
1723 : : * It is okay to check sync_standbys_status without lock here as in the
1724 : : * worst case we will just send an extra keepalive message when it is
1725 : : * really not required.
1726 : : */
1727 [ + + ]: 2774 : if (skipped_xact &&
1728 [ + - + - ]: 633 : SyncRepRequested() &&
338 michael@paquier.xyz 1729 [ - + ]: 633 : (((volatile WalSndCtlData *) WalSndCtl)->sync_standbys_status & SYNC_STANDBY_DEFINED))
1730 : : {
1446 akapila@postgresql.o 1731 :UBC 0 : WalSndKeepalive(false, lsn);
1732 : :
1733 : : /* Try to flush pending output to the client */
1734 [ # # ]: 0 : if (pq_flush_if_writable() != 0)
1735 : 0 : WalSndShutdown();
1736 : :
1737 : : /* If we have pending write here, make sure it's actually flushed */
1738 [ # # ]: 0 : if (pq_is_send_pending())
1404 1739 : 0 : pending_writes = true;
1740 : : }
1741 : :
1742 : : /*
1743 : : * Process pending writes if any or try to send a keepalive if required.
1744 : : * We don't need to try sending keep alive messages at the transaction end
1745 : : * as that will be done at a later point in time. This is required only
1746 : : * for large transactions where we don't send any changes to the
1747 : : * downstream and the receiver can timeout due to that.
1748 : : */
1404 akapila@postgresql.o 1749 [ + - + + ]:CBC 2774 : if (pending_writes || (!end_xact &&
1750 [ - + ]: 1568 : now >= TimestampTzPlusMilliseconds(last_reply_timestamp,
1751 : : wal_sender_timeout / 2)))
1404 akapila@postgresql.o 1752 :UBC 0 : ProcessPendingWrites();
3229 simon@2ndQuadrant.co 1753 :CBC 2774 : }
1754 : :
1755 : : /*
1756 : : * Wake up the logical walsender processes with logical failover slots if the
1757 : : * currently acquired physical slot is specified in synchronized_standby_slots GUC.
1758 : : */
1759 : : void
737 akapila@postgresql.o 1760 : 37701 : PhysicalWakeupLogicalWalSnd(void)
1761 : : {
1762 [ + - - + ]: 37701 : Assert(MyReplicationSlot && SlotIsPhysical(MyReplicationSlot));
1763 : :
1764 : : /*
1765 : : * If we are running in a standby, there is no need to wake up walsenders.
1766 : : * This is because we do not support syncing slots to cascading standbys,
1767 : : * so, there are no walsenders waiting for standbys to catch up.
1768 : : */
1769 [ + + ]: 37701 : if (RecoveryInProgress())
1770 : 53 : return;
1771 : :
 : : /* Only broadcast if this slot appears in synchronized_standby_slots. */
622 1772 [ + + ]: 37648 : if (SlotExistsInSyncStandbySlots(NameStr(MyReplicationSlot->data.name)))
737 1773 : 7 : ConditionVariableBroadcast(&WalSndCtl->wal_confirm_rcv_cv);
1774 : : }
1775 : :
1776 : : /*
1777 : : * Returns true if not all standbys have caught up to the flushed position
1778 : : * (flushed_lsn) when the current acquired slot is a logical failover
1779 : : * slot and we are streaming; otherwise, returns false.
1780 : : *
1781 : : * If returning true, the function sets the appropriate wait event in
1782 : : * wait_event; otherwise, wait_event is set to 0.
1783 : : */
1784 : : static bool
1785 : 22517 : NeedToWaitForStandbys(XLogRecPtr flushed_lsn, uint32 *wait_event)
1786 : : {
 : : /* After a shutdown request, a slot that cannot catch up is fatal. */
1787 [ + + ]: 22517 : int elevel = got_STOPPING ? ERROR : WARNING;
1788 : : bool failover_slot;
1789 : :
1790 [ + + + + ]: 22517 : failover_slot = (replication_active && MyReplicationSlot->data.failover);
1791 : :
1792 : : /*
1793 : : * Note that after receiving the shutdown signal, an ERROR is reported if
1794 : : * any slots are dropped, invalidated, or inactive. This measure is taken
1795 : : * to prevent the walsender from waiting indefinitely.
1796 : : */
1797 [ + + + + ]: 22517 : if (failover_slot && !StandbySlotsHaveCaughtup(flushed_lsn, elevel))
1798 : : {
1799 : 8 : *wait_event = WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION;
1800 : 8 : return true;
1801 : : }
1802 : :
1803 : 22509 : *wait_event = 0;
1804 : 22509 : return false;
1805 : : }
1806 : :
1807 : : /*
1808 : : * Returns true if we need to wait for WALs to be flushed to disk, or if not
1809 : : * all standbys have caught up to the flushed position (flushed_lsn) when the
1810 : : * current acquired slot is a logical failover slot and we are
1811 : : * streaming; otherwise, returns false.
1812 : : *
1813 : : * If returning true, the function sets the appropriate wait event in
1814 : : * wait_event; otherwise, wait_event is set to 0.
1815 : : */
1816 : : static bool
1817 : 30082 : NeedToWaitForWal(XLogRecPtr target_lsn, XLogRecPtr flushed_lsn,
1818 : : uint32 *wait_event)
1819 : : {
1820 : : /* Check if we need to wait for WALs to be flushed to disk */
1821 [ + + ]: 30082 : if (target_lsn > flushed_lsn)
1822 : : {
1823 : 13785 : *wait_event = WAIT_EVENT_WAL_SENDER_WAIT_FOR_WAL;
1824 : 13785 : return true;
1825 : : }
1826 : :
 : : /* Otherwise delegate; NeedToWaitForStandbys also fills in wait_event. */
1827 : : /* Check if the standby slots have caught up to the flushed position */
1828 : 16297 : return NeedToWaitForStandbys(flushed_lsn, wait_event);
1829 : : }
1830 : :
1831 : : /*
1832 : : * Wait till WAL < loc is flushed to disk so it can be safely sent to client.
1833 : : *
1834 : : * If the walsender holds a logical failover slot, we also wait for all the
1835 : : * specified streaming replication standby servers to confirm receipt of WAL
1836 : : * up to RecentFlushPtr. It is beneficial to wait here for the confirmation
1837 : : * up to RecentFlushPtr rather than waiting before transmitting each change
1838 : : * to logical subscribers, which is already covered by RecentFlushPtr.
1839 : : *
1840 : : * Returns end LSN of flushed WAL. Normally this will be >= loc, but if we
1841 : : * detect a shutdown request (either from postmaster or client) we will return
1842 : : * early, so caller must always check.
1843 : : */
1844 : : static XLogRecPtr
4388 rhaas@postgresql.org 1845 : 22882 : WalSndWaitForWal(XLogRecPtr loc)
1846 : : {
1847 : : int wakeEvents;
737 akapila@postgresql.o 1848 : 22882 : uint32 wait_event = 0;
 : : /* Static: cached flush position survives across calls, enabling the
 : : * fast path below without touching shared memory. */
1849 : : static XLogRecPtr RecentFlushPtr = InvalidXLogRecPtr;
341 michael@paquier.xyz 1850 : 22882 : TimestampTz last_flush = 0;
1851 : :
1852 : : /*
1853 : : * Fast path to avoid acquiring the spinlock in case we already know we
1854 : : * have enough WAL available and all the standby servers have confirmed
1855 : : * receipt of WAL up to RecentFlushPtr. This is particularly interesting
1856 : : * if we're far behind.
1857 : : */
129 alvherre@kurilemu.de 1858 [ + + ]:GNC 22882 : if (XLogRecPtrIsValid(RecentFlushPtr) &&
737 akapila@postgresql.o 1859 [ + + ]:CBC 22284 : !NeedToWaitForWal(loc, RecentFlushPtr, &wait_event))
4388 rhaas@postgresql.org 1860 : 14396 : return RecentFlushPtr;
1861 : :
1862 : : /*
1863 : : * Within the loop, we wait for the necessary WALs to be flushed to disk
1864 : : * first, followed by waiting for standbys to catch up if there are enough
1865 : : * WALs (see NeedToWaitForWal()) or upon receiving the shutdown signal.
1866 : : */
1867 : : for (;;)
1868 : 5740 : {
737 akapila@postgresql.o 1869 : 14226 : bool wait_for_standby_at_stop = false;
1870 : : long sleeptime;
1871 : : TimestampTz now;
1872 : :
1873 : : /* Clear any already-pending wakeups */
4075 andres@anarazel.de 1874 : 14226 : ResetLatch(MyLatch);
1875 : :
1876 [ + + ]: 14226 : CHECK_FOR_INTERRUPTS();
1877 : :
1878 : : /* Process any requests or signals received recently */
40 fujii@postgresql.org 1879 :GNC 14219 : WalSndHandleConfigReload();
1880 : :
1881 : : /* Check for input from the client */
4388 rhaas@postgresql.org 1882 :CBC 14219 : ProcessRepliesIfAny();
1883 : :
1884 : : /*
1885 : : * If we're shutting down, trigger pending WAL to be written out,
1886 : : * otherwise we'd possibly end up waiting for WAL that never gets
1887 : : * written, because walwriter has shut down already.
1888 : : */
9 fujii@postgresql.org 1889 [ + + + + ]: 14018 : if (got_STOPPING && !RecoveryInProgress())
1890 : 6216 : XLogFlush(GetXLogInsertRecPtr());
1891 : :
1892 : : /*
1893 : : * To avoid the scenario where standbys need to catch up to a newer
1894 : : * WAL location in each iteration, we update our idea of the currently
1895 : : * flushed position only if we are not waiting for standbys to catch
1896 : : * up.
1897 : : */
737 akapila@postgresql.o 1898 [ + + ]: 14018 : if (wait_event != WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION)
1899 : : {
1900 [ + + ]: 14010 : if (!RecoveryInProgress())
1901 : 13846 : RecentFlushPtr = GetFlushRecPtr(NULL);
1902 : : else
1903 : 164 : RecentFlushPtr = GetXLogReplayRecPtr(NULL);
1904 : : }
1905 : :
1906 : : /*
1907 : : * If postmaster asked us to stop and the standby slots have caught up
1908 : : * to the flushed position, don't wait anymore.
1909 : : *
1910 : : * It's important to do this check after the recomputation of
1911 : : * RecentFlushPtr, so we can send all remaining data before shutting
1912 : : * down.
1913 : : */
3205 andres@anarazel.de 1914 [ + + ]: 14018 : if (got_STOPPING)
1915 : : {
737 akapila@postgresql.o 1916 [ - + ]: 6220 : if (NeedToWaitForStandbys(RecentFlushPtr, &wait_event))
737 akapila@postgresql.o 1917 :UBC 0 : wait_for_standby_at_stop = true;
1918 : : else
737 akapila@postgresql.o 1919 :CBC 6220 : break;
1920 : : }
1921 : :
1922 : : /*
1923 : : * We only send regular messages to the client for full decoded
1924 : : * transactions, but a synchronous replication and walsender shutdown
1925 : : * possibly are waiting for a later location. So, before sleeping, we
1926 : : * send a ping containing the flush location. If the receiver is
1927 : : * otherwise idle, this keepalive will trigger a reply. Processing the
1928 : : * reply will update these MyWalSnd locations.
1929 : : */
4233 andres@anarazel.de 1930 [ + + ]: 7798 : if (MyWalSnd->flush < sentPtr &&
1931 [ + + ]: 2536 : MyWalSnd->write < sentPtr &&
1932 [ + - ]: 1931 : !waiting_for_ping_response)
1446 akapila@postgresql.o 1933 : 1931 : WalSndKeepalive(false, InvalidXLogRecPtr);
1934 : :
1935 : : /*
1936 : : * Exit the loop if already caught up and doesn't need to wait for
1937 : : * standby slots.
1938 : : */
737 1939 [ + - ]: 7798 : if (!wait_for_standby_at_stop &&
1940 [ + + ]: 7798 : !NeedToWaitForWal(loc, RecentFlushPtr, &wait_event))
4388 rhaas@postgresql.org 1941 : 1893 : break;
1942 : :
1943 : : /*
1944 : : * Waiting for new WAL or waiting for standbys to catch up. Since we
1945 : : * need to wait, we're now caught up.
1946 : : */
1947 : 5905 : WalSndCaughtUp = true;
1948 : :
1949 : : /*
1950 : : * Try to flush any pending output to the client.
1951 : : */
1952 [ - + ]: 5905 : if (pq_flush_if_writable() != 0)
4388 rhaas@postgresql.org 1953 :UBC 0 : WalSndShutdown();
1954 : :
1955 : : /*
1956 : : * If we have received CopyDone from the client, sent CopyDone
1957 : : * ourselves, and the output buffer is empty, it's time to exit
1958 : : * streaming, so fail the current WAL fetch request.
1959 : : */
3180 tgl@sss.pgh.pa.us 1960 [ + + + - ]:CBC 5905 : if (streamingDoneReceiving && streamingDoneSending &&
1961 [ + - ]: 165 : !pq_is_send_pending())
1962 : 165 : break;
1963 : :
1964 : : /* die if timeout was reached */
2753 noah@leadboat.com 1965 : 5740 : WalSndCheckTimeOut();
1966 : :
1967 : : /* Send keepalive if the time has come */
1968 : 5740 : WalSndKeepaliveIfNecessary();
1969 : :
1970 : : /*
1971 : : * Sleep until something happens or we time out. Also wait for the
1972 : : * socket becoming writable, if there's still pending output.
1973 : : * Otherwise we might sit on sendable output data while waiting for
1974 : : * new WAL to be generated. (But if we have nothing to send, we don't
1975 : : * want to wake on socket-writable.)
1976 : : */
341 michael@paquier.xyz 1977 : 5740 : now = GetCurrentTimestamp();
1978 : 5740 : sleeptime = WalSndComputeSleeptime(now);
1979 : :
1840 tmunro@postgresql.or 1980 : 5740 : wakeEvents = WL_SOCKET_READABLE;
1981 : :
4388 rhaas@postgresql.org 1982 [ - + ]: 5740 : if (pq_is_send_pending())
4388 rhaas@postgresql.org 1983 :UBC 0 : wakeEvents |= WL_SOCKET_WRITEABLE;
1984 : :
737 akapila@postgresql.o 1985 [ - + ]:CBC 5740 : Assert(wait_event != 0);
1986 : :
1987 : : /* Report IO statistics, if needed */
341 michael@paquier.xyz 1988 [ + + ]: 5740 : if (TimestampDifferenceExceeds(last_flush, now,
1989 : : WALSENDER_STATS_FLUSH_INTERVAL))
1990 : : {
1991 : 1615 : pgstat_flush_io(false);
1992 : 1615 : (void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
1993 : 1615 : last_flush = now;
1994 : : }
1995 : :
737 akapila@postgresql.o 1996 : 5740 : WalSndWait(wakeEvents, sleeptime, wait_event);
1997 : : }
1998 : :
1999 : : /* reactivate latch so WalSndLoop knows to continue */
4075 andres@anarazel.de 2000 : 8278 : SetLatch(MyLatch);
4388 rhaas@postgresql.org 2001 : 8278 : return RecentFlushPtr;
2002 : : }
2003 : :
2004 : : /*
2005 : : * Execute an incoming replication command.
2006 : : *
2007 : : * Returns true if the cmd_string was recognized as WalSender command, false
2008 : : * if not.
2009 : : */
2010 : : bool
4909 heikki.linnakangas@i 2011 : 5656 : exec_replication_command(const char *cmd_string)
2012 : : {
2013 : : yyscan_t scanner;
2014 : : int parse_rc;
2015 : : Node *cmd_node;
2016 : : const char *cmdtag;
328 tgl@sss.pgh.pa.us 2017 : 5656 : MemoryContext old_context = CurrentMemoryContext;
2018 : :
2019 : : /* We save and re-use the cmd_context across calls */
2020 : : static MemoryContext cmd_context = NULL;
2021 : :
2022 : : /*
2023 : : * If WAL sender has been told that shutdown is getting close, switch its
2024 : : * status accordingly to handle the next replication commands correctly.
2025 : : */
3205 andres@anarazel.de 2026 [ - + ]: 5656 : if (got_STOPPING)
3205 andres@anarazel.de 2027 :UBC 0 : WalSndSetState(WALSNDSTATE_STOPPING);
2028 : :
2029 : : /*
2030 : : * Throw error if in stopping mode. We need prevent commands that could
2031 : : * generate WAL while the shutdown checkpoint is being written. To be
2032 : : * safe, we just prohibit all new commands.
2033 : : */
3205 andres@anarazel.de 2034 [ - + ]:CBC 5656 : if (MyWalSnd->state == WALSNDSTATE_STOPPING)
3205 andres@anarazel.de 2035 [ # # ]:UBC 0 : ereport(ERROR,
2036 : : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2037 : : errmsg("cannot execute new commands while WAL sender is in stopping mode")));
2038 : :
2039 : : /*
2040 : : * CREATE_REPLICATION_SLOT ... LOGICAL exports a snapshot until the next
2041 : : * command arrives. Clean up the old stuff if there's anything.
2042 : : */
4388 rhaas@postgresql.org 2043 :CBC 5656 : SnapBuildClearExportedSnapshot();
2044 : :
4909 heikki.linnakangas@i 2045 [ - + ]: 5656 : CHECK_FOR_INTERRUPTS();
2046 : :
2047 : : /*
2048 : : * Prepare to parse and execute the command.
2049 : : *
2050 : : * Because replication command execution can involve beginning or ending
2051 : : * transactions, we need a working context that will survive that, so we
2052 : : * make it a child of TopMemoryContext. That in turn creates a hazard of
2053 : : * long-lived memory leaks if we lose track of the working context. We
2054 : : * deal with that by creating it only once per walsender, and resetting it
2055 : : * for each new command. (Normally this reset is a no-op, but if the
2056 : : * prior exec_replication_command call failed with an error, it won't be.)
2057 : : *
2058 : : * This is subtler than it looks. The transactions we manage can extend
2059 : : * across replication commands, indeed SnapBuildClearExportedSnapshot
2060 : : * might have just ended one. Because transaction exit will revert to the
2061 : : * memory context that was current at transaction start, we need to be
2062 : : * sure that that context is still valid. That motivates re-using the
2063 : : * same cmd_context rather than making a new one each time.
2064 : : */
328 tgl@sss.pgh.pa.us 2065 [ + + ]: 5656 : if (cmd_context == NULL)
2066 : 1244 : cmd_context = AllocSetContextCreate(TopMemoryContext,
2067 : : "Replication command context",
2068 : : ALLOCSET_DEFAULT_SIZES);
2069 : : else
2070 : 4412 : MemoryContextReset(cmd_context);
2071 : :
2072 : 5656 : MemoryContextSwitchTo(cmd_context);
2073 : :
468 peter@eisentraut.org 2074 : 5656 : replication_scanner_init(cmd_string, &scanner);
2075 : :
2076 : : /*
2077 : : * Is it a WalSender command?
2078 : : */
2079 [ + + ]: 5656 : if (!replication_scanner_is_replication_command(scanner))
2080 : : {
2081 : : /* Nope; clean up and get out. */
2082 : 2521 : replication_scanner_finish(scanner);
2083 : :
2008 tgl@sss.pgh.pa.us 2084 : 2521 : MemoryContextSwitchTo(old_context);
328 2085 : 2521 : MemoryContextReset(cmd_context);
2086 : :
2087 : : /* XXX this is a pretty random place to make this check */
1511 2088 [ - + ]: 2521 : if (MyDatabaseId == InvalidOid)
1511 tgl@sss.pgh.pa.us 2089 [ # # ]:UBC 0 : ereport(ERROR,
2090 : : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2091 : : errmsg("cannot execute SQL commands in WAL sender for physical replication")));
2092 : :
2093 : : /* Tell the caller that this wasn't a WalSender command. */
2008 tgl@sss.pgh.pa.us 2094 :CBC 2521 : return false;
2095 : : }
2096 : :
2097 : : /*
2098 : : * Looks like a WalSender command, so parse it.
2099 : : */
415 peter@eisentraut.org 2100 : 3135 : parse_rc = replication_yyparse(&cmd_node, scanner);
1511 tgl@sss.pgh.pa.us 2101 [ - + ]: 3135 : if (parse_rc != 0)
1511 tgl@sss.pgh.pa.us 2102 [ # # ]:UBC 0 : ereport(ERROR,
2103 : : (errcode(ERRCODE_SYNTAX_ERROR),
2104 : : errmsg_internal("replication command parser returned %d",
2105 : : parse_rc)));
468 peter@eisentraut.org 2106 :CBC 3135 : replication_scanner_finish(scanner);
2107 : :
2108 : : /*
2109 : : * Report query to various monitoring facilities. For this purpose, we
2110 : : * report replication commands just like SQL commands.
2111 : : */
2008 tgl@sss.pgh.pa.us 2112 : 3135 : debug_query_string = cmd_string;
2113 : :
2114 : 3135 : pgstat_report_activity(STATE_RUNNING, cmd_string);
2115 : :
2116 : : /*
2117 : : * Log replication command if log_replication_commands is enabled. Even
2118 : : * when it's disabled, log the command with DEBUG1 level for backward
2119 : : * compatibility.
2120 : : */
2121 [ + - + - ]: 3135 : ereport(log_replication_commands ? LOG : DEBUG1,
2122 : : (errmsg("received replication command: %s", cmd_string)));
2123 : :
2124 : : /*
2125 : : * Disallow replication commands in aborted transaction blocks.
2126 : : */
2127 [ - + ]: 3135 : if (IsAbortedTransactionBlockState())
3279 peter_e@gmx.net 2128 [ # # ]:UBC 0 : ereport(ERROR,
2129 : : (errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
2130 : : errmsg("current transaction is aborted, "
2131 : : "commands ignored until end of transaction block")));
2132 : :
3279 peter_e@gmx.net 2133 [ - + ]:CBC 3135 : CHECK_FOR_INTERRUPTS();
2134 : :
2135 : : /*
2136 : : * Allocate buffers that will be used for each outgoing and incoming
2137 : : * message. We do this just once per command to reduce palloc overhead.
2138 : : */
3308 fujii@postgresql.org 2139 : 3135 : initStringInfo(&output_message);
2140 : 3135 : initStringInfo(&reply_message);
2141 : 3135 : initStringInfo(&tmpbuf);
2142 : :
5539 magnus@hagander.net 2143 [ + + + + : 3135 : switch (cmd_node->type)
+ + + + +
+ - ]
2144 : : {
2145 : 775 : case T_IdentifySystemCmd:
2006 alvherre@alvh.no-ip. 2146 : 775 : cmdtag = "IDENTIFY_SYSTEM";
tgl@sss.pgh.pa.us 2147 : 775 : set_ps_display(cmdtag);
5539 magnus@hagander.net 2148 : 775 : IdentifySystem();
2006 alvherre@alvh.no-ip. 2149 : 775 : EndReplicationCommand(cmdtag);
5539 magnus@hagander.net 2150 : 775 : break;
2151 : :
1602 michael@paquier.xyz 2152 : 6 : case T_ReadReplicationSlotCmd:
2153 : 6 : cmdtag = "READ_REPLICATION_SLOT";
2154 : 6 : set_ps_display(cmdtag);
2155 : 6 : ReadReplicationSlot((ReadReplicationSlotCmd *) cmd_node);
2156 : 5 : EndReplicationCommand(cmdtag);
2157 : 5 : break;
2158 : :
5539 magnus@hagander.net 2159 : 194 : case T_BaseBackupCmd:
2006 alvherre@alvh.no-ip. 2160 : 194 : cmdtag = "BASE_BACKUP";
tgl@sss.pgh.pa.us 2161 : 194 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2162 : 194 : PreventInTransactionBlock(true, cmdtag);
816 rhaas@postgresql.org 2163 : 194 : SendBaseBackup((BaseBackupCmd *) cmd_node, uploaded_manifest);
2006 alvherre@alvh.no-ip. 2164 : 168 : EndReplicationCommand(cmdtag);
5530 magnus@hagander.net 2165 : 168 : break;
2166 : :
4426 rhaas@postgresql.org 2167 : 500 : case T_CreateReplicationSlotCmd:
2006 alvherre@alvh.no-ip. 2168 : 500 : cmdtag = "CREATE_REPLICATION_SLOT";
tgl@sss.pgh.pa.us 2169 : 500 : set_ps_display(cmdtag);
4426 rhaas@postgresql.org 2170 : 500 : CreateReplicationSlot((CreateReplicationSlotCmd *) cmd_node);
2006 alvherre@alvh.no-ip. 2171 : 499 : EndReplicationCommand(cmdtag);
4426 rhaas@postgresql.org 2172 : 499 : break;
2173 : :
2174 : 289 : case T_DropReplicationSlotCmd:
2006 alvherre@alvh.no-ip. 2175 : 289 : cmdtag = "DROP_REPLICATION_SLOT";
tgl@sss.pgh.pa.us 2176 : 289 : set_ps_display(cmdtag);
4426 rhaas@postgresql.org 2177 : 289 : DropReplicationSlot((DropReplicationSlotCmd *) cmd_node);
2006 alvherre@alvh.no-ip. 2178 : 287 : EndReplicationCommand(cmdtag);
4426 rhaas@postgresql.org 2179 : 287 : break;
2180 : :
776 akapila@postgresql.o 2181 : 7 : case T_AlterReplicationSlotCmd:
2182 : 7 : cmdtag = "ALTER_REPLICATION_SLOT";
2183 : 7 : set_ps_display(cmdtag);
2184 : 7 : AlterReplicationSlot((AlterReplicationSlotCmd *) cmd_node);
2185 : 5 : EndReplicationCommand(cmdtag);
2186 : 5 : break;
2187 : :
4426 rhaas@postgresql.org 2188 : 731 : case T_StartReplicationCmd:
2189 : : {
2190 : 731 : StartReplicationCmd *cmd = (StartReplicationCmd *) cmd_node;
2191 : :
2006 alvherre@alvh.no-ip. 2192 : 731 : cmdtag = "START_REPLICATION";
tgl@sss.pgh.pa.us 2193 : 731 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2194 : 731 : PreventInTransactionBlock(true, cmdtag);
2195 : :
4426 rhaas@postgresql.org 2196 [ + + ]: 731 : if (cmd->kind == REPLICATION_KIND_PHYSICAL)
2197 : 287 : StartReplication(cmd);
2198 : : else
4388 2199 : 444 : StartLogicalReplication(cmd);
2200 : :
2201 : : /* dupe, but necessary per libpqrcv_endstreaming */
1978 alvherre@alvh.no-ip. 2202 : 355 : EndReplicationCommand(cmdtag);
2203 : :
2106 michael@paquier.xyz 2204 [ - + ]: 355 : Assert(xlogreader != NULL);
4426 rhaas@postgresql.org 2205 : 355 : break;
2206 : : }
2207 : :
4840 heikki.linnakangas@i 2208 : 13 : case T_TimeLineHistoryCmd:
2006 alvherre@alvh.no-ip. 2209 : 13 : cmdtag = "TIMELINE_HISTORY";
tgl@sss.pgh.pa.us 2210 : 13 : set_ps_display(cmdtag);
alvherre@alvh.no-ip. 2211 : 13 : PreventInTransactionBlock(true, cmdtag);
4840 heikki.linnakangas@i 2212 : 13 : SendTimeLineHistory((TimeLineHistoryCmd *) cmd_node);
2006 alvherre@alvh.no-ip. 2213 : 13 : EndReplicationCommand(cmdtag);
4840 heikki.linnakangas@i 2214 : 13 : break;
2215 : :
3337 rhaas@postgresql.org 2216 : 608 : case T_VariableShowStmt:
2217 : : {
2218 : 608 : DestReceiver *dest = CreateDestReceiver(DestRemoteSimple);
2219 : 608 : VariableShowStmt *n = (VariableShowStmt *) cmd_node;
2220 : :
2006 alvherre@alvh.no-ip. 2221 : 608 : cmdtag = "SHOW";
tgl@sss.pgh.pa.us 2222 : 608 : set_ps_display(cmdtag);
2223 : :
2224 : : /* syscache access needs a transaction environment */
2526 michael@paquier.xyz 2225 : 608 : StartTransactionCommand();
3337 rhaas@postgresql.org 2226 : 608 : GetPGVariable(n->name, dest);
2526 michael@paquier.xyz 2227 : 608 : CommitTransactionCommand();
2006 alvherre@alvh.no-ip. 2228 : 608 : EndReplicationCommand(cmdtag);
2229 : : }
3337 rhaas@postgresql.org 2230 : 608 : break;
2231 : :
816 2232 : 12 : case T_UploadManifestCmd:
2233 : 12 : cmdtag = "UPLOAD_MANIFEST";
2234 : 12 : set_ps_display(cmdtag);
2235 : 12 : PreventInTransactionBlock(true, cmdtag);
2236 : 12 : UploadManifest();
2237 : 11 : EndReplicationCommand(cmdtag);
2238 : 11 : break;
2239 : :
5539 magnus@hagander.net 2240 :UBC 0 : default:
4840 heikki.linnakangas@i 2241 [ # # ]: 0 : elog(ERROR, "unrecognized replication command node tag: %u",
2242 : : cmd_node->type);
2243 : : }
2244 : :
2245 : : /*
2246 : : * Done. Revert to caller's memory context, and clean out the cmd_context
2247 : : * to recover memory right away.
2248 : : */
5539 magnus@hagander.net 2249 :CBC 2726 : MemoryContextSwitchTo(old_context);
328 tgl@sss.pgh.pa.us 2250 : 2726 : MemoryContextReset(cmd_context);
2251 : :
2252 : : /*
2253 : : * We need not update ps display or pg_stat_activity, because PostgresMain
2254 : : * will reset those to "idle". But we must reset debug_query_string to
2255 : : * ensure it doesn't become a dangling pointer.
2256 : : */
2008 2257 : 2726 : debug_query_string = NULL;
2258 : :
3279 peter_e@gmx.net 2259 : 2726 : return true;
2260 : : }
2261 : :
 2262 : : /*
 2263 : : * Process any incoming messages while streaming. Also checks if the remote
 2264 : : * end has closed the connection.
 : : *
 : : * Side effects: stamps last_processing with the current time; if at least
 : : * one reply was consumed, also refreshes last_reply_timestamp and clears
 : : * waiting_for_ping_response. Calls proc_exit(0) on unexpected EOF or when
 : : * the standby sends a Terminate message.
 2265 : : */
 2266 : : static void
5512 heikki.linnakangas@i 2267 : 822290 : ProcessRepliesIfAny(void)
 2268 : : {
 2269 : : unsigned char firstchar;
 2270 : : int maxmsglen;
 2271 : : int r;
5331 tgl@sss.pgh.pa.us 2272 : 822290 : bool received = false;
 2273 : :
2753 noah@leadboat.com 2274 : 822290 : last_processing = GetCurrentTimestamp();
 2275 : :
 2276 : : /*
 2277 : : * If we already received a CopyDone from the frontend, any subsequent
 2278 : : * message is the beginning of a new command, and should be processed in
 2279 : : * the main processing loop.
 2280 : : */
1917 jdavis@postgresql.or 2281 [ + + ]: 925756 : while (!streamingDoneReceiving)
 2282 : : {
4059 heikki.linnakangas@i 2283 : 925034 : pq_startmsgread();
 : : /* Fetch the message-type byte without blocking */
5504 simon@2ndQuadrant.co 2284 : 925034 : r = pq_getbyte_if_available(&firstchar);
 2285 [ + + ]: 925034 : if (r < 0)
 2286 : : {
 2287 : : /* unexpected error or EOF */
 2288 [ + - ]: 17 : ereport(COMMERROR,
 2289 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
 2290 : : errmsg("unexpected EOF on standby connection")));
 2291 : 17 : proc_exit(0);
 2292 : : }
 2293 [ + + ]: 925017 : if (r == 0)
 2294 : : {
 2295 : : /* no data available without blocking */
4059 heikki.linnakangas@i 2296 : 821261 : pq_endmsgread();
5464 2297 : 821261 : break;
 2298 : : }
 2299 : :
 2300 : : /* Validate message type and set packet size limit */
1782 tgl@sss.pgh.pa.us 2301 [ + + - ]: 103756 : switch (firstchar)
 2302 : : {
936 nathan@postgresql.or 2303 : 103111 : case PqMsg_CopyData:
1782 tgl@sss.pgh.pa.us 2304 : 103111 : maxmsglen = PQ_LARGE_MESSAGE_LIMIT;
 2305 : 103111 : break;
936 nathan@postgresql.or 2306 : 645 : case PqMsg_CopyDone:
 2307 : : case PqMsg_Terminate:
1782 tgl@sss.pgh.pa.us 2308 : 645 : maxmsglen = PQ_SMALL_MESSAGE_LIMIT;
 2309 : 645 : break;
1782 tgl@sss.pgh.pa.us 2310 :UBC 0 : default:
 2311 [ # # ]: 0 : ereport(FATAL,
 2312 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
 2313 : : errmsg("invalid standby message type \"%c\"",
 2314 : : firstchar)));
 2315 : : maxmsglen = 0; /* keep compiler quiet */
 2316 : : break;
 2317 : : }
 2318 : :
 2319 : : /* Read the message contents */
4059 heikki.linnakangas@i 2320 :CBC 103756 : resetStringInfo(&reply_message);
1782 tgl@sss.pgh.pa.us 2321 [ - + ]: 103756 : if (pq_getmessage(&reply_message, maxmsglen))
 2322 : : {
4059 heikki.linnakangas@i 2323 [ # # ]:UBC 0 : ereport(COMMERROR,
 2324 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
 2325 : : errmsg("unexpected EOF on standby connection")));
 2326 : 0 : proc_exit(0);
 2327 : : }
 2328 : :
 2329 : : /* ... and process it */
5504 simon@2ndQuadrant.co 2330 [ + + + - ]:CBC 103756 : switch (firstchar)
 2331 : : {
 2332 : : /*
 2333 : : * PqMsg_CopyData means a standby reply wrapped in a CopyData
 2334 : : * packet.
 2335 : : */
936 nathan@postgresql.or 2336 : 103111 : case PqMsg_CopyData:
5504 simon@2ndQuadrant.co 2337 : 103111 : ProcessStandbyMessage();
5464 heikki.linnakangas@i 2338 : 103111 : received = true;
5504 simon@2ndQuadrant.co 2339 : 103111 : break;
 2340 : :
 2341 : : /*
 2342 : : * PqMsg_CopyDone means the standby requested to finish
 2343 : : * streaming. Reply with CopyDone, if we had not sent that
 2344 : : * already.
 2345 : : */
936 nathan@postgresql.or 2346 : 355 : case PqMsg_CopyDone:
4840 heikki.linnakangas@i 2347 [ + + ]: 355 : if (!streamingDoneSending)
 2348 : : {
235 nathan@postgresql.or 2349 :GNC 345 : pq_putmessage_noblock(PqMsg_CopyDone, NULL, 0);
4840 heikki.linnakangas@i 2350 :CBC 345 : streamingDoneSending = true;
 2351 : : }
 2352 : :
 2353 : 355 : streamingDoneReceiving = true;
 2354 : 355 : received = true;
 2355 : 355 : break;
 2356 : :
 2357 : : /*
 2358 : : * PqMsg_Terminate means that the standby is closing down the
 2359 : : * socket.
 2360 : : */
936 nathan@postgresql.or 2361 : 290 : case PqMsg_Terminate:
5504 simon@2ndQuadrant.co 2362 : 290 : proc_exit(0);
 2363 : :
5504 simon@2ndQuadrant.co 2364 :UBC 0 : default:
1782 tgl@sss.pgh.pa.us 2365 : 0 : Assert(false); /* NOT REACHED */
 2366 : : }
 2367 : : }
 2368 : :
 2369 : : /*
 2370 : : * Save the last reply timestamp if we've received at least one reply.
 2371 : : */
5464 heikki.linnakangas@i 2372 [ + + ]:CBC 821983 : if (received)
 2373 : : {
2753 noah@leadboat.com 2374 : 55669 : last_reply_timestamp = last_processing;
4388 rhaas@postgresql.org 2375 : 55669 : waiting_for_ping_response = false;
 2376 : : }
5903 heikki.linnakangas@i 2377 : 821983 : }
2378 : :
 2379 : : /*
 2380 : : * Process a status update message received from standby.
 : : *
 : : * The message body is in reply_message; dispatch on its first byte.
 : : * An unrecognized message type is a protocol violation and terminates
 : : * the walsender via proc_exit(0).
 2381 : : */
 2382 : : static void
5504 simon@2ndQuadrant.co 2383 : 103111 : ProcessStandbyMessage(void)
 2384 : : {
 2385 : : char msgtype;
 2386 : :
 2387 : : /*
 2388 : : * Check message type from the first byte.
 2389 : : */
5507 rhaas@postgresql.org 2390 : 103111 : msgtype = pq_getmsgbyte(&reply_message);
 2391 : :
5504 simon@2ndQuadrant.co 2392 [ + + + - ]: 103111 : switch (msgtype)
 2393 : : {
221 nathan@postgresql.or 2394 :GNC 100266 : case PqReplMsg_StandbyStatusUpdate:
5504 simon@2ndQuadrant.co 2395 :CBC 100266 : ProcessStandbyReplyMessage();
 2396 : 100266 : break;
 2397 : :
221 nathan@postgresql.or 2398 :GNC 146 : case PqReplMsg_HotStandbyFeedback:
5504 simon@2ndQuadrant.co 2399 :CBC 146 : ProcessStandbyHSFeedbackMessage();
 2400 : 146 : break;
 2401 : :
221 nathan@postgresql.or 2402 :GNC 2699 : case PqReplMsg_PrimaryStatusRequest:
235 akapila@postgresql.o 2403 : 2699 : ProcessStandbyPSRequestMessage();
 2404 : 2699 : break;
 2405 : :
5504 simon@2ndQuadrant.co 2406 :UBC 0 : default:
 2407 [ # # ]: 0 : ereport(COMMERROR,
 2408 : : (errcode(ERRCODE_PROTOCOL_VIOLATION),
 2409 : : errmsg("unexpected message type \"%c\"", msgtype)));
 2410 : 0 : proc_exit(0);
 2411 : : }
5504 simon@2ndQuadrant.co 2412 :CBC 103111 : }
2413 : :
 2414 : : /*
 2415 : : * Remember that a walreceiver just confirmed receipt of lsn `lsn`.
 : : *
 : : * Advances MyReplicationSlot's restart_lsn under the slot spinlock; if it
 : : * actually moved, marks the slot dirty, recomputes the globally required
 : : * LSN, and calls PhysicalWakeupLogicalWalSnd().
 2416 : : */
 2417 : : static void
4426 rhaas@postgresql.org 2418 : 83484 : PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
 2419 : : {
4331 bruce@momjian.us 2420 : 83484 : bool changed = false;
3813 rhaas@postgresql.org 2421 : 83484 : ReplicationSlot *slot = MyReplicationSlot;
 2422 : :
129 alvherre@kurilemu.de 2423 [ - + ]:GNC 83484 : Assert(XLogRecPtrIsValid(lsn));
4426 rhaas@postgresql.org 2424 [ + + ]:CBC 83484 : SpinLockAcquire(&slot->mutex);
 2425 [ + + ]: 83484 : if (slot->data.restart_lsn != lsn)
 2426 : : {
 2427 : 37694 : changed = true;
 2428 : 37694 : slot->data.restart_lsn = lsn;
 2429 : : }
 2430 : 83484 : SpinLockRelease(&slot->mutex);
 2431 : :
 2432 [ + + ]: 83484 : if (changed)
 2433 : : {
 2434 : 37694 : ReplicationSlotMarkDirty();
 2435 : 37694 : ReplicationSlotsComputeRequiredLSN();
737 akapila@postgresql.o 2436 : 37694 : PhysicalWakeupLogicalWalSnd();
 2437 : : }
 2438 : :
 2439 : : /*
 2440 : : * One could argue that the slot should be saved to disk now, but that'd
 2441 : : * be energy wasted - the worst thing lost information could cause here is
 2442 : : * to give wrong information in a statistics view - we'll just potentially
 2443 : : * be more conservative in removing files.
 2444 : : */
4426 rhaas@postgresql.org 2445 : 83484 : }
2446 : :
 2447 : : /*
 2448 : : * Regular reply from standby advising of WAL locations on standby server.
 : : *
 : : * Message body (msgtype byte already consumed): write LSN, flush LSN,
 : : * apply LSN, reply timestamp, reply-requested flag. Updates the shared
 : : * WalSnd entry under its spinlock, releases syncrep waiters (unless
 : : * cascading), and advances the slot's confirmed position once a valid
 : : * flush location is reported.
 2449 : : */
 2450 : : static void
5504 simon@2ndQuadrant.co 2451 : 100266 : ProcessStandbyReplyMessage(void)
 2452 : : {
 2453 : : XLogRecPtr writePtr,
 2454 : : flushPtr,
 2455 : : applyPtr;
 2456 : : bool replyRequested;
 2457 : : TimeOffset writeLag,
 2458 : : flushLag,
 2459 : : applyLag;
 2460 : : bool clearLagTimes;
 2461 : : TimestampTz now;
 2462 : : TimestampTz replyTime;
 2463 : :
 2464 : : static bool fullyAppliedLastTime = false;
 2465 : :
 2466 : : /* the caller already consumed the msgtype byte */
4876 heikki.linnakangas@i 2467 : 100266 : writePtr = pq_getmsgint64(&reply_message);
 2468 : 100266 : flushPtr = pq_getmsgint64(&reply_message);
 2469 : 100266 : applyPtr = pq_getmsgint64(&reply_message);
2653 michael@paquier.xyz 2470 : 100266 : replyTime = pq_getmsgint64(&reply_message);
4876 heikki.linnakangas@i 2471 : 100266 : replyRequested = pq_getmsgbyte(&reply_message);
 2472 : :
1938 tgl@sss.pgh.pa.us 2473 [ + + ]: 100266 : if (message_level_is_interesting(DEBUG2))
 2474 : : {
 2475 : : char *replyTimeStr;
 2476 : :
 2477 : : /* Copy because timestamptz_to_str returns a static buffer */
2653 michael@paquier.xyz 2478 : 614 : replyTimeStr = pstrdup(timestamptz_to_str(replyTime));
 2479 : :
251 alvherre@kurilemu.de 2480 [ + - - + ]:GNC 614 : elog(DEBUG2, "write %X/%08X flush %X/%08X apply %X/%08X%s reply_time %s",
 2481 : : LSN_FORMAT_ARGS(writePtr),
 2482 : : LSN_FORMAT_ARGS(flushPtr),
 2483 : : LSN_FORMAT_ARGS(applyPtr),
 2484 : : replyRequested ? " (reply requested)" : "",
 2485 : : replyTimeStr);
 2486 : :
2653 michael@paquier.xyz 2487 :CBC 614 : pfree(replyTimeStr);
 2488 : : }
 2489 : :
 2490 : : /* See if we can compute the round-trip lag for these positions. */
3279 simon@2ndQuadrant.co 2491 : 100266 : now = GetCurrentTimestamp();
 2492 : 100266 : writeLag = LagTrackerRead(SYNC_REP_WAIT_WRITE, writePtr, now);
 2493 : 100266 : flushLag = LagTrackerRead(SYNC_REP_WAIT_FLUSH, flushPtr, now);
 2494 : 100266 : applyLag = LagTrackerRead(SYNC_REP_WAIT_APPLY, applyPtr, now);
 2495 : :
 2496 : : /*
 2497 : : * If the standby reports that it has fully replayed the WAL in two
 2498 : : * consecutive reply messages, then the second such message must result
 2499 : : * from wal_receiver_status_interval expiring on the standby. This is a
 2500 : : * convenient time to forget the lag times measured when it last
 2501 : : * wrote/flushed/applied a WAL record, to avoid displaying stale lag data
 2502 : : * until more WAL traffic arrives.
 2503 : : */
 2504 : 100266 : clearLagTimes = false;
 2505 [ + + ]: 100266 : if (applyPtr == sentPtr)
 2506 : : {
 2507 [ + + ]: 9203 : if (fullyAppliedLastTime)
 2508 : 1575 : clearLagTimes = true;
 2509 : 9203 : fullyAppliedLastTime = true;
 2510 : : }
 2511 : : else
 2512 : 91063 : fullyAppliedLastTime = false;
 2513 : :
 2514 : : /* Send a reply if the standby requested one. */
4876 heikki.linnakangas@i 2515 [ - + ]: 100266 : if (replyRequested)
1446 akapila@postgresql.o 2516 :UBC 0 : WalSndKeepalive(false, InvalidXLogRecPtr);
 2517 : :
 2518 : : /*
 2519 : : * Update shared state for this WalSender process based on reply data from
 2520 : : * standby.
 2521 : : */
 2522 : : {
3566 rhaas@postgresql.org 2523 :CBC 100266 : WalSnd *walsnd = MyWalSnd;
 2524 : :
5512 heikki.linnakangas@i 2525 [ - + ]: 100266 : SpinLockAcquire(&walsnd->mutex);
4876 2526 : 100266 : walsnd->write = writePtr;
 2527 : 100266 : walsnd->flush = flushPtr;
 2528 : 100266 : walsnd->apply = applyPtr;
 : : /* -1 means "lag unknown"; keep the old value unless clearing */
3279 simon@2ndQuadrant.co 2529 [ + + + + ]: 100266 : if (writeLag != -1 || clearLagTimes)
 2530 : 62601 : walsnd->writeLag = writeLag;
 2531 [ + + + + ]: 100266 : if (flushLag != -1 || clearLagTimes)
 2532 : 80915 : walsnd->flushLag = flushLag;
 2533 [ + + + + ]: 100266 : if (applyLag != -1 || clearLagTimes)
 2534 : 87027 : walsnd->applyLag = applyLag;
2653 michael@paquier.xyz 2535 : 100266 : walsnd->replyTime = replyTime;
5512 heikki.linnakangas@i 2536 : 100266 : SpinLockRelease(&walsnd->mutex);
 2537 : : }
 2538 : :
5353 simon@2ndQuadrant.co 2539 [ + + ]: 100266 : if (!am_cascading_walsender)
 2540 : 99967 : SyncRepReleaseWaiters();
 2541 : :
 2542 : : /*
 2543 : : * Advance our local xmin horizon when the client confirmed a flush.
 2544 : : */
129 alvherre@kurilemu.de 2545 [ + + + + ]:GNC 100266 : if (MyReplicationSlot && XLogRecPtrIsValid(flushPtr))
 2546 : : {
3869 andres@anarazel.de 2547 [ + + ]:CBC 97212 : if (SlotIsLogical(MyReplicationSlot))
4388 rhaas@postgresql.org 2548 : 13728 : LogicalConfirmReceivedLocation(flushPtr);
 2549 : : else
4426 2550 : 83484 : PhysicalConfirmReceivedLocation(flushPtr);
 2551 : : }
 2552 : 100266 : }
2553 : :
 2554 : : /* compute new replication slot xmin horizon if needed */
 : : /*
 : : * Sets the slot's data.xmin/effective_xmin and catalog_xmin/
 : : * effective_catalog_xmin under the slot spinlock. Each xmin is replaced
 : : * when the current value is invalid, the feedback value is invalid
 : : * (feedback turned off), or the feedback value is newer. MyProc->xmin is
 : : * cleared, transferring the horizon responsibility to the slot.
 : : */
 2555 : : static void
3277 simon@2ndQuadrant.co 2556 : 64 : PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin, TransactionId feedbackCatalogXmin)
 2557 : : {
4331 bruce@momjian.us 2558 : 64 : bool changed = false;
3813 rhaas@postgresql.org 2559 : 64 : ReplicationSlot *slot = MyReplicationSlot;
 2560 : :
4426 2561 [ - + ]: 64 : SpinLockAcquire(&slot->mutex);
2040 andres@anarazel.de 2562 : 64 : MyProc->xmin = InvalidTransactionId;
 2563 : :
 2564 : : /*
 2565 : : * For physical replication we don't need the interlock provided by xmin
 2566 : : * and effective_xmin since the consequences of a missed increase are
 2567 : : * limited to query cancellations, so set both at once.
 2568 : : */
4426 rhaas@postgresql.org 2569 [ + + + + ]: 64 : if (!TransactionIdIsNormal(slot->data.xmin) ||
 2570 [ + + ]: 29 : !TransactionIdIsNormal(feedbackXmin) ||
 2571 : 29 : TransactionIdPrecedes(slot->data.xmin, feedbackXmin))
 2572 : : {
 2573 : 44 : changed = true;
 2574 : 44 : slot->data.xmin = feedbackXmin;
 2575 : 44 : slot->effective_xmin = feedbackXmin;
 2576 : : }
3277 simon@2ndQuadrant.co 2577 [ + + + + ]: 64 : if (!TransactionIdIsNormal(slot->data.catalog_xmin) ||
 2578 [ + + ]: 14 : !TransactionIdIsNormal(feedbackCatalogXmin) ||
 2579 : 14 : TransactionIdPrecedes(slot->data.catalog_xmin, feedbackCatalogXmin))
 2580 : : {
 2581 : 51 : changed = true;
 2582 : 51 : slot->data.catalog_xmin = feedbackCatalogXmin;
 2583 : 51 : slot->effective_catalog_xmin = feedbackCatalogXmin;
 2584 : : }
4426 rhaas@postgresql.org 2585 : 64 : SpinLockRelease(&slot->mutex);
 2586 : :
 2587 [ + + ]: 64 : if (changed)
 2588 : : {
 2589 : 54 : ReplicationSlotMarkDirty();
4395 2590 : 54 : ReplicationSlotsComputeRequiredXmin(false);
 2591 : : }
5504 simon@2ndQuadrant.co 2592 : 64 : }
2593 : :
 2594 : : /*
 2595 : : * Check that the provided xmin/epoch are sane, that is, not in the future
 2596 : : * and not so far back as to be already wrapped around.
 2597 : : *
 2598 : : * Epoch of nextXid should be same as standby, or if the counter has
 2599 : : * wrapped, then one greater than standby.
 2600 : : *
 2601 : : * This check doesn't care about whether clog exists for these xids
 2602 : : * at all.
 : : *
 : : * Returns true if xid/epoch pass the sanity checks, false otherwise.
 2603 : : */
 2604 : : static bool
3277 2605 : 63 : TransactionIdInRecentPast(TransactionId xid, uint32 epoch)
 2606 : : {
 2607 : : FullTransactionId nextFullXid;
 2608 : : TransactionId nextXid;
 2609 : : uint32 nextEpoch;
 2610 : :
2544 tmunro@postgresql.or 2611 : 63 : nextFullXid = ReadNextFullTransactionId();
 2612 : 63 : nextXid = XidFromFullTransactionId(nextFullXid);
 2613 : 63 : nextEpoch = EpochFromFullTransactionId(nextFullXid);
 2614 : :
3277 simon@2ndQuadrant.co 2615 [ + - ]: 63 : if (xid <= nextXid)
 2616 : : {
 2617 [ - + ]: 63 : if (epoch != nextEpoch)
3277 simon@2ndQuadrant.co 2618 :UBC 0 : return false;
 2619 : : }
 2620 : : else
 2621 : : {
 2622 [ # # ]: 0 : if (epoch + 1 != nextEpoch)
 2623 : 0 : return false;
 2624 : : }
 2625 : :
3277 simon@2ndQuadrant.co 2626 [ - + ]:CBC 63 : if (!TransactionIdPrecedesOrEquals(xid, nextXid))
3224 bruce@momjian.us 2627 :UBC 0 : return false; /* epoch OK, but it's wrapped around */
 2628 : :
3277 simon@2ndQuadrant.co 2629 :CBC 63 : return true;
 2630 : : }
2631 : :
 2632 : : /*
 2633 : : * Hot Standby feedback
 : : *
 : : * Parse a hot_standby_feedback message (reply time, xmin + epoch,
 : : * catalog_xmin + epoch) and apply the horizons to our replication slot if
 : : * we have one, otherwise to MyProc->xmin. Invalid xmins (the downstream
 : : * turned feedback off) unset the horizons instead; insane xmin/epoch
 : : * combinations are silently ignored.
 2634 : : */
 2635 : : static void
5504 2636 : 146 : ProcessStandbyHSFeedbackMessage(void)
 2637 : : {
 2638 : : TransactionId feedbackXmin;
 2639 : : uint32 feedbackEpoch;
 2640 : : TransactionId feedbackCatalogXmin;
 2641 : : uint32 feedbackCatalogEpoch;
 2642 : : TimestampTz replyTime;
 2643 : :
 2644 : : /*
 2645 : : * Decipher the reply message. The caller already consumed the msgtype
 2646 : : * byte. See XLogWalRcvSendHSFeedback() in walreceiver.c for the creation
 2647 : : * of this message.
 2648 : : */
2653 michael@paquier.xyz 2649 : 146 : replyTime = pq_getmsgint64(&reply_message);
4876 heikki.linnakangas@i 2650 : 146 : feedbackXmin = pq_getmsgint(&reply_message, 4);
 2651 : 146 : feedbackEpoch = pq_getmsgint(&reply_message, 4);
3277 simon@2ndQuadrant.co 2652 : 146 : feedbackCatalogXmin = pq_getmsgint(&reply_message, 4);
 2653 : 146 : feedbackCatalogEpoch = pq_getmsgint(&reply_message, 4);
 2654 : :
1938 tgl@sss.pgh.pa.us 2655 [ + + ]: 146 : if (message_level_is_interesting(DEBUG2))
 2656 : : {
 2657 : : char *replyTimeStr;
 2658 : :
 2659 : : /* Copy because timestamptz_to_str returns a static buffer */
2653 michael@paquier.xyz 2660 : 4 : replyTimeStr = pstrdup(timestamptz_to_str(replyTime));
 2661 : :
 2662 [ + - ]: 4 : elog(DEBUG2, "hot standby feedback xmin %u epoch %u, catalog_xmin %u epoch %u reply_time %s",
 2663 : : feedbackXmin,
 2664 : : feedbackEpoch,
 2665 : : feedbackCatalogXmin,
 2666 : : feedbackCatalogEpoch,
 2667 : : replyTimeStr);
 2668 : :
 2669 : 4 : pfree(replyTimeStr);
 2670 : : }
 2671 : :
 2672 : : /*
 2673 : : * Update shared state for this WalSender process based on reply data from
 2674 : : * standby.
 2675 : : */
 2676 : : {
 2677 : 146 : WalSnd *walsnd = MyWalSnd;
 2678 : :
 2679 [ - + ]: 146 : SpinLockAcquire(&walsnd->mutex);
 2680 : 146 : walsnd->replyTime = replyTime;
 2681 : 146 : SpinLockRelease(&walsnd->mutex);
 2682 : : }
 2683 : :
 2684 : : /*
 2685 : : * Unset WalSender's xmins if the feedback message values are invalid.
 2686 : : * This happens when the downstream turned hot_standby_feedback off.
 2687 : : */
3277 simon@2ndQuadrant.co 2688 [ + + ]: 146 : if (!TransactionIdIsNormal(feedbackXmin)
 2689 [ + - ]: 103 : && !TransactionIdIsNormal(feedbackCatalogXmin))
 2690 : : {
2040 andres@anarazel.de 2691 : 103 : MyProc->xmin = InvalidTransactionId;
4426 rhaas@postgresql.org 2692 [ + + ]: 103 : if (MyReplicationSlot != NULL)
3277 simon@2ndQuadrant.co 2693 : 24 : PhysicalReplicationSlotNewXmin(feedbackXmin, feedbackCatalogXmin);
5260 tgl@sss.pgh.pa.us 2694 : 103 : return;
 2695 : : }
 2696 : :
 2697 : : /*
 2698 : : * Check that the provided xmin/epoch are sane, that is, not in the future
 2699 : : * and not so far back as to be already wrapped around. Ignore if not.
 2700 : : */
3277 simon@2ndQuadrant.co 2701 [ + - ]: 43 : if (TransactionIdIsNormal(feedbackXmin) &&
 2702 [ - + ]: 43 : !TransactionIdInRecentPast(feedbackXmin, feedbackEpoch))
3277 simon@2ndQuadrant.co 2703 :UBC 0 : return;
 2704 : :
3277 simon@2ndQuadrant.co 2705 [ + + ]:CBC 43 : if (TransactionIdIsNormal(feedbackCatalogXmin) &&
 2706 [ - + ]: 20 : !TransactionIdInRecentPast(feedbackCatalogXmin, feedbackCatalogEpoch))
3277 simon@2ndQuadrant.co 2707 :UBC 0 : return;
 2708 : :
 2709 : : /*
 2710 : : * Set the WalSender's xmin equal to the standby's requested xmin, so that
 2711 : : * the xmin will be taken into account by GetSnapshotData() /
 2712 : : * ComputeXidHorizons(). This will hold back the removal of dead rows and
 2713 : : * thereby prevent the generation of cleanup conflicts on the standby
 2714 : : * server.
 2715 : : *
 2716 : : * There is a small window for a race condition here: although we just
 2717 : : * checked that feedbackXmin precedes nextXid, the nextXid could have
 2718 : : * gotten advanced between our fetching it and applying the xmin below,
 2719 : : * perhaps far enough to make feedbackXmin wrap around. In that case the
 2720 : : * xmin we set here would be "in the future" and have no effect. No point
 2721 : : * in worrying about this since it's too late to save the desired data
 2722 : : * anyway. Assuming that the standby sends us an increasing sequence of
 2723 : : * xmins, this could only happen during the first reply cycle, else our
 2724 : : * own xmin would prevent nextXid from advancing so far.
 2725 : : *
 2726 : : * We don't bother taking the ProcArrayLock here. Setting the xmin field
 2727 : : * is assumed atomic, and there's no real need to prevent concurrent
 2728 : : * horizon determinations. (If we're moving our xmin forward, this is
 2729 : : * obviously safe, and if we're moving it backwards, well, the data is at
 2730 : : * risk already since a VACUUM could already have determined the horizon.)
 2731 : : *
 2732 : : * If we're using a replication slot we reserve the xmin via that,
 2733 : : * otherwise via the walsender's PGPROC entry. We can only track the
 2734 : : * catalog xmin separately when using a slot, so we store the least of the
 2735 : : * two provided when not using a slot.
 2736 : : *
 2737 : : * XXX: It might make sense to generalize the ephemeral slot concept and
 2738 : : * always use the slot mechanism to handle the feedback xmin.
 2739 : : */
3189 tgl@sss.pgh.pa.us 2740 [ + + ]:CBC 43 : if (MyReplicationSlot != NULL) /* XXX: persistency configurable? */
3277 simon@2ndQuadrant.co 2741 : 40 : PhysicalReplicationSlotNewXmin(feedbackXmin, feedbackCatalogXmin);
 2742 : : else
 2743 : : {
 2744 [ - + ]: 3 : if (TransactionIdIsNormal(feedbackCatalogXmin)
3277 simon@2ndQuadrant.co 2745 [ # # ]:UBC 0 : && TransactionIdPrecedes(feedbackCatalogXmin, feedbackXmin))
2040 andres@anarazel.de 2746 : 0 : MyProc->xmin = feedbackCatalogXmin;
 2747 : : else
2040 andres@anarazel.de 2748 :CBC 3 : MyProc->xmin = feedbackXmin;
 2749 : : }
 2750 : : }
2751 : :
 2752 : : /*
 2753 : : * Process the request for a primary status update message.
 : : *
 : : * Replies with a PqReplMsg_PrimaryStatusUpdate wrapped in CopyData,
 : : * carrying the current write LSN, the oldest xid in commit (also
 : : * considering prepared transactions, widened to a full xid), the next
 : : * full xid, and the current timestamp. Errors out if called in recovery.
 2754 : : */
 2755 : : static void
235 akapila@postgresql.o 2756 :GNC 2699 : ProcessStandbyPSRequestMessage(void)
 2757 : : {
 2758 : 2699 : XLogRecPtr lsn = InvalidXLogRecPtr;
 2759 : : TransactionId oldestXidInCommit;
 2760 : : TransactionId oldestGXidInCommit;
 2761 : : FullTransactionId nextFullXid;
 2762 : : FullTransactionId fullOldestXidInCommit;
 2763 : 2699 : WalSnd *walsnd = MyWalSnd;
 2764 : : TimestampTz replyTime;
 2765 : :
 2766 : : /*
 2767 : : * This shouldn't happen because we don't support getting primary status
 2768 : : * message from standby.
 2769 : : */
 2770 [ - + ]: 2699 : if (RecoveryInProgress())
235 akapila@postgresql.o 2771 [ # # ]:UNC 0 : elog(ERROR, "the primary status is unavailable during recovery");
 2772 : :
235 akapila@postgresql.o 2773 :GNC 2699 : replyTime = pq_getmsgint64(&reply_message);
 2774 : :
 2775 : : /*
 2776 : : * Update shared state for this WalSender process based on reply data from
 2777 : : * standby.
 2778 : : */
 2779 : 2699 : SpinLockAcquire(&walsnd->mutex);
 2780 : 2699 : walsnd->replyTime = replyTime;
 2781 : 2699 : SpinLockRelease(&walsnd->mutex);
 2782 : :
 2783 : : /*
 2784 : : * Consider transactions in the current database, as only these are the
 2785 : : * ones replicated.
 2786 : : */
 2787 : 2699 : oldestXidInCommit = GetOldestActiveTransactionId(true, false);
188 2788 : 2699 : oldestGXidInCommit = TwoPhaseGetOldestXidInCommit();
 2789 : :
 2790 : : /*
 2791 : : * Update the oldest xid for standby transmission if an older prepared
 2792 : : * transaction exists and is currently in commit phase.
 2793 : : */
 2794 [ + + + - ]: 5249 : if (TransactionIdIsValid(oldestGXidInCommit) &&
 2795 : 2550 : TransactionIdPrecedes(oldestGXidInCommit, oldestXidInCommit))
 2796 : 2550 : oldestXidInCommit = oldestGXidInCommit;
 2797 : :
235 2798 : 2699 : nextFullXid = ReadNextFullTransactionId();
 2799 : 2699 : fullOldestXidInCommit = FullTransactionIdFromAllowableAt(nextFullXid,
 2800 : : oldestXidInCommit);
 2801 : 2699 : lsn = GetXLogWriteRecPtr();
 2802 : :
 2803 [ + + ]: 2699 : elog(DEBUG2, "sending primary status");
 2804 : :
 2805 : : /* construct the message... */
 2806 : 2699 : resetStringInfo(&output_message);
221 nathan@postgresql.or 2807 : 2699 : pq_sendbyte(&output_message, PqReplMsg_PrimaryStatusUpdate);
235 akapila@postgresql.o 2808 : 2699 : pq_sendint64(&output_message, lsn);
 2809 : 2699 : pq_sendint64(&output_message, (int64) U64FromFullTransactionId(fullOldestXidInCommit));
 2810 : 2699 : pq_sendint64(&output_message, (int64) U64FromFullTransactionId(nextFullXid));
 2811 : 2699 : pq_sendint64(&output_message, GetCurrentTimestamp());
 2812 : :
 2813 : : /* ... and send it wrapped in CopyData */
 nathan@postgresql.or 2814 : 2699 : pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
 akapila@postgresql.o 2815 : 2699 : }
2816 : :
 2817 : : /*
 2818 : : * Compute how long send/receive loops should sleep.
 2819 : : *
 2820 : : * If wal_sender_timeout is enabled we want to wake up in time to send
 2821 : : * keepalives and to abort the connection if wal_sender_timeout has been
 2822 : : * reached.
 : : *
 : : * Returns the sleep time in milliseconds; defaults to 10 seconds when
 : : * timeout processing is not in effect.
 2823 : : */
 2824 : : static long
4388 rhaas@postgresql.org 2825 :CBC 91384 : WalSndComputeSleeptime(TimestampTz now)
 2826 : : {
3189 tgl@sss.pgh.pa.us 2827 : 91384 : long sleeptime = 10000; /* 10 s */
 2828 : :
4308 andres@anarazel.de 2829 [ + - + + ]: 91384 : if (wal_sender_timeout > 0 && last_reply_timestamp > 0)
 2830 : : {
 2831 : : TimestampTz wakeup_time;
 2832 : :
 2833 : : /*
 2834 : : * At the latest stop sleeping once wal_sender_timeout has been
 2835 : : * reached.
 2836 : : */
4388 rhaas@postgresql.org 2837 : 91356 : wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
 2838 : : wal_sender_timeout);
 2839 : :
 2840 : : /*
 2841 : : * If no ping has been sent yet, wakeup when it's time to do so.
 2842 : : * WalSndKeepaliveIfNecessary() wants to send a keepalive once half of
 2843 : : * the timeout passed without a response.
 2844 : : */
 2845 [ + + ]: 91356 : if (!waiting_for_ping_response)
 2846 : 89710 : wakeup_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
 2847 : : wal_sender_timeout / 2);
 2848 : :
 2849 : : /* Compute relative time until wakeup. */
1951 tgl@sss.pgh.pa.us 2850 : 91356 : sleeptime = TimestampDifferenceMilliseconds(now, wakeup_time);
 2851 : : }
 2852 : :
4388 rhaas@postgresql.org 2853 : 91384 : return sleeptime;
 2854 : : }
2855 : :
 2856 : : /*
 2857 : : * Check whether there have been responses by the client within
 2858 : : * wal_sender_timeout and shutdown if not. Using last_processing as the
 2859 : : * reference point avoids counting server-side stalls against the client.
 2860 : : * However, a long server-side stall can make WalSndKeepaliveIfNecessary()
 2861 : : * postdate last_processing by more than wal_sender_timeout. If that happens,
 2862 : : * the client must reply almost immediately to avoid a timeout. This rarely
 2863 : : * affects the default configuration, under which clients spontaneously send a
 2864 : : * message every standby_message_timeout = wal_sender_timeout/6 = 10s. We
 2865 : : * could eliminate that problem by recognizing timeout expiration at
 2866 : : * wal_sender_timeout/2 after the keepalive.
 : : *
 : : * On expiry, terminates the walsender via WalSndShutdown() without
 : : * notifying the standby.
 2867 : : */
 2868 : : static void
2753 noah@leadboat.com 2869 : 813093 : WalSndCheckTimeOut(void)
 2870 : : {
 2871 : : TimestampTz timeout;
 2872 : :
 2873 : : /* don't bail out if we're doing something that doesn't require timeouts */
4308 andres@anarazel.de 2874 [ + + ]: 813093 : if (last_reply_timestamp <= 0)
 2875 : 28 : return;
 2876 : :
4388 rhaas@postgresql.org 2877 : 813065 : timeout = TimestampTzPlusMilliseconds(last_reply_timestamp,
 2878 : : wal_sender_timeout);
 2879 : :
2753 noah@leadboat.com 2880 [ + - - + ]: 813065 : if (wal_sender_timeout > 0 && last_processing >= timeout)
 2881 : : {
 2882 : : /*
 2883 : : * Since typically expiration of replication timeout means
 2884 : : * communication problem, we don't send the error message to the
 2885 : : * standby.
 2886 : : */
4388 rhaas@postgresql.org 2887 [ # # ]:UBC 0 : ereport(COMMERROR,
 2888 : : (errmsg("terminating walsender process due to replication timeout")));
 2889 : :
 2890 : 0 : WalSndShutdown();
 2891 : : }
 2892 : : }
2893 : :
2894 : : /* Main loop of walsender process that streams the WAL over Copy messages. */
2895 : : static void
4388 rhaas@postgresql.org 2896 :CBC 721 : WalSndLoop(WalSndSendDataCallback send_data)
2897 : : {
341 michael@paquier.xyz 2898 : 721 : TimestampTz last_flush = 0;
2899 : :
2900 : : /*
2901 : : * Initialize the last reply timestamp. That enables timeout processing
2902 : : * from hereon.
2903 : : */
5464 heikki.linnakangas@i 2904 : 721 : last_reply_timestamp = GetCurrentTimestamp();
4388 rhaas@postgresql.org 2905 : 721 : waiting_for_ping_response = false;
2906 : :
2907 : : /*
2908 : : * Loop until we reach the end of this timeline or the client requests to
2909 : : * stop streaming.
2910 : : */
2911 : : for (;;)
2912 : : {
2913 : : /* Clear any already-pending wakeups */
4075 andres@anarazel.de 2914 : 807550 : ResetLatch(MyLatch);
2915 : :
2916 [ + + ]: 807550 : CHECK_FOR_INTERRUPTS();
2917 : :
2918 : : /* Process any requests or signals received recently */
40 fujii@postgresql.org 2919 :GNC 807547 : WalSndHandleConfigReload();
2920 : :
2921 : : /* Check for input from the client */
5331 tgl@sss.pgh.pa.us 2922 :CBC 807547 : ProcessRepliesIfAny();
2923 : :
2924 : : /*
2925 : : * If we have received CopyDone from the client, sent CopyDone
2926 : : * ourselves, and the output buffer is empty, it's time to exit
2927 : : * streaming.
2928 : : */
3180 2929 [ + + + - ]: 807441 : if (streamingDoneReceiving && streamingDoneSending &&
2930 [ + + ]: 557 : !pq_is_send_pending())
4840 heikki.linnakangas@i 2931 : 355 : break;
2932 : :
2933 : : /*
2934 : : * If we don't have any pending data in the output buffer, try to send
2935 : : * some more. If there is some, we don't bother to call send_data
2936 : : * again until we've flushed it ... but we'd better assume we are not
2937 : : * caught up.
2938 : : */
5464 2939 [ + + ]: 807086 : if (!pq_is_send_pending())
4388 rhaas@postgresql.org 2940 : 766942 : send_data();
2941 : : else
2942 : 40144 : WalSndCaughtUp = false;
2943 : :
2944 : : /* Try to flush pending output to the client */
5331 tgl@sss.pgh.pa.us 2945 [ - + ]: 806872 : if (pq_flush_if_writable() != 0)
4388 rhaas@postgresql.org 2946 :UBC 0 : WalSndShutdown();
2947 : :
2948 : : /* If nothing remains to be sent right now ... */
4388 rhaas@postgresql.org 2949 [ + + + + ]:CBC 806872 : if (WalSndCaughtUp && !pq_is_send_pending())
2950 : : {
2951 : : /*
2952 : : * If we're in catchup state, move to streaming. This is an
2953 : : * important state change for users to know about, since before
2954 : : * this point data loss might occur if the primary dies and we
2955 : : * need to failover to the standby. The state change is also
2956 : : * important for synchronous replication, since commits that
2957 : : * started to wait at that point might wait for some time.
2958 : : */
5331 tgl@sss.pgh.pa.us 2959 [ + + ]: 60126 : if (MyWalSnd->state == WALSNDSTATE_CATCHUP)
2960 : : {
2961 [ + + ]: 671 : ereport(DEBUG1,
2962 : : (errmsg_internal("\"%s\" has now caught up with upstream server",
2963 : : application_name)));
2964 : 671 : WalSndSetState(WALSNDSTATE_STREAMING);
2965 : : }
2966 : :
2967 : : /*
2968 : : * When SIGUSR2 arrives, we send any outstanding logs up to the
2969 : : * shutdown checkpoint record (i.e., the latest record), wait for
2970 : : * them to be replicated to the standby, and exit. This may be a
2971 : : * normal termination at shutdown, or a promotion, the walsender
2972 : : * is not sure which.
2973 : : */
3205 andres@anarazel.de 2974 [ + + ]: 60126 : if (got_SIGUSR2)
4388 rhaas@postgresql.org 2975 : 3716 : WalSndDone(send_data);
2976 : : }
2977 : :
2978 : : /* Check for replication timeout. */
2753 noah@leadboat.com 2979 : 806829 : WalSndCheckTimeOut();
2980 : :
2981 : : /* Send keepalive if the time has come */
2982 : 806829 : WalSndKeepaliveIfNecessary();
2983 : :
2984 : : /*
2985 : : * Block if we have unsent data. XXX For logical replication, let
2986 : : * WalSndWaitForWal() handle any other blocking; idle receivers need
2987 : : * its additional actions. For physical replication, also block if
2988 : : * caught up; its send_data does not block.
2989 : : *
2990 : : * The IO statistics are reported in WalSndWaitForWal() for the
2991 : : * logical WAL senders.
2992 : : */
2150 2993 [ + + + + ]: 806829 : if ((WalSndCaughtUp && send_data != XLogSendLogical &&
2994 [ + + + + ]: 815656 : !streamingDoneSending) ||
2995 : 759529 : pq_is_send_pending())
2996 : : {
2997 : : long sleeptime;
2998 : : int wakeEvents;
2999 : : TimestampTz now;
3000 : :
1917 jdavis@postgresql.or 3001 [ + + ]: 85353 : if (!streamingDoneReceiving)
1840 tmunro@postgresql.or 3002 : 85331 : wakeEvents = WL_SOCKET_READABLE;
3003 : : else
3004 : 22 : wakeEvents = 0;
3005 : :
3006 : : /*
3007 : : * Use fresh timestamp, not last_processing, to reduce the chance
3008 : : * of reaching wal_sender_timeout before sending a keepalive.
3009 : : */
341 michael@paquier.xyz 3010 : 85353 : now = GetCurrentTimestamp();
3011 : 85353 : sleeptime = WalSndComputeSleeptime(now);
3012 : :
2150 noah@leadboat.com 3013 [ + + ]: 85353 : if (pq_is_send_pending())
3014 : 40085 : wakeEvents |= WL_SOCKET_WRITEABLE;
3015 : :
3016 : : /* Report IO statistics, if needed */
341 michael@paquier.xyz 3017 [ + + ]: 85353 : if (TimestampDifferenceExceeds(last_flush, now,
3018 : : WALSENDER_STATS_FLUSH_INTERVAL))
3019 : : {
3020 : 574 : pgstat_flush_io(false);
3021 : 574 : (void) pgstat_flush_backend(false, PGSTAT_BACKEND_FLUSH_IO);
3022 : 574 : last_flush = now;
3023 : : }
3024 : :
3025 : : /* Sleep until something happens or we time out */
1840 tmunro@postgresql.or 3026 : 85353 : WalSndWait(wakeEvents, sleeptime, WAIT_EVENT_WAL_SENDER_MAIN);
3027 : : }
3028 : : }
5903 heikki.linnakangas@i 3029 : 355 : }
3030 : :
3031 : : /* Initialize a per-walsender data structure for this walsender process */
3032 : : static void
4909 3033 : 1244 : InitWalSenderSlot(void)
3034 : : {
3035 : : int i;
3036 : :
3037 : : /*
3038 : : * WalSndCtl should be set up already (we inherit this by fork() or
3039 : : * EXEC_BACKEND mechanism from the postmaster).
3040 : : */
5903 3041 [ - + ]: 1244 : Assert(WalSndCtl != NULL);
3042 [ - + ]: 1244 : Assert(MyWalSnd == NULL);
3043 : :
3044 : : /*
3045 : : * Find a free walsender slot and reserve it. This must not fail due to
3046 : : * the prior check for free WAL senders in InitProcess().
3047 : : */
5827 rhaas@postgresql.org 3048 [ + - ]: 1836 : for (i = 0; i < max_wal_senders; i++)
3049 : : {
3566 3050 : 1836 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3051 : :
5903 heikki.linnakangas@i 3052 [ - + ]: 1836 : SpinLockAcquire(&walsnd->mutex);
3053 : :
3054 [ + + ]: 1836 : if (walsnd->pid != 0)
3055 : : {
3056 : 592 : SpinLockRelease(&walsnd->mutex);
3057 : 592 : continue;
3058 : : }
3059 : : else
3060 : : {
3061 : : /*
3062 : : * Found a free slot. Reserve it for us.
3063 : : */
3064 : 1244 : walsnd->pid = MyProcPid;
2157 tgl@sss.pgh.pa.us 3065 : 1244 : walsnd->state = WALSNDSTATE_STARTUP;
4826 alvherre@alvh.no-ip. 3066 : 1244 : walsnd->sentPtr = InvalidXLogRecPtr;
2157 tgl@sss.pgh.pa.us 3067 : 1244 : walsnd->needreload = false;
3745 magnus@hagander.net 3068 : 1244 : walsnd->write = InvalidXLogRecPtr;
3069 : 1244 : walsnd->flush = InvalidXLogRecPtr;
3070 : 1244 : walsnd->apply = InvalidXLogRecPtr;
3279 simon@2ndQuadrant.co 3071 : 1244 : walsnd->writeLag = -1;
3072 : 1244 : walsnd->flushLag = -1;
3073 : 1244 : walsnd->applyLag = -1;
2157 tgl@sss.pgh.pa.us 3074 : 1244 : walsnd->sync_standby_priority = 0;
2653 michael@paquier.xyz 3075 : 1244 : walsnd->replyTime = 0;
3076 : :
3077 : : /*
3078 : : * The kind assignment is done here and not in StartReplication()
3079 : : * and StartLogicalReplication(). Indeed, the logical walsender
3080 : : * needs to read WAL records (like snapshot of running
3081 : : * transactions) during the slot creation. So it needs to be woken
3082 : : * up based on its kind.
3083 : : *
3084 : : * The kind assignment could also be done in StartReplication(),
3085 : : * StartLogicalReplication() and CREATE_REPLICATION_SLOT but it
3086 : : * seems better to set it on one place.
3087 : : */
1072 andres@anarazel.de 3088 [ + + ]: 1244 : if (MyDatabaseId == InvalidOid)
3089 : 485 : walsnd->kind = REPLICATION_KIND_PHYSICAL;
3090 : : else
3091 : 759 : walsnd->kind = REPLICATION_KIND_LOGICAL;
3092 : :
5903 heikki.linnakangas@i 3093 : 1244 : SpinLockRelease(&walsnd->mutex);
3094 : : /* don't need the lock anymore */
103 peter@eisentraut.org 3095 :GNC 1244 : MyWalSnd = walsnd;
3096 : :
5903 heikki.linnakangas@i 3097 :CBC 1244 : break;
3098 : : }
3099 : : }
3100 : :
2588 michael@paquier.xyz 3101 [ - + ]: 1244 : Assert(MyWalSnd != NULL);
3102 : :
3103 : : /* Arrange to clean up at walsender exit */
5903 heikki.linnakangas@i 3104 : 1244 : on_shmem_exit(WalSndKill, 0);
3105 : 1244 : }
3106 : :
3107 : : /* Destroy the per-walsender data structure for this walsender process */
3108 : : static void
3109 : 1244 : WalSndKill(int code, Datum arg)
3110 : : {
4425 tgl@sss.pgh.pa.us 3111 : 1244 : WalSnd *walsnd = MyWalSnd;
3112 : :
3113 [ - + ]: 1244 : Assert(walsnd != NULL);
3114 : :
3115 : 1244 : MyWalSnd = NULL;
3116 : :
4075 andres@anarazel.de 3117 [ - + ]: 1244 : SpinLockAcquire(&walsnd->mutex);
3118 : : /* Mark WalSnd struct as no longer being in use. */
4425 tgl@sss.pgh.pa.us 3119 : 1244 : walsnd->pid = 0;
4075 andres@anarazel.de 3120 : 1244 : SpinLockRelease(&walsnd->mutex);
5903 heikki.linnakangas@i 3121 : 1244 : }
3122 : :
/*
 * XLogReaderRoutine->segment_open callback
 *
 * Opens the WAL segment 'nextSegNo' for reading and stores the resulting
 * file descriptor in state->seg.ws_file.  *tli_p is set to the timeline the
 * file was opened on (see below for why that may differ from sendTimeLine).
 * On failure this raises ERROR and does not return.
 */
static void
WalSndSegmentOpen(XLogReaderState *state, XLogSegNo nextSegNo,
				  TimeLineID *tli_p)
{
	char		path[MAXPGPATH];

	/*-------
	 * When reading from a historic timeline, and there is a timeline switch
	 * within this segment, read from the WAL segment belonging to the new
	 * timeline.
	 *
	 * For example, imagine that this server is currently on timeline 5, and
	 * we're streaming timeline 4. The switch from timeline 4 to 5 happened at
	 * 0/13002088. In pg_wal, we have these files:
	 *
	 * ...
	 * 000000040000000000000012
	 * 000000040000000000000013
	 * 000000050000000000000013
	 * 000000050000000000000014
	 * ...
	 *
	 * In this situation, when requested to send the WAL from segment 0x13, on
	 * timeline 4, we read the WAL from file 000000050000000000000013. Archive
	 * recovery prefers files from newer timelines, so if the segment was
	 * restored from the archive on this server, the file belonging to the old
	 * timeline, 000000040000000000000013, might not exist. Their contents are
	 * equal up to the switchpoint, because at a timeline switch, the used
	 * portion of the old segment is copied to the new file.
	 */
	*tli_p = sendTimeLine;
	if (sendTimeLineIsHistoric)
	{
		XLogSegNo	endSegNo;

		/* Is this the segment that contains the timeline switch point? */
		XLByteToSeg(sendTimeLineValidUpto, endSegNo, state->segcxt.ws_segsize);
		if (nextSegNo == endSegNo)
			*tli_p = sendTimeLineNextTLI;
	}

	XLogFilePath(path, *tli_p, nextSegNo, state->segcxt.ws_segsize);
	state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
	if (state->seg.ws_file >= 0)
		return;

	/*
	 * If the file is not found, assume it's because the standby asked for a
	 * too old WAL segment that has already been removed or recycled.
	 */
	if (errno == ENOENT)
	{
		char		xlogfname[MAXFNAMELEN];
		int			save_errno = errno;

		/*
		 * Preserve errno across XLogFileName() so that
		 * errcode_for_file_access() below still sees the open() failure.
		 */
		XLogFileName(xlogfname, *tli_p, nextSegNo, wal_segment_size);
		errno = save_errno;
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("requested WAL segment %s has already been removed",
						xlogfname)));
	}
	else
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": %m",
						path)));
}
3191 : :
/*
 * Send out the WAL in its normal physical/stored form.
 *
 * Read up to MAX_SEND_SIZE bytes of WAL that's been flushed to disk,
 * but not yet sent to the client, and buffer it in the libpq output
 * buffer.
 *
 * If there is no unsent WAL remaining, WalSndCaughtUp is set to true,
 * otherwise WalSndCaughtUp is set to false.
 */
static void
XLogSendPhysical(void)
{
	XLogRecPtr	SendRqstPtr;	/* upper bound we may send up to */
	XLogRecPtr	startptr;		/* start of this message's payload */
	XLogRecPtr	endptr;			/* end of this message's payload */
	Size		nbytes;
	XLogSegNo	segno;
	WALReadError errinfo;
	Size		rbytes;			/* bytes satisfied from WAL buffers */

	/* If requested switch the WAL sender to the stopping state. */
	if (got_STOPPING)
		WalSndSetState(WALSNDSTATE_STOPPING);

	/* Once we've sent CopyDone there is nothing more to stream. */
	if (streamingDoneSending)
	{
		WalSndCaughtUp = true;
		return;
	}

	/* Figure out how far we can safely send the WAL. */
	if (sendTimeLineIsHistoric)
	{
		/*
		 * Streaming an old timeline that's in this server's history, but is
		 * not the one we're currently inserting or replaying. It can be
		 * streamed up to the point where we switched off that timeline.
		 */
		SendRqstPtr = sendTimeLineValidUpto;
	}
	else if (am_cascading_walsender)
	{
		TimeLineID	SendRqstTLI;

		/*
		 * Streaming the latest timeline on a standby.
		 *
		 * Attempt to send all WAL that has already been replayed, so that we
		 * know it's valid. If we're receiving WAL through streaming
		 * replication, it's also OK to send any WAL that has been received
		 * but not replayed.
		 *
		 * The timeline we're recovering from can change, or we can be
		 * promoted. In either case, the current timeline becomes historic. We
		 * need to detect that so that we don't try to stream past the point
		 * where we switched to another timeline. We check for promotion or
		 * timeline switch after calculating FlushPtr, to avoid a race
		 * condition: if the timeline becomes historic just after we checked
		 * that it was still current, it's still be OK to stream it up to the
		 * FlushPtr that was calculated before it became historic.
		 */
		bool		becameHistoric = false;

		SendRqstPtr = GetStandbyFlushRecPtr(&SendRqstTLI);

		if (!RecoveryInProgress())
		{
			/* We have been promoted. */
			SendRqstTLI = GetWALInsertionTimeLine();
			am_cascading_walsender = false;
			becameHistoric = true;
		}
		else
		{
			/*
			 * Still a cascading standby. But is the timeline we're sending
			 * still the one recovery is recovering from?
			 */
			if (sendTimeLine != SendRqstTLI)
				becameHistoric = true;
		}

		if (becameHistoric)
		{
			/*
			 * The timeline we were sending has become historic. Read the
			 * timeline history file of the new timeline to see where exactly
			 * we forked off from the timeline we were sending.
			 */
			List	   *history;

			history = readTimeLineHistory(SendRqstTLI);
			sendTimeLineValidUpto = tliSwitchPoint(sendTimeLine, history, &sendTimeLineNextTLI);

			Assert(sendTimeLine < sendTimeLineNextTLI);
			list_free_deep(history);

			sendTimeLineIsHistoric = true;

			SendRqstPtr = sendTimeLineValidUpto;
		}
	}
	else
	{
		/*
		 * Streaming the current timeline on a primary.
		 *
		 * Attempt to send all data that's already been written out and
		 * fsync'd to disk. We cannot go further than what's been written out
		 * given the current implementation of WALRead(). And in any case
		 * it's unsafe to send WAL that is not securely down to disk on the
		 * primary: if the primary subsequently crashes and restarts, standbys
		 * must not have applied any WAL that got lost on the primary.
		 */
		SendRqstPtr = GetFlushRecPtr(NULL);
	}

	/*
	 * Record the current system time as an approximation of the time at which
	 * this WAL location was written for the purposes of lag tracking.
	 *
	 * In theory we could make XLogFlush() record a time in shmem whenever WAL
	 * is flushed and we could get that time as well as the LSN when we call
	 * GetFlushRecPtr() above (and likewise for the cascading standby
	 * equivalent), but rather than putting any new code into the hot WAL path
	 * it seems good enough to capture the time here. We should reach this
	 * after XLogFlush() runs WalSndWakeupProcessRequests(), and although that
	 * may take some time, we read the WAL flush pointer and take the time
	 * very close to together here so that we'll get a later position if it is
	 * still moving.
	 *
	 * Because LagTrackerWrite ignores samples when the LSN hasn't advanced,
	 * this gives us a cheap approximation for the WAL flush time for this
	 * LSN.
	 *
	 * Note that the LSN is not necessarily the LSN for the data contained in
	 * the present message; it's the end of the WAL, which might be further
	 * ahead. All the lag tracking machinery cares about is finding out when
	 * that arbitrary LSN is eventually reported as written, flushed and
	 * applied, so that it can measure the elapsed time.
	 */
	LagTrackerWrite(SendRqstPtr, GetCurrentTimestamp());

	/*
	 * If this is a historic timeline and we've reached the point where we
	 * forked to the next timeline, stop streaming.
	 *
	 * Note: We might already have sent WAL > sendTimeLineValidUpto. The
	 * startup process will normally replay all WAL that has been received
	 * from the primary, before promoting, but if the WAL streaming is
	 * terminated at a WAL page boundary, the valid portion of the timeline
	 * might end in the middle of a WAL record. We might've already sent the
	 * first half of that partial WAL record to the cascading standby, so that
	 * sentPtr > sendTimeLineValidUpto. That's OK; the cascading standby can't
	 * replay the partial WAL record either, so it can still follow our
	 * timeline switch.
	 */
	if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr)
	{
		/* close the current file. */
		if (xlogreader->seg.ws_file >= 0)
			wal_segment_close(xlogreader);

		/* Send CopyDone */
		pq_putmessage_noblock(PqMsg_CopyDone, NULL, 0);
		streamingDoneSending = true;

		WalSndCaughtUp = true;

		elog(DEBUG1, "walsender reached end of timeline at %X/%08X (sent up to %X/%08X)",
			 LSN_FORMAT_ARGS(sendTimeLineValidUpto),
			 LSN_FORMAT_ARGS(sentPtr));
		return;
	}

	/* Do we have any work to do? */
	Assert(sentPtr <= SendRqstPtr);
	if (SendRqstPtr <= sentPtr)
	{
		WalSndCaughtUp = true;
		return;
	}

	/*
	 * Figure out how much to send in one message. If there's no more than
	 * MAX_SEND_SIZE bytes to send, send everything. Otherwise send
	 * MAX_SEND_SIZE bytes, but round back to logfile or page boundary.
	 *
	 * The rounding is not only for performance reasons. Walreceiver relies on
	 * the fact that we never split a WAL record across two messages. Since a
	 * long WAL record is split at page boundary into continuation records,
	 * page boundary is always a safe cut-off point. We also assume that
	 * SendRqstPtr never points to the middle of a WAL record.
	 */
	startptr = sentPtr;
	endptr = startptr;
	endptr += MAX_SEND_SIZE;

	/* if we went beyond SendRqstPtr, back off */
	if (SendRqstPtr <= endptr)
	{
		endptr = SendRqstPtr;
		if (sendTimeLineIsHistoric)
			WalSndCaughtUp = false;
		else
			WalSndCaughtUp = true;
	}
	else
	{
		/* round down to page boundary. */
		endptr -= (endptr % XLOG_BLCKSZ);
		WalSndCaughtUp = false;
	}

	nbytes = endptr - startptr;
	Assert(nbytes <= MAX_SEND_SIZE);

	/*
	 * OK to read and send the slice.  Build the WALData message header:
	 * type byte, then dataStart/walEnd/sendtime as three int64s.
	 */
	resetStringInfo(&output_message);
	pq_sendbyte(&output_message, PqReplMsg_WALData);

	pq_sendint64(&output_message, startptr);	/* dataStart */
	pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
	pq_sendint64(&output_message, 0);	/* sendtime, filled in last */

	/*
	 * Read the log directly into the output buffer to avoid extra memcpy
	 * calls.
	 */
	enlargeStringInfo(&output_message, nbytes);

retry:
	/* attempt to read WAL from WAL buffers first */
	rbytes = WALReadFromBuffers(&output_message.data[output_message.len],
								startptr, nbytes, xlogreader->seg.ws_tli);
	output_message.len += rbytes;
	startptr += rbytes;
	nbytes -= rbytes;

	/* now read the remaining WAL from WAL file */
	if (nbytes > 0 &&
		!WALRead(xlogreader,
				 &output_message.data[output_message.len],
				 startptr,
				 nbytes,
				 xlogreader->seg.ws_tli,	/* Pass the current TLI because
											 * only WalSndSegmentOpen controls
											 * whether new TLI is needed. */
				 &errinfo))
		WALReadRaiseError(&errinfo);

	/* See logical_read_xlog_page(). */
	XLByteToSeg(startptr, segno, xlogreader->segcxt.ws_segsize);
	CheckXLogRemoved(segno, xlogreader->seg.ws_tli);

	/*
	 * During recovery, the currently-open WAL file might be replaced with the
	 * file of the same name retrieved from archive. So we always need to
	 * check what we read was valid after reading into the buffer. If it's
	 * invalid, we try to open and read the file again.
	 */
	if (am_cascading_walsender)
	{
		WalSnd	   *walsnd = MyWalSnd;
		bool		reload;

		/* Consume the reload request set by WalSndRqstFileReload(). */
		SpinLockAcquire(&walsnd->mutex);
		reload = walsnd->needreload;
		walsnd->needreload = false;
		SpinLockRelease(&walsnd->mutex);

		if (reload && xlogreader->seg.ws_file >= 0)
		{
			wal_segment_close(xlogreader);

			/* Re-read the slice from the (possibly replaced) segment file. */
			goto retry;
		}
	}

	/* Account for the bytes WALRead() placed directly in the buffer. */
	output_message.len += nbytes;
	output_message.data[output_message.len] = '\0';

	/*
	 * Fill the send timestamp last, so that it is taken as late as possible.
	 * Offset 1 + 2*sizeof(int64) skips the type byte plus the dataStart and
	 * walEnd fields written above.
	 */
	resetStringInfo(&tmpbuf);
	pq_sendint64(&tmpbuf, GetCurrentTimestamp());
	memcpy(&output_message.data[1 + sizeof(int64) + sizeof(int64)],
		   tmpbuf.data, sizeof(int64));

	pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);

	sentPtr = endptr;

	/* Update shared memory status */
	{
		WalSnd	   *walsnd = MyWalSnd;

		SpinLockAcquire(&walsnd->mutex);
		walsnd->sentPtr = sentPtr;
		SpinLockRelease(&walsnd->mutex);
	}

	/* Report progress of XLOG streaming in PS display */
	if (update_process_title)
	{
		char		activitymsg[50];

		snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%08X",
				 LSN_FORMAT_ARGS(sentPtr));
		set_ps_display(activitymsg);
	}
}
3508 : :
/*
 * Stream out logically decoded data.
 *
 * Reads and decodes at most one WAL record per call; the caller invokes
 * this repeatedly through the send_data callback.  Sets WalSndCaughtUp
 * when decoding has reached the cached flush (or, on a standby, replay)
 * position.
 */
static void
XLogSendLogical(void)
{
	XLogRecord *record;
	char	   *errm;

	/*
	 * We'll use the current flush point to determine whether we've caught up.
	 * This variable is static in order to cache it across calls. Caching is
	 * helpful because GetFlushRecPtr() needs to acquire a heavily-contended
	 * spinlock.
	 */
	static XLogRecPtr flushPtr = InvalidXLogRecPtr;

	/*
	 * Don't know whether we've caught up yet. We'll set WalSndCaughtUp to
	 * true in WalSndWaitForWal, if we're actually waiting. We also set to
	 * true if XLogReadRecord() had to stop reading but WalSndWaitForWal
	 * didn't wait - i.e. when we're shutting down.
	 */
	WalSndCaughtUp = false;

	record = XLogReadRecord(logical_decoding_ctx->reader, &errm);

	/* xlog record was invalid */
	if (errm != NULL)
		elog(ERROR, "could not find record while sending logically-decoded data: %s",
			 errm);

	if (record != NULL)
	{
		/*
		 * Note the lack of any call to LagTrackerWrite() which is handled by
		 * WalSndUpdateProgress which is called by output plugin through
		 * logical decoding write api.
		 */
		LogicalDecodingProcessRecord(logical_decoding_ctx, logical_decoding_ctx->reader);

		/* Advance our send position past the record just decoded. */
		sentPtr = logical_decoding_ctx->reader->EndRecPtr;
	}

	/*
	 * If first time through in this session, initialize flushPtr. Otherwise,
	 * we only need to update flushPtr if EndRecPtr is past it.
	 */
	if (!XLogRecPtrIsValid(flushPtr) ||
		logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
	{
		/*
		 * For cascading logical WAL senders, we use the replay LSN instead of
		 * the flush LSN, since logical decoding on a standby only processes
		 * WAL that has been replayed. This distinction becomes particularly
		 * important during shutdown, as new WAL is no longer replayed and the
		 * last replayed LSN marks the furthest point up to which decoding can
		 * proceed.
		 */
		if (am_cascading_walsender)
			flushPtr = GetXLogReplayRecPtr(NULL);
		else
			flushPtr = GetFlushRecPtr(NULL);
	}

	/* If EndRecPtr is still past our flushPtr, it means we caught up. */
	if (logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
		WalSndCaughtUp = true;

	/*
	 * If we're caught up and have been requested to stop, have WalSndLoop()
	 * terminate the connection in an orderly manner, after writing out all
	 * the pending data.
	 */
	if (WalSndCaughtUp && got_STOPPING)
		got_SIGUSR2 = true;

	/* Update shared memory status */
	{
		WalSnd	   *walsnd = MyWalSnd;

		SpinLockAcquire(&walsnd->mutex);
		walsnd->sentPtr = sentPtr;
		SpinLockRelease(&walsnd->mutex);
	}
}
3595 : :
3596 : : /*
3597 : : * Shutdown if the sender is caught up.
3598 : : *
3599 : : * NB: This should only be called when the shutdown signal has been received
3600 : : * from postmaster.
3601 : : *
3602 : : * Note that if we determine that there's still more data to send, this
3603 : : * function will return control to the caller.
3604 : : */
3605 : : static void
3606 : 3716 : WalSndDone(WalSndSendDataCallback send_data)
3607 : : {
3608 : : XLogRecPtr replicatedPtr;
3609 : :
3610 : : /* ... let's just be real sure we're caught up ... */
3611 : 3716 : send_data();
3612 : :
3613 : : /*
3614 : : * To figure out whether all WAL has successfully been replicated, check
3615 : : * flush location if valid, write otherwise. Tools like pg_receivewal will
3616 : : * usually (unless in synchronous mode) return an invalid flush location.
3617 : : */
129 alvherre@kurilemu.de 3618 :GNC 7432 : replicatedPtr = XLogRecPtrIsValid(MyWalSnd->flush) ?
3619 [ + + ]: 3716 : MyWalSnd->flush : MyWalSnd->write;
3620 : :
4381 fujii@postgresql.org 3621 [ + + + + ]:CBC 3716 : if (WalSndCaughtUp && sentPtr == replicatedPtr &&
4388 rhaas@postgresql.org 3622 [ + - ]: 43 : !pq_is_send_pending())
3623 : : {
3624 : : QueryCompletion qc;
3625 : :
3626 : : /* Inform the standby that XLOG streaming is done */
2204 alvherre@alvh.no-ip. 3627 : 43 : SetQueryCompletion(&qc, CMDTAG_COPY, 0);
3628 : 43 : EndCommand(&qc, DestRemote, false);
4388 rhaas@postgresql.org 3629 : 43 : pq_flush();
3630 : :
3631 : 43 : proc_exit(0);
3632 : : }
3633 [ + + ]: 3673 : if (!waiting_for_ping_response)
1446 akapila@postgresql.o 3634 : 1362 : WalSndKeepalive(true, InvalidXLogRecPtr);
4388 rhaas@postgresql.org 3635 : 3673 : }
3636 : :
3637 : : /*
3638 : : * Returns the latest point in WAL that has been safely flushed to disk.
3639 : : * This should only be called when in recovery.
3640 : : *
3641 : : * This is called either by cascading walsender to find WAL position to be sent
3642 : : * to a cascaded standby or by slot synchronization operation to validate remote
3643 : : * slot's lsn before syncing it locally.
3644 : : *
3645 : : * As a side-effect, *tli is updated to the TLI of the last
3646 : : * replayed WAL record.
3647 : : */
3648 : : XLogRecPtr
1591 3649 : 972 : GetStandbyFlushRecPtr(TimeLineID *tli)
3650 : : {
3651 : : XLogRecPtr replayPtr;
3652 : : TimeLineID replayTLI;
3653 : : XLogRecPtr receivePtr;
3654 : : TimeLineID receiveTLI;
3655 : : XLogRecPtr result;
3656 : :
760 akapila@postgresql.o 3657 [ + + - + ]: 972 : Assert(am_cascading_walsender || IsSyncingReplicationSlots());
3658 : :
3659 : : /*
3660 : : * We can safely send what's already been replayed. Also, if walreceiver
3661 : : * is streaming WAL from the same timeline, we can send anything that it
3662 : : * has streamed, but hasn't been replayed yet.
3663 : : */
3664 : :
2167 tmunro@postgresql.or 3665 : 972 : receivePtr = GetWalRcvFlushRecPtr(NULL, &receiveTLI);
4833 heikki.linnakangas@i 3666 : 972 : replayPtr = GetXLogReplayRecPtr(&replayTLI);
3667 : :
1072 andres@anarazel.de 3668 [ + + ]: 972 : if (tli)
3669 : 926 : *tli = replayTLI;
3670 : :
4833 heikki.linnakangas@i 3671 : 972 : result = replayPtr;
1591 rhaas@postgresql.org 3672 [ + - + + ]: 972 : if (receiveTLI == replayTLI && receivePtr > replayPtr)
4833 heikki.linnakangas@i 3673 : 117 : result = receivePtr;
3674 : :
3675 : 972 : return result;
3676 : : }
3677 : :
3678 : : /*
3679 : : * Request walsenders to reload the currently-open WAL file
3680 : : */
3681 : : void
5353 simon@2ndQuadrant.co 3682 : 28 : WalSndRqstFileReload(void)
3683 : : {
3684 : : int i;
3685 : :
3686 [ + + ]: 284 : for (i = 0; i < max_wal_senders; i++)
3687 : : {
3566 rhaas@postgresql.org 3688 : 256 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3689 : :
3180 alvherre@alvh.no-ip. 3690 [ - + ]: 256 : SpinLockAcquire(&walsnd->mutex);
5353 simon@2ndQuadrant.co 3691 [ + - ]: 256 : if (walsnd->pid == 0)
3692 : : {
3180 alvherre@alvh.no-ip. 3693 : 256 : SpinLockRelease(&walsnd->mutex);
5353 simon@2ndQuadrant.co 3694 : 256 : continue;
3695 : : }
5353 simon@2ndQuadrant.co 3696 :UBC 0 : walsnd->needreload = true;
3697 : 0 : SpinLockRelease(&walsnd->mutex);
3698 : : }
5353 simon@2ndQuadrant.co 3699 :CBC 28 : }
3700 : :
3701 : : /*
3702 : : * Handle PROCSIG_WALSND_INIT_STOPPING signal.
3703 : : */
3704 : : void
3205 andres@anarazel.de 3705 : 43 : HandleWalSndInitStopping(void)
3706 : : {
3707 [ - + ]: 43 : Assert(am_walsender);
3708 : :
3709 : : /*
3710 : : * If replication has not yet started, die like with SIGTERM. If
3711 : : * replication is active, only set a flag and wake up the main loop. It
3712 : : * will send any outstanding WAL, wait for it to be replicated to the
3713 : : * standby, and then exit gracefully.
3714 : : */
3715 [ - + ]: 43 : if (!replication_active)
3205 andres@anarazel.de 3716 :UBC 0 : kill(MyProcPid, SIGTERM);
3717 : : else
3205 andres@anarazel.de 3718 :CBC 43 : got_STOPPING = true;
3719 : 43 : }
3720 : :
/*
 * SIGUSR2: set flag to do a last cycle and shut down afterwards. The WAL
 * sender should already have been switched to WALSNDSTATE_STOPPING at
 * this point.
 */
static void
WalSndLastCycleHandler(SIGNAL_ARGS)
{
	/* Just record the request and wake the main loop; no other work here. */
	got_SIGUSR2 = true;
	SetLatch(MyLatch);
}
3732 : :
/* Set up signal handlers */
void
WalSndSignals(void)
{
	/* Set up signal handlers */
	pqsignal(SIGHUP, SignalHandlerForConfigReload);
	pqsignal(SIGINT, StatementCancelHandler);	/* query cancel */
	pqsignal(SIGTERM, die);		/* request shutdown */
	/* SIGQUIT handler was already set up by InitPostmasterChild */
	InitializeTimeouts();		/* establishes SIGALRM handler */
	/* Ignore SIGPIPE so a lost client connection doesn't kill us outright */
	pqsignal(SIGPIPE, SIG_IGN);
	pqsignal(SIGUSR1, procsignal_sigusr1_handler);
	pqsignal(SIGUSR2, WalSndLastCycleHandler);	/* request a last cycle and
												 * shutdown */

	/* Reset some signals that are accepted by postmaster but not here */
	pqsignal(SIGCHLD, SIG_DFL);
}
3751 : :
3752 : : /* Report shared-memory space needed by WalSndShmemInit */
3753 : : Size
3754 : 4447 : WalSndShmemSize(void)
3755 : : {
5861 bruce@momjian.us 3756 : 4447 : Size size = 0;
3757 : :
5903 heikki.linnakangas@i 3758 : 4447 : size = offsetof(WalSndCtlData, walsnds);
5827 rhaas@postgresql.org 3759 : 4447 : size = add_size(size, mul_size(max_wal_senders, sizeof(WalSnd)));
3760 : :
5903 heikki.linnakangas@i 3761 : 4447 : return size;
3762 : : }
3763 : :
3764 : : /* Allocate and initialize walsender-related shared memory */
3765 : : void
3766 : 1150 : WalSndShmemInit(void)
3767 : : {
3768 : : bool found;
3769 : : int i;
3770 : :
3771 : 1150 : WalSndCtl = (WalSndCtlData *)
3772 : 1150 : ShmemInitStruct("Wal Sender Ctl", WalSndShmemSize(), &found);
3773 : :
5800 tgl@sss.pgh.pa.us 3774 [ + - ]: 1150 : if (!found)
3775 : : {
3776 : : /* First time through, so initialize */
3777 [ + - + - : 8242 : MemSet(WalSndCtl, 0, WalSndShmemSize());
+ - + + +
+ ]
3778 : :
5164 simon@2ndQuadrant.co 3779 [ + + ]: 4600 : for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; i++)
1152 andres@anarazel.de 3780 : 3450 : dlist_init(&(WalSndCtl->SyncRepQueue[i]));
3781 : :
5800 tgl@sss.pgh.pa.us 3782 [ + + ]: 8618 : for (i = 0; i < max_wal_senders; i++)
3783 : : {
3784 : 7468 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3785 : :
3786 : 7468 : SpinLockInit(&walsnd->mutex);
3787 : : }
3788 : :
1029 andres@anarazel.de 3789 : 1150 : ConditionVariableInit(&WalSndCtl->wal_flush_cv);
3790 : 1150 : ConditionVariableInit(&WalSndCtl->wal_replay_cv);
737 akapila@postgresql.o 3791 : 1150 : ConditionVariableInit(&WalSndCtl->wal_confirm_rcv_cv);
3792 : : }
5903 heikki.linnakangas@i 3793 : 1150 : }
3794 : :
/*
 * Wake up physical, logical or both kinds of walsenders
 *
 * The distinction between physical and logical walsenders is done, because:
 * - physical walsenders can't send data until it's been flushed
 * - logical walsenders on standby can't decode and send data until it's been
 *   applied
 *
 * For cascading replication we need to wake up physical walsenders separately
 * from logical walsenders (see the comment before calling WalSndWakeup() in
 * ApplyWalRecord() for more details).
 *
 * This will be called inside critical sections, so throwing an error is not
 * advisable.
 */
void
WalSndWakeup(bool physical, bool logical)
{
	/*
	 * Wake up all the walsenders waiting on WAL being flushed or replayed
	 * respectively. Note that waiting walsender would have prepared to sleep
	 * on the CV (i.e., added itself to the CV's waitlist) in WalSndWait()
	 * before actually waiting.
	 */
	if (physical)
		ConditionVariableBroadcast(&WalSndCtl->wal_flush_cv);

	if (logical)
		ConditionVariableBroadcast(&WalSndCtl->wal_replay_cv);
}
3825 : :
/*
 * Wait for readiness on the FeBe socket, or a timeout. The mask should be
 * composed of optional WL_SOCKET_WRITEABLE and WL_SOCKET_READABLE flags. Exit
 * on postmaster death.
 *
 * 'timeout' is passed through to WaitEventSetWait(); 'wait_event' both
 * identifies the wait for the stats system and selects which condition
 * variable to prepare to sleep on (see below).
 */
static void
WalSndWait(uint32 socket_events, long timeout, uint32 wait_event)
{
	WaitEvent	event;

	ModifyWaitEvent(FeBeWaitSet, FeBeWaitSetSocketPos, socket_events, NULL);

	/*
	 * We use a condition variable to efficiently wake up walsenders in
	 * WalSndWakeup().
	 *
	 * Every walsender prepares to sleep on a shared memory CV. Note that it
	 * just prepares to sleep on the CV (i.e., adds itself to the CV's
	 * waitlist), but does not actually wait on the CV (IOW, it never calls
	 * ConditionVariableSleep()). It still uses WaitEventSetWait() for
	 * waiting, because we also need to wait for socket events. The processes
	 * (startup process, walreceiver etc.) wanting to wake up walsenders use
	 * ConditionVariableBroadcast(), which in turn calls SetLatch(), helping
	 * walsenders come out of WaitEventSetWait().
	 *
	 * This approach is simple and efficient because, one doesn't have to loop
	 * through all the walsenders slots, with a spinlock acquisition and
	 * release for every iteration, just to wake up only the waiting
	 * walsenders. It makes WalSndWakeup() callers' life easy.
	 *
	 * XXX: A desirable future improvement would be to add support for CVs
	 * into WaitEventSetWait().
	 *
	 * And, we use separate shared memory CVs for physical and logical
	 * walsenders for selective wake ups, see WalSndWakeup() for more details.
	 *
	 * If the wait event is WAIT_FOR_STANDBY_CONFIRMATION, wait on another CV
	 * until awakened by physical walsenders after the walreceiver confirms
	 * the receipt of the LSN.
	 */
	if (wait_event == WAIT_EVENT_WAIT_FOR_STANDBY_CONFIRMATION)
		ConditionVariablePrepareToSleep(&WalSndCtl->wal_confirm_rcv_cv);
	else if (MyWalSnd->kind == REPLICATION_KIND_PHYSICAL)
		ConditionVariablePrepareToSleep(&WalSndCtl->wal_flush_cv);
	else if (MyWalSnd->kind == REPLICATION_KIND_LOGICAL)
		ConditionVariablePrepareToSleep(&WalSndCtl->wal_replay_cv);

	if (WaitEventSetWait(FeBeWaitSet, timeout, &event, 1, wait_event) == 1 &&
		(event.events & WL_POSTMASTER_DEATH))
	{
		/* Leave the CV waitlist before dying, then exit. */
		ConditionVariableCancelSleep();
		proc_exit(1);
	}

	/* Always undo the PrepareToSleep from above, whether we slept or not. */
	ConditionVariableCancelSleep();
}
3882 : :
3883 : : /*
3884 : : * Signal all walsenders to move to stopping state.
3885 : : *
3886 : : * This will trigger walsenders to move to a state where no further WAL can be
3887 : : * generated. See this file's header for details.
3888 : : */
3889 : : void
3205 andres@anarazel.de 3890 : 693 : WalSndInitStopping(void)
3891 : : {
3892 : : int i;
3893 : :
3894 [ + + ]: 5305 : for (i = 0; i < max_wal_senders; i++)
3895 : : {
3896 : 4612 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3897 : : pid_t pid;
3898 : :
3899 [ - + ]: 4612 : SpinLockAcquire(&walsnd->mutex);
3900 : 4612 : pid = walsnd->pid;
3901 : 4612 : SpinLockRelease(&walsnd->mutex);
3902 : :
3903 [ + + ]: 4612 : if (pid == 0)
3904 : 4569 : continue;
3905 : :
742 heikki.linnakangas@i 3906 : 43 : SendProcSignal(pid, PROCSIG_WALSND_INIT_STOPPING, INVALID_PROC_NUMBER);
3907 : : }
3205 andres@anarazel.de 3908 : 693 : }
3909 : :
3910 : : /*
3911 : : * Wait that all the WAL senders have quit or reached the stopping state. This
3912 : : * is used by the checkpointer to control when the shutdown checkpoint can
3913 : : * safely be performed.
3914 : : */
3915 : : void
3916 : 693 : WalSndWaitStopping(void)
3917 : : {
3918 : : for (;;)
3919 : 44 : {
3920 : : int i;
3921 : 737 : bool all_stopped = true;
3922 : :
3923 [ + + ]: 5349 : for (i = 0; i < max_wal_senders; i++)
3924 : : {
3925 : 4656 : WalSnd *walsnd = &WalSndCtl->walsnds[i];
3926 : :
3927 [ - + ]: 4656 : SpinLockAcquire(&walsnd->mutex);
3928 : :
3929 [ + + ]: 4656 : if (walsnd->pid == 0)
3930 : : {
3931 : 4580 : SpinLockRelease(&walsnd->mutex);
3932 : 4580 : continue;
3933 : : }
3934 : :
3180 alvherre@alvh.no-ip. 3935 [ + + ]: 76 : if (walsnd->state != WALSNDSTATE_STOPPING)
3936 : : {
3205 andres@anarazel.de 3937 : 44 : all_stopped = false;
3180 alvherre@alvh.no-ip. 3938 : 44 : SpinLockRelease(&walsnd->mutex);
3205 andres@anarazel.de 3939 : 44 : break;
3940 : : }
3180 alvherre@alvh.no-ip. 3941 : 32 : SpinLockRelease(&walsnd->mutex);
3942 : : }
3943 : :
3944 : : /* safe to leave if confirmation is done for all WAL senders */
3205 andres@anarazel.de 3945 [ + + ]: 737 : if (all_stopped)
3946 : 693 : return;
3947 : :
3948 : 44 : pg_usleep(10000L); /* wait for 10 msec */
3949 : : }
3950 : : }
3951 : :
3952 : : /* Set state for current walsender (only called in walsender) */
3953 : : void
5542 magnus@hagander.net 3954 : 3532 : WalSndSetState(WalSndState state)
3955 : : {
3566 rhaas@postgresql.org 3956 : 3532 : WalSnd *walsnd = MyWalSnd;
3957 : :
5542 magnus@hagander.net 3958 [ - + ]: 3532 : Assert(am_walsender);
3959 : :
3960 [ + + ]: 3532 : if (walsnd->state == state)
3961 : 1560 : return;
3962 : :
3963 [ - + ]: 1972 : SpinLockAcquire(&walsnd->mutex);
3964 : 1972 : walsnd->state = state;
3965 : 1972 : SpinLockRelease(&walsnd->mutex);
3966 : : }
3967 : :
/*
 * Return a string constant representing the state. This is used
 * in system views, and should *not* be translated.
 */
static const char *
WalSndGetStateString(WalSndState state)
{
	/* No default case: all enum values are handled explicitly above the
	 * fallback, so the fallback is only reachable for a corrupt value. */
	switch (state)
	{
		case WALSNDSTATE_STARTUP:
			return "startup";
		case WALSNDSTATE_BACKUP:
			return "backup";
		case WALSNDSTATE_CATCHUP:
			return "catchup";
		case WALSNDSTATE_STREAMING:
			return "streaming";
		case WALSNDSTATE_STOPPING:
			return "stopping";
	}
	return "UNKNOWN";
}
3990 : :
3991 : : static Interval *
3279 simon@2ndQuadrant.co 3992 :CBC 1158 : offset_to_interval(TimeOffset offset)
3993 : : {
95 michael@paquier.xyz 3994 :GNC 1158 : Interval *result = palloc_object(Interval);
3995 : :
3279 simon@2ndQuadrant.co 3996 :CBC 1158 : result->month = 0;
3997 : 1158 : result->day = 0;
3998 : 1158 : result->time = offset;
3999 : :
4000 : 1158 : return result;
4001 : : }
4002 : :
/*
 * Returns activity of walsenders, including pids and xlog locations sent to
 * standby servers.
 *
 * Emits one row per active walsender slot into the materialized SRF result.
 * Columns: pid, state, sent/write/flush/apply LSNs, write/flush/apply lag
 * intervals, sync priority, sync state, reply time.
 */
Datum
pg_stat_get_wal_senders(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_WAL_SENDERS_COLS 12
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	SyncRepStandbyData *sync_standbys;
	int			num_standbys;
	int			i;

	InitMaterializedSRF(fcinfo, 0);

	/*
	 * Get the currently active synchronous standbys.  This could be out of
	 * date before we're done, but we'll use the data anyway.
	 */
	num_standbys = SyncRepGetCandidateStandbys(&sync_standbys);

	for (i = 0; i < max_wal_senders; i++)
	{
		WalSnd	   *walsnd = &WalSndCtl->walsnds[i];
		XLogRecPtr	sent_ptr;
		XLogRecPtr	write;
		XLogRecPtr	flush;
		XLogRecPtr	apply;
		TimeOffset	writeLag;
		TimeOffset	flushLag;
		TimeOffset	applyLag;
		int			priority;
		int			pid;
		WalSndState state;
		TimestampTz replyTime;
		bool		is_sync_standby;
		Datum		values[PG_STAT_GET_WAL_SENDERS_COLS];
		bool		nulls[PG_STAT_GET_WAL_SENDERS_COLS] = {0};
		int			j;

		/* Collect data from shared memory; skip unused slots (pid == 0). */
		SpinLockAcquire(&walsnd->mutex);
		if (walsnd->pid == 0)
		{
			SpinLockRelease(&walsnd->mutex);
			continue;
		}
		pid = walsnd->pid;
		sent_ptr = walsnd->sentPtr;
		state = walsnd->state;
		write = walsnd->write;
		flush = walsnd->flush;
		apply = walsnd->apply;
		writeLag = walsnd->writeLag;
		flushLag = walsnd->flushLag;
		applyLag = walsnd->applyLag;
		priority = walsnd->sync_standby_priority;
		replyTime = walsnd->replyTime;
		SpinLockRelease(&walsnd->mutex);

		/*
		 * Detect whether walsender is/was considered synchronous.  We can
		 * provide some protection against stale data by checking the PID
		 * along with walsnd_index.
		 */
		is_sync_standby = false;
		for (j = 0; j < num_standbys; j++)
		{
			if (sync_standbys[j].walsnd_index == i &&
				sync_standbys[j].pid == pid)
			{
				is_sync_standby = true;
				break;
			}
		}

		values[0] = Int32GetDatum(pid);

		if (!has_privs_of_role(GetUserId(), ROLE_PG_READ_ALL_STATS))
		{
			/*
			 * Only superusers and roles with privileges of pg_read_all_stats
			 * can see details. Other users only get the pid value to know
			 * it's a walsender, but no details.
			 */
			MemSet(&nulls[1], true, PG_STAT_GET_WAL_SENDERS_COLS - 1);
		}
		else
		{
			values[1] = CStringGetTextDatum(WalSndGetStateString(state));

			/*
			 * For each LSN column, mark NULL when the value is invalid; the
			 * Datum is still assigned but ignored when nulls[] is set.
			 */
			if (!XLogRecPtrIsValid(sent_ptr))
				nulls[2] = true;
			values[2] = LSNGetDatum(sent_ptr);

			if (!XLogRecPtrIsValid(write))
				nulls[3] = true;
			values[3] = LSNGetDatum(write);

			if (!XLogRecPtrIsValid(flush))
				nulls[4] = true;
			values[4] = LSNGetDatum(flush);

			if (!XLogRecPtrIsValid(apply))
				nulls[5] = true;
			values[5] = LSNGetDatum(apply);

			/*
			 * Treat a standby such as a pg_basebackup background process
			 * which always returns an invalid flush location, as an
			 * asynchronous standby.
			 */
			priority = XLogRecPtrIsValid(flush) ? priority : 0;

			/* Negative lag values mean "no sample available" -> NULL. */
			if (writeLag < 0)
				nulls[6] = true;
			else
				values[6] = IntervalPGetDatum(offset_to_interval(writeLag));

			if (flushLag < 0)
				nulls[7] = true;
			else
				values[7] = IntervalPGetDatum(offset_to_interval(flushLag));

			if (applyLag < 0)
				nulls[8] = true;
			else
				values[8] = IntervalPGetDatum(offset_to_interval(applyLag));

			values[9] = Int32GetDatum(priority);

			/*
			 * More easily understood version of standby state. This is purely
			 * informational.
			 *
			 * In quorum-based sync replication, the role of each standby
			 * listed in synchronous_standby_names can be changing very
			 * frequently. Any standbys considered as "sync" at one moment can
			 * be switched to "potential" ones at the next moment. So, it's
			 * basically useless to report "sync" or "potential" as their sync
			 * states. We report just "quorum" for them.
			 */
			if (priority == 0)
				values[10] = CStringGetTextDatum("async");
			else if (is_sync_standby)
				values[10] = SyncRepConfig->syncrep_method == SYNC_REP_PRIORITY ?
					CStringGetTextDatum("sync") : CStringGetTextDatum("quorum");
			else
				values[10] = CStringGetTextDatum("potential");

			if (replyTime == 0)
				nulls[11] = true;
			else
				values[11] = TimestampTzGetDatum(replyTime);
		}

		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
							 values, nulls);
	}

	return (Datum) 0;
}
4165 : :
/*
 * Send a keepalive message to standby.
 *
 * If requestReply is set, the message requests the other party to send
 * a message back to us, for heartbeat purposes.  We also set a flag to
 * let nearby code know that we're waiting for that response, to avoid
 * repeated requests.
 *
 * writePtr is the location up to which the WAL is sent. It is essentially
 * the same as sentPtr but in some cases, we need to send keep alive before
 * sentPtr is updated like when skipping empty transactions.
 */
static void
WalSndKeepalive(bool requestReply, XLogRecPtr writePtr)
{
	elog(DEBUG2, "sending replication keepalive");

	/*
	 * Construct the message: tag byte, int64 write position (falling back to
	 * sentPtr when writePtr is invalid), int64 current timestamp, and a byte
	 * indicating whether a reply is requested.  Field order is part of the
	 * replication wire protocol and must not change.
	 */
	resetStringInfo(&output_message);
	pq_sendbyte(&output_message, PqReplMsg_Keepalive);
	pq_sendint64(&output_message, XLogRecPtrIsValid(writePtr) ? writePtr : sentPtr);
	pq_sendint64(&output_message, GetCurrentTimestamp());
	pq_sendbyte(&output_message, requestReply ? 1 : 0);

	/* ... and send it wrapped in CopyData */
	pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);

	/* Set local flag */
	if (requestReply)
		waiting_for_ping_response = true;
}
4197 : :
4198 : : /*
4199 : : * Send keepalive message if too much time has elapsed.
4200 : : */
4201 : : static void
2753 noah@leadboat.com 4202 : 813093 : WalSndKeepaliveIfNecessary(void)
4203 : : {
4204 : : TimestampTz ping_time;
4205 : :
4206 : : /*
4207 : : * Don't send keepalive messages if timeouts are globally disabled or
4208 : : * we're doing something not partaking in timeouts.
4209 : : */
4308 andres@anarazel.de 4210 [ + - + + ]: 813093 : if (wal_sender_timeout <= 0 || last_reply_timestamp <= 0)
4388 rhaas@postgresql.org 4211 : 28 : return;
4212 : :
4213 [ + + ]: 813065 : if (waiting_for_ping_response)
4214 : 5034 : return;
4215 : :
4216 : : /*
4217 : : * If half of wal_sender_timeout has lapsed without receiving any reply
4218 : : * from the standby, send a keep-alive message to the standby requesting
4219 : : * an immediate reply.
4220 : : */
4221 : 808031 : ping_time = TimestampTzPlusMilliseconds(last_reply_timestamp,
4222 : : wal_sender_timeout / 2);
2753 noah@leadboat.com 4223 [ - + ]: 808031 : if (last_processing >= ping_time)
4224 : : {
1446 akapila@postgresql.o 4225 :UBC 0 : WalSndKeepalive(true, InvalidXLogRecPtr);
4226 : :
4227 : : /* Try to flush pending output to the client */
4388 rhaas@postgresql.org 4228 [ # # ]: 0 : if (pq_flush_if_writable() != 0)
4229 : 0 : WalSndShutdown();
4230 : : }
4231 : : }
4232 : :
4233 : : /*
4234 : : * Record the end of the WAL and the time it was flushed locally, so that
4235 : : * LagTrackerRead can compute the elapsed time (lag) when this WAL location is
4236 : : * eventually reported to have been written, flushed and applied by the
4237 : : * standby in a reply message.
4238 : : */
4239 : : static void
3279 simon@2ndQuadrant.co 4240 :CBC 133586 : LagTrackerWrite(XLogRecPtr lsn, TimestampTz local_flush_time)
4241 : : {
4242 : : int new_write_head;
4243 : : int i;
4244 : :
4245 [ - + ]: 133586 : if (!am_walsender)
3279 simon@2ndQuadrant.co 4246 :UBC 0 : return;
4247 : :
4248 : : /*
4249 : : * If the lsn hasn't advanced since last time, then do nothing. This way
4250 : : * we only record a new sample when new WAL has been written.
4251 : : */
2707 tmunro@postgresql.or 4252 [ + + ]:CBC 133586 : if (lag_tracker->last_lsn == lsn)
3279 simon@2ndQuadrant.co 4253 : 111829 : return;
2707 tmunro@postgresql.or 4254 : 21757 : lag_tracker->last_lsn = lsn;
4255 : :
4256 : : /*
4257 : : * If advancing the write head of the circular buffer would crash into any
4258 : : * of the read heads, then the buffer is full. In other words, the
4259 : : * slowest reader (presumably apply) is the one that controls the release
4260 : : * of space.
4261 : : */
4262 : 21757 : new_write_head = (lag_tracker->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
3279 simon@2ndQuadrant.co 4263 [ + + ]: 87028 : for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; ++i)
4264 : : {
4265 : : /*
4266 : : * If the buffer is full, move the slowest reader to a separate
4267 : : * overflow entry and free its space in the buffer so the write head
4268 : : * can advance.
4269 : : */
2707 tmunro@postgresql.or 4270 [ - + ]: 65271 : if (new_write_head == lag_tracker->read_heads[i])
4271 : : {
144 fujii@postgresql.org 4272 :UBC 0 : lag_tracker->overflowed[i] =
4273 : 0 : lag_tracker->buffer[lag_tracker->read_heads[i]];
4274 : 0 : lag_tracker->read_heads[i] = -1;
4275 : : }
4276 : : }
4277 : :
4278 : : /* Store a sample at the current write head position. */
2707 tmunro@postgresql.or 4279 :CBC 21757 : lag_tracker->buffer[lag_tracker->write_head].lsn = lsn;
4280 : 21757 : lag_tracker->buffer[lag_tracker->write_head].time = local_flush_time;
4281 : 21757 : lag_tracker->write_head = new_write_head;
4282 : : }
4283 : :
/*
 * Find out how much time has elapsed between the moment WAL location 'lsn'
 * (or the highest known earlier LSN) was flushed locally and the time 'now'.
 * We have a separate read head for each of the reported LSN locations we
 * receive in replies from standby; 'head' controls which read head is
 * used. Whenever a read head crosses an LSN which was written into the
 * lag buffer with LagTrackerWrite, we can use the associated timestamp to
 * find out the time this LSN (or an earlier one) was flushed locally, and
 * therefore compute the lag.
 *
 * Return -1 if no new sample data is available, and otherwise the elapsed
 * time in microseconds.
 */
static TimeOffset
LagTrackerRead(int head, XLogRecPtr lsn, TimestampTz now)
{
	TimestampTz time = 0;

	/*
	 * If 'lsn' has not passed the WAL position stored in the overflow entry,
	 * return the elapsed time (in microseconds) since the saved local flush
	 * time.  If the flush time is in the future (due to clock drift), return
	 * -1 to treat as no valid sample.
	 *
	 * Otherwise, switch back to using the buffer to control the read head and
	 * compute the elapsed time.  The read head is then reset to point to the
	 * oldest entry in the buffer.
	 */
	if (lag_tracker->read_heads[head] == -1)
	{
		if (lag_tracker->overflowed[head].lsn > lsn)
			return (now >= lag_tracker->overflowed[head].time) ?
				now - lag_tracker->overflowed[head].time : -1;

		time = lag_tracker->overflowed[head].time;
		lag_tracker->last_read[head] = lag_tracker->overflowed[head];
		lag_tracker->read_heads[head] =
			(lag_tracker->write_head + 1) % LAG_TRACKER_BUFFER_SIZE;
	}

	/* Read all unread samples up to this LSN or end of buffer. */
	while (lag_tracker->read_heads[head] != lag_tracker->write_head &&
		   lag_tracker->buffer[lag_tracker->read_heads[head]].lsn <= lsn)
	{
		time = lag_tracker->buffer[lag_tracker->read_heads[head]].time;
		lag_tracker->last_read[head] =
			lag_tracker->buffer[lag_tracker->read_heads[head]];
		lag_tracker->read_heads[head] =
			(lag_tracker->read_heads[head] + 1) % LAG_TRACKER_BUFFER_SIZE;
	}

	/*
	 * If the lag tracker is empty, that means the standby has processed
	 * everything we've ever sent so we should now clear 'last_read'. If we
	 * didn't do that, we'd risk using a stale and irrelevant sample for
	 * interpolation at the beginning of the next burst of WAL after a period
	 * of idleness.
	 */
	if (lag_tracker->read_heads[head] == lag_tracker->write_head)
		lag_tracker->last_read[head].time = 0;

	if (time > now)
	{
		/* If the clock somehow went backwards, treat as not found. */
		return -1;
	}
	else if (time == 0)
	{
		/*
		 * We didn't cross a time. If there is a future sample that we
		 * haven't reached yet, and we've already reached at least one sample,
		 * let's interpolate the local flushed time. This is mainly useful
		 * for reporting a completely stuck apply position as having
		 * increasing lag, since otherwise we'd have to wait for it to
		 * eventually start moving again and cross one of our samples before
		 * we can show the lag increasing.
		 */
		if (lag_tracker->read_heads[head] == lag_tracker->write_head)
		{
			/* There are no future samples, so we can't interpolate. */
			return -1;
		}
		else if (lag_tracker->last_read[head].time != 0)
		{
			/* We can interpolate between last_read and the next sample. */
			double		fraction;
			WalTimeSample prev = lag_tracker->last_read[head];
			WalTimeSample next = lag_tracker->buffer[lag_tracker->read_heads[head]];

			if (lsn < prev.lsn)
			{
				/*
				 * Reported LSNs shouldn't normally go backwards, but it's
				 * possible when there is a timeline change.  Treat as not
				 * found.
				 */
				return -1;
			}

			Assert(prev.lsn < next.lsn);

			if (prev.time > next.time)
			{
				/* If the clock somehow went backwards, treat as not found. */
				return -1;
			}

			/* See how far we are between the previous and next samples. */
			fraction =
				(double) (lsn - prev.lsn) / (double) (next.lsn - prev.lsn);

			/* Scale the local flush time proportionally. */
			time = (TimestampTz)
				((double) prev.time + (next.time - prev.time) * fraction);
		}
		else
		{
			/*
			 * We have only a future sample, implying that we were entirely
			 * caught up, and now there is a new burst of WAL and the
			 * standby hasn't processed the first sample yet. Until the
			 * standby reaches the future sample the best we can do is report
			 * the hypothetical lag if that sample were to be replayed now.
			 */
			time = lag_tracker->buffer[lag_tracker->read_heads[head]].time;
		}
	}

	/* Return the elapsed time since local flush time in microseconds. */
	Assert(time != 0);
	return now - time;
}
|