Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * checkpointer.c
4 : : *
5 : : * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
6 : : * Checkpoints are automatically dispatched after a certain amount of time
7 : : * has elapsed since the last one, and the checkpointer can be signaled to
8 : : * perform requested checkpoints as well. (The GUC parameter that mandates
9 : : * a checkpoint every so many WAL segments is implemented by having backends
10 : : * signal when they fill WAL segments; the checkpointer itself doesn't watch
11 : : * for the condition.)
12 : : *
13 : : * The normal termination sequence is that the checkpointer is instructed to
14 : : * execute the shutdown checkpoint by SIGINT. After that, the checkpointer
15 : : * waits to be terminated via SIGUSR2, which instructs it to exit(0).
16 : : * All backends must be stopped before SIGINT or SIGUSR2 is issued!
17 : : *
18 : : * Emergency termination is by SIGQUIT; like any backend, the checkpointer
19 : : * will simply abort and exit on SIGQUIT.
20 : : *
21 : : * If the checkpointer exits unexpectedly, the postmaster treats that the same
22 : : * as a backend crash: shared memory may be corrupted, so remaining backends
23 : : * should be killed by SIGQUIT and then a recovery cycle started. (Even if
24 : : * shared memory isn't corrupted, we have lost information about which
25 : : * files need to be fsync'd for the next checkpoint, and so a system
26 : : * restart needs to be forced.)
27 : : *
28 : : *
29 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
30 : : *
31 : : *
32 : : * IDENTIFICATION
33 : : * src/backend/postmaster/checkpointer.c
34 : : *
35 : : *-------------------------------------------------------------------------
36 : : */
37 : : #include "postgres.h"
38 : :
39 : : #include <sys/time.h>
40 : : #include <time.h>
41 : :
42 : : #include "access/xlog.h"
43 : : #include "access/xlog_internal.h"
44 : : #include "access/xlogrecovery.h"
45 : : #include "catalog/pg_authid.h"
46 : : #include "commands/defrem.h"
47 : : #include "libpq/pqsignal.h"
48 : : #include "miscadmin.h"
49 : : #include "pgstat.h"
50 : : #include "postmaster/auxprocess.h"
51 : : #include "postmaster/bgwriter.h"
52 : : #include "postmaster/interrupt.h"
53 : : #include "replication/syncrep.h"
54 : : #include "storage/aio_subsys.h"
55 : : #include "storage/bufmgr.h"
56 : : #include "storage/condition_variable.h"
57 : : #include "storage/fd.h"
58 : : #include "storage/ipc.h"
59 : : #include "storage/lwlock.h"
60 : : #include "storage/pmsignal.h"
61 : : #include "storage/proc.h"
62 : : #include "storage/procsignal.h"
63 : : #include "storage/shmem.h"
64 : : #include "storage/smgr.h"
65 : : #include "storage/spin.h"
66 : : #include "utils/acl.h"
67 : : #include "utils/guc.h"
68 : : #include "utils/memutils.h"
69 : : #include "utils/resowner.h"
70 : :
71 : :
72 : : /*----------
73 : : * Shared memory area for communication between checkpointer and backends
74 : : *
75 : : * The ckpt counters allow backends to watch for completion of a checkpoint
76 : : * request they send. Here's how it works:
77 : : * * At start of a checkpoint, checkpointer reads (and clears) the request
78 : : * flags and increments ckpt_started, while holding ckpt_lck.
79 : : * * On completion of a checkpoint, checkpointer sets ckpt_done to
80 : : * equal ckpt_started.
81 : : * * On failure of a checkpoint, checkpointer increments ckpt_failed
82 : : * and sets ckpt_done to equal ckpt_started.
83 : : *
84 : : * The algorithm for backends is:
85 : : * 1. Record current values of ckpt_failed and ckpt_started, and
86 : : * set request flags, while holding ckpt_lck.
87 : : * 2. Send signal to request checkpoint.
88 : : * 3. Sleep until ckpt_started changes. Now you know a checkpoint has
89 : : * begun since you started this algorithm (although *not* that it was
90 : : * specifically initiated by your signal), and that it is using your flags.
91 : : * 4. Record new value of ckpt_started.
92 : : * 5. Sleep until ckpt_done >= saved value of ckpt_started. (Use modulo
93 : : * arithmetic here in case counters wrap around.) Now you know a
94 : : * checkpoint has started and completed, but not whether it was
95 : : * successful.
96 : : * 6. If ckpt_failed is different from the originally saved value,
97 : : * assume request failed; otherwise it was definitely successful.
98 : : *
99 : : * ckpt_flags holds the OR of the checkpoint request flags sent by all
100 : : * requesting backends since the last checkpoint start. The flags are
101 : : * chosen so that OR'ing is the correct way to combine multiple requests.
102 : : *
103 : : * The requests array holds fsync requests sent by backends and not yet
104 : : * absorbed by the checkpointer.
105 : : *
106 : : * Unlike the checkpoint fields, the request-related fields are protected by
107 : : * CheckpointerCommLock.
108 : : *----------
109 : : */
110 : : typedef struct
111 : : {
112 : : SyncRequestType type; /* request type */
113 : : FileTag ftag; /* file identifier */
114 : : } CheckpointerRequest;
115 : :
116 : : typedef struct
117 : : {
118 : : pid_t checkpointer_pid; /* PID (0 if not started) */
119 : :
120 : : slock_t ckpt_lck; /* protects all the ckpt_* fields */
121 : :
122 : : int ckpt_started; /* advances when checkpoint starts */
123 : : int ckpt_done; /* advances when checkpoint done */
124 : : int ckpt_failed; /* advances when checkpoint fails */
125 : :
126 : : int ckpt_flags; /* checkpoint flags, as defined in xlog.h */
127 : :
128 : : ConditionVariable start_cv; /* signaled when ckpt_started advances */
129 : : ConditionVariable done_cv; /* signaled when ckpt_done advances */
130 : :
131 : : int num_requests; /* current # of requests */
132 : : int max_requests; /* allocated array size */
133 : :
134 : : int head; /* Index of the first request in the ring
135 : : * buffer */
136 : : int tail; /* Index of the last request in the ring
137 : : * buffer */
138 : :
139 : : /* The ring buffer of pending checkpointer requests */
140 : : CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
141 : : } CheckpointerShmemStruct;
142 : :
143 : : static CheckpointerShmemStruct *CheckpointerShmem;
144 : :
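/*
 * Illustrative sketch, kept out of the build by "#if 0": a condensed version
 * of the backend-side algorithm described in the comment above
 * CheckpointerShmemStruct.  RequestCheckpoint() further down in this file is
 * the real implementation; the function name here is hypothetical and the
 * sketch is only a reading aid.
 */
#if 0
static void
WaitForRequestedCheckpoint(int flags)
{
	int			old_started,
				old_failed,
				new_started;

	/* 1. Snapshot the counters and set request flags under the spinlock. */
	SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
	old_failed = CheckpointerShmem->ckpt_failed;
	old_started = CheckpointerShmem->ckpt_started;
	CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);
	SpinLockRelease(&CheckpointerShmem->ckpt_lck);

	/* 2. Wake the checkpointer, if it is running. */
	if (ProcGlobal->checkpointerProc != INVALID_PROC_NUMBER)
		SetLatch(&GetPGProcByNumber(ProcGlobal->checkpointerProc)->procLatch);

	/* 3-4. Wait until a checkpoint that saw our flags has started. */
	ConditionVariablePrepareToSleep(&CheckpointerShmem->start_cv);
	for (;;)
	{
		SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
		new_started = CheckpointerShmem->ckpt_started;
		SpinLockRelease(&CheckpointerShmem->ckpt_lck);
		if (new_started != old_started)
			break;
		ConditionVariableSleep(&CheckpointerShmem->start_cv,
							   WAIT_EVENT_CHECKPOINT_START);
	}
	ConditionVariableCancelSleep();

	/* 5. Wait until ckpt_done catches up, using a wraparound-safe compare. */
	ConditionVariablePrepareToSleep(&CheckpointerShmem->done_cv);
	for (;;)
	{
		int			new_done,
					new_failed;

		SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
		new_done = CheckpointerShmem->ckpt_done;
		new_failed = CheckpointerShmem->ckpt_failed;
		SpinLockRelease(&CheckpointerShmem->ckpt_lck);
		if (new_done - new_started >= 0)
		{
			/* 6. Any advance of ckpt_failed means our request failed. */
			if (new_failed != old_failed)
				elog(ERROR, "checkpoint request failed");
			break;
		}
		ConditionVariableSleep(&CheckpointerShmem->done_cv,
							   WAIT_EVENT_CHECKPOINT_DONE);
	}
	ConditionVariableCancelSleep();
}
#endif
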
145 : : /* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
146 : : #define WRITES_PER_ABSORB 1000
147 : :
148 : : /* Maximum number of checkpointer requests to process in one batch */
149 : : #define CKPT_REQ_BATCH_SIZE 10000
150 : :
151 : : /* Max number of requests the checkpointer request queue can hold */
152 : : #define MAX_CHECKPOINT_REQUESTS 10000000
153 : :
154 : : /*
155 : : * GUC parameters
156 : : */
157 : : int CheckPointTimeout = 300;
158 : : int CheckPointWarning = 30;
159 : : double CheckPointCompletionTarget = 0.9;
160 : :
161 : : /*
162 : : * Private state
163 : : */
164 : : static bool ckpt_active = false;
165 : : static volatile sig_atomic_t ShutdownXLOGPending = false;
166 : :
167 : : /* these values are valid when ckpt_active is true: */
168 : : static pg_time_t ckpt_start_time;
169 : : static XLogRecPtr ckpt_start_recptr;
170 : : static double ckpt_cached_elapsed;
171 : :
172 : : static pg_time_t last_checkpoint_time;
173 : : static pg_time_t last_xlog_switch_time;
174 : :
175 : : /* Prototypes for private functions */
176 : :
177 : : static void ProcessCheckpointerInterrupts(void);
178 : : static void CheckArchiveTimeout(void);
179 : : static bool IsCheckpointOnSchedule(double progress);
180 : : static bool FastCheckpointRequested(void);
181 : : static bool CompactCheckpointerRequestQueue(void);
182 : : static void UpdateSharedMemoryConfig(void);
183 : :
184 : : /* Signal handlers */
185 : : static void ReqShutdownXLOG(SIGNAL_ARGS);
186 : :
187 : :
188 : : /*
189 : : * Main entry point for checkpointer process
190 : : *
191 : : * This is invoked from AuxiliaryProcessMain, which has already created the
192 : : * basic execution environment, but not enabled signals yet.
193 : : */
194 : : void
197 peter@eisentraut.org 195 :CBC 491 : CheckpointerMain(const void *startup_data, size_t startup_data_len)
196 : : {
197 : : sigjmp_buf local_sigjmp_buf;
198 : : MemoryContext checkpointer_context;
199 : :
537 heikki.linnakangas@i 200 [ - + ]: 491 : Assert(startup_data_len == 0);
201 : :
202 : 491 : MyBackendType = B_CHECKPOINTER;
203 : 491 : AuxiliaryProcessMainCommon();
204 : :
4868 simon@2ndQuadrant.co 205 : 491 : CheckpointerShmem->checkpointer_pid = MyProcPid;
206 : :
207 : : /*
208 : : * Properly accept or ignore signals the postmaster might send us
209 : : *
210 : : * Note: we deliberately ignore SIGTERM, because during a standard Unix
211 : : * system shutdown cycle, init will SIGTERM all processes at once. We
212 : : * want to wait for the backends to exit, whereupon the postmaster will
213 : : * tell us it's okay to shut down (via SIGUSR2).
214 : : */
2090 rhaas@postgresql.org 215 : 491 : pqsignal(SIGHUP, SignalHandlerForConfigReload);
224 andres@anarazel.de 216 : 491 : pqsignal(SIGINT, ReqShutdownXLOG);
4836 bruce@momjian.us 217 : 491 : pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
218 : : /* SIGQUIT handler was already set up by InitPostmasterChild */
5058 simon@2ndQuadrant.co 219 : 491 : pqsignal(SIGALRM, SIG_IGN);
220 : 491 : pqsignal(SIGPIPE, SIG_IGN);
2112 rhaas@postgresql.org 221 : 491 : pqsignal(SIGUSR1, procsignal_sigusr1_handler);
2090 222 : 491 : pqsignal(SIGUSR2, SignalHandlerForShutdownRequest);
223 : :
224 : : /*
225 : : * Reset some signals that are accepted by postmaster but not here
226 : : */
5058 simon@2ndQuadrant.co 227 : 491 : pqsignal(SIGCHLD, SIG_DFL);
228 : :
229 : : /*
230 : : * Initialize so that first time-driven event happens at the correct time.
231 : : */
232 : 491 : last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);
233 : :
234 : : /*
235 : : * Write out stats after shutdown. This needs to be called by exactly one
236 : : * process during a normal shutdown, and since checkpointer is shut down
237 : : * very late...
238 : : *
239 : : * While e.g. walsenders are active after the shutdown checkpoint has been
240 : : * written (and thus could produce more stats), checkpointer stays around
241 : : * after the shutdown checkpoint has been written. postmaster will only
242 : : * signal checkpointer to exit after all processes that could emit stats
243 : : * have been shut down.
244 : : */
1249 andres@anarazel.de 245 : 491 : before_shmem_exit(pgstat_before_server_shutdown, 0);
246 : :
247 : : /*
248 : : * Create a memory context that we will do all our work in. We do this so
249 : : * that we can reset the context during error recovery and thereby avoid
250 : : * possible memory leaks. Formerly this code just ran in
251 : : * TopMemoryContext, but resetting that would be a really bad idea.
252 : : */
5058 simon@2ndQuadrant.co 253 : 491 : checkpointer_context = AllocSetContextCreate(TopMemoryContext,
254 : : "Checkpointer",
255 : : ALLOCSET_DEFAULT_SIZES);
256 : 491 : MemoryContextSwitchTo(checkpointer_context);
257 : :
258 : : /*
259 : : * If an exception is encountered, processing resumes here.
260 : : *
261 : : * You might wonder why this isn't coded as an infinite loop around a
262 : : * PG_TRY construct. The reason is that this is the bottom of the
263 : : * exception stack, and so with PG_TRY there would be no exception handler
264 : : * in force at all during the CATCH part. By leaving the outermost setjmp
265 : : * always active, we have at least some chance of recovering from an error
266 : : * during error recovery. (If we get into an infinite loop thereby, it
267 : : * will soon be stopped by overflow of elog.c's internal state stack.)
268 : : *
269 : : * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
270 : : * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus,
271 : : * signals other than SIGQUIT will be blocked until we complete error
272 : : * recovery. It might seem that this policy makes the HOLD_INTERRUPTS()
273 : : * call redundant, but it is not since InterruptPending might be set
274 : : * already.
275 : : */
276 [ - + ]: 491 : if (sigsetjmp(local_sigjmp_buf, 1) != 0)
277 : : {
278 : : /* Since not using PG_TRY, must reset error stack by hand */
5058 simon@2ndQuadrant.co 279 :UBC 0 : error_context_stack = NULL;
280 : :
281 : : /* Prevent interrupts while cleaning up */
282 : 0 : HOLD_INTERRUPTS();
283 : :
284 : : /* Report the error to the server log */
285 : 0 : EmitErrorReport();
286 : :
287 : : /*
288 : : * These operations are really just a minimal subset of
289 : : * AbortTransaction(). We don't have very many resources to worry
290 : : * about in checkpointer, but we do have LWLocks, buffers, and temp
291 : : * files.
292 : : */
293 : 0 : LWLockReleaseAll();
3210 rhaas@postgresql.org 294 : 0 : ConditionVariableCancelSleep();
3467 295 : 0 : pgstat_report_wait_end();
173 andres@anarazel.de 296 : 0 : pgaio_error_cleanup();
5058 simon@2ndQuadrant.co 297 : 0 : UnlockBuffers();
2607 tgl@sss.pgh.pa.us 298 : 0 : ReleaseAuxProcessResources(false);
5058 simon@2ndQuadrant.co 299 : 0 : AtEOXact_Buffers(false);
4707 tgl@sss.pgh.pa.us 300 : 0 : AtEOXact_SMgr();
2688 301 : 0 : AtEOXact_Files(false);
5058 simon@2ndQuadrant.co 302 : 0 : AtEOXact_HashTables(false);
303 : :
304 : : /* Warn any waiting backends that the checkpoint failed. */
305 [ # # ]: 0 : if (ckpt_active)
306 : : {
3623 rhaas@postgresql.org 307 [ # # ]: 0 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
308 : 0 : CheckpointerShmem->ckpt_failed++;
309 : 0 : CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
310 : 0 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
311 : :
2345 tmunro@postgresql.or 312 : 0 : ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
313 : :
5058 simon@2ndQuadrant.co 314 : 0 : ckpt_active = false;
315 : : }
316 : :
317 : : /*
318 : : * Now return to normal top-level context and clear ErrorContext for
319 : : * next time.
320 : : */
321 : 0 : MemoryContextSwitchTo(checkpointer_context);
322 : 0 : FlushErrorState();
323 : :
324 : : /* Flush any leaked data in the top-level context */
661 nathan@postgresql.or 325 : 0 : MemoryContextReset(checkpointer_context);
326 : :
327 : : /* Now we can allow interrupts again */
5058 simon@2ndQuadrant.co 328 [ # # ]: 0 : RESUME_INTERRUPTS();
329 : :
330 : : /*
331 : : * Sleep at least 1 second after any error. A write error is likely
332 : : * to be repeated, and we don't want to be filling the error logs as
333 : : * fast as we can.
334 : : */
335 : 0 : pg_usleep(1000000L);
336 : : }
337 : :
338 : : /* We can now handle ereport(ERROR) */
5058 simon@2ndQuadrant.co 339 :CBC 491 : PG_exception_stack = &local_sigjmp_buf;
340 : :
341 : : /*
342 : : * Unblock signals (they were blocked when the postmaster forked us)
343 : : */
946 tmunro@postgresql.or 344 : 491 : sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);
345 : :
346 : : /*
347 : : * Ensure all shared memory values are set correctly for the config. Doing
348 : : * this here ensures no race conditions from other concurrent updaters.
349 : : */
4973 simon@2ndQuadrant.co 350 : 491 : UpdateSharedMemoryConfig();
351 : :
352 : : /*
353 : : * Advertise our proc number that backends can use to wake us up while
354 : : * we're sleeping.
355 : : */
309 heikki.linnakangas@i 356 : 491 : ProcGlobal->checkpointerProc = MyProcNumber;
357 : :
358 : : /*
359 : : * Loop until we've been asked to write the shutdown checkpoint or
360 : : * terminate.
361 : : */
362 : : for (;;)
5058 simon@2ndQuadrant.co 363 : 3459 : {
364 : 3950 : bool do_checkpoint = false;
365 : 3950 : int flags = 0;
366 : : pg_time_t now;
367 : : int elapsed_secs;
368 : : int cur_timeout;
621 akorotkov@postgresql 369 : 3950 : bool chkpt_or_rstpt_requested = false;
370 : 3950 : bool chkpt_or_rstpt_timed = false;
371 : :
372 : : /* Clear any already-pending wakeups */
3888 andres@anarazel.de 373 : 3950 : ResetLatch(MyLatch);
374 : :
375 : : /*
376 : : * Process any requests or signals received recently.
377 : : */
2347 tmunro@postgresql.or 378 : 3950 : AbsorbSyncRequests();
379 : :
185 heikki.linnakangas@i 380 : 3950 : ProcessCheckpointerInterrupts();
224 andres@anarazel.de 381 [ + + + - ]: 3950 : if (ShutdownXLOGPending || ShutdownRequestPending)
382 : : break;
383 : :
384 : : /*
385 : : * Detect a pending checkpoint request by checking whether the flags
386 : : * word in shared memory is nonzero. We shouldn't need to acquire the
387 : : * ckpt_lck for this.
388 : : */
2363 tgl@sss.pgh.pa.us 389 [ + + ]: 3468 : if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
390 : : {
391 : 1259 : do_checkpoint = true;
621 akorotkov@postgresql 392 : 1259 : chkpt_or_rstpt_requested = true;
393 : : }
394 : :
395 : : /*
396 : : * Force a checkpoint if too much time has elapsed since the last one.
397 : : * Note that we count a timed checkpoint in stats only when this
398 : : * occurs without an external request, but we set the CAUSE_TIME flag
399 : : * bit even if there is also an external request.
400 : : */
5058 simon@2ndQuadrant.co 401 : 3468 : now = (pg_time_t) time(NULL);
402 : 3468 : elapsed_secs = now - last_checkpoint_time;
403 [ - + ]: 3468 : if (elapsed_secs >= CheckPointTimeout)
404 : : {
5058 simon@2ndQuadrant.co 405 [ # # ]:UBC 0 : if (!do_checkpoint)
621 akorotkov@postgresql 406 : 0 : chkpt_or_rstpt_timed = true;
5058 simon@2ndQuadrant.co 407 : 0 : do_checkpoint = true;
408 : 0 : flags |= CHECKPOINT_CAUSE_TIME;
409 : : }
410 : :
411 : : /*
412 : : * Do a checkpoint if requested.
413 : : */
5058 simon@2ndQuadrant.co 414 [ + + ]:CBC 3468 : if (do_checkpoint)
415 : : {
416 : 1259 : bool ckpt_performed = false;
417 : : bool do_restartpoint;
418 : :
419 : : /* Check if we should perform a checkpoint or a restartpoint. */
420 : 1259 : do_restartpoint = RecoveryInProgress();
421 : :
422 : : /*
423 : : * Atomically fetch the request flags to figure out what kind of a
424 : : * checkpoint we should perform, and increase the started-counter
425 : : * to acknowledge that we've started a new checkpoint.
426 : : */
3623 rhaas@postgresql.org 427 [ - + ]: 1259 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
428 : 1259 : flags |= CheckpointerShmem->ckpt_flags;
429 : 1259 : CheckpointerShmem->ckpt_flags = 0;
430 : 1259 : CheckpointerShmem->ckpt_started++;
431 : 1259 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
432 : :
2368 tmunro@postgresql.or 433 : 1259 : ConditionVariableBroadcast(&CheckpointerShmem->start_cv);
434 : :
435 : : /*
436 : : * The end-of-recovery checkpoint is a real checkpoint that's
437 : : * performed while we're still in recovery.
438 : : */
5058 simon@2ndQuadrant.co 439 [ + + ]: 1259 : if (flags & CHECKPOINT_END_OF_RECOVERY)
440 : 19 : do_restartpoint = false;
441 : :
621 akorotkov@postgresql 442 [ - + ]: 1259 : if (chkpt_or_rstpt_timed)
443 : : {
621 akorotkov@postgresql 444 :UBC 0 : chkpt_or_rstpt_timed = false;
445 [ # # ]: 0 : if (do_restartpoint)
446 : 0 : PendingCheckpointerStats.restartpoints_timed++;
447 : : else
448 : 0 : PendingCheckpointerStats.num_timed++;
449 : : }
450 : :
621 akorotkov@postgresql 451 [ + - ]:CBC 1259 : if (chkpt_or_rstpt_requested)
452 : : {
453 : 1259 : chkpt_or_rstpt_requested = false;
454 [ + + ]: 1259 : if (do_restartpoint)
455 : 536 : PendingCheckpointerStats.restartpoints_requested++;
456 : : else
457 : 723 : PendingCheckpointerStats.num_requested++;
458 : : }
459 : :
460 : : /*
461 : : * We will warn if (a) too soon since last checkpoint (whatever
462 : : * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
463 : : * since the last checkpoint start. Note in particular that this
464 : : * implementation will not generate warnings caused by
465 : : * CheckPointTimeout < CheckPointWarning.
466 : : */
5058 simon@2ndQuadrant.co 467 [ + + ]: 1259 : if (!do_restartpoint &&
468 [ + + ]: 723 : (flags & CHECKPOINT_CAUSE_XLOG) &&
469 [ + + ]: 190 : elapsed_secs < CheckPointWarning)
470 [ + - ]: 189 : ereport(LOG,
471 : : (errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
472 : : "checkpoints are occurring too frequently (%d seconds apart)",
473 : : elapsed_secs,
474 : : elapsed_secs),
475 : : errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
476 : :
477 : : /*
478 : : * Initialize checkpointer-private variables used during
479 : : * checkpoint.
480 : : */
481 : 1259 : ckpt_active = true;
3722 heikki.linnakangas@i 482 [ + + ]: 1259 : if (do_restartpoint)
483 : 536 : ckpt_start_recptr = GetXLogReplayRecPtr(NULL);
484 : : else
5058 simon@2ndQuadrant.co 485 : 723 : ckpt_start_recptr = GetInsertRecPtr();
486 : 1259 : ckpt_start_time = now;
487 : 1259 : ckpt_cached_elapsed = 0;
488 : :
489 : : /*
490 : : * Do the checkpoint.
491 : : */
492 [ + + ]: 1259 : if (!do_restartpoint)
341 fujii@postgresql.org 493 : 723 : ckpt_performed = CreateCheckPoint(flags);
494 : : else
5058 simon@2ndQuadrant.co 495 : 536 : ckpt_performed = CreateRestartPoint(flags);
496 : :
497 : : /*
498 : : * After any checkpoint, free all smgr objects. Otherwise we
499 : : * would never do so for dropped relations, as the checkpointer
500 : : * does not process shared invalidation messages or call
501 : : * AtEOXact_SMgr().
502 : : */
584 heikki.linnakangas@i 503 : 1259 : smgrdestroyall();
504 : :
505 : : /*
506 : : * Indicate checkpoint completion to any waiting backends.
507 : : */
3623 rhaas@postgresql.org 508 [ - + ]: 1259 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
509 : 1259 : CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
510 : 1259 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
511 : :
2368 tmunro@postgresql.or 512 : 1259 : ConditionVariableBroadcast(&CheckpointerShmem->done_cv);
513 : :
341 fujii@postgresql.org 514 [ + + ]: 1259 : if (!do_restartpoint)
515 : : {
516 : : /*
517 : : * Note we record the checkpoint start time not end time as
518 : : * last_checkpoint_time. This is so that time-driven
519 : : * checkpoints happen at a predictable spacing.
520 : : */
5058 simon@2ndQuadrant.co 521 : 723 : last_checkpoint_time = now;
522 : :
341 fujii@postgresql.org 523 [ + - ]: 723 : if (ckpt_performed)
524 : 723 : PendingCheckpointerStats.num_performed++;
525 : : }
526 : : else
527 : : {
528 [ + + ]: 536 : if (ckpt_performed)
529 : : {
530 : : /*
531 : : * The same as for checkpoint. Please see the
532 : : * corresponding comment.
533 : : */
534 : 176 : last_checkpoint_time = now;
535 : :
536 : 176 : PendingCheckpointerStats.restartpoints_performed++;
537 : : }
538 : : else
539 : : {
540 : : /*
541 : : * We were not able to perform the restartpoint
542 : : * (checkpoints throw an ERROR in case of error). Most
543 : : * likely because we have not received any new checkpoint
544 : : * WAL records since the last restartpoint. Try again in
545 : : * 15 s.
546 : : */
547 : 360 : last_checkpoint_time = now - CheckPointTimeout + 15;
548 : : }
549 : : }
550 : :
5058 simon@2ndQuadrant.co 551 : 1259 : ckpt_active = false;
552 : :
553 : : /*
554 : : * We may have received an interrupt during the checkpoint and the
555 : : * latch might have been reset (e.g. in CheckpointWriteDelay).
556 : : */
185 heikki.linnakangas@i 557 : 1259 : ProcessCheckpointerInterrupts();
224 andres@anarazel.de 558 [ + + + - ]: 1259 : if (ShutdownXLOGPending || ShutdownRequestPending)
559 : : break;
560 : : }
561 : :
562 : : /* Check for archive_timeout and switch xlog files if necessary. */
4869 tgl@sss.pgh.pa.us 563 : 3462 : CheckArchiveTimeout();
564 : :
565 : : /* Report pending statistics to the cumulative stats system */
1249 andres@anarazel.de 566 : 3462 : pgstat_report_checkpointer();
567 : 3462 : pgstat_report_wal(true);
568 : :
569 : : /*
570 : : * If any checkpoint flags have been set, redo the loop to handle the
571 : : * checkpoint without sleeping.
572 : : */
1956 alvherre@alvh.no-ip. 573 [ + + ]: 3462 : if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
574 : 349 : continue;
575 : :
576 : : /*
577 : : * Sleep until we are signaled or it's time for another checkpoint or
578 : : * xlog file switch.
579 : : */
4869 tgl@sss.pgh.pa.us 580 : 3113 : now = (pg_time_t) time(NULL);
581 : 3113 : elapsed_secs = now - last_checkpoint_time;
582 [ - + ]: 3113 : if (elapsed_secs >= CheckPointTimeout)
4869 tgl@sss.pgh.pa.us 583 :UBC 0 : continue; /* no sleep for us ... */
4869 tgl@sss.pgh.pa.us 584 :CBC 3113 : cur_timeout = CheckPointTimeout - elapsed_secs;
585 [ - + - - ]: 3113 : if (XLogArchiveTimeout > 0 && !RecoveryInProgress())
586 : : {
4869 tgl@sss.pgh.pa.us 587 :UBC 0 : elapsed_secs = now - last_xlog_switch_time;
588 [ # # ]: 0 : if (elapsed_secs >= XLogArchiveTimeout)
589 : 0 : continue; /* no sleep for us ... */
590 : 0 : cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
591 : : }
592 : :
2479 tmunro@postgresql.or 593 :CBC 3113 : (void) WaitLatch(MyLatch,
594 : : WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
595 : : cur_timeout * 1000L /* convert to ms */ ,
596 : : WAIT_EVENT_CHECKPOINTER_MAIN);
597 : : }
598 : :
599 : : /*
600 : : * From here on, elog(ERROR) should end with exit(1), not send control
601 : : * back to the sigsetjmp block above.
602 : : */
224 andres@anarazel.de 603 : 488 : ExitOnAnyError = true;
604 : :
605 [ + - ]: 488 : if (ShutdownXLOGPending)
606 : : {
607 : : /*
608 : : * Close down the database.
609 : : *
610 : : * Since ShutdownXLOG() creates restartpoint or checkpoint, and
611 : : * updates the statistics, increment the checkpoint request and flush
612 : : * out pending statistic.
613 : : */
614 : 488 : PendingCheckpointerStats.num_requested++;
615 : 488 : ShutdownXLOG(0, 0);
616 : 488 : pgstat_report_checkpointer();
617 : 488 : pgstat_report_wal(true);
618 : :
619 : : /*
620 : : * Tell postmaster that we're done.
621 : : */
622 : 488 : SendPostmasterSignal(PMSIGNAL_XLOG_IS_SHUTDOWN);
623 : 488 : ShutdownXLOGPending = false;
624 : : }
625 : :
626 : : /*
627 : : * Wait until we're asked to shut down. By separating the writing of the
628 : : * shutdown checkpoint from checkpointer exiting, checkpointer can perform
629 : : * some should-be-as-late-as-possible work like writing out stats.
630 : : */
631 : : for (;;)
632 : : {
633 : : /* Clear any already-pending wakeups */
634 : 1012 : ResetLatch(MyLatch);
635 : :
185 heikki.linnakangas@i 636 : 1012 : ProcessCheckpointerInterrupts();
637 : :
224 andres@anarazel.de 638 [ + + ]: 1012 : if (ShutdownRequestPending)
639 : 488 : break;
640 : :
641 : 524 : (void) WaitLatch(MyLatch,
642 : : WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
643 : : 0,
644 : : WAIT_EVENT_CHECKPOINTER_SHUTDOWN);
645 : : }
646 : :
647 : : /* Normal exit from the checkpointer is here */
648 : 488 : proc_exit(0); /* done */
649 : : }
650 : :
651 : : /*
652 : : * Process any new interrupts.
653 : : */
654 : : static void
185 heikki.linnakangas@i 655 : 6221 : ProcessCheckpointerInterrupts(void)
656 : : {
2088 rhaas@postgresql.org 657 [ + + ]: 6221 : if (ProcSignalBarrierPending)
658 : 60 : ProcessProcSignalBarrier();
659 : :
2090 660 [ + + ]: 6221 : if (ConfigReloadPending)
661 : : {
662 : 57 : ConfigReloadPending = false;
663 : 57 : ProcessConfigFile(PGC_SIGHUP);
664 : :
665 : : /*
666 : : * Checkpointer is the last process to shut down, so we ask it to hold
667 : : * the keys for a range of other required tasks, most of which have
668 : : * nothing to do with checkpointing at all.
669 : : *
670 : : * For various reasons, some config values can change dynamically so
671 : : * the primary copy of them is held in shared memory to make sure all
672 : : * backends see the same value. We make Checkpointer responsible for
673 : : * updating the shared memory copy if the parameter setting changes
674 : : * because of SIGHUP.
675 : : */
676 : 57 : UpdateSharedMemoryConfig();
677 : : }
678 : :
679 : : /* Perform logging of memory contexts of this process */
1334 fujii@postgresql.org 680 [ + + ]: 6221 : if (LogMemoryContextPending)
681 : 1 : ProcessLogMemoryContextInterrupt();
2090 rhaas@postgresql.org 682 : 6221 : }
683 : :
684 : : /*
685 : : * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
686 : : *
687 : : * This will switch to a new WAL file and force an archive file write if
688 : : * meaningful activity is recorded in the current WAL file. This includes most
689 : : * writes, including just a single checkpoint record, but excludes WAL records
690 : : * that were inserted with the XLOG_MARK_UNIMPORTANT flag set (like
691 : : * snapshots of running transactions). Such records, depending on
692 : : * configuration, occur at regular intervals and don't contain important
693 : : * information. This avoids generating archives that contain only a few
694 : : * unimportant records.
695 : : */
696 : : static void
5058 simon@2ndQuadrant.co 697 : 10802 : CheckArchiveTimeout(void)
698 : : {
699 : : pg_time_t now;
700 : : pg_time_t last_time;
701 : : XLogRecPtr last_switch_lsn;
702 : :
703 [ - + - - ]: 10802 : if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
704 : 10802 : return;
705 : :
5058 simon@2ndQuadrant.co 706 :UBC 0 : now = (pg_time_t) time(NULL);
707 : :
708 : : /* First we do a quick check using possibly-stale local state. */
709 [ # # ]: 0 : if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
710 : 0 : return;
711 : :
712 : : /*
713 : : * Update local state ... note that last_xlog_switch_time is the last time
714 : : * a switch was performed *or requested*.
715 : : */
3180 andres@anarazel.de 716 : 0 : last_time = GetLastSegSwitchData(&last_switch_lsn);
717 : :
5058 simon@2ndQuadrant.co 718 : 0 : last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
719 : :
720 : : /* Now we can do the real checks */
721 [ # # ]: 0 : if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
722 : : {
723 : : /*
724 : : * Switch segment only when "important" WAL has been logged since the
725 : : * last segment switch (last_switch_lsn points to the end of the segment
726 : : * the last switch occurred in).
727 : : */
3180 andres@anarazel.de 728 [ # # ]: 0 : if (GetLastImportantRecPtr() > last_switch_lsn)
729 : : {
730 : : XLogRecPtr switchpoint;
731 : :
732 : : /* mark switch as unimportant, avoids triggering checkpoints */
733 : 0 : switchpoint = RequestXLogSwitch(true);
734 : :
735 : : /*
736 : : * If the returned pointer points exactly to a segment boundary,
737 : : * assume nothing happened.
738 : : */
2909 739 [ # # ]: 0 : if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0)
367 michael@paquier.xyz 740 [ # # ]: 0 : elog(DEBUG1, "write-ahead log switch forced (\"archive_timeout\"=%d)",
741 : : XLogArchiveTimeout);
742 : : }
743 : :
744 : : /*
745 : : * Update state in any case, so we don't retry constantly when the
746 : : * system is idle.
747 : : */
5058 simon@2ndQuadrant.co 748 : 0 : last_xlog_switch_time = now;
749 : : }
750 : : }
751 : :
752 : : /*
753 : : * Returns true if a fast checkpoint request is pending. (Note that this does
754 : : * not check the *current* checkpoint's FAST flag, but whether there is one
755 : : * pending behind it.)
756 : : */
757 : : static bool
57 nathan@postgresql.or 758 :GNC 50024 : FastCheckpointRequested(void)
759 : : {
2363 tgl@sss.pgh.pa.us 760 :CBC 50024 : volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
761 : :
762 : : /*
763 : : * We don't need to acquire the ckpt_lck in this case because we're only
764 : : * looking at a single flag bit.
765 : : */
57 nathan@postgresql.or 766 [ + + ]:GNC 50024 : if (cps->ckpt_flags & CHECKPOINT_FAST)
2363 tgl@sss.pgh.pa.us 767 :CBC 3495 : return true;
5058 simon@2ndQuadrant.co 768 : 46529 : return false;
769 : : }
770 : :
771 : : /*
772 : : * CheckpointWriteDelay -- control rate of checkpoint
773 : : *
774 : : * This function is called after each page write performed by BufferSync().
775 : : * It is responsible for throttling BufferSync()'s write rate to hit
776 : : * checkpoint_completion_target.
777 : : *
778 : : * The checkpoint request flags should be passed in; currently the only one
779 : : * examined is CHECKPOINT_FAST, which disables delays between writes.
780 : : *
781 : : * 'progress' is an estimate of how much of the work has been done, as a
782 : : * fraction between 0.0 meaning none, and 1.0 meaning all done.
783 : : */
784 : : void
785 : 287041 : CheckpointWriteDelay(int flags, double progress)
786 : : {
787 : : static int absorb_counter = WRITES_PER_ABSORB;
788 : :
789 : : /* Do nothing if checkpoint is being executed by non-checkpointer process */
4798 tgl@sss.pgh.pa.us 790 [ + + ]: 287041 : if (!AmCheckpointerProcess())
5058 simon@2ndQuadrant.co 791 : 51595 : return;
792 : :
793 : : /*
794 : : * Perform the usual duties and take a nap, unless we're behind schedule,
795 : : * in which case we just try to catch up as quickly as possible.
796 : : */
57 nathan@postgresql.or 797 [ + + ]:GNC 235446 : if (!(flags & CHECKPOINT_FAST) &&
224 andres@anarazel.de 798 [ + + ]:CBC 50260 : !ShutdownXLOGPending &&
2090 rhaas@postgresql.org 799 [ + - ]: 50024 : !ShutdownRequestPending &&
57 nathan@postgresql.or 800 [ + + + + ]:GNC 96553 : !FastCheckpointRequested() &&
5058 simon@2ndQuadrant.co 801 :CBC 46529 : IsCheckpointOnSchedule(progress))
802 : : {
2090 rhaas@postgresql.org 803 [ - + ]: 7340 : if (ConfigReloadPending)
804 : : {
2090 rhaas@postgresql.org 805 :UBC 0 : ConfigReloadPending = false;
5058 simon@2ndQuadrant.co 806 : 0 : ProcessConfigFile(PGC_SIGHUP);
807 : : /* update shmem copies of config variables */
4968 808 : 0 : UpdateSharedMemoryConfig();
809 : : }
810 : :
2347 tmunro@postgresql.or 811 :CBC 7340 : AbsorbSyncRequests();
5058 simon@2ndQuadrant.co 812 : 7340 : absorb_counter = WRITES_PER_ABSORB;
813 : :
814 : 7340 : CheckArchiveTimeout();
815 : :
816 : : /* Report interim statistics to the cumulative stats system */
1249 andres@anarazel.de 817 : 7340 : pgstat_report_checkpointer();
818 : :
819 : : /*
820 : : * This sleep used to be connected to bgwriter_delay, typically 200ms.
821 : : * That resulted in more frequent wakeups if not much work to do.
822 : : * Checkpointer and bgwriter are no longer related so take the Big
823 : : * Sleep.
824 : : */
1270 tmunro@postgresql.or 825 : 7340 : WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT,
826 : : 100,
827 : : WAIT_EVENT_CHECKPOINT_WRITE_DELAY);
828 : 7340 : ResetLatch(MyLatch);
829 : : }
5058 simon@2ndQuadrant.co 830 [ + + ]: 228106 : else if (--absorb_counter <= 0)
831 : : {
832 : : /*
833 : : * Absorb pending fsync requests after every WRITES_PER_ABSORB write
834 : : * operations even when we don't sleep, to prevent overflow of the
835 : : * fsync request queue.
836 : : */
2347 tmunro@postgresql.or 837 : 100 : AbsorbSyncRequests();
5058 simon@2ndQuadrant.co 838 : 100 : absorb_counter = WRITES_PER_ABSORB;
839 : : }
840 : :
841 : : /* Check for barrier events. */
2088 rhaas@postgresql.org 842 [ + + ]: 235446 : if (ProcSignalBarrierPending)
843 : 5 : ProcessProcSignalBarrier();
844 : : }
845 : :
846 : : /*
847 : : * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
848 : : * (or restartpoint) in time?
849 : : *
850 : : * Compares the current progress against the time/segments elapsed since last
851 : : * checkpoint, and returns true if the progress we've made this far is greater
852 : : * than the elapsed time/segments.
853 : : */
854 : : static bool
5058 simon@2ndQuadrant.co 855 : 46529 : IsCheckpointOnSchedule(double progress)
856 : : {
857 : : XLogRecPtr recptr;
858 : : struct timeval now;
859 : : double elapsed_xlogs,
860 : : elapsed_time;
861 : :
862 [ - + ]: 46529 : Assert(ckpt_active);
863 : :
864 : : /* Scale progress according to checkpoint_completion_target. */
865 : 46529 : progress *= CheckPointCompletionTarget;
866 : :
867 : : /*
868 : : * Check against the cached value first. Only do the more expensive
869 : : * calculations once we reach the target previously calculated. Since
870 : : * neither time nor the WAL insert pointer moves backwards, a freshly
871 : : * calculated value can only be greater than or equal to the cached value.
872 : : */
873 [ + + ]: 46529 : if (progress < ckpt_cached_elapsed)
874 : 35112 : return false;
875 : :
876 : : /*
877 : : * Check progress against WAL segments written and CheckPointSegments.
878 : : *
879 : : * We compare the current WAL insert location against the location
880 : : * computed before calling CreateCheckPoint. The code in XLogInsert that
881 : : * actually triggers a checkpoint when CheckPointSegments is exceeded
882 : : * compares against RedoRecPtr, so this is not completely accurate.
883 : : * However, it's good enough for our purposes, we're only calculating an
884 : : * estimate anyway.
885 : : *
886 : : * During recovery, we compare last replayed WAL record's location with
887 : : * the location computed before calling CreateRestartPoint. That maintains
888 : : * the same pacing as we have during checkpoints in normal operation, but
889 : : * we might exceed max_wal_size by a fair amount. That's because there can
890 : : * be a large gap between a checkpoint's redo-pointer and the checkpoint
891 : : * record itself, and we only start the restartpoint after we've seen the
892 : : * checkpoint record. (The gap is typically up to CheckPointSegments *
893 : : * checkpoint_completion_target where checkpoint_completion_target is the
894 : : * value that was in effect when the WAL was generated).
895 : : */
3722 heikki.linnakangas@i 896 [ + + ]: 11417 : if (RecoveryInProgress())
897 : 5177 : recptr = GetXLogReplayRecPtr(NULL);
898 : : else
5058 simon@2ndQuadrant.co 899 : 6240 : recptr = GetInsertRecPtr();
2909 andres@anarazel.de 900 : 11417 : elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) /
901 : 11417 : wal_segment_size) / CheckPointSegments;
902 : :
3722 heikki.linnakangas@i 903 [ + + ]: 11417 : if (progress < elapsed_xlogs)
904 : : {
905 : 4073 : ckpt_cached_elapsed = elapsed_xlogs;
906 : 4073 : return false;
907 : : }
908 : :
909 : : /*
910 : : * Check progress against time elapsed and checkpoint_timeout.
911 : : */
5058 simon@2ndQuadrant.co 912 : 7344 : gettimeofday(&now, NULL);
913 : 7344 : elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
914 : 7344 : now.tv_usec / 1000000.0) / CheckPointTimeout;
915 : :
916 [ + + ]: 7344 : if (progress < elapsed_time)
917 : : {
918 : 4 : ckpt_cached_elapsed = elapsed_time;
919 : 4 : return false;
920 : : }
921 : :
922 : : /* It looks like we're on schedule. */
923 : 7340 : return true;
924 : : }
925 : :
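/*
 * For illustration, using the defaults above: with
 * checkpoint_completion_target = 0.9 and checkpoint_timeout = 300s, a
 * BufferSync() progress of 0.5 is scaled to 0.45.  The checkpoint is then
 * "on schedule" only if both elapsed fractions are still at or below that
 * value: no more than 0.45 * CheckPointSegments WAL segments written (or
 * replayed, during recovery) since ckpt_start_recptr, and no more than
 * 0.45 * 300s = 135s elapsed since ckpt_start_time.  Otherwise
 * CheckpointWriteDelay() skips its sleep so the checkpoint can catch up.
 */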
926 : :
927 : : /* --------------------------------
928 : : * signal handler routines
929 : : * --------------------------------
930 : : */
931 : :
932 : : /* SIGINT: set flag to trigger writing of shutdown checkpoint */
933 : : static void
224 andres@anarazel.de 934 : 575 : ReqShutdownXLOG(SIGNAL_ARGS)
935 : : {
936 : 575 : ShutdownXLOGPending = true;
937 : 575 : SetLatch(MyLatch);
938 : 575 : }
939 : :
940 : :
941 : : /* --------------------------------
942 : : * communication with backends
943 : : * --------------------------------
944 : : */
945 : :
946 : : /*
947 : : * CheckpointerShmemSize
948 : : * Compute space needed for checkpointer-related shared memory
949 : : */
950 : : Size
4868 simon@2ndQuadrant.co 951 : 2938 : CheckpointerShmemSize(void)
952 : : {
953 : : Size size;
954 : :
955 : : /*
956 : : * The size of the requests[] array is arbitrarily set equal to NBuffers.
957 : : * But there is a cap of MAX_CHECKPOINT_REQUESTS to prevent accumulating
958 : : * too many checkpoint requests in the ring buffer.
959 : : */
960 : 2938 : size = offsetof(CheckpointerShmemStruct, requests);
30 akorotkov@postgresql 961 : 2938 : size = add_size(size, mul_size(Min(NBuffers,
962 : : MAX_CHECKPOINT_REQUESTS),
963 : : sizeof(CheckpointerRequest)));
964 : :
5058 simon@2ndQuadrant.co 965 : 2938 : return size;
966 : : }
967 : :
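/*
 * For illustration: with shared_buffers = 128MB and the default 8kB block
 * size, NBuffers is 16384, well below MAX_CHECKPOINT_REQUESTS, so the
 * requests[] ring buffer gets 16384 slots of sizeof(CheckpointerRequest)
 * bytes on top of the fixed-size header.
 */
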
968 : : /*
969 : : * CheckpointerShmemInit
970 : : * Allocate and initialize checkpointer-related shared memory
971 : : */
972 : : void
4868 973 : 1029 : CheckpointerShmemInit(void)
974 : : {
4799 tgl@sss.pgh.pa.us 975 : 1029 : Size size = CheckpointerShmemSize();
976 : : bool found;
977 : :
4868 simon@2ndQuadrant.co 978 : 1029 : CheckpointerShmem = (CheckpointerShmemStruct *)
4867 tgl@sss.pgh.pa.us 979 : 1029 : ShmemInitStruct("Checkpointer Data",
980 : : size,
981 : : &found);
982 : :
5058 simon@2ndQuadrant.co 983 [ + - ]: 1029 : if (!found)
984 : : {
985 : : /*
986 : : * First time through, so initialize. Note that we zero the whole
987 : : * requests array; this is so that CompactCheckpointerRequestQueue can
988 : : * assume that any pad bytes in the request structs are zeroes.
989 : : */
4799 tgl@sss.pgh.pa.us 990 [ + - + - + - + + + + ]: 1173 : MemSet(CheckpointerShmem, 0, size);
4868 simon@2ndQuadrant.co 991 : 1029 : SpinLockInit(&CheckpointerShmem->ckpt_lck);
41 akorotkov@postgresql 992 : 1029 : CheckpointerShmem->max_requests = Min(NBuffers, MAX_CHECKPOINT_REQUESTS);
41 akorotkov@postgresql 993 :GNC 1029 : CheckpointerShmem->head = CheckpointerShmem->tail = 0;
2368 tmunro@postgresql.or 994 :CBC 1029 : ConditionVariableInit(&CheckpointerShmem->start_cv);
995 : 1029 : ConditionVariableInit(&CheckpointerShmem->done_cv);
996 : : }
5058 simon@2ndQuadrant.co 997 : 1029 : }
998 : :
999 : : /*
1000 : : * ExecCheckpoint
1001 : : * Primary entry point for manual CHECKPOINT commands
1002 : : *
1003 : : * This is mainly a wrapper for RequestCheckpoint().
1004 : : */
1005 : : void
57 nathan@postgresql.or 1006 :GNC 433 : ExecCheckpoint(ParseState *pstate, CheckPointStmt *stmt)
1007 : : {
1008 : 433 : bool fast = true;
1009 : 433 : bool unlogged = false;
1010 : :
1011 [ + + + + + + ]: 869 : foreach_ptr(DefElem, opt, stmt->options)
1012 : : {
1013 [ + + ]: 15 : if (strcmp(opt->defname, "mode") == 0)
1014 : : {
1015 : 6 : char *mode = defGetString(opt);
1016 : :
1017 [ - + ]: 6 : if (strcmp(mode, "spread") == 0)
57 nathan@postgresql.or 1018 :UNC 0 : fast = false;
57 nathan@postgresql.or 1019 [ + + ]:GNC 6 : else if (strcmp(mode, "fast") != 0)
1020 [ + - ]: 3 : ereport(ERROR,
1021 : : (errcode(ERRCODE_SYNTAX_ERROR),
1022 : : errmsg("unrecognized MODE option \"%s\"", mode),
1023 : : parser_errposition(pstate, opt->location)));
1024 : : }
1025 [ + + ]: 9 : else if (strcmp(opt->defname, "flush_unlogged") == 0)
1026 : 6 : unlogged = defGetBoolean(opt);
1027 : : else
1028 [ + - ]: 3 : ereport(ERROR,
1029 : : (errcode(ERRCODE_SYNTAX_ERROR),
1030 : : errmsg("unrecognized CHECKPOINT option \"%s\"", opt->defname),
1031 : : parser_errposition(pstate, opt->location)));
1032 : : }
1033 : :
1034 [ - + ]: 427 : if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT))
57 nathan@postgresql.or 1035 [ # # ]:UNC 0 : ereport(ERROR,
1036 : : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1037 : : /* translator: %s is name of an SQL command (e.g., CHECKPOINT) */
1038 : : errmsg("permission denied to execute %s command",
1039 : : "CHECKPOINT"),
1040 : : errdetail("Only roles with privileges of the \"%s\" role may execute this command.",
1041 : : "pg_checkpoint")));
1042 : :
57 nathan@postgresql.or 1043 [ + - ]:GNC 854 : RequestCheckpoint(CHECKPOINT_WAIT |
1044 : 427 : (fast ? CHECKPOINT_FAST : 0) |
1045 [ + + ]: 427 : (unlogged ? CHECKPOINT_FLUSH_UNLOGGED : 0) |
1046 [ + + ]: 427 : (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE));
1047 : 427 : }
1048 : :
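/*
 * For illustration, SQL forms matching the options parsed above:
 *
 *		CHECKPOINT;									-- fast checkpoint (the default)
 *		CHECKPOINT (MODE SPREAD);					-- paced by checkpoint_completion_target
 *		CHECKPOINT (MODE FAST, FLUSH_UNLOGGED);		-- also flush unlogged relations
 */
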
1049 : : /*
1050 : : * RequestCheckpoint
1051 : : * Called in backend processes to request a checkpoint
1052 : : *
1053 : : * flags is a bitwise OR of the following:
1054 : : * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
1055 : : * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
1056 : : * CHECKPOINT_FAST: finish the checkpoint ASAP,
1057 : : * ignoring checkpoint_completion_target parameter.
1058 : : * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
1059 : : * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
1060 : : * CHECKPOINT_END_OF_RECOVERY, and the CHECKPOINT command).
1061 : : * CHECKPOINT_WAIT: wait for completion before returning (otherwise,
1062 : : * just signal checkpointer to do it, and return).
1063 : : * CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
1064 : : * (This affects logging, and in particular enables CheckPointWarning.)
1065 : : */
1066 : : void
5058 simon@2ndQuadrant.co 1067 :CBC 2453 : RequestCheckpoint(int flags)
1068 : : {
1069 : : int ntries;
1070 : : int old_failed,
1071 : : old_started;
1072 : :
1073 : : /*
1074 : : * If in a standalone backend, just do it ourselves.
1075 : : */
1076 [ + + ]: 2453 : if (!IsPostmasterEnvironment)
1077 : : {
1078 : : /*
1079 : : * There's no point in doing slow checkpoints in a standalone backend,
1080 : : * because there's no other backends the checkpoint could disrupt.
1081 : : */
57 nathan@postgresql.or 1082 :GNC 201 : CreateCheckPoint(flags | CHECKPOINT_FAST);
1083 : :
1084 : : /* Free all smgr objects, as CheckpointerMain() normally would. */
584 heikki.linnakangas@i 1085 :CBC 201 : smgrdestroyall();
1086 : :
5058 simon@2ndQuadrant.co 1087 : 201 : return;
1088 : : }
1089 : :
1090 : : /*
1091 : : * Atomically set the request flags, and take a snapshot of the counters.
1092 : : * When we see ckpt_started > old_started, we know the flags we set here
1093 : : * have been seen by checkpointer.
1094 : : *
1095 : : * Note that we OR the flags with any existing flags, to avoid overriding
1096 : : * a "stronger" request by another backend. The flag senses must be
1097 : : * chosen to make this work!
1098 : : */
3623 rhaas@postgresql.org 1099 [ - + ]: 2252 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1100 : :
1101 : 2252 : old_failed = CheckpointerShmem->ckpt_failed;
1102 : 2252 : old_started = CheckpointerShmem->ckpt_started;
2363 tgl@sss.pgh.pa.us 1103 : 2252 : CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);
1104 : :
3623 rhaas@postgresql.org 1105 : 2252 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1106 : :
1107 : : /*
1108 : : * Set checkpointer's latch to request checkpoint. It's possible that the
1109 : : * checkpointer hasn't started yet, so we will retry a few times if
1110 : : * needed. (Actually, more than a few times, since on slow or overloaded
1111 : : * buildfarm machines, it's been observed that the checkpointer can take
1112 : : * several seconds to start.) However, if not told to wait for the
1113 : : * checkpoint to occur, we consider failure to set the latch to be
1114 : : * nonfatal and merely LOG it. The checkpointer should see the request
1115 : : * when it does start, with or without the SetLatch().
1116 : : */
1117 : : #define MAX_SIGNAL_TRIES 600 /* max wait 60.0 sec */
5058 simon@2ndQuadrant.co 1118 : 2252 : for (ntries = 0;; ntries++)
1119 : 6 : {
225 andres@anarazel.de 1120 : 2258 : volatile PROC_HDR *procglobal = ProcGlobal;
1121 : 2258 : ProcNumber checkpointerProc = procglobal->checkpointerProc;
1122 : :
1123 [ + + ]: 2258 : if (checkpointerProc == INVALID_PROC_NUMBER)
1124 : : {
2363 tgl@sss.pgh.pa.us 1125 [ + - + + ]: 7 : if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
1126 : : {
5058 simon@2ndQuadrant.co 1127 [ - + + - ]:GBC 1 : elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
1128 : : "could not notify checkpoint: checkpointer is not running");
1129 : 1 : break;
1130 : : }
1131 : : }
1132 : : else
1133 : : {
225 andres@anarazel.de 1134 :CBC 2251 : SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
1135 : : /* notified successfully */
1136 : 2251 : break;
1137 : : }
1138 : :
5058 simon@2ndQuadrant.co 1139 [ - + ]: 6 : CHECK_FOR_INTERRUPTS();
1140 : 6 : pg_usleep(100000L); /* wait 0.1 sec, then retry */
1141 : : }
1142 : :
1143 : : /*
1144 : : * If requested, wait for completion. We detect completion according to
1145 : : * the algorithm given above.
1146 : : */
1147 [ + + ]: 2252 : if (flags & CHECKPOINT_WAIT)
1148 : : {
1149 : : int new_started,
1150 : : new_failed;
1151 : :
1152 : : /* Wait for a new checkpoint to start. */
2368 tmunro@postgresql.or 1153 : 790 : ConditionVariablePrepareToSleep(&CheckpointerShmem->start_cv);
1154 : : for (;;)
1155 : : {
3623 rhaas@postgresql.org 1156 [ - + ]: 1476 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1157 : 1476 : new_started = CheckpointerShmem->ckpt_started;
1158 : 1476 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1159 : :
5058 simon@2ndQuadrant.co 1160 [ + + ]: 1476 : if (new_started != old_started)
1161 : 790 : break;
1162 : :
2368 tmunro@postgresql.or 1163 : 686 : ConditionVariableSleep(&CheckpointerShmem->start_cv,
1164 : : WAIT_EVENT_CHECKPOINT_START);
1165 : : }
1166 : 790 : ConditionVariableCancelSleep();
1167 : :
1168 : : /*
1169 : : * We are waiting for ckpt_done >= new_started, in a modulo sense.
1170 : : */
1171 : 790 : ConditionVariablePrepareToSleep(&CheckpointerShmem->done_cv);
1172 : : for (;;)
5058 simon@2ndQuadrant.co 1173 : 532 : {
1174 : : int new_done;
1175 : :
3623 rhaas@postgresql.org 1176 [ - + ]: 1322 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1177 : 1322 : new_done = CheckpointerShmem->ckpt_done;
1178 : 1322 : new_failed = CheckpointerShmem->ckpt_failed;
1179 : 1322 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1180 : :
5058 simon@2ndQuadrant.co 1181 [ + + ]: 1322 : if (new_done - new_started >= 0)
1182 : 790 : break;
1183 : :
2368 tmunro@postgresql.or 1184 : 532 : ConditionVariableSleep(&CheckpointerShmem->done_cv,
1185 : : WAIT_EVENT_CHECKPOINT_DONE);
1186 : : }
1187 : 790 : ConditionVariableCancelSleep();
1188 : :
5058 simon@2ndQuadrant.co 1189 [ - + ]: 790 : if (new_failed != old_failed)
5058 simon@2ndQuadrant.co 1190 [ # # ]:UBC 0 : ereport(ERROR,
1191 : : (errmsg("checkpoint request failed"),
1192 : : errhint("Consult recent messages in the server log for details.")));
1193 : : }
1194 : : }
1195 : :
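/*
 * For illustration, two typical request shapes (the actual callers live
 * elsewhere, e.g. in xlog.c and in the shutdown path):
 *
 *		RequestCheckpoint(CHECKPOINT_CAUSE_XLOG);
 *			request a checkpoint because enough WAL has been written,
 *			without waiting for it to finish;
 *
 *		RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
 *			force an immediate checkpoint and block until it completes.
 */
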
1196 : : /*
1197 : : * ForwardSyncRequest
1198 : : * Forward a file-fsync request from a backend to the checkpointer
1199 : : *
1200 : : * Whenever a backend is compelled to write directly to a relation
1201 : : * (which should be seldom, if the background writer is getting its job done),
1202 : : * the backend calls this routine to pass over knowledge that the relation
1203 : : * is dirty and must be fsync'd before next checkpoint. We also use this
1204 : : * opportunity to count such writes for statistical purposes.
1205 : : *
1206 : : * To avoid holding the lock for longer than necessary, we normally write
1207 : : * to the requests[] queue without checking for duplicates. The checkpointer
1208 : : * will have to eliminate dups internally anyway. However, if we discover
1209 : : * that the queue is full, we make a pass over the entire queue to compact
1210 : : * it. This is somewhat expensive, but the alternative is for the backend
1211 : : * to perform its own fsync, which is far more expensive in practice. It
1212 : : * is theoretically possible a backend fsync might still be necessary, if
1213 : : * the queue is full and contains no duplicate entries. In that case, we
1214 : : * let the backend know by returning false.
1215 : : */
1216 : : bool
2347 tmunro@postgresql.or 1217 :CBC 766575 : ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
1218 : : {
1219 : : CheckpointerRequest *request;
1220 : : bool too_full;
1221 : : int insert_pos;
1222 : :
5058 simon@2ndQuadrant.co 1223 [ - + ]: 766575 : if (!IsUnderPostmaster)
5058 simon@2ndQuadrant.co 1224 :UBC 0 : return false; /* probably shouldn't even get here */
1225 : :
4798 tgl@sss.pgh.pa.us 1226 [ - + ]:CBC 766575 : if (AmCheckpointerProcess())
2347 tmunro@postgresql.or 1227 [ # # ]:UBC 0 : elog(ERROR, "ForwardSyncRequest must not be called in checkpointer");
1228 : :
4868 simon@2ndQuadrant.co 1229 :CBC 766575 : LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
1230 : :
1231 : : /*
1232 : : * If the checkpointer isn't running or the request queue is full, the
1233 : : * backend will have to perform its own fsync request. But before forcing
1234 : : * that to happen, we can try to compact the request queue.
1235 : : */
1236 [ + + ]: 766575 : if (CheckpointerShmem->checkpointer_pid == 0 ||
1237 [ + + ]: 766383 : (CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
4869 tgl@sss.pgh.pa.us 1238 [ + + ]: 159 : !CompactCheckpointerRequestQueue()))
1239 : : {
4868 simon@2ndQuadrant.co 1240 : 253 : LWLockRelease(CheckpointerCommLock);
5058 1241 : 253 : return false;
1242 : : }
1243 : :
1244 : : /* OK, insert request */
41 akorotkov@postgresql 1245 :GNC 766322 : insert_pos = CheckpointerShmem->tail;
1246 : 766322 : request = &CheckpointerShmem->requests[insert_pos];
2347 tmunro@postgresql.or 1247 :CBC 766322 : request->ftag = *ftag;
1248 : 766322 : request->type = type;
1249 : :
41 akorotkov@postgresql 1250 :GNC 766322 : CheckpointerShmem->tail = (CheckpointerShmem->tail + 1) % CheckpointerShmem->max_requests;
1251 : 766322 : CheckpointerShmem->num_requests++;
1252 : :
1253 : : /* If queue is more than half full, nudge the checkpointer to empty it */
4868 simon@2ndQuadrant.co 1254 :CBC 766322 : too_full = (CheckpointerShmem->num_requests >=
1255 : 766322 : CheckpointerShmem->max_requests / 2);
1256 : :
1257 : 766322 : LWLockRelease(CheckpointerCommLock);
1258 : :
1259 : : /* ... but not till after we release the lock */
309 heikki.linnakangas@i 1260 [ + + ]: 766322 : if (too_full)
1261 : : {
1262 : 25095 : volatile PROC_HDR *procglobal = ProcGlobal;
1263 : 25095 : ProcNumber checkpointerProc = procglobal->checkpointerProc;
1264 : :
1265 [ + - ]: 25095 : if (checkpointerProc != INVALID_PROC_NUMBER)
1266 : 25095 : SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
1267 : : }
1268 : :
5058 simon@2ndQuadrant.co 1269 : 766322 : return true;
1270 : : }
1271 : :
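/*
 * Simplified caller-side sketch (compare RegisterSyncRequest() in sync.c),
 * kept out of the build by "#if 0".  The helper name is hypothetical; it
 * only shows how a false return is meant to be handled: retry after a short
 * sleep, or report failure so the caller performs the fsync itself.
 */
#if 0
static bool
QueueOrRetrySyncRequest(const FileTag *ftag, SyncRequestType type,
						bool retryOnError)
{
	for (;;)
	{
		if (ForwardSyncRequest(ftag, type))
			return true;		/* queued successfully */
		if (!retryOnError)
			return false;		/* caller must fsync the file itself */
		pg_usleep(10000L);		/* wait 10ms, then try again */
	}
}
#endif
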
1272 : : /*
1273 : : * CompactCheckpointerRequestQueue
1274 : : * Remove duplicates from the request queue to avoid backend fsyncs.
1275 : : * Returns "true" if any entries were removed.
1276 : : *
1277 : : * Although a full fsync request queue is not common, it can lead to severe
1278 : : * performance problems when it does happen. So far, this situation has
1279 : : * only been observed to occur when the system is under heavy write load,
1280 : : * and especially during the "sync" phase of a checkpoint. Without this
1281 : : * logic, each backend begins doing an fsync for every block written, which
1282 : : * gets very expensive and can slow down the whole system.
1283 : : *
1284 : : * Trying to do this every time the queue is full could lose if there
1285 : : * aren't any removable entries. But that should be vanishingly rare in
1286 : : * practice: there's one queue entry per shared buffer.
1287 : : */
1288 : : static bool
4869 tgl@sss.pgh.pa.us 1289 : 159 : CompactCheckpointerRequestQueue(void)
1290 : : {
1291 : : struct CheckpointerSlotMapping
1292 : : {
1293 : : CheckpointerRequest request;
1294 : : int ring_idx;
1295 : : };
1296 : :
1297 : : int n;
5058 simon@2ndQuadrant.co 1298 : 159 : int num_skipped = 0;
1299 : : int head;
1300 : : int max_requests;
1301 : : int num_requests;
1302 : : int read_idx,
1303 : : write_idx;
1304 : : HASHCTL ctl;
1305 : : HTAB *htab;
1306 : : bool *skip_slot;
1307 : :
1308 : : /* must hold CheckpointerCommLock in exclusive mode */
4868 1309 [ - + ]: 159 : Assert(LWLockHeldByMe(CheckpointerCommLock));
1310 : :
1311 : : /* Avoid memory allocations in a critical section. */
442 heikki.linnakangas@i 1312 [ - + ]: 159 : if (CritSectionCount > 0)
442 heikki.linnakangas@i 1313 :UBC 0 : return false;
1314 : :
41 akorotkov@postgresql 1315 :GNC 159 : max_requests = CheckpointerShmem->max_requests;
1316 : 159 : num_requests = CheckpointerShmem->num_requests;
1317 : :
1318 : : /* Initialize skip_slot array */
1319 : 159 : skip_slot = palloc0(sizeof(bool) * max_requests);
1320 : :
1321 : 159 : head = CheckpointerShmem->head;
1322 : :
1323 : : /* Initialize temporary hash table */
4868 simon@2ndQuadrant.co 1324 :CBC 159 : ctl.keysize = sizeof(CheckpointerRequest);
4867 tgl@sss.pgh.pa.us 1325 : 159 : ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
4799 1326 : 159 : ctl.hcxt = CurrentMemoryContext;
1327 : :
4968 simon@2ndQuadrant.co 1328 : 159 : htab = hash_create("CompactCheckpointerRequestQueue",
4868 1329 : 159 : CheckpointerShmem->num_requests,
1330 : : &ctl,
1331 : : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
1332 : :
1333 : : /*
1334 : : * The basic idea here is that a request can be skipped if it's followed
1335 : : * by a later, identical request. It might seem more sensible to work
1336 : : * backwards from the end of the queue and check whether a request is
1337 : : * *preceded* by an earlier, identical request, in the hopes of doing less
1338 : : * copying. But that might change the semantics, if there's an
1339 : : * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
1340 : : * this way. It would be possible to be even smarter if we made the code
1341 : : * below understand the specific semantics of such requests (it could blow
1342 : : * away preceding entries that would end up being canceled anyhow), but
1343 : : * it's not clear that the extra complexity would buy us anything.
1344 : : */
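	/*
	 * Illustration (hypothetical queue contents): suppose the queue held, in
	 * order,
	 *
	 *		SYNC_REQUEST(X), SYNC_FORGET_REQUEST(X), SYNC_REQUEST(X)
	 *
	 * Only the two SYNC_REQUEST(X) entries are identical, so this pass drops
	 * the first of them, leaving FORGET(X) followed by REQUEST(X); X still
	 * gets fsync'd, the same outcome as processing the full queue front to
	 * back.  Dropping the *later* duplicate instead would leave REQUEST(X)
	 * followed by FORGET(X), and X would never be fsync'd.
	 */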
41 akorotkov@postgresql 1345 :GNC 159 : read_idx = head;
1346 [ + + ]: 17487 : for (n = 0; n < num_requests; n++)
1347 : : {
1348 : : CheckpointerRequest *request;
1349 : : struct CheckpointerSlotMapping *slotmap;
1350 : : bool found;
1351 : :
1352 : : /*
1353 : : * We use the request struct directly as a hashtable key. This
1354 : : * assumes that any padding bytes in the structs are consistently the
1355 : : * same, which should be okay because we zeroed them in
1356 : : * CheckpointerShmemInit. Note also that RelFileLocator had better
1357 : : * contain no pad bytes.
1358 : : */
1359 : 17328 : request = &CheckpointerShmem->requests[read_idx];
5058 simon@2ndQuadrant.co 1360 :CBC 17328 : slotmap = hash_search(htab, request, HASH_ENTER, &found);
1361 [ + + ]: 17328 : if (found)
1362 : : {
1363 : : /* Duplicate, so mark the previous occurrence as skippable */
41 akorotkov@postgresql 1364 :GNC 6597 : skip_slot[slotmap->ring_idx] = true;
4799 tgl@sss.pgh.pa.us 1365 :CBC 6597 : num_skipped++;
1366 : : }
1367 : : /* Remember slot containing latest occurrence of this request value */
41 akorotkov@postgresql 1368 :GNC 17328 : slotmap->ring_idx = read_idx;
1369 : :
1370 : : /* Move to the next request in the ring buffer */
1371 : 17328 : read_idx = (read_idx + 1) % max_requests;
1372 : : }
1373 : :
1374 : : /* Done with the hash table. */
5058 simon@2ndQuadrant.co 1375 :CBC 159 : hash_destroy(htab);
1376 : :
1377 : : /* If no duplicates, we're out of luck. */
1378 [ + + ]: 159 : if (!num_skipped)
1379 : : {
1380 : 61 : pfree(skip_slot);
1381 : 61 : return false;
1382 : : }
1383 : :
1384 : : /* We found some duplicates; remove them. */
41 akorotkov@postgresql 1385 :GNC 98 : read_idx = write_idx = head;
1386 [ + + ]: 9730 : for (n = 0; n < num_requests; n++)
1387 : : {
1388 : : /* If this slot is NOT skipped, keep it */
1389 [ + + ]: 9632 : if (!skip_slot[read_idx])
1390 : : {
1391 : : /* If the read and write positions are different, copy the request */
1392 [ + + ]: 3035 : if (write_idx != read_idx)
1393 : 2880 : CheckpointerShmem->requests[write_idx] =
1394 : 2880 : CheckpointerShmem->requests[read_idx];
1395 : :
1396 : : /* Advance the write position */
1397 : 3035 : write_idx = (write_idx + 1) % max_requests;
1398 : : }
1399 : :
1400 : 9632 : read_idx = (read_idx + 1) % max_requests;
1401 : : }
1402 : :
1403 : : /*
1404 : : * Update ring buffer state: head remains the same, tail moves, count
1405 : : * decreases
1406 : : */
1407 : 98 : CheckpointerShmem->tail = write_idx;
1408 : 98 : CheckpointerShmem->num_requests -= num_skipped;
1409 : :
5058 simon@2ndQuadrant.co 1410 [ + + ]:CBC 98 : ereport(DEBUG1,
1411 : : (errmsg_internal("compacted fsync request queue from %d entries to %d entries",
1412 : : num_requests, CheckpointerShmem->num_requests)));
1413 : :
1414 : : /* Cleanup. */
1415 : 98 : pfree(skip_slot);
1416 : 98 : return true;
1417 : : }
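To see the compaction pass in isolation, here is a minimal, self-contained sketch of the same idea applied to a toy ring buffer of small integers. Every name in it (Ring, RING_SIZE, ring_compact, the last_seen lookup array standing in for the hash table) is invented for illustration; the real function above keys a dynahash table on the whole CheckpointerRequest struct and runs with CheckpointerCommLock held.

/*
 * Toy sketch of "keep only the latest duplicate" ring-buffer compaction.
 * Values are assumed to be small non-negative ints so a plain array can
 * stand in for the hash table used by the real code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8

typedef struct Ring
{
	int		items[RING_SIZE];
	int		head;				/* index of the oldest entry */
	int		count;				/* number of valid entries */
} Ring;

static bool
ring_compact(Ring *ring)
{
	bool	skip[RING_SIZE] = {false};
	int		last_seen[1024];	/* value -> ring index of latest occurrence */
	int		num_skipped = 0;
	int		read_idx,
			write_idx;

	memset(last_seen, -1, sizeof(last_seen));

	/* Pass 1: mark every slot whose value reappears later. */
	read_idx = ring->head;
	for (int n = 0; n < ring->count; n++)
	{
		int		val = ring->items[read_idx];

		if (last_seen[val] >= 0)
		{
			skip[last_seen[val]] = true;	/* earlier copy is redundant */
			num_skipped++;
		}
		last_seen[val] = read_idx;
		read_idx = (read_idx + 1) % RING_SIZE;
	}

	if (num_skipped == 0)
		return false;

	/* Pass 2: copy the survivors forward, preserving their order. */
	read_idx = write_idx = ring->head;
	for (int n = 0; n < ring->count; n++)
	{
		if (!skip[read_idx])
		{
			if (write_idx != read_idx)
				ring->items[write_idx] = ring->items[read_idx];
			write_idx = (write_idx + 1) % RING_SIZE;
		}
		read_idx = (read_idx + 1) % RING_SIZE;
	}

	ring->count -= num_skipped;
	return true;
}

int
main(void)
{
	Ring	r = {{7, 3, 7, 5, 3, 7}, 0, 6};

	ring_compact(&r);			/* leaves 5, 3, 7: the latest copies, in order */
	for (int n = 0; n < r.count; n++)
		printf("%d\n", r.items[(r.head + n) % RING_SIZE]);
	return 0;
}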
1418 : :
1419 : : /*
1420 : : * AbsorbSyncRequests
1421 : : * Retrieve queued sync requests and pass them to sync mechanism.
1422 : : *
1423 : : * This is exported because it must be called during CreateCheckPoint;
1424 : : * we have to be sure we have accepted all pending requests just before
1425 : : * we start fsync'ing. Since CreateCheckPoint sometimes runs in
1426 : : * non-checkpointer processes, do nothing if not checkpointer.
1427 : : */
1428 : : void
2347 tmunro@postgresql.or 1429 : 17706 : AbsorbSyncRequests(void)
1430 : : {
4868 simon@2ndQuadrant.co 1431 : 17706 : CheckpointerRequest *requests = NULL;
1432 : : CheckpointerRequest *request;
1433 : : int n,
1434 : : i;
1435 : : bool loop;
1436 : :
4798 tgl@sss.pgh.pa.us 1437 [ + + ]: 17706 : if (!AmCheckpointerProcess())
5058 simon@2ndQuadrant.co 1438 : 640 : return;
1439 : :
1440 : : do
1441 : : {
41 akorotkov@postgresql 1442 :GNC 17066 : LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
1443 : :
1444 : : /*---
1445 : : * We try to avoid holding the lock for a long time by:
1446 : : * 1. Copying the request array and processing the requests after
1447 : : * releasing the lock;
 1448                 : :          *    2. Processing the queue in batches of CKPT_REQ_BATCH_SIZE requests,
 1449                 : :          *       rather than the whole thing at once.
1450 : : *
1451 : : * Once we have cleared the requests from shared memory, we must
1452 : : * PANIC if we then fail to absorb them (e.g., because our hashtable
1453 : : * runs out of memory). This is because the system cannot run safely
1454 : : * if we are unable to fsync what we have been told to fsync.
1455 : : * Fortunately, the hashtable is so small that the problem is quite
1456 : : * unlikely to arise in practice.
1457 : : *
1458 : : * Note: The maximum possible size of a ring buffer is
1459 : : * MAX_CHECKPOINT_REQUESTS entries, which fit into a maximum palloc
 1460                 : :          * allocation size of 1 GB.  Our maximum batch size,
1461 : : * CKPT_REQ_BATCH_SIZE, is even smaller.
1462 : : */
1463 : 17066 : n = Min(CheckpointerShmem->num_requests, CKPT_REQ_BATCH_SIZE);
1464 [ + + ]: 17066 : if (n > 0)
1465 : : {
1466 [ + - ]: 9105 : if (!requests)
1467 : 9105 : requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
1468 : :
1469 [ + + ]: 684252 : for (i = 0; i < n; i++)
1470 : : {
1471 : 675147 : requests[i] = CheckpointerShmem->requests[CheckpointerShmem->head];
1472 : 675147 : CheckpointerShmem->head = (CheckpointerShmem->head + 1) % CheckpointerShmem->max_requests;
1473 : : }
1474 : :
1475 : 9105 : CheckpointerShmem->num_requests -= n;
1476 : :
1477 : : }
1478 : :
1479 : 17066 : START_CRIT_SECTION();
1480 : :
1481 : : /* Are there any requests in the queue? If so, keep going. */
1482 : 17066 : loop = CheckpointerShmem->num_requests != 0;
1483 : :
1484 : 17066 : LWLockRelease(CheckpointerCommLock);
1485 : :
1486 [ + + ]: 692213 : for (request = requests; n > 0; request++, n--)
1487 : 675147 : RememberSyncRequest(&request->ftag, request->type);
1488 : :
1489 [ - + ]: 17066 : END_CRIT_SECTION();
1490 [ - + ]: 17066 : } while (loop);
1491 : :
5058 simon@2ndQuadrant.co 1492 [ + + ]:CBC 17066 : if (requests)
1493 : 9105 : pfree(requests);
1494 : : }
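The lock-handling pattern used above, copying a bounded batch out of the shared queue while the lock is held and then doing the slow per-item work on the private copy before looping for more, can be sketched with an ordinary pthread mutex. Everything below (queue_t, QUEUE_MAX, BATCH_SIZE, drain_queue, handle_item) is an invented illustration under that assumption; it deliberately omits the critical-section and PANIC-on-failure concerns that apply to the real fsync request queue.

#include <pthread.h>
#include <stdbool.h>

#define QUEUE_MAX	1024
#define BATCH_SIZE	64

typedef struct queue_t
{
	pthread_mutex_t lock;
	int		items[QUEUE_MAX];
	int		head;				/* index of the oldest entry */
	int		count;				/* number of valid entries */
} queue_t;

/* Stand-in for the slow per-item work (e.g. remembering an fsync target). */
static void
handle_item(int item)
{
	(void) item;
}

/*
 * Drain the queue in batches.  The lock is held only while a batch is
 * copied out of the shared structure; the per-item work runs unlocked on
 * the private copy, so producers are blocked as briefly as possible.
 */
static void
drain_queue(queue_t *q)
{
	int		batch[BATCH_SIZE];
	bool	more;

	do
	{
		int		n;

		pthread_mutex_lock(&q->lock);

		n = (q->count < BATCH_SIZE) ? q->count : BATCH_SIZE;
		for (int i = 0; i < n; i++)
		{
			batch[i] = q->items[q->head];
			q->head = (q->head + 1) % QUEUE_MAX;
		}
		q->count -= n;
		more = (q->count != 0);		/* anything left for another pass? */

		pthread_mutex_unlock(&q->lock);

		for (int i = 0; i < n; i++)
			handle_item(batch[i]);
	} while (more);
}

int
main(void)
{
	queue_t q = {PTHREAD_MUTEX_INITIALIZER, {0}, 0, 0};

	for (int i = 0; i < 200; i++)	/* enqueue 200 dummy items */
	{
		q.items[(q.head + q.count) % QUEUE_MAX] = i;
		q.count++;
	}
	drain_queue(&q);				/* consumes them in batches of 64 */
	return 0;
}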
1495 : :
1496 : : /*
1497 : : * Update any shared memory configurations based on config parameters
1498 : : */
1499 : : static void
4973 1500 : 548 : UpdateSharedMemoryConfig(void)
1501 : : {
1502 : : /* update global shmem state for sync rep */
1503 : 548 : SyncRepUpdateSyncStandbysDefined();
1504 : :
1505 : : /*
1506 : : * If full_page_writes has been changed by SIGHUP, we update it in shared
1507 : : * memory and write an XLOG_FPW_CHANGE record.
1508 : : */
1509 : 548 : UpdateFullPageWrites();
1510 : :
1511 [ + + ]: 548 : elog(DEBUG2, "checkpointer updated shared memory configuration values");
1512 : 548 : }
1513 : :
1514 : : /*
1515 : : * FirstCallSinceLastCheckpoint allows a process to take an action once
1516 : : * per checkpoint cycle by asynchronously checking for checkpoint completion.
1517 : : */
1518 : : bool
4845 1519 : 11160 : FirstCallSinceLastCheckpoint(void)
1520 : : {
1521 : : static int ckpt_done = 0;
1522 : : int new_done;
4836 bruce@momjian.us 1523 : 11160 : bool FirstCall = false;
1524 : :
3623 rhaas@postgresql.org 1525 [ - + ]: 11160 : SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
1526 : 11160 : new_done = CheckpointerShmem->ckpt_done;
1527 : 11160 : SpinLockRelease(&CheckpointerShmem->ckpt_lck);
1528 : :
4845 simon@2ndQuadrant.co 1529 [ + + ]: 11160 : if (new_done != ckpt_done)
1530 : 542 : FirstCall = true;
1531 : :
1532 : 11160 : ckpt_done = new_done;
1533 : :
1534 : 11160 : return FirstCall;
1535 : : }
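The test above is the usual "report a change in a shared counter exactly once per observer" idiom: keep a function-local static copy of the last counter value acted upon and compare it with the current value (which the real code reads under ckpt_lck). A stripped-down sketch with invented names (cycles_done, first_call_since_last_cycle) might look like this:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a shared completion counter such as CheckpointerShmem->ckpt_done. */
static int	cycles_done = 0;

/*
 * Return true exactly once after each advance of cycles_done.  Same shape
 * as FirstCallSinceLastCheckpoint(): compare the shared counter against a
 * static copy of the value we last acted on.
 */
static bool
first_call_since_last_cycle(void)
{
	static int	last_seen = 0;
	bool		first = (cycles_done != last_seen);

	last_seen = cycles_done;
	return first;
}

int
main(void)
{
	printf("%d\n", first_call_since_last_cycle());	/* 0: nothing finished yet */
	cycles_done++;									/* a cycle completes */
	printf("%d\n", first_call_since_last_cycle());	/* 1: first call since then */
	printf("%d\n", first_call_since_last_cycle());	/* 0: already reported */
	return 0;
}

Because the static variable lives in the calling process, each process that polls the function gets its own once-per-cycle notification, which matches how FirstCallSinceLastCheckpoint behaves.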