Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * proc.c
4 : : * routines to manage per-process shared memory data structure
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/storage/lmgr/proc.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : : /*
16 : : * Interface (a):
17 : : * JoinWaitQueue(), ProcSleep(), ProcWakeup()
18 : : *
19 : : * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20 : : * the lock wakes the process up again (and gives it an error code so it knows
21 : : * whether it was awoken on an error condition).
22 : : *
23 : : * Interface (b):
24 : : *
25 : : * ProcReleaseLocks -- frees the locks associated with current transaction
26 : : *
27 : : * ProcKill -- destroys the shared memory state (and locks)
28 : : * associated with the process.
29 : : */
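 : : /*
 : :  * Editorial sketch, not part of proc.c: a minimal caller-side view of
 : :  * interface (a), pieced together from the contracts documented at
 : :  * JoinWaitQueue() and ProcSleep() below.  The names waitResult, locallock,
 : :  * lockMethodTable, dontWait and partitionLock are placeholders for whatever
 : :  * the real caller has at hand.
 : :  *
 : :  *		LWLockAcquire(partitionLock, LW_EXCLUSIVE);
 : :  *		waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
 : :  *		LWLockRelease(partitionLock);
 : :  *		if (waitResult == PROC_WAIT_STATUS_WAITING)
 : :  *			waitResult = ProcSleep(locallock);
 : :  *
 : :  * Per the JoinWaitQueue() header comment, the lock table's partition lock is
 : :  * held across JoinWaitQueue() and must be released before calling ProcSleep().
 : :  */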
30 : : #include "postgres.h"
31 : :
32 : : #include <signal.h>
33 : : #include <unistd.h>
34 : : #include <sys/time.h>
35 : :
36 : : #include "access/transam.h"
37 : : #include "access/twophase.h"
38 : : #include "access/xlogutils.h"
39 : : #include "access/xlogwait.h"
40 : : #include "miscadmin.h"
41 : : #include "pgstat.h"
42 : : #include "postmaster/autovacuum.h"
43 : : #include "replication/slotsync.h"
44 : : #include "replication/syncrep.h"
45 : : #include "storage/condition_variable.h"
46 : : #include "storage/ipc.h"
47 : : #include "storage/lmgr.h"
48 : : #include "storage/pmsignal.h"
49 : : #include "storage/proc.h"
50 : : #include "storage/procarray.h"
51 : : #include "storage/procsignal.h"
52 : : #include "storage/spin.h"
53 : : #include "storage/standby.h"
54 : : #include "utils/timeout.h"
55 : : #include "utils/timestamp.h"
56 : :
57 : : /* GUC variables */
58 : : int DeadlockTimeout = 1000;
59 : : int StatementTimeout = 0;
60 : : int LockTimeout = 0;
61 : : int IdleInTransactionSessionTimeout = 0;
62 : : int TransactionTimeout = 0;
63 : : int IdleSessionTimeout = 0;
64 : : bool log_lock_waits = true;
65 : :
66 : : /* Pointer to this process's PGPROC struct, if any */
67 : : PGPROC *MyProc = NULL;
68 : :
69 : : /*
70 : : * This spinlock protects the freelist of recycled PGPROC structures.
71 : : * We cannot use an LWLock because the LWLock manager depends on already
72 : : * having a PGPROC and a wait semaphore! But these structures are touched
73 : : * relatively infrequently (only at backend startup or shutdown) and not for
74 : : * very long, so a spinlock is okay.
75 : : */
76 : : NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
77 : :
78 : : /* Pointers to shared-memory structures */
79 : : PROC_HDR *ProcGlobal = NULL;
80 : : NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
81 : : PGPROC *PreparedXactProcs = NULL;
82 : :
83 : : static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
84 : :
85 : : /* Is a deadlock check pending? */
86 : : static volatile sig_atomic_t got_deadlock_timeout;
87 : :
88 : : static void RemoveProcFromArray(int code, Datum arg);
89 : : static void ProcKill(int code, Datum arg);
90 : : static void AuxiliaryProcKill(int code, Datum arg);
91 : : static void CheckDeadLock(void);
92 : :
93 : :
94 : : /*
95 : : * Report shared-memory space needed by PGPROC.
96 : : */
97 : : static Size
259 tomas.vondra@postgre 98 :CBC 3055 : PGProcShmemSize(void)
99 : : {
7424 tgl@sss.pgh.pa.us 100 : 3055 : Size size = 0;
101 : : Size TotalProcs =
943 102 : 3055 : add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
103 : :
1951 andres@anarazel.de 104 : 3055 : size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
105 : 3055 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
106 : 3055 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
1857 alvherre@alvh.no-ip. 107 : 3055 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
108 : :
259 tomas.vondra@postgre 109 : 3055 : return size;
110 : : }
111 : :
112 : : /*
113 : : * Report shared-memory space needed by Fast-Path locks.
114 : : */
115 : : static Size
116 : 3055 : FastPathLockShmemSize(void)
117 : : {
118 : 3055 : Size size = 0;
119 : : Size TotalProcs =
120 : 3055 : add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
121 : : Size fpLockBitsSize,
122 : : fpRelIdSize;
123 : :
124 : : /*
125 : : * Memory needed for PGPROC fast-path lock arrays. Make sure the sizes are
126 : : * nicely aligned in each backend.
127 : : */
452 128 : 3055 : fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
288 129 : 3055 : fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
130 : :
452 131 : 3055 : size = add_size(size, mul_size(TotalProcs, (fpLockBitsSize + fpRelIdSize)));
132 : :
7749 tgl@sss.pgh.pa.us 133 : 3055 : return size;
134 : : }
135 : :
136 : : /*
137 : : * Report shared-memory space needed by InitProcGlobal.
138 : : */
139 : : Size
259 tomas.vondra@postgre 140 : 1986 : ProcGlobalShmemSize(void)
141 : : {
142 : 1986 : Size size = 0;
143 : :
144 : : /* ProcGlobal */
145 : 1986 : size = add_size(size, sizeof(PROC_HDR));
146 : 1986 : size = add_size(size, sizeof(slock_t));
147 : :
41 heikki.linnakangas@i 148 :GNC 1986 : size = add_size(size, PGSemaphoreShmemSize(ProcGlobalSemas()));
259 tomas.vondra@postgre 149 :CBC 1986 : size = add_size(size, PGProcShmemSize());
150 : 1986 : size = add_size(size, FastPathLockShmemSize());
151 : :
152 : 1986 : return size;
153 : : }
154 : :
155 : : /*
156 : : * Report number of semaphores needed by InitProcGlobal.
157 : : */
158 : : int
7488 tgl@sss.pgh.pa.us 159 : 3970 : ProcGlobalSemas(void)
160 : : {
161 : : /*
162 : : * We need a sema per backend (including autovacuum), plus one for each
163 : : * auxiliary process.
164 : : */
1345 rhaas@postgresql.org 165 : 3970 : return MaxBackends + NUM_AUXILIARY_PROCS;
166 : : }
167 : :
168 : : /*
169 : : * InitProcGlobal -
170 : : * Initialize the global process table during postmaster or standalone
171 : : * backend startup.
172 : : *
173 : : * We also create all the per-process semaphores we will need to support
174 : : * the requested number of backends. We used to allocate semaphores
175 : : * only when backends were actually started up, but that is bad because
176 : : * it lets Postgres fail under load --- a lot of Unix systems are
177 : : * (mis)configured with small limits on the number of semaphores, and
178 : : * running out when trying to start another backend is a common failure.
179 : : * So, now we grab enough semaphores to support the desired max number
180 : : * of backends immediately at initialization --- if the sysadmin has set
181 : : * MaxConnections, max_worker_processes, max_wal_senders, or
182 : : * autovacuum_worker_slots higher than his kernel will support, he'll
183 : : * find out sooner rather than later.
184 : : *
185 : : * Another reason for creating semaphores here is that the semaphore
186 : : * implementation typically requires us to create semaphores in the
187 : : * postmaster, not in backends.
188 : : *
189 : : * Note: this is NOT called by individual backends under a postmaster,
190 : : * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
191 : : * pointers must be propagated specially for EXEC_BACKEND operation.
192 : : */
193 : : void
7488 tgl@sss.pgh.pa.us 194 : 1069 : InitProcGlobal(void)
195 : : {
196 : : PGPROC *procs;
197 : : int i,
198 : : j;
199 : : bool found;
1345 rhaas@postgresql.org 200 : 1069 : uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
201 : :
202 : : /* Used for setup of per-backend fast-path slots. */
203 : : char *fpPtr,
204 : : *fpEndPtr PG_USED_FOR_ASSERTS_ONLY;
205 : : Size fpLockBitsSize,
206 : : fpRelIdSize;
207 : : Size requestSize;
208 : : char *ptr;
209 : :
210 : : /* Create the ProcGlobal shared structure */
10328 bruce@momjian.us 211 : 1069 : ProcGlobal = (PROC_HDR *)
7287 tgl@sss.pgh.pa.us 212 : 1069 : ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
213 [ - + ]: 1069 : Assert(!found);
214 : :
215 : : /*
216 : : * Initialize the data structures.
217 : : */
5302 rhaas@postgresql.org 218 : 1069 : ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
1064 andres@anarazel.de 219 : 1069 : dlist_init(&ProcGlobal->freeProcs);
220 : 1069 : dlist_init(&ProcGlobal->autovacFreeProcs);
221 : 1069 : dlist_init(&ProcGlobal->bgworkerFreeProcs);
222 : 1069 : dlist_init(&ProcGlobal->walsenderFreeProcs);
5251 tgl@sss.pgh.pa.us 223 : 1069 : ProcGlobal->startupBufferPinWaitBufId = -1;
411 heikki.linnakangas@i 224 : 1069 : ProcGlobal->walwriterProc = INVALID_PROC_NUMBER;
225 : 1069 : ProcGlobal->checkpointerProc = INVALID_PROC_NUMBER;
654 226 : 1069 : pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PROC_NUMBER);
227 : 1069 : pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PROC_NUMBER);
228 : :
229 : : /*
230 : : * Create and initialize all the PGPROC structures we'll need. There are
231 : : * six separate consumers: (1) normal backends, (2) autovacuum workers and
232 : : * special workers, (3) background workers, (4) walsenders, (5) auxiliary
233 : : * processes, and (6) prepared transactions. (For largely-historical
234 : : * reasons, we combine autovacuum and special workers into one category
235 : : * with a single freelist.) Each PGPROC structure is dedicated to exactly
236 : : * one of these purposes, and they do not move between groups.
237 : : */
259 tomas.vondra@postgre 238 : 1069 : requestSize = PGProcShmemSize();
239 : :
240 : 1069 : ptr = ShmemInitStruct("PGPROC structures",
241 : : requestSize,
242 : : &found);
243 : :
244 [ + - + + : 1069 : MemSet(ptr, 0, requestSize);
+ - - + -
- ]
245 : :
246 : 1069 : procs = (PGPROC *) ptr;
15 peter@eisentraut.org 247 :GNC 1069 : ptr = ptr + TotalProcs * sizeof(PGPROC);
248 : :
5317 rhaas@postgresql.org 249 :CBC 1069 : ProcGlobal->allProcs = procs;
250 : : /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
1345 251 : 1069 : ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
252 : :
253 : : /*
254 : : * Allocate arrays mirroring PGPROC fields in a dense manner. See
255 : : * PROC_HDR.
256 : : *
257 : : * XXX: It might make sense to increase padding for these arrays, given
258 : : * how hotly they are accessed.
259 : : */
259 tomas.vondra@postgre 260 : 1069 : ProcGlobal->xids = (TransactionId *) ptr;
15 peter@eisentraut.org 261 :GNC 1069 : ptr = ptr + (TotalProcs * sizeof(*ProcGlobal->xids));
262 : :
259 tomas.vondra@postgre 263 :CBC 1069 : ProcGlobal->subxidStates = (XidCacheStatus *) ptr;
15 peter@eisentraut.org 264 :GNC 1069 : ptr = ptr + (TotalProcs * sizeof(*ProcGlobal->subxidStates));
265 : :
259 tomas.vondra@postgre 266 :CBC 1069 : ProcGlobal->statusFlags = (uint8 *) ptr;
15 peter@eisentraut.org 267 :GNC 1069 : ptr = ptr + (TotalProcs * sizeof(*ProcGlobal->statusFlags));
268 : :
269 : : /* make sure we didn't overflow */
259 tomas.vondra@postgre 270 [ + - - + ]:CBC 1069 : Assert((ptr > (char *) procs) && (ptr <= (char *) procs + requestSize));
271 : :
272 : : /*
273 : : * Allocate arrays for fast-path locks. Those are variable-length, so
274 : : * can't be included in PGPROC directly. We allocate a separate piece of
275 : : * shared memory and then divide that between backends.
276 : : */
452 277 : 1069 : fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
288 278 : 1069 : fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
279 : :
259 280 : 1069 : requestSize = FastPathLockShmemSize();
281 : :
282 : 1069 : fpPtr = ShmemInitStruct("Fast-Path Lock Array",
283 : : requestSize,
284 : : &found);
285 : :
286 [ + - + - : 1069 : MemSet(fpPtr, 0, requestSize);
+ - - + -
- ]
287 : :
288 : : /* For asserts checking we did not overflow. */
289 : 1069 : fpEndPtr = fpPtr + requestSize;
290 : :
291 : : /* Reserve space for semaphores. */
41 heikki.linnakangas@i 292 :GNC 1069 : PGReserveSemaphores(ProcGlobalSemas());
293 : :
5302 rhaas@postgresql.org 294 [ + + ]:CBC 141445 : for (i = 0; i < TotalProcs; i++)
295 : : {
1064 andres@anarazel.de 296 : 140376 : PGPROC *proc = &procs[i];
297 : :
298 : : /* Common initialization for all PGPROCs, regardless of type. */
299 : :
300 : : /*
301 : : * Set the fast-path lock arrays, and move the pointer. We interleave
302 : : * the two arrays, to (hopefully) get some locality for each backend.
303 : : */
452 tomas.vondra@postgre 304 : 140376 : proc->fpLockBits = (uint64 *) fpPtr;
305 : 140376 : fpPtr += fpLockBitsSize;
306 : :
307 : 140376 : proc->fpRelId = (Oid *) fpPtr;
308 : 140376 : fpPtr += fpRelIdSize;
309 : :
310 [ - + ]: 140376 : Assert(fpPtr <= fpEndPtr);
311 : :
312 : : /*
313 : : * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
314 : : * dummy PGPROCs don't need these though - they're never associated
315 : : * with a real process
316 : : */
1345 rhaas@postgresql.org 317 [ + + ]: 140376 : if (i < MaxBackends + NUM_AUXILIARY_PROCS)
318 : : {
1064 andres@anarazel.de 319 : 139535 : proc->sem = PGSemaphoreCreate();
320 : 139535 : InitSharedLatch(&(proc->procLatch));
321 : 139535 : LWLockInitialize(&(proc->fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
322 : : }
323 : :
324 : : /*
325 : : * Newly created PGPROCs for normal backends, autovacuum workers,
326 : : * special workers, bgworkers, and walsenders must be queued up on the
327 : : * appropriate free list. Because there can only ever be a small,
328 : : * fixed number of auxiliary processes, no free list is used in that
329 : : * case; InitAuxiliaryProcess() instead uses a linear search. PGPROCs
330 : : * for prepared transactions are added to a free list by
331 : : * TwoPhaseShmemInit().
332 : : */
5302 rhaas@postgresql.org 333 [ + + ]: 140376 : if (i < MaxConnections)
334 : : {
335 : : /* PGPROC for normal backend, add to freeProcs list */
654 heikki.linnakangas@i 336 : 69401 : dlist_push_tail(&ProcGlobal->freeProcs, &proc->links);
1064 andres@anarazel.de 337 : 69401 : proc->procgloballist = &ProcGlobal->freeProcs;
338 : : }
345 nathan@postgresql.or 339 [ + + ]: 70975 : else if (i < MaxConnections + autovacuum_worker_slots + NUM_SPECIAL_WORKER_PROCS)
340 : : {
341 : : /* PGPROC for AV or special worker, add to autovacFreeProcs list */
654 heikki.linnakangas@i 342 : 13860 : dlist_push_tail(&ProcGlobal->autovacFreeProcs, &proc->links);
1064 andres@anarazel.de 343 : 13860 : proc->procgloballist = &ProcGlobal->autovacFreeProcs;
344 : : }
345 nathan@postgresql.or 345 [ + + ]: 57115 : else if (i < MaxConnections + autovacuum_worker_slots + NUM_SPECIAL_WORKER_PROCS + max_worker_processes)
346 : : {
347 : : /* PGPROC for bgworker, add to bgworkerFreeProcs list */
654 heikki.linnakangas@i 348 : 8550 : dlist_push_tail(&ProcGlobal->bgworkerFreeProcs, &proc->links);
1064 andres@anarazel.de 349 : 8550 : proc->procgloballist = &ProcGlobal->bgworkerFreeProcs;
350 : : }
1345 rhaas@postgresql.org 351 [ + + ]: 48565 : else if (i < MaxBackends)
352 : : {
353 : : /* PGPROC for walsender, add to walsenderFreeProcs list */
654 heikki.linnakangas@i 354 : 7102 : dlist_push_tail(&ProcGlobal->walsenderFreeProcs, &proc->links);
1064 andres@anarazel.de 355 : 7102 : proc->procgloballist = &ProcGlobal->walsenderFreeProcs;
356 : : }
357 : :
358 : : /* Initialize myProcLocks[] shared memory queues. */
5160 rhaas@postgresql.org 359 [ + + ]: 2386392 : for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
1064 andres@anarazel.de 360 : 2246016 : dlist_init(&(proc->myProcLocks[j]));
361 : :
362 : : /* Initialize lockGroupMembers list. */
363 : 140376 : dlist_init(&proc->lockGroupMembers);
364 : :
365 : : /*
366 : : * Initialize the atomic variables, otherwise, it won't be safe to
367 : : * access them for backends that aren't currently in use.
368 : : */
654 heikki.linnakangas@i 369 : 140376 : pg_atomic_init_u32(&(proc->procArrayGroupNext), INVALID_PROC_NUMBER);
370 : 140376 : pg_atomic_init_u32(&(proc->clogGroupNext), INVALID_PROC_NUMBER);
1064 andres@anarazel.de 371 : 140376 : pg_atomic_init_u64(&(proc->waitStart), 0);
372 : : }
373 : :
374 : : /* Should have consumed exactly the expected amount of fast-path memory. */
450 tomas.vondra@postgre 375 [ - + ]: 1069 : Assert(fpPtr == fpEndPtr);
376 : :
377 : : /*
378 : : * Save pointers to the blocks of PGPROC structures reserved for auxiliary
379 : : * processes and prepared transactions.
380 : : */
1345 rhaas@postgresql.org 381 : 1069 : AuxiliaryProcs = &procs[MaxBackends];
382 : 1069 : PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
383 : :
384 : : /* Create ProcStructLock spinlock, too */
259 tomas.vondra@postgre 385 : 1069 : ProcStructLock = (slock_t *) ShmemInitStruct("ProcStructLock spinlock",
386 : : sizeof(slock_t),
387 : : &found);
7287 tgl@sss.pgh.pa.us 388 : 1069 : SpinLockInit(ProcStructLock);
10753 scrappy@hub.org 389 : 1069 : }
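 : : /*
 : :  * Editorial note, not part of proc.c: with the assignments made in the loop
 : :  * above, the TotalProcs entries of allProcs are carved up by index as
 : :  * follows; each range feeds the free list or pointer named on the right.
 : :  *
 : :  *		[0, MaxConnections)						-> freeProcs
 : :  *		[..., + autovacuum_worker_slots
 : :  *			   + NUM_SPECIAL_WORKER_PROCS)		-> autovacFreeProcs
 : :  *		[..., + max_worker_processes)			-> bgworkerFreeProcs
 : :  *		[..., MaxBackends)						-> walsenderFreeProcs
 : :  *		[MaxBackends, + NUM_AUXILIARY_PROCS)	-> AuxiliaryProcs
 : :  *		[..., + max_prepared_xacts)				-> PreparedXactProcs
 : :  */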
390 : :
391 : : /*
392 : : * InitProcess -- initialize a per-process PGPROC entry for this backend
393 : : */
394 : : void
9150 tgl@sss.pgh.pa.us 395 : 15469 : InitProcess(void)
396 : : {
397 : : dlist_head *procgloballist;
398 : :
399 : : /*
400 : : * ProcGlobal should be set up already (if we are a backend, we inherit
401 : : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
402 : : */
3715 rhaas@postgresql.org 403 [ - + ]: 15469 : if (ProcGlobal == NULL)
8182 tgl@sss.pgh.pa.us 404 [ # # ]:UBC 0 : elog(PANIC, "proc header uninitialized");
405 : :
8867 tgl@sss.pgh.pa.us 406 [ - + ]:CBC 15469 : if (MyProc != NULL)
8182 tgl@sss.pgh.pa.us 407 [ # # ]:UBC 0 : elog(ERROR, "you already exist");
408 : :
409 : : /*
410 : : * Before we start accessing the shared memory in a serious way, mark
411 : : * ourselves as an active postmaster child; this is so that the postmaster
412 : : * can detect it if we exit without cleaning up.
413 : : */
398 heikki.linnakangas@i 414 [ + + ]:CBC 15469 : if (IsUnderPostmaster)
435 415 : 15348 : RegisterPostmasterChildActive();
416 : :
417 : : /*
418 : : * Decide which list should supply our PGPROC. This logic must match the
419 : : * way the freelists were constructed in InitProcGlobal().
420 : : */
354 tgl@sss.pgh.pa.us 421 [ + + + + : 15469 : if (AmAutoVacuumWorkerProcess() || AmSpecialWorkerProcess())
+ + ]
3715 rhaas@postgresql.org 422 : 421 : procgloballist = &ProcGlobal->autovacFreeProcs;
653 heikki.linnakangas@i 423 [ + + ]: 15048 : else if (AmBackgroundWorkerProcess())
3715 rhaas@postgresql.org 424 : 2412 : procgloballist = &ProcGlobal->bgworkerFreeProcs;
653 heikki.linnakangas@i 425 [ + + ]: 12636 : else if (AmWalSenderProcess())
2500 michael@paquier.xyz 426 : 1124 : procgloballist = &ProcGlobal->walsenderFreeProcs;
427 : : else
3715 rhaas@postgresql.org 428 : 11512 : procgloballist = &ProcGlobal->freeProcs;
429 : :
430 : : /*
431 : : * Try to get a proc struct from the appropriate free list. If this
432 : : * fails, we must be out of PGPROC structures (not to mention semaphores).
433 : : *
434 : : * While we are holding the ProcStructLock, also copy the current shared
435 : : * estimate of spins_per_delay to local storage.
436 : : */
8845 tgl@sss.pgh.pa.us 437 [ + + ]: 15469 : SpinLockAcquire(ProcStructLock);
438 : :
3715 rhaas@postgresql.org 439 : 15469 : set_spins_per_delay(ProcGlobal->spins_per_delay);
440 : :
1064 andres@anarazel.de 441 [ + + ]: 15469 : if (!dlist_is_empty(procgloballist))
442 : : {
530 heikki.linnakangas@i 443 : 15466 : MyProc = dlist_container(PGPROC, links, dlist_pop_head_node(procgloballist));
8845 tgl@sss.pgh.pa.us 444 : 15466 : SpinLockRelease(ProcStructLock);
445 : : }
446 : : else
447 : : {
448 : : /*
449 : : * If we reach here, all the PGPROCs are in use. This is one of the
450 : : * possible places to detect "too many backends", so give the standard
451 : : * error message. XXX do we need to give a different failure message
452 : : * in the autovacuum case?
453 : : */
454 : 3 : SpinLockRelease(ProcStructLock);
653 heikki.linnakangas@i 455 [ + + ]: 3 : if (AmWalSenderProcess())
2500 michael@paquier.xyz 456 [ + - ]: 2 : ereport(FATAL,
457 : : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
458 : : errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
459 : : max_wal_senders)));
8182 tgl@sss.pgh.pa.us 460 [ + - ]: 1 : ereport(FATAL,
461 : : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
462 : : errmsg("sorry, too many clients already")));
463 : : }
664 heikki.linnakangas@i 464 : 15466 : MyProcNumber = GetNumberFromPGProc(MyProc);
465 : :
466 : : /*
467 : : * Cross-check that the PGPROC is of the type we expect; if this were not
468 : : * the case, it would get returned to the wrong list.
469 : : */
3795 rhaas@postgresql.org 470 [ - + ]: 15466 : Assert(MyProc->procgloballist == procgloballist);
471 : :
472 : : /*
473 : : * Initialize all fields of MyProc, except for those previously
474 : : * initialized by InitProcGlobal.
475 : : */
1064 andres@anarazel.de 476 : 15466 : dlist_node_init(&MyProc->links);
2009 peter@eisentraut.org 477 : 15466 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
4766 simon@2ndQuadrant.co 478 : 15466 : MyProc->fpVXIDLock = false;
479 : 15466 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
1951 andres@anarazel.de 480 : 15466 : MyProc->xid = InvalidTransactionId;
1952 481 : 15466 : MyProc->xmin = InvalidTransactionId;
8845 tgl@sss.pgh.pa.us 482 : 15466 : MyProc->pid = MyProcPid;
654 heikki.linnakangas@i 483 : 15466 : MyProc->vxid.procNumber = MyProcNumber;
484 : 15466 : MyProc->vxid.lxid = InvalidLocalTransactionId;
485 : : /* databaseId and roleId will be filled in later */
7287 tgl@sss.pgh.pa.us 486 : 15466 : MyProc->databaseId = InvalidOid;
7444 487 : 15466 : MyProc->roleId = InvalidOid;
2683 michael@paquier.xyz 488 : 15466 : MyProc->tempNamespaceId = InvalidOid;
354 tgl@sss.pgh.pa.us 489 : 15466 : MyProc->isRegularBackend = AmRegularBackendProcess();
1349 rhaas@postgresql.org 490 : 15466 : MyProc->delayChkptFlags = 0;
1857 alvherre@alvh.no-ip. 491 : 15466 : MyProc->statusFlags = 0;
492 : : /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
653 heikki.linnakangas@i 493 [ + + ]: 15466 : if (AmAutoVacuumWorkerProcess())
1857 alvherre@alvh.no-ip. 494 : 35 : MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
1123 andres@anarazel.de 495 : 15466 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
5070 heikki.linnakangas@i 496 : 15466 : MyProc->lwWaitMode = 0;
9095 tgl@sss.pgh.pa.us 497 : 15466 : MyProc->waitLock = NULL;
7782 498 : 15466 : MyProc->waitProcLock = NULL;
1759 fujii@postgresql.org 499 : 15466 : pg_atomic_write_u64(&MyProc->waitStart, 0);
500 : : #ifdef USE_ASSERT_CHECKING
501 : : {
502 : : int i;
503 : :
504 : : /* Last process should have released all locks. */
5160 rhaas@postgresql.org 505 [ + + ]: 262922 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1064 andres@anarazel.de 506 [ - + ]: 247456 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
507 : : }
508 : : #endif
5814 simon@2ndQuadrant.co 509 : 15466 : MyProc->recoveryConflictPending = false;
510 : :
511 : : /* Initialize fields for sync rep */
4924 heikki.linnakangas@i 512 : 15466 : MyProc->waitLSN = 0;
5400 simon@2ndQuadrant.co 513 : 15466 : MyProc->syncRepState = SYNC_REP_NOT_WAITING;
1064 andres@anarazel.de 514 : 15466 : dlist_node_init(&MyProc->syncRepLinks);
515 : :
516 : : /* Initialize fields for group XID clearing. */
3597 rhaas@postgresql.org 517 : 15466 : MyProc->procArrayGroupMember = false;
518 : 15466 : MyProc->procArrayGroupMemberXid = InvalidTransactionId;
654 heikki.linnakangas@i 519 [ - + ]: 15466 : Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PROC_NUMBER);
520 : :
521 : : /* Check that group locking fields are in a proper initial state. */
3601 rhaas@postgresql.org 522 [ - + ]: 15466 : Assert(MyProc->lockGroupLeader == NULL);
523 [ - + ]: 15466 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
524 : :
525 : : /* Initialize wait event information. */
3569 526 : 15466 : MyProc->wait_event_info = 0;
527 : :
528 : : /* Initialize fields for group transaction status update. */
3029 529 : 15466 : MyProc->clogGroupMember = false;
530 : 15466 : MyProc->clogGroupMemberXid = InvalidTransactionId;
531 : 15466 : MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
532 : 15466 : MyProc->clogGroupMemberPage = -1;
533 : 15466 : MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
654 heikki.linnakangas@i 534 [ - + ]: 15466 : Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PROC_NUMBER);
535 : :
536 : : /*
537 : : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
538 : : * on it. That allows us to repoint the process latch, which so far
539 : : * points to the process-local one, to the shared one.
540 : : */
5243 tgl@sss.pgh.pa.us 541 : 15466 : OwnLatch(&MyProc->procLatch);
3990 andres@anarazel.de 542 : 15466 : SwitchToSharedLatch();
543 : :
544 : : /* now that we have a proc, report wait events to shared memory */
1719 545 : 15466 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
546 : :
547 : : /*
548 : : * We might be reusing a semaphore that belonged to a failed process. So
549 : : * be careful and reinitialize its value here. (This is not strictly
550 : : * necessary anymore, but seems like a good idea for cleanliness.)
551 : : */
3292 tgl@sss.pgh.pa.us 552 : 15466 : PGSemaphoreReset(MyProc->sem);
553 : :
554 : : /*
555 : : * Arrange to clean up at backend exit.
556 : : */
9103 557 : 15466 : on_shmem_exit(ProcKill, 0);
558 : :
559 : : /*
560 : : * Now that we have a PGPROC, we could try to acquire locks, so initialize
561 : : * local state needed for LWLocks, and the deadlock checker.
562 : : */
4188 heikki.linnakangas@i 563 : 15466 : InitLWLockAccess();
9092 tgl@sss.pgh.pa.us 564 : 15466 : InitDeadLockChecking();
565 : :
566 : : #ifdef EXEC_BACKEND
567 : :
568 : : /*
569 : : * Initialize backend-local pointers to all the shared data structures.
570 : : * (We couldn't do this until now because it needs LWLocks.)
571 : : */
572 : : if (IsUnderPostmaster)
573 : : AttachSharedMemoryStructs();
574 : : #endif
9103 575 : 15466 : }
576 : :
577 : : /*
578 : : * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
579 : : *
580 : : * This is separate from InitProcess because we can't acquire LWLocks until
581 : : * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
582 : : * work until after we've done AttachSharedMemoryStructs.
583 : : */
584 : : void
7287 585 : 15457 : InitProcessPhase2(void)
586 : : {
587 [ - + ]: 15457 : Assert(MyProc != NULL);
588 : :
589 : : /*
590 : : * Add our PGPROC to the PGPROC array in shared memory.
591 : : */
592 : 15457 : ProcArrayAdd(MyProc);
593 : :
594 : : /*
595 : : * Arrange to clean that up at backend exit.
596 : : */
597 : 15457 : on_shmem_exit(RemoveProcFromArray, 0);
598 : 15457 : }
599 : :
600 : : /*
601 : : * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
602 : : *
603 : : * This is called by bgwriter and similar processes so that they will have a
604 : : * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
605 : : * and sema that are assigned are one of the extra ones created during
606 : : * InitProcGlobal.
607 : : *
608 : : * Auxiliary processes are presently not expected to wait for real (lockmgr)
609 : : * locks, so we need not set up the deadlock checker. They are never added
610 : : * to the ProcArray or the sinval messaging mechanism, either. They also
611 : : * don't get a VXID assigned, since this is only useful when we actually
612 : : * hold lockmgr locks.
613 : : *
614 : : * The startup process, however, does use locks, but never waits for them
615 : : * in the normal backend sense. It also takes part in sinval messaging as a
616 : : * sendOnly process, so it never reads messages from the sinval queue. Thus,
617 : : * the startup process does have a VXID and does show up in pg_locks.
618 : : */
619 : : void
6860 alvherre@alvh.no-ip. 620 : 4117 : InitAuxiliaryProcess(void)
621 : : {
622 : : PGPROC *auxproc;
623 : : int proctype;
624 : :
625 : : /*
626 : : * ProcGlobal should be set up already (if we are a backend, we inherit
627 : : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
628 : : */
629 [ + - - + ]: 4117 : if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
8182 tgl@sss.pgh.pa.us 630 [ # # ]:UBC 0 : elog(PANIC, "proc header uninitialized");
631 : :
8845 tgl@sss.pgh.pa.us 632 [ - + ]:CBC 4117 : if (MyProc != NULL)
8182 tgl@sss.pgh.pa.us 633 [ # # ]:UBC 0 : elog(ERROR, "you already exist");
634 : :
398 heikki.linnakangas@i 635 [ + - ]:CBC 4117 : if (IsUnderPostmaster)
636 : 4117 : RegisterPostmasterChildActive();
637 : :
638 : : /*
639 : : * We use the ProcStructLock to protect assignment and releasing of
640 : : * AuxiliaryProcs entries.
641 : : *
642 : : * While we are holding the ProcStructLock, also copy the current shared
643 : : * estimate of spins_per_delay to local storage.
644 : : */
7372 tgl@sss.pgh.pa.us 645 [ + + ]: 4117 : SpinLockAcquire(ProcStructLock);
646 : :
647 : 4117 : set_spins_per_delay(ProcGlobal->spins_per_delay);
648 : :
649 : : /*
650 : : * Find a free auxproc ... *big* trouble if there isn't one ...
651 : : */
6860 alvherre@alvh.no-ip. 652 [ + - ]: 16840 : for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
653 : : {
654 : 16840 : auxproc = &AuxiliaryProcs[proctype];
655 [ + + ]: 16840 : if (auxproc->pid == 0)
7287 tgl@sss.pgh.pa.us 656 : 4117 : break;
657 : : }
6860 alvherre@alvh.no-ip. 658 [ - + ]: 4117 : if (proctype >= NUM_AUXILIARY_PROCS)
659 : : {
7372 tgl@sss.pgh.pa.us 660 :UBC 0 : SpinLockRelease(ProcStructLock);
6860 alvherre@alvh.no-ip. 661 [ # # ]: 0 : elog(FATAL, "all AuxiliaryProcs are in use");
662 : : }
663 : :
664 : : /* Mark auxiliary proc as in use by me */
665 : : /* use volatile pointer to prevent code rearrangement */
6860 alvherre@alvh.no-ip. 666 :CBC 4117 : ((volatile PGPROC *) auxproc)->pid = MyProcPid;
667 : :
7372 tgl@sss.pgh.pa.us 668 : 4117 : SpinLockRelease(ProcStructLock);
669 : :
654 heikki.linnakangas@i 670 : 4117 : MyProc = auxproc;
664 671 : 4117 : MyProcNumber = GetNumberFromPGProc(MyProc);
672 : :
673 : : /*
674 : : * Initialize all fields of MyProc, except for those previously
675 : : * initialized by InitProcGlobal.
676 : : */
1064 andres@anarazel.de 677 : 4117 : dlist_node_init(&MyProc->links);
2009 peter@eisentraut.org 678 : 4117 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
4766 simon@2ndQuadrant.co 679 : 4117 : MyProc->fpVXIDLock = false;
680 : 4117 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
1951 andres@anarazel.de 681 : 4117 : MyProc->xid = InvalidTransactionId;
1952 682 : 4117 : MyProc->xmin = InvalidTransactionId;
654 heikki.linnakangas@i 683 : 4117 : MyProc->vxid.procNumber = INVALID_PROC_NUMBER;
684 : 4117 : MyProc->vxid.lxid = InvalidLocalTransactionId;
7287 tgl@sss.pgh.pa.us 685 : 4117 : MyProc->databaseId = InvalidOid;
7444 686 : 4117 : MyProc->roleId = InvalidOid;
2683 michael@paquier.xyz 687 : 4117 : MyProc->tempNamespaceId = InvalidOid;
354 tgl@sss.pgh.pa.us 688 : 4117 : MyProc->isRegularBackend = false;
1349 rhaas@postgresql.org 689 : 4117 : MyProc->delayChkptFlags = 0;
1857 alvherre@alvh.no-ip. 690 : 4117 : MyProc->statusFlags = 0;
1123 andres@anarazel.de 691 : 4117 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
5070 heikki.linnakangas@i 692 : 4117 : MyProc->lwWaitMode = 0;
8845 tgl@sss.pgh.pa.us 693 : 4117 : MyProc->waitLock = NULL;
7782 694 : 4117 : MyProc->waitProcLock = NULL;
1759 fujii@postgresql.org 695 : 4117 : pg_atomic_write_u64(&MyProc->waitStart, 0);
696 : : #ifdef USE_ASSERT_CHECKING
697 : : {
698 : : int i;
699 : :
700 : : /* Last process should have released all locks. */
5160 rhaas@postgresql.org 701 [ + + ]: 69989 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1064 andres@anarazel.de 702 [ - + ]: 65872 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
703 : : }
704 : : #endif
705 : :
706 : : /*
707 : : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
708 : : * on it. That allows us to repoint the process latch, which so far
709 : : * points to the process-local one, to the shared one.
710 : : */
5243 tgl@sss.pgh.pa.us 711 : 4117 : OwnLatch(&MyProc->procLatch);
3990 andres@anarazel.de 712 : 4117 : SwitchToSharedLatch();
713 : :
714 : : /* now that we have a proc, report wait events to shared memory */
1719 715 : 4117 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
716 : :
717 : : /* Check that group locking fields are in a proper initial state. */
3601 rhaas@postgresql.org 718 [ - + ]: 4117 : Assert(MyProc->lockGroupLeader == NULL);
719 [ - + ]: 4117 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
720 : :
721 : : /*
722 : : * We might be reusing a semaphore that belonged to a failed process. So
723 : : * be careful and reinitialize its value here. (This is not strictly
724 : : * necessary anymore, but seems like a good idea for cleanliness.)
725 : : */
3292 tgl@sss.pgh.pa.us 726 : 4117 : PGSemaphoreReset(MyProc->sem);
727 : :
728 : : /*
729 : : * Arrange to clean up at process exit.
730 : : */
6860 alvherre@alvh.no-ip. 731 : 4117 : on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
732 : :
733 : : /*
734 : : * Now that we have a PGPROC, we could try to acquire lightweight locks.
735 : : * Initialize local state needed for them. (Heavyweight locks cannot be
736 : : * acquired in aux processes.)
737 : : */
747 heikki.linnakangas@i 738 : 4117 : InitLWLockAccess();
739 : :
740 : : #ifdef EXEC_BACKEND
741 : :
742 : : /*
743 : : * Initialize backend-local pointers to all the shared data structures.
744 : : * (We couldn't do this until now because it needs LWLocks.)
745 : : */
746 : : if (IsUnderPostmaster)
747 : : AttachSharedMemoryStructs();
748 : : #endif
10753 scrappy@hub.org 749 : 4117 : }
750 : :
751 : : /*
752 : : * Used from bufmgr to share the value of the buffer that Startup waits on,
753 : : * or to reset the value to "not waiting" (-1). This allows processing
754 : : * of recovery conflicts for buffer pins. The set is made before backends
755 : : * look at this value, so locking is not required, especially since the set is
756 : : * an atomic integer set operation.
757 : : */
758 : : void
5807 simon@2ndQuadrant.co 759 : 16 : SetStartupBufferPinWaitBufId(int bufid)
760 : : {
761 : : /* use volatile pointer to prevent code rearrangement */
762 : 16 : volatile PROC_HDR *procglobal = ProcGlobal;
763 : :
764 : 16 : procglobal->startupBufferPinWaitBufId = bufid;
765 : 16 : }
766 : :
767 : : /*
768 : : * Used by backends when they receive a request to check for buffer pin waits.
769 : : */
770 : : int
771 : 3 : GetStartupBufferPinWaitBufId(void)
772 : : {
773 : : /* use volatile pointer to prevent code rearrangement */
774 : 3 : volatile PROC_HDR *procglobal = ProcGlobal;
775 : :
5251 tgl@sss.pgh.pa.us 776 : 3 : return procglobal->startupBufferPinWaitBufId;
777 : : }
778 : :
779 : : /*
780 : : * Check whether there are at least N free PGPROC objects. If false is
781 : : * returned, *nfree will be set to the number of free PGPROC objects.
782 : : * Otherwise, *nfree will be set to n.
783 : : *
784 : : * Note: this is designed on the assumption that N will generally be small.
785 : : */
786 : : bool
1062 rhaas@postgresql.org 787 : 363 : HaveNFreeProcs(int n, int *nfree)
788 : : {
789 : : dlist_iter iter;
790 : :
791 [ - + ]: 363 : Assert(n > 0);
792 [ - + ]: 363 : Assert(nfree);
793 : :
7488 tgl@sss.pgh.pa.us 794 [ - + ]: 363 : SpinLockAcquire(ProcStructLock);
795 : :
1062 rhaas@postgresql.org 796 : 363 : *nfree = 0;
1064 andres@anarazel.de 797 [ + - + + ]: 1087 : dlist_foreach(iter, &ProcGlobal->freeProcs)
798 : : {
1062 rhaas@postgresql.org 799 : 1085 : (*nfree)++;
800 [ + + ]: 1085 : if (*nfree == n)
1064 andres@anarazel.de 801 : 361 : break;
802 : : }
803 : :
7488 tgl@sss.pgh.pa.us 804 : 363 : SpinLockRelease(ProcStructLock);
805 : :
1062 rhaas@postgresql.org 806 : 363 : return (*nfree == n);
807 : : }
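 : : /*
 : :  * Editorial sketch, not part of proc.c: one hypothetical way a caller could
 : :  * use HaveNFreeProcs() to enforce a reserved-slot policy.  The variable
 : :  * reserved_connections and the error wording are placeholders, not the
 : :  * actual callers' code.
 : :  *
 : :  *		int		nfree;
 : :  *
 : :  *		if (!HaveNFreeProcs(reserved_connections + 1, &nfree))
 : :  *			ereport(FATAL,
 : :  *					(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
 : :  *					 errmsg("remaining connection slots are reserved")));
 : :  */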
808 : :
809 : : /*
810 : : * Cancel any pending wait for lock, when aborting a transaction, and revert
811 : : * any strong lock count acquisition for a lock being acquired.
812 : : *
813 : : * (Normally, this would only happen if we accept a cancel/die
814 : : * interrupt while waiting; but an ereport(ERROR) before or during the lock
815 : : * wait is within the realm of possibility, too.)
816 : : */
817 : : void
4991 818 : 363352 : LockErrorCleanup(void)
819 : : {
820 : : LOCALLOCK *lockAwaited;
821 : : LWLock *partitionLock;
822 : : DisableTimeoutParams timeouts[2];
823 : :
3971 heikki.linnakangas@i 824 : 363352 : HOLD_INTERRUPTS();
825 : :
4991 rhaas@postgresql.org 826 : 363352 : AbortStrongLockAcquire();
827 : :
828 : : /* Nothing to do if we weren't waiting for a lock */
408 heikki.linnakangas@i 829 : 363352 : lockAwaited = GetAwaitedLock();
7311 tgl@sss.pgh.pa.us 830 [ + + ]: 363352 : if (lockAwaited == NULL)
831 : : {
3971 heikki.linnakangas@i 832 [ - + ]: 363312 : RESUME_INTERRUPTS();
6535 tgl@sss.pgh.pa.us 833 : 363312 : return;
834 : : }
835 : :
836 : : /*
837 : : * Turn off the deadlock and lock timeout timers, if they are still
838 : : * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
839 : : * indicator flag, since this function is executed before
840 : : * ProcessInterrupts when responding to SIGINT; else we'd lose the
841 : : * knowledge that the SIGINT came from a lock timeout and not an external
842 : : * source.
843 : : */
4659 844 : 40 : timeouts[0].id = DEADLOCK_TIMEOUT;
845 : 40 : timeouts[0].keep_indicator = false;
846 : 40 : timeouts[1].id = LOCK_TIMEOUT;
847 : 40 : timeouts[1].keep_indicator = true;
848 : 40 : disable_timeouts(timeouts, 2);
849 : :
850 : : /* Unlink myself from the wait queue, if on it (might not be anymore!) */
7087 851 : 40 : partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
7311 852 : 40 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
853 : :
1064 andres@anarazel.de 854 [ + + ]: 40 : if (!dlist_node_is_detached(&MyProc->links))
855 : : {
856 : : /* We could not have been granted the lock yet */
7087 tgl@sss.pgh.pa.us 857 : 39 : RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
858 : : }
859 : : else
860 : : {
861 : : /*
862 : : * Somebody kicked us off the lock queue already. Perhaps they
863 : : * granted us the lock, or perhaps they detected a deadlock. If they
864 : : * did grant us the lock, we'd better remember it in our local lock
865 : : * table.
866 : : */
2009 peter@eisentraut.org 867 [ + - ]: 1 : if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
7782 tgl@sss.pgh.pa.us 868 : 1 : GrantAwaitedLock();
869 : : }
870 : :
264 heikki.linnakangas@i 871 : 40 : ResetAwaitedLock();
872 : :
7311 tgl@sss.pgh.pa.us 873 : 40 : LWLockRelease(partitionLock);
874 : :
3971 heikki.linnakangas@i 875 [ - + ]: 40 : RESUME_INTERRUPTS();
876 : : }
877 : :
878 : :
879 : : /*
880 : : * ProcReleaseLocks() -- release locks associated with current transaction
881 : : * at main transaction commit or abort
882 : : *
883 : : * At main transaction commit, we release standard locks except session locks.
884 : : * At main transaction abort, we release all locks including session locks.
885 : : *
886 : : * Advisory locks are released only if they are transaction-level;
887 : : * session-level holds remain, whether this is a commit or not.
888 : : *
889 : : * At subtransaction commit, we don't release any locks (so this func is not
890 : : * needed at all); we will defer the releasing to the parent transaction.
891 : : * At subtransaction abort, we release all locks held by the subtransaction;
892 : : * this is implemented by retail releasing of the locks under control of
893 : : * the ResourceOwner mechanism.
894 : : */
895 : : void
7823 tgl@sss.pgh.pa.us 896 : 331891 : ProcReleaseLocks(bool isCommit)
897 : : {
10328 bruce@momjian.us 898 [ - + ]: 331891 : if (!MyProc)
10328 bruce@momjian.us 899 :UBC 0 : return;
900 : : /* If waiting, get off wait queue (should only be needed after error) */
4991 rhaas@postgresql.org 901 :CBC 331891 : LockErrorCleanup();
902 : : /* Release standard locks, including session-level if aborting */
7782 tgl@sss.pgh.pa.us 903 : 331891 : LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
904 : : /* Release transaction-level advisory locks */
5416 itagaki.takahiro@gma 905 : 331891 : LockReleaseAll(USER_LOCKMETHOD, false);
906 : : }
907 : :
908 : :
909 : : /*
910 : : * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
911 : : */
912 : : static void
7287 tgl@sss.pgh.pa.us 913 : 15457 : RemoveProcFromArray(int code, Datum arg)
914 : : {
915 [ - + ]: 15457 : Assert(MyProc != NULL);
6675 916 : 15457 : ProcArrayRemove(MyProc, InvalidTransactionId);
7287 917 : 15457 : }
918 : :
919 : : /*
920 : : * ProcKill() -- Destroy the per-proc data structure for
921 : : * this process. Release any of its held LW locks.
922 : : */
923 : : static void
8041 peter_e@gmx.net 924 : 15466 : ProcKill(int code, Datum arg)
925 : : {
926 : : PGPROC *proc;
927 : : dlist_head *procgloballist;
928 : :
8867 tgl@sss.pgh.pa.us 929 [ - + ]: 15466 : Assert(MyProc != NULL);
930 : :
931 : : /* not safe if forked by system(), etc. */
792 nathan@postgresql.or 932 [ - + ]: 15466 : if (MyProc->pid != (int) getpid())
792 nathan@postgresql.or 933 [ # # ]:UBC 0 : elog(PANIC, "ProcKill() called in child process");
934 : :
935 : : /* Make sure we're out of the sync rep lists */
5243 tgl@sss.pgh.pa.us 936 :CBC 15466 : SyncRepCleanupAtProcExit();
937 : :
938 : : #ifdef USE_ASSERT_CHECKING
939 : : {
940 : : int i;
941 : :
942 : : /* Last process should have released all locks. */
5160 rhaas@postgresql.org 943 [ + + ]: 262922 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1064 andres@anarazel.de 944 [ - + ]: 247456 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
945 : : }
946 : : #endif
947 : :
948 : : /*
949 : : * Release any LW locks I am holding. There really shouldn't be any, but
950 : : * it's cheap to check again before we cut the knees off the LWLock
951 : : * facility by releasing our PGPROC ...
952 : : */
7436 tgl@sss.pgh.pa.us 953 : 15466 : LWLockReleaseAll();
954 : :
955 : : /*
956 : : * Clean up any pending LSN wait.
957 : : */
42 akorotkov@postgresql 958 :GNC 15466 : WaitLSNCleanup();
959 : :
960 : : /* Cancel any pending condition variable sleep, too */
3312 rhaas@postgresql.org 961 :CBC 15466 : ConditionVariableCancelSleep();
962 : :
963 : : /*
964 : : * Detach from any lock group of which we are a member. If the leader
965 : : * exits before all other group members, its PGPROC will remain allocated
966 : : * until the last group process exits; that process must return the
967 : : * leader's PGPROC to the appropriate list.
968 : : */
3601 969 [ + + ]: 15466 : if (MyProc->lockGroupLeader != NULL)
970 : : {
971 : 1516 : PGPROC *leader = MyProc->lockGroupLeader;
972 : 1516 : LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
973 : :
974 : 1516 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
975 [ - + ]: 1516 : Assert(!dlist_is_empty(&leader->lockGroupMembers));
976 : 1516 : dlist_delete(&MyProc->lockGroupLink);
977 [ + + ]: 1516 : if (dlist_is_empty(&leader->lockGroupMembers))
978 : : {
979 : 76 : leader->lockGroupLeader = NULL;
980 [ - + ]: 76 : if (leader != MyProc)
981 : : {
3601 rhaas@postgresql.org 982 :UBC 0 : procgloballist = leader->procgloballist;
983 : :
984 : : /* Leader exited first; return its PGPROC. */
985 [ # # ]: 0 : SpinLockAcquire(ProcStructLock);
1064 andres@anarazel.de 986 : 0 : dlist_push_head(procgloballist, &leader->links);
3601 rhaas@postgresql.org 987 : 0 : SpinLockRelease(ProcStructLock);
988 : : }
989 : : }
3601 rhaas@postgresql.org 990 [ + - ]:CBC 1440 : else if (leader != MyProc)
991 : 1440 : MyProc->lockGroupLeader = NULL;
992 : 1516 : LWLockRelease(leader_lwlock);
993 : : }
994 : :
995 : : /*
996 : : * Reset MyLatch to the process local one. This is so that signal
997 : : * handlers et al can continue using the latch after the shared latch
998 : : * isn't ours anymore.
999 : : *
1000 : : * Similarly, stop reporting wait events to MyProc->wait_event_info.
1001 : : *
1002 : : * After that clear MyProc and disown the shared latch.
1003 : : */
3990 andres@anarazel.de 1004 : 15466 : SwitchBackToLocalLatch();
1719 1005 : 15466 : pgstat_reset_wait_event_storage();
1006 : :
4338 rhaas@postgresql.org 1007 : 15466 : proc = MyProc;
1008 : 15466 : MyProc = NULL;
654 heikki.linnakangas@i 1009 : 15466 : MyProcNumber = INVALID_PROC_NUMBER;
4338 rhaas@postgresql.org 1010 : 15466 : DisownLatch(&proc->procLatch);
1011 : :
1012 : : /* Mark the proc no longer in use */
654 heikki.linnakangas@i 1013 : 15466 : proc->pid = 0;
1014 : 15466 : proc->vxid.procNumber = INVALID_PROC_NUMBER;
1015 : 15466 : proc->vxid.lxid = InvalidTransactionId;
1016 : :
3795 rhaas@postgresql.org 1017 : 15466 : procgloballist = proc->procgloballist;
8845 tgl@sss.pgh.pa.us 1018 [ + + ]: 15466 : SpinLockAcquire(ProcStructLock);
1019 : :
1020 : : /*
1021 : : * If we're still a member of a locking group, that means we're a leader
1022 : : * which has somehow exited before its children. The last remaining child
1023 : : * will release our PGPROC. Otherwise, release it now.
1024 : : */
3601 rhaas@postgresql.org 1025 [ + - ]: 15466 : if (proc->lockGroupLeader == NULL)
1026 : : {
1027 : : /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
1028 [ - + ]: 15466 : Assert(dlist_is_empty(&proc->lockGroupMembers));
1029 : :
1030 : : /* Return PGPROC structure (and semaphore) to appropriate freelist */
1064 andres@anarazel.de 1031 : 15466 : dlist_push_tail(procgloballist, &proc->links);
1032 : : }
1033 : :
1034 : : /* Update shared estimate of spins_per_delay */
3715 rhaas@postgresql.org 1035 : 15466 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
1036 : :
8845 tgl@sss.pgh.pa.us 1037 : 15466 : SpinLockRelease(ProcStructLock);
1038 : :
1039 : : /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
6820 alvherre@alvh.no-ip. 1040 [ + + ]: 15466 : if (AutovacuumLauncherPid != 0)
5952 tgl@sss.pgh.pa.us 1041 : 34 : kill(AutovacuumLauncherPid, SIGUSR2);
8845 1042 : 15466 : }
1043 : :
1044 : : /*
1045 : : * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
1046 : : * processes (bgwriter, etc). The PGPROC and sema are not released, only
1047 : : * marked as not-in-use.
1048 : : */
1049 : : static void
6860 alvherre@alvh.no-ip. 1050 : 4117 : AuxiliaryProcKill(int code, Datum arg)
1051 : : {
7780 bruce@momjian.us 1052 : 4117 : int proctype = DatumGetInt32(arg);
1053 : : PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
1054 : : PGPROC *proc;
1055 : :
6860 alvherre@alvh.no-ip. 1056 [ + - - + ]: 4117 : Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
1057 : :
1058 : : /* not safe if forked by system(), etc. */
792 nathan@postgresql.or 1059 [ - + ]: 4117 : if (MyProc->pid != (int) getpid())
792 nathan@postgresql.or 1060 [ # # ]:UBC 0 : elog(PANIC, "AuxiliaryProcKill() called in child process");
1061 : :
6860 alvherre@alvh.no-ip. 1062 :CBC 4117 : auxproc = &AuxiliaryProcs[proctype];
1063 : :
1064 [ - + ]: 4117 : Assert(MyProc == auxproc);
1065 : :
1066 : : /* Release any LW locks I am holding (see notes above) */
8845 tgl@sss.pgh.pa.us 1067 : 4117 : LWLockReleaseAll();
1068 : :
1069 : : /* Cancel any pending condition variable sleep, too */
3312 rhaas@postgresql.org 1070 : 4117 : ConditionVariableCancelSleep();
1071 : :
1072 : : /* look at the equivalent ProcKill() code for comments */
3990 andres@anarazel.de 1073 : 4117 : SwitchBackToLocalLatch();
1719 1074 : 4117 : pgstat_reset_wait_event_storage();
1075 : :
4338 rhaas@postgresql.org 1076 : 4117 : proc = MyProc;
1077 : 4117 : MyProc = NULL;
654 heikki.linnakangas@i 1078 : 4117 : MyProcNumber = INVALID_PROC_NUMBER;
4338 rhaas@postgresql.org 1079 : 4117 : DisownLatch(&proc->procLatch);
1080 : :
7372 tgl@sss.pgh.pa.us 1081 [ + + ]: 4117 : SpinLockAcquire(ProcStructLock);
1082 : :
1083 : : /* Mark auxiliary proc no longer in use */
4338 rhaas@postgresql.org 1084 : 4117 : proc->pid = 0;
654 heikki.linnakangas@i 1085 : 4117 : proc->vxid.procNumber = INVALID_PROC_NUMBER;
1086 : 4117 : proc->vxid.lxid = InvalidTransactionId;
1087 : :
1088 : : /* Update shared estimate of spins_per_delay */
7372 tgl@sss.pgh.pa.us 1089 : 4117 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
1090 : :
1091 : 4117 : SpinLockRelease(ProcStructLock);
10753 scrappy@hub.org 1092 : 4117 : }
1093 : :
1094 : : /*
1095 : : * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
1096 : : * given its PID
1097 : : *
1098 : : * Returns NULL if not found.
1099 : : */
1100 : : PGPROC *
3188 rhaas@postgresql.org 1101 : 5164 : AuxiliaryPidGetProc(int pid)
1102 : : {
1103 : 5164 : PGPROC *result = NULL;
1104 : : int index;
1105 : :
1106 [ + + ]: 5164 : if (pid == 0) /* never match dummy PGPROCs */
1107 : 3 : return NULL;
1108 : :
1109 [ + - ]: 23376 : for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1110 : : {
1111 : 23376 : PGPROC *proc = &AuxiliaryProcs[index];
1112 : :
1113 [ + + ]: 23376 : if (proc->pid == pid)
1114 : : {
1115 : 5161 : result = proc;
1116 : 5161 : break;
1117 : : }
1118 : : }
1119 : 5161 : return result;
1120 : : }
1121 : :
1122 : :
1123 : : /*
1124 : : * JoinWaitQueue -- join the wait queue on the specified lock
1125 : : *
1126 : : * It's not actually guaranteed that we need to wait when this function is
1127 : : * called, because it could be that when we try to find a position at which
1128 : : * to insert ourselves into the wait queue, we discover that we must be inserted
1129 : : * ahead of everyone who wants a lock that conflicts with ours. In that case,
1130 : : * we get the lock immediately. Because of this, it's sensible for this function
1131 : : * to have a dontWait argument, despite the name.
1132 : : *
1133 : : * On entry, the caller has already set up LOCK and PROCLOCK entries to
1134 : : * reflect that we have "requested" the lock. The caller is responsible for
1135 : : * cleaning that up, if we end up not joining the queue after all.
1136 : : *
1137 : : * The lock table's partition lock must be held at entry, and is still held
1138 : : * at exit. The caller must release it before calling ProcSleep().
1139 : : *
1140 : : * Result is one of the following:
1141 : : *
1142 : : * PROC_WAIT_STATUS_OK - lock was immediately granted
1143 : : * PROC_WAIT_STATUS_WAITING - joined the wait queue; call ProcSleep()
1144 : : * PROC_WAIT_STATUS_ERROR - immediate deadlock was detected, or would
1145 : : * need to wait and dontWait == true
1146 : : *
1147 : : * NOTES: The process queue is now a priority queue for locking.
1148 : : */
1149 : : ProcWaitStatus
408 heikki.linnakangas@i 1150 : 2138 : JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
1151 : : {
7311 tgl@sss.pgh.pa.us 1152 : 2138 : LOCKMODE lockmode = locallock->tag.mode;
1153 : 2138 : LOCK *lock = locallock->lock;
1154 : 2138 : PROCLOCK *proclock = locallock->proclock;
7087 1155 : 2138 : uint32 hashcode = locallock->hashcode;
408 heikki.linnakangas@i 1156 : 2138 : LWLock *partitionLock PG_USED_FOR_ASSERTS_ONLY = LockHashPartitionLock(hashcode);
1064 andres@anarazel.de 1157 : 2138 : dclist_head *waitQueue = &lock->waitProcs;
943 tgl@sss.pgh.pa.us 1158 : 2138 : PGPROC *insert_before = NULL;
1159 : : LOCKMASK myProcHeldLocks;
1160 : : LOCKMASK myHeldLocks;
8870 1161 : 2138 : bool early_deadlock = false;
3601 rhaas@postgresql.org 1162 : 2138 : PGPROC *leader = MyProc->lockGroupLeader;
1163 : :
408 heikki.linnakangas@i 1164 [ - + ]: 2138 : Assert(LWLockHeldByMeInMode(partitionLock, LW_EXCLUSIVE));
1165 : :
1166 : : /*
1167 : : * Set bitmask of locks this process already holds on this object.
1168 : : */
1169 : 2138 : myHeldLocks = MyProc->heldLocks = proclock->holdMask;
1170 : :
1171 : : /*
1172 : : * Determine which locks we're already holding.
1173 : : *
1174 : : * If group locking is in use, locks held by members of my locking group
1175 : : * need to be included in myHeldLocks. This is not required for relation
1176 : : * extension locks, which conflict among group members. However, including
1177 : : * them in myHeldLocks will give group members the priority to get those
1178 : : * locks as compared to other backends which are also trying to acquire
1179 : : * those locks. OTOH, we can avoid giving priority to group members for
1180 : : * that kind of lock, but there doesn't appear to be a clear advantage in
1181 : : * doing so.
1182 : : */
1183 : 2138 : myProcHeldLocks = proclock->holdMask;
1184 : 2138 : myHeldLocks = myProcHeldLocks;
3601 rhaas@postgresql.org 1185 [ + + ]: 2138 : if (leader != NULL)
1186 : : {
1187 : : dlist_iter iter;
1188 : :
1064 andres@anarazel.de 1189 [ + - + + ]: 40 : dlist_foreach(iter, &lock->procLocks)
1190 : : {
1191 : : PROCLOCK *otherproclock;
1192 : :
1193 : 30 : otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1194 : :
3601 rhaas@postgresql.org 1195 [ + + ]: 30 : if (otherproclock->groupLeader == leader)
1196 : 14 : myHeldLocks |= otherproclock->holdMask;
1197 : : }
1198 : : }
1199 : :
1200 : : /*
1201 : : * Determine where to add myself in the wait queue.
1202 : : *
1203 : : * Normally I should go at the end of the queue. However, if I already
1204 : : * hold locks that conflict with the request of any previous waiter, put
1205 : : * myself in the queue just in front of the first such waiter. This is not
1206 : : * a necessary step, since deadlock detection would move me to before that
1207 : : * waiter anyway; but it's relatively cheap to detect such a conflict
1208 : : * immediately, and avoid delaying till deadlock timeout.
1209 : : *
1210 : : * Special case: if I find I should go in front of some waiter, check to
1211 : : * see if I conflict with already-held locks or the requests before that
1212 : : * waiter. If not, then just grant myself the requested lock immediately.
1213 : : * This is the same as the test for immediate grant in LockAcquire, except
1214 : : * we are only considering the part of the wait queue before my insertion
1215 : : * point.
1216 : : */
1064 andres@anarazel.de 1217 [ + + + + ]: 2138 : if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
1218 : : {
8052 bruce@momjian.us 1219 : 7 : LOCKMASK aheadRequests = 0;
1220 : : dlist_iter iter;
1221 : :
1064 andres@anarazel.de 1222 [ + - + - ]: 7 : dclist_foreach(iter, waitQueue)
1223 : : {
1224 : 7 : PGPROC *proc = dlist_container(PGPROC, links, iter.cur);
1225 : :
1226 : : /*
1227 : : * If we're part of the same locking group as this waiter, its
1228 : : * locks neither conflict with ours nor contribute to
1229 : : * aheadRequests.
1230 : : */
3601 rhaas@postgresql.org 1231 [ - + - - ]: 7 : if (leader != NULL && leader == proc->lockGroupLeader)
3601 rhaas@postgresql.org 1232 :UBC 0 : continue;
1233 : :
1234 : : /* Must he wait for me? */
8553 bruce@momjian.us 1235 [ + - ]:CBC 7 : if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1236 : : {
1237 : : /* Must I wait for him? */
1238 [ + + ]: 7 : if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1239 : : {
1240 : : /*
1241 : : * Yes, so we have a deadlock. Easiest way to clean up
1242 : : * correctly is to call RemoveFromWaitQueue(), but we
1243 : : * can't do that until we are *on* the wait queue. So, set
1244 : : * a flag to check below, and break out of loop. Also,
1245 : : * record deadlock info for later message.
1246 : : */
8371 tgl@sss.pgh.pa.us 1247 : 1 : RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
8870 1248 : 1 : early_deadlock = true;
1249 : 1 : break;
1250 : : }
1251 : : /* I must go before this waiter. Check special case. */
8553 bruce@momjian.us 1252 [ + - ]: 6 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
2180 peter@eisentraut.org 1253 [ + - ]: 6 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1254 : : proclock))
1255 : : {
1256 : : /* Skip the wait and just grant myself the lock. */
8338 bruce@momjian.us 1257 : 6 : GrantLock(lock, proclock, lockmode);
2009 peter@eisentraut.org 1258 : 6 : return PROC_WAIT_STATUS_OK;
1259 : : }
1260 : :
1261 : : /* Put myself into wait queue before conflicting process */
1064 andres@anarazel.de 1262 :UBC 0 : insert_before = proc;
9721 vadim4o@yahoo.com 1263 : 0 : break;
1264 : : }
1265 : : /* Nope, so advance to next waiter */
8052 bruce@momjian.us 1266 : 0 : aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1267 : : }
1268 : : }
1269 : :
1270 : : /*
1271 : : * If we detected deadlock, give up without waiting. This must agree with
1272 : : * CheckDeadLock's recovery code.
1273 : : */
408 heikki.linnakangas@i 1274 [ + + ]:CBC 2132 : if (early_deadlock)
1275 : 1 : return PROC_WAIT_STATUS_ERROR;
1276 : :
1277 : : /*
1278 : : * At this point we know that we'd really need to sleep. If we've been
1279 : : * commanded not to do that, bail out.
1280 : : */
643 rhaas@postgresql.org 1281 [ + + ]: 2131 : if (dontWait)
1282 : 738 : return PROC_WAIT_STATUS_ERROR;
1283 : :
1284 : : /*
1285 : : * Insert self into queue, at the position determined above.
1286 : : */
1064 andres@anarazel.de 1287 [ - + ]: 1393 : if (insert_before)
1064 andres@anarazel.de 1288 :UBC 0 : dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
1289 : : else
1064 andres@anarazel.de 1290 :CBC 1393 : dclist_push_tail(waitQueue, &MyProc->links);
1291 : :
8052 bruce@momjian.us 1292 : 1393 : lock->waitMask |= LOCKBIT_ON(lockmode);
1293 : :
1294 : : /* Set up wait information in PGPROC object, too */
408 heikki.linnakangas@i 1295 : 1393 : MyProc->heldLocks = myProcHeldLocks;
9095 tgl@sss.pgh.pa.us 1296 : 1393 : MyProc->waitLock = lock;
7782 1297 : 1393 : MyProc->waitProcLock = proclock;
9095 1298 : 1393 : MyProc->waitLockMode = lockmode;
1299 : :
2009 peter@eisentraut.org 1300 : 1393 : MyProc->waitStatus = PROC_WAIT_STATUS_WAITING;
1301 : :
408 heikki.linnakangas@i 1302 : 1393 : return PROC_WAIT_STATUS_WAITING;
1303 : : }
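
/*
 * Illustrative sketch (added annotation, not part of the instrumented
 * source): roughly how a caller is expected to combine JoinWaitQueue()
 * and ProcSleep().  The real caller is LockAcquireExtended()/WaitOnLock()
 * in lock.c, which also arms the on-error cleanup mechanism and does far
 * more bookkeeping; the JoinWaitQueue() signature shown here is an
 * assumption based on the parameters referenced above.
 */
#ifdef PROC_WAIT_SKETCH
static ProcWaitStatus
wait_for_lock_sketch(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
	LWLock	   *partitionLock = LockHashPartitionLock(locallock->hashcode);
	ProcWaitStatus waitResult;

	/* Join the lock's wait queue while holding the partition lock. */
	LWLockAcquire(partitionLock, LW_EXCLUSIVE);
	waitResult = JoinWaitQueue(locallock, lockMethodTable, false);
	LWLockRelease(partitionLock);

	/* Sleep only if JoinWaitQueue actually queued us. */
	if (waitResult == PROC_WAIT_STATUS_WAITING)
		waitResult = ProcSleep(locallock);

	return waitResult;
}
#endif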
1304 : :
1305 : : /*
1306 : : * ProcSleep -- put process to sleep waiting on lock
1307 : : *
1308 : : * This must be called when JoinWaitQueue() returns PROC_WAIT_STATUS_WAITING.
1309 : : * Returns after the lock has been granted, or if a deadlock is detected. Can
 1310 : : * also bail out with ereport(ERROR) if some other error condition, timeout,
 1311 : : * or cancellation is triggered.
1312 : : *
1313 : : * Result is one of the following:
1314 : : *
1315 : : * PROC_WAIT_STATUS_OK - lock was granted
1316 : : * PROC_WAIT_STATUS_ERROR - a deadlock was detected
1317 : : */
1318 : : ProcWaitStatus
1319 : 1393 : ProcSleep(LOCALLOCK *locallock)
1320 : : {
1321 : 1393 : LOCKMODE lockmode = locallock->tag.mode;
1322 : 1393 : LOCK *lock = locallock->lock;
1323 : 1393 : uint32 hashcode = locallock->hashcode;
1324 : 1393 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
1325 : 1393 : TimestampTz standbyWaitStart = 0;
1326 : 1393 : bool allow_autovacuum_cancel = true;
1327 : 1393 : bool logged_recovery_conflict = false;
1328 : : ProcWaitStatus myWaitStatus;
1329 : :
1330 : : /* The caller must've armed the on-error cleanup mechanism */
1331 [ - + ]: 1393 : Assert(GetAwaitedLock() == locallock);
1332 [ - + ]: 1393 : Assert(!LWLockHeldByMe(partitionLock));
1333 : :
1334 : : /*
1335 : : * Now that we will successfully clean up after an ereport, it's safe to
1336 : : * check to see if there's a buffer pin deadlock against the Startup
1337 : : * process. Of course, that's only necessary if we're doing Hot Standby
1338 : : * and are not the Startup process ourselves.
1339 : : */
5251 tgl@sss.pgh.pa.us 1340 [ + + + + ]: 1393 : if (RecoveryInProgress() && !InRecovery)
1341 : 1 : CheckRecoveryConflictDeadlock();
1342 : :
1343 : : /* Reset deadlock_state before enabling the timeout handler */
6756 1344 : 1393 : deadlock_state = DS_NOT_YET_CHECKED;
3970 andres@anarazel.de 1345 : 1393 : got_deadlock_timeout = false;
1346 : :
1347 : : /*
 1348 : : * Set a timer so we can wake up after a while and check for a deadlock. If a
1349 : : * deadlock is detected, the handler sets MyProc->waitStatus =
1350 : : * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1351 : : * rather than success.
1352 : : *
1353 : : * By delaying the check until we've waited for a bit, we can avoid
1354 : : * running the rather expensive deadlock-check code in most cases.
1355 : : *
1356 : : * If LockTimeout is set, also enable the timeout for that. We can save a
1357 : : * few cycles by enabling both timeout sources in one call.
1358 : : *
 1359 : : * If InHotStandby, the lock-wait timers are set up slightly later, for
 1360 : : * clarity with other code.
1361 : : */
3569 simon@2ndQuadrant.co 1362 [ + + ]: 1393 : if (!InHotStandby)
1363 : : {
1364 [ + + ]: 1392 : if (LockTimeout > 0)
1365 : : {
1366 : : EnableTimeoutParams timeouts[2];
1367 : :
1368 : 100 : timeouts[0].id = DEADLOCK_TIMEOUT;
1369 : 100 : timeouts[0].type = TMPARAM_AFTER;
1370 : 100 : timeouts[0].delay_ms = DeadlockTimeout;
1371 : 100 : timeouts[1].id = LOCK_TIMEOUT;
1372 : 100 : timeouts[1].type = TMPARAM_AFTER;
1373 : 100 : timeouts[1].delay_ms = LockTimeout;
1374 : 100 : enable_timeouts(timeouts, 2);
1375 : : }
1376 : : else
1377 : 1292 : enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1378 : :
1379 : : /*
1380 : : * Use the current time obtained for the deadlock timeout timer as
1381 : : * waitStart (i.e., the time when this process started waiting for the
 1382 : : * lock). Since fetching the current time again would incur extra overhead,
 1383 : : * we reuse the already-obtained timestamp.
1384 : : *
1385 : : * Note that waitStart is updated without holding the lock table's
 1386 : : * partition lock, to avoid the overhead of an additional lock
 1387 : : * acquisition. This can cause "waitstart" in pg_locks to become NULL
1388 : : * for a very short period of time after the wait started even though
 1389 : : * "granted" is false. This is OK in practice because users typically
 1390 : : * look at "waitstart" only after the lock has been waited on for a
 1391 : : * while.
1392 : : */
1766 fujii@postgresql.org 1393 : 1392 : pg_atomic_write_u64(&MyProc->waitStart,
1394 : 1392 : get_timeout_start_time(DEADLOCK_TIMEOUT));
1395 : : }
1804 1396 [ + - ]: 1 : else if (log_recovery_conflict_waits)
1397 : : {
1398 : : /*
 1399 : : * Set the wait start timestamp if logging is enabled and we are in
 1400 : : * hot standby.
1401 : : */
1402 : 1 : standbyWaitStart = GetCurrentTimestamp();
1403 : : }
1404 : :
1405 : : /*
 1406 : : * If somebody wakes us between LWLockRelease and WaitLatch, the WaitLatch
 1407 : : * call will not block. But a set latch does not necessarily mean that the
 1408 : : * lock is free now, as there are many sources of latch sets other than
 1409 : : * somebody releasing the lock.
1410 : : *
1411 : : * We process interrupts whenever the latch has been set, so cancel/die
1412 : : * interrupts are processed quickly. This means we must not mind losing
1413 : : * control to a cancel/die interrupt here. We don't, because we have no
1414 : : * shared-state-change work to do after being granted the lock (the
1415 : : * grantor did it all). We do have to worry about canceling the deadlock
1416 : : * timeout and updating the locallock table, but if we lose control to an
1417 : : * error, LockErrorCleanup will fix that up.
1418 : : */
1419 : : do
1420 : : {
3569 simon@2ndQuadrant.co 1421 [ + + ]: 1688 : if (InHotStandby)
1422 : : {
1804 fujii@postgresql.org 1423 : 4 : bool maybe_log_conflict =
943 tgl@sss.pgh.pa.us 1424 [ + - + + ]: 4 : (standbyWaitStart != 0 && !logged_recovery_conflict);
1425 : :
1426 : : /* Set a timer and wait for that or for the lock to be granted */
1804 fujii@postgresql.org 1427 : 4 : ResolveRecoveryConflictWithLock(locallock->tag.lock,
1428 : : maybe_log_conflict);
1429 : :
1430 : : /*
1431 : : * Emit the log message if the startup process is waiting longer
1432 : : * than deadlock_timeout for recovery conflict on lock.
1433 : : */
1434 [ + + ]: 4 : if (maybe_log_conflict)
1435 : : {
1436 : 2 : TimestampTz now = GetCurrentTimestamp();
1437 : :
1438 [ + + ]: 2 : if (TimestampDifferenceExceeds(standbyWaitStart, now,
1439 : : DeadlockTimeout))
1440 : : {
1441 : : VirtualTransactionId *vxids;
1442 : : int cnt;
1443 : :
1444 : 1 : vxids = GetLockConflicts(&locallock->tag.lock,
1445 : : AccessExclusiveLock, &cnt);
1446 : :
1447 : : /*
1448 : : * Log the recovery conflict and the list of PIDs of
1449 : : * backends holding the conflicting lock. Note that we do
1450 : : * logging even if there are no such backends right now
1451 : : * because the startup process here has already waited
1452 : : * longer than deadlock_timeout.
1453 : : */
1454 : 1 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1455 : : standbyWaitStart, now,
1799 1456 [ + - ]: 1 : cnt > 0 ? vxids : NULL, true);
1804 1457 : 1 : logged_recovery_conflict = true;
1458 : : }
1459 : : }
1460 : : }
1461 : : else
1462 : : {
2581 tmunro@postgresql.or 1463 : 1684 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1464 : 1684 : PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
3569 simon@2ndQuadrant.co 1465 : 1684 : ResetLatch(MyLatch);
1466 : : /* check for deadlocks first, as that's probably log-worthy */
1467 [ + + ]: 1684 : if (got_deadlock_timeout)
1468 : : {
1469 : 34 : CheckDeadLock();
1470 : 34 : got_deadlock_timeout = false;
1471 : : }
1472 [ + + ]: 1684 : CHECK_FOR_INTERRUPTS();
1473 : : }
1474 : :
1475 : : /*
1476 : : * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1477 : : * else asynchronously. Read it just once per loop to prevent
1478 : : * surprising behavior (such as missing log messages).
1479 : : */
2009 peter@eisentraut.org 1480 : 1647 : myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1481 : :
1482 : : /*
1483 : : * If we are not deadlocked, but are waiting on an autovacuum-induced
1484 : : * task, send a signal to interrupt it.
1485 : : */
6627 alvherre@alvh.no-ip. 1486 [ - + - - ]: 1647 : if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1487 : : {
6607 bruce@momjian.us 1488 :UBC 0 : PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1489 : : uint8 statusFlags;
1490 : : uint8 lockmethod_copy;
1491 : : LOCKTAG locktag_copy;
1492 : :
1493 : : /*
1494 : : * Grab info we need, then release lock immediately. Note this
1495 : : * coding means that there is a tiny chance that the process
1496 : : * terminates its current transaction and starts a different one
 1497 : : * before we have a chance to send the signal; the worst possible
1498 : : * consequence is that a for-wraparound vacuum is canceled. But
1499 : : * that could happen in any case unless we were to do kill() with
1500 : : * the lock held, which is much more undesirable.
1501 : : */
6627 alvherre@alvh.no-ip. 1502 : 0 : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1850 1503 : 0 : statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1504 : 0 : lockmethod_copy = lock->tag.locktag_lockmethodid;
1505 : 0 : locktag_copy = lock->tag;
1506 : 0 : LWLockRelease(ProcArrayLock);
1507 : :
1508 : : /*
1509 : : * Only do it if the worker is not working to protect against Xid
1510 : : * wraparound.
1511 : : */
1857 1512 [ # # ]: 0 : if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1513 [ # # ]: 0 : !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1514 : : {
6607 bruce@momjian.us 1515 : 0 : int pid = autovac->pid;
1516 : :
1517 : : /* report the case, if configured to do so */
1850 tgl@sss.pgh.pa.us 1518 [ # # ]: 0 : if (message_level_is_interesting(DEBUG1))
1519 : : {
1520 : : StringInfoData locktagbuf;
1521 : : StringInfoData logbuf; /* errdetail for server log */
1522 : :
1523 : 0 : initStringInfo(&locktagbuf);
1524 : 0 : initStringInfo(&logbuf);
1525 : 0 : DescribeLockTag(&locktagbuf, &locktag_copy);
1526 : 0 : appendStringInfo(&logbuf,
1527 : : "Process %d waits for %s on %s.",
1528 : : MyProcPid,
1529 : : GetLockmodeName(lockmethod_copy, lockmode),
1530 : : locktagbuf.data);
1531 : :
1532 [ # # ]: 0 : ereport(DEBUG1,
1533 : : (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1534 : : pid),
1535 : : errdetail_log("%s", logbuf.data)));
1536 : :
1537 : 0 : pfree(locktagbuf.data);
1538 : 0 : pfree(logbuf.data);
1539 : : }
1540 : :
1541 : : /* send the autovacuum worker Back to Old Kent Road */
6627 alvherre@alvh.no-ip. 1542 [ # # ]: 0 : if (kill(pid, SIGINT) < 0)
1543 : : {
1544 : : /*
1545 : : * There's a race condition here: once we release the
1546 : : * ProcArrayLock, it's possible for the autovac worker to
1547 : : * close up shop and exit before we can do the kill().
1548 : : * Therefore, we do not whinge about no-such-process.
1549 : : * Other errors such as EPERM could conceivably happen if
1550 : : * the kernel recycles the PID fast enough, but such cases
1551 : : * seem improbable enough that it's probably best to issue
1552 : : * a warning if we see some other errno.
1553 : : */
3795 tgl@sss.pgh.pa.us 1554 [ # # ]: 0 : if (errno != ESRCH)
1555 [ # # ]: 0 : ereport(WARNING,
1556 : : (errmsg("could not send signal to process %d: %m",
1557 : : pid)));
1558 : : }
1559 : : }
1560 : :
1561 : : /* prevent signal from being sent again more than once */
6627 alvherre@alvh.no-ip. 1562 : 0 : allow_autovacuum_cancel = false;
1563 : : }
1564 : :
1565 : : /*
1566 : : * If awoken after the deadlock check interrupt has run, and
1567 : : * log_lock_waits is on, then report about the wait.
1568 : : */
6686 tgl@sss.pgh.pa.us 1569 [ + - + + ]:CBC 1647 : if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1570 : : {
1571 : : StringInfoData buf,
1572 : : lock_waiters_sbuf,
1573 : : lock_holders_sbuf;
1574 : : const char *modename;
1575 : : long secs;
1576 : : int usecs;
1577 : : long msecs;
4297 fujii@postgresql.org 1578 : 208 : int lockHoldersNum = 0;
1579 : :
6686 tgl@sss.pgh.pa.us 1580 : 208 : initStringInfo(&buf);
4297 fujii@postgresql.org 1581 : 208 : initStringInfo(&lock_waiters_sbuf);
1582 : 208 : initStringInfo(&lock_holders_sbuf);
1583 : :
6686 tgl@sss.pgh.pa.us 1584 : 208 : DescribeLockTag(&buf, &locallock->tag.lock);
1585 : 208 : modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1586 : : lockmode);
4902 alvherre@alvh.no-ip. 1587 : 208 : TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
1588 : : GetCurrentTimestamp(),
1589 : : &secs, &usecs);
6686 tgl@sss.pgh.pa.us 1590 : 208 : msecs = secs * 1000 + usecs / 1000;
1591 : 208 : usecs = usecs % 1000;
1592 : :
1593 : : /* Gather a list of all lock holders and waiters */
4297 fujii@postgresql.org 1594 : 208 : LWLockAcquire(partitionLock, LW_SHARED);
278 1595 : 208 : GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1596 : : &lock_waiters_sbuf, &lockHoldersNum);
4297 1597 : 208 : LWLockRelease(partitionLock);
1598 : :
6686 tgl@sss.pgh.pa.us 1599 [ + + ]: 208 : if (deadlock_state == DS_SOFT_DEADLOCK)
1600 [ + - ]: 3 : ereport(LOG,
1601 : : (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1602 : : MyProcPid, modename, buf.data, msecs, usecs),
1603 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1604 : : "Processes holding the lock: %s. Wait queue: %s.",
1605 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1606 [ + + ]: 205 : else if (deadlock_state == DS_HARD_DEADLOCK)
1607 : : {
1608 : : /*
1609 : : * This message is a bit redundant with the error that will be
1610 : : * reported subsequently, but in some cases the error report
1611 : : * might not make it to the log (eg, if it's caught by an
1612 : : * exception handler), and we want to ensure all long-wait
1613 : : * events get logged.
1614 : : */
1615 [ + - ]: 5 : ereport(LOG,
1616 : : (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1617 : : MyProcPid, modename, buf.data, msecs, usecs),
1618 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1619 : : "Processes holding the lock: %s. Wait queue: %s.",
1620 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1621 : : }
1622 : :
2009 peter@eisentraut.org 1623 [ + + ]: 208 : if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
6686 tgl@sss.pgh.pa.us 1624 [ + - ]: 178 : ereport(LOG,
1625 : : (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1626 : : MyProcPid, modename, buf.data, msecs, usecs),
1627 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1628 : : "Processes holding the lock: %s. Wait queue: %s.",
1629 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
2009 peter@eisentraut.org 1630 [ + + ]: 30 : else if (myWaitStatus == PROC_WAIT_STATUS_OK)
6686 tgl@sss.pgh.pa.us 1631 [ + - ]: 25 : ereport(LOG,
1632 : : (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1633 : : MyProcPid, modename, buf.data, msecs, usecs)));
1634 : : else
1635 : : {
2009 peter@eisentraut.org 1636 [ - + ]: 5 : Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1637 : :
1638 : : /*
1639 : : * Currently, the deadlock checker always kicks its own
1640 : : * process, which means that we'll only see
1641 : : * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1642 : : * DS_HARD_DEADLOCK, and there's no need to print redundant
1643 : : * messages. But for completeness and future-proofing, print
1644 : : * a message if it looks like someone else kicked us off the
1645 : : * lock.
1646 : : */
6686 tgl@sss.pgh.pa.us 1647 [ - + ]: 5 : if (deadlock_state != DS_HARD_DEADLOCK)
6686 tgl@sss.pgh.pa.us 1648 [ # # ]:UBC 0 : ereport(LOG,
1649 : : (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1650 : : MyProcPid, modename, buf.data, msecs, usecs),
1651 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1652 : : "Processes holding the lock: %s. Wait queue: %s.",
1653 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1654 : : }
1655 : :
1656 : : /*
1657 : : * At this point we might still need to wait for the lock. Reset
1658 : : * state so we don't print the above messages again.
1659 : : */
6686 tgl@sss.pgh.pa.us 1660 :CBC 208 : deadlock_state = DS_NO_DEADLOCK;
1661 : :
1662 : 208 : pfree(buf.data);
4297 fujii@postgresql.org 1663 : 208 : pfree(lock_holders_sbuf.data);
1664 : 208 : pfree(lock_waiters_sbuf.data);
1665 : : }
2009 peter@eisentraut.org 1666 [ + + ]: 1647 : } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1667 : :
1668 : : /*
1669 : : * Disable the timers, if they are still running. As in LockErrorCleanup,
1670 : : * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1671 : : * already caused QueryCancelPending to become set, we want the cancel to
1672 : : * be reported as a lock timeout, not a user cancel.
1673 : : */
3569 simon@2ndQuadrant.co 1674 [ + + ]: 1352 : if (!InHotStandby)
1675 : : {
1676 [ + + ]: 1351 : if (LockTimeout > 0)
1677 : : {
1678 : : DisableTimeoutParams timeouts[2];
1679 : :
1680 : 94 : timeouts[0].id = DEADLOCK_TIMEOUT;
1681 : 94 : timeouts[0].keep_indicator = false;
1682 : 94 : timeouts[1].id = LOCK_TIMEOUT;
1683 : 94 : timeouts[1].keep_indicator = true;
1684 : 94 : disable_timeouts(timeouts, 2);
1685 : : }
1686 : : else
1687 : 1257 : disable_timeout(DEADLOCK_TIMEOUT, false);
1688 : : }
1689 : :
1690 : : /*
1691 : : * Emit the log message if recovery conflict on lock was resolved but the
1692 : : * startup process waited longer than deadlock_timeout for it.
1693 : : */
1799 fujii@postgresql.org 1694 [ + + + - ]: 1352 : if (InHotStandby && logged_recovery_conflict)
1695 : 1 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1696 : : standbyWaitStart, GetCurrentTimestamp(),
1697 : : NULL, false);
1698 : :
1699 : : /*
1700 : : * We don't have to do anything else, because the awaker did all the
1701 : : * necessary updates of the lock table and MyProc. (The caller is
1702 : : * responsible for updating the local lock table.)
1703 : : */
408 heikki.linnakangas@i 1704 : 1352 : return myWaitStatus;
1705 : : }
1706 : :
1707 : :
1708 : : /*
1709 : : * ProcWakeup -- wake up a process by setting its latch.
1710 : : *
1711 : : * Also remove the process from the wait queue and set its links invalid.
1712 : : *
1713 : : * The appropriate lock partition lock must be held by caller.
1714 : : *
1715 : : * XXX: presently, this code is only used for the "success" case, and only
 1716 : : * works correctly for that case. To clean up in the failure case, we would
 1717 : : * need to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1718 : : * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1719 : : */
1720 : : void
2009 peter@eisentraut.org 1721 : 1350 : ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1722 : : {
1064 andres@anarazel.de 1723 [ - + ]: 1350 : if (dlist_node_is_detached(&proc->links))
1064 andres@anarazel.de 1724 :UBC 0 : return;
1725 : :
2009 peter@eisentraut.org 1726 [ - + ]:CBC 1350 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1727 : :
1728 : : /* Remove process from wait queue */
1064 andres@anarazel.de 1729 : 1350 : dclist_delete_from_thoroughly(&proc->waitLock->waitProcs, &proc->links);
1730 : :
1731 : : /* Clean up process' state and pass it the ok/fail signal */
9095 tgl@sss.pgh.pa.us 1732 : 1350 : proc->waitLock = NULL;
7782 1733 : 1350 : proc->waitProcLock = NULL;
7823 1734 : 1350 : proc->waitStatus = waitStatus;
1766 fujii@postgresql.org 1735 : 1350 : pg_atomic_write_u64(&MyProc->waitStart, 0);
1736 : :
1737 : : /* And awaken it */
3970 andres@anarazel.de 1738 : 1350 : SetLatch(&proc->procLatch);
1739 : : }
1740 : :
1741 : : /*
1742 : : * ProcLockWakeup -- routine for waking up processes when a lock is
1743 : : * released (or a prior waiter is aborted). Scan all waiters
 1744 : : * for the lock, and wake any that are no longer blocked.
1745 : : *
1746 : : * The appropriate lock partition lock must be held by caller.
1747 : : */
1748 : : void
8052 bruce@momjian.us 1749 : 1369 : ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1750 : : {
1064 andres@anarazel.de 1751 : 1369 : dclist_head *waitQueue = &lock->waitProcs;
8052 bruce@momjian.us 1752 : 1369 : LOCKMASK aheadRequests = 0;
1753 : : dlist_mutable_iter miter;
1754 : :
1064 andres@anarazel.de 1755 [ + + ]: 1369 : if (dclist_is_empty(waitQueue))
9092 tgl@sss.pgh.pa.us 1756 : 44 : return;
1757 : :
1064 andres@anarazel.de 1758 [ + - + + ]: 3410 : dclist_foreach_modify(miter, waitQueue)
1759 : : {
1760 : 2085 : PGPROC *proc = dlist_container(PGPROC, links, miter.cur);
9036 bruce@momjian.us 1761 : 2085 : LOCKMODE lockmode = proc->waitLockMode;
1762 : :
1763 : : /*
 1764 : : * Wake the waiter if its request (a) doesn't conflict with requests of
 1765 : : * earlier waiters, and (b) doesn't conflict with already-held locks.
1766 : : */
8553 1767 [ + + ]: 2085 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
2180 peter@eisentraut.org 1768 [ + + ]: 1643 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1769 : : proc->waitProcLock))
1770 : : {
1771 : : /* OK to waken */
7782 tgl@sss.pgh.pa.us 1772 : 1350 : GrantLock(lock, proc->waitProcLock, lockmode);
1773 : : /* removes proc from the lock's waiting process queue */
1064 andres@anarazel.de 1774 : 1350 : ProcWakeup(proc, PROC_WAIT_STATUS_OK);
1775 : : }
1776 : : else
1777 : : {
1778 : : /*
1779 : : * Lock conflicts: Don't wake, but remember requested mode for
1780 : : * later checks.
1781 : : */
8052 bruce@momjian.us 1782 : 735 : aheadRequests |= LOCKBIT_ON(lockmode);
1783 : : }
1784 : : }
1785 : : }
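
/*
 * Worked example (added annotation, not from the original source): suppose
 * one backend still holds the lock in AccessShareLock mode, and the wait
 * queue is [B: AccessExclusiveLock, C: AccessShareLock].  B cannot be woken
 * because AccessExclusiveLock conflicts with the held AccessShareLock, so
 * its mode is added to aheadRequests.  C is then not woken either, even
 * though AccessShareLock is compatible with the held lock, because it
 * conflicts with the AccessExclusiveLock request queued ahead of it.  This
 * is what keeps later-arriving compatible requests from starving B.
 */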
1786 : :
1787 : : /*
1788 : : * CheckDeadLock
1789 : : *
 1790 : : * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
 1791 : : * lock to be released by some other process. Check if there's a deadlock; if
 1792 : : * not, just return. (But signal ProcSleep to log a message if
 1793 : : * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1794 : : * the lock's wait queue and signal an error to ProcSleep.
1795 : : */
1796 : : static void
8558 1797 : 34 : CheckDeadLock(void)
1798 : : {
1799 : : int i;
1800 : :
1801 : : /*
1802 : : * Acquire exclusive lock on the entire shared lock data structures. Must
1803 : : * grab LWLocks in partition-number order to avoid LWLock deadlock.
1804 : : *
1805 : : * Note that the deadlock check interrupt had better not be enabled
1806 : : * anywhere that this process itself holds lock partition locks, else this
1807 : : * will wait forever. Also note that LWLockAcquire creates a critical
1808 : : * section, so that this routine cannot be interrupted by cancel/die
1809 : : * interrupts.
1810 : : */
7311 tgl@sss.pgh.pa.us 1811 [ + + ]: 578 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4342 rhaas@postgresql.org 1812 : 544 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1813 : :
1814 : : /*
1815 : : * Check to see if we've been awoken by anyone in the interim.
1816 : : *
1817 : : * If we have, we can return and resume our transaction -- happy day.
 1818 : : * The process releasing the lock grants it to us before awakening us, so
 1819 : : * we know that we don't have to wait anymore.
1820 : : *
1821 : : * We check by looking to see if we've been unlinked from the wait queue.
1822 : : * This is safe because we hold the lock partition lock.
1823 : : */
6254 tgl@sss.pgh.pa.us 1824 [ + + ]: 34 : if (MyProc->links.prev == NULL ||
1825 [ - + ]: 33 : MyProc->links.next == NULL)
6756 1826 : 1 : goto check_done;
1827 : :
1828 : : #ifdef LOCK_DEBUG
1829 : : if (Debug_deadlocks)
1830 : : DumpAllLocks();
1831 : : #endif
1832 : :
1833 : : /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1834 : 33 : deadlock_state = DeadLockCheck(MyProc);
1835 : :
6864 bruce@momjian.us 1836 [ + + ]: 33 : if (deadlock_state == DS_HARD_DEADLOCK)
1837 : : {
1838 : : /*
1839 : : * Oops. We have a deadlock.
1840 : : *
1841 : : * Get this process out of wait state. (Note: we could do this more
1842 : : * efficiently by relying on lockAwaited, but use this coding to
1843 : : * preserve the flexibility to kill some other transaction than the
1844 : : * one detecting the deadlock.)
1845 : : *
1846 : : * RemoveFromWaitQueue sets MyProc->waitStatus to
1847 : : * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1848 : : * return from the signal handler.
1849 : : */
1850 [ - + ]: 5 : Assert(MyProc->waitLock != NULL);
1851 : 5 : RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1852 : :
1853 : : /*
1854 : : * We're done here. Transaction abort caused by the error that
1855 : : * ProcSleep will raise will cause any other locks we hold to be
1856 : : * released, thus allowing other processes to wake up; we don't need
1857 : : * to do that here. NOTE: an exception is that releasing locks we
1858 : : * hold doesn't consider the possibility of waiters that were blocked
1859 : : * behind us on the lock we just failed to get, and might now be
1860 : : * wakable because we're not in front of them anymore. However,
1861 : : * RemoveFromWaitQueue took care of waking up any such processes.
1862 : : */
1863 : : }
1864 : :
1865 : : /*
1866 : : * And release locks. We do this in reverse order for two reasons: (1)
1867 : : * Anyone else who needs more than one of the locks will be trying to lock
1868 : : * them in increasing order; we don't want to release the other process
1869 : : * until it can get all the locks it needs. (2) This avoids O(N^2)
1870 : : * behavior inside LWLockRelease.
1871 : : */
6756 tgl@sss.pgh.pa.us 1872 : 28 : check_done:
7014 bruce@momjian.us 1873 [ + + ]: 578 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4342 rhaas@postgresql.org 1874 : 544 : LWLockRelease(LockHashPartitionLockByIndex(i));
10753 scrappy@hub.org 1875 : 34 : }
1876 : :
1877 : : /*
1878 : : * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1879 : : *
1880 : : * NB: Runs inside a signal handler, be careful.
1881 : : */
1882 : : void
3970 andres@anarazel.de 1883 : 34 : CheckDeadLockAlert(void)
1884 : : {
1885 : 34 : int save_errno = errno;
1886 : :
1887 : 34 : got_deadlock_timeout = true;
1888 : :
1889 : : /*
 1890 : : * Have to set the latch again, even if handle_sig_alarm already did: at
 1891 : : * that point got_deadlock_timeout was not yet set. It's unlikely that
 1892 : : * this would ever be a problem, but setting an already-set latch is cheap.
1893 : : *
1894 : : * Note that, when this function runs inside procsignal_sigusr1_handler(),
1895 : : * the handler function sets the latch again after the latch is set here.
1896 : : */
1897 : 34 : SetLatch(MyLatch);
1898 : 34 : errno = save_errno;
1899 : 34 : }
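
/*
 * Illustrative note (added annotation, not part of this file): the handler
 * above is registered for DEADLOCK_TIMEOUT during backend startup, roughly
 * as below; the exact call site (InitPostgres and friends) is an assumption
 * here and may vary across versions:
 *
 *		RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLockAlert);
 *
 * ProcSleep() then arms the timeout with enable_timeout_after() or
 * enable_timeouts(), and the handler merely sets got_deadlock_timeout and
 * the latch; the expensive deadlock search happens later in CheckDeadLock(),
 * outside signal-handler context.
 */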
1900 : :
1901 : : /*
1902 : : * GetLockHoldersAndWaiters - get lock holders and waiters for a lock
1903 : : *
1904 : : * Fill lock_holders_sbuf and lock_waiters_sbuf with the PIDs of processes holding
1905 : : * and waiting for the lock, and set lockHoldersNum to the number of lock holders.
1906 : : *
1907 : : * The lock table's partition lock must be held on entry and remains held on exit.
1908 : : */
1909 : : void
278 fujii@postgresql.org 1910 : 208 : GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf,
1911 : : StringInfo lock_waiters_sbuf, int *lockHoldersNum)
1912 : : {
1913 : : dlist_iter proc_iter;
1914 : : PROCLOCK *curproclock;
1915 : 208 : LOCK *lock = locallock->lock;
1916 : 208 : bool first_holder = true,
1917 : 208 : first_waiter = true;
1918 : :
1919 : : #ifdef USE_ASSERT_CHECKING
1920 : : {
1921 : 208 : uint32 hashcode = locallock->hashcode;
1922 : 208 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
1923 : :
1924 [ - + ]: 208 : Assert(LWLockHeldByMe(partitionLock));
1925 : : }
1926 : : #endif
1927 : :
1928 : 208 : *lockHoldersNum = 0;
1929 : :
1930 : : /*
1931 : : * Loop over the lock's procLocks to gather a list of all holders and
1932 : : * waiters. Thus we will be able to provide more detailed information for
1933 : : * lock debugging purposes.
1934 : : *
1935 : : * lock->procLocks contains all processes which hold or wait for this
1936 : : * lock.
1937 : : */
1938 [ + - + + ]: 620 : dlist_foreach(proc_iter, &lock->procLocks)
1939 : : {
1940 : 412 : curproclock =
1941 : 412 : dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1942 : :
1943 : : /*
1944 : : * We are a waiter if myProc->waitProcLock == curproclock; we are a
1945 : : * holder if it is NULL or something different.
1946 : : */
1947 [ + + ]: 412 : if (curproclock->tag.myProc->waitProcLock == curproclock)
1948 : : {
1949 [ + + ]: 197 : if (first_waiter)
1950 : : {
1951 : 179 : appendStringInfo(lock_waiters_sbuf, "%d",
1952 : 179 : curproclock->tag.myProc->pid);
1953 : 179 : first_waiter = false;
1954 : : }
1955 : : else
1956 : 18 : appendStringInfo(lock_waiters_sbuf, ", %d",
1957 : 18 : curproclock->tag.myProc->pid);
1958 : : }
1959 : : else
1960 : : {
1961 [ + + ]: 215 : if (first_holder)
1962 : : {
1963 : 208 : appendStringInfo(lock_holders_sbuf, "%d",
1964 : 208 : curproclock->tag.myProc->pid);
1965 : 208 : first_holder = false;
1966 : : }
1967 : : else
1968 : 7 : appendStringInfo(lock_holders_sbuf, ", %d",
1969 : 7 : curproclock->tag.myProc->pid);
1970 : :
1971 : 215 : (*lockHoldersNum)++;
1972 : : }
1973 : : }
1974 : 208 : }
1975 : :
1976 : : /*
1977 : : * ProcWaitForSignal - wait for a signal from another backend.
1978 : : *
 1979 : : * As this uses the generic process latch, the caller has to be robust against
 1980 : : * unrelated wakeups: always check that the desired state has occurred, and
1981 : : * wait again if not.
1982 : : */
1983 : : void
3361 rhaas@postgresql.org 1984 : 19 : ProcWaitForSignal(uint32 wait_event_info)
1985 : : {
2581 tmunro@postgresql.or 1986 : 19 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1987 : : wait_event_info);
3970 andres@anarazel.de 1988 : 19 : ResetLatch(MyLatch);
1989 [ - + ]: 19 : CHECK_FOR_INTERRUPTS();
8930 tgl@sss.pgh.pa.us 1990 : 19 : }
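
/*
 * Illustrative usage sketch (added annotation, not from the original
 * source): because the generic process latch is set for many unrelated
 * reasons, callers must re-check their condition in a loop.  Both
 * condition_satisfied() and wait_event_info below are placeholders.
 *
 *		while (!condition_satisfied())
 *			ProcWaitForSignal(wait_event_info);
 */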
1991 : :
1992 : : /*
1993 : : * ProcSendSignal - set the latch of a backend identified by ProcNumber
1994 : : */
1995 : : void
654 heikki.linnakangas@i 1996 : 8 : ProcSendSignal(ProcNumber procNumber)
1997 : : {
1998 [ + - - + ]: 8 : if (procNumber < 0 || procNumber >= ProcGlobal->allProcCount)
654 heikki.linnakangas@i 1999 [ # # ]:UBC 0 : elog(ERROR, "procNumber out of range");
2000 : :
654 heikki.linnakangas@i 2001 :CBC 8 : SetLatch(&ProcGlobal->allProcs[procNumber].procLatch);
8930 tgl@sss.pgh.pa.us 2002 : 8 : }
2003 : :
2004 : : /*
2005 : : * BecomeLockGroupLeader - designate process as lock group leader
2006 : : *
2007 : : * Once this function has returned, other processes can join the lock group
2008 : : * by calling BecomeLockGroupMember.
2009 : : */
2010 : : void
3601 rhaas@postgresql.org 2011 : 607 : BecomeLockGroupLeader(void)
2012 : : {
2013 : : LWLock *leader_lwlock;
2014 : :
2015 : : /* If we already did it, we don't need to do it again. */
2016 [ + + ]: 607 : if (MyProc->lockGroupLeader == MyProc)
2017 : 531 : return;
2018 : :
2019 : : /* We had better not be a follower. */
2020 [ - + ]: 76 : Assert(MyProc->lockGroupLeader == NULL);
2021 : :
2022 : : /* Create single-member group, containing only ourselves. */
2023 : 76 : leader_lwlock = LockHashPartitionLockByProc(MyProc);
2024 : 76 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
2025 : 76 : MyProc->lockGroupLeader = MyProc;
2026 : 76 : dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
2027 : 76 : LWLockRelease(leader_lwlock);
2028 : : }
2029 : :
2030 : : /*
2031 : : * BecomeLockGroupMember - designate process as lock group member
2032 : : *
2033 : : * This is pretty straightforward except for the possibility that the leader
 2034 : : * whose group we're trying to join might exit before we manage to do so,
 2035 : : * and the PGPROC might then get recycled for an unrelated process. To avoid
2036 : : * that, we require the caller to pass the PID of the intended PGPROC as
2037 : : * an interlock. Returns true if we successfully join the intended lock
2038 : : * group, and false if not.
2039 : : */
2040 : : bool
2041 : 1440 : BecomeLockGroupMember(PGPROC *leader, int pid)
2042 : : {
2043 : : LWLock *leader_lwlock;
2044 : 1440 : bool ok = false;
2045 : :
2046 : : /* Group leader can't become member of group */
2047 [ - + ]: 1440 : Assert(MyProc != leader);
2048 : :
2049 : : /* Can't already be a member of a group */
3586 tgl@sss.pgh.pa.us 2050 [ - + ]: 1440 : Assert(MyProc->lockGroupLeader == NULL);
2051 : :
2052 : : /* PID must be valid. */
3601 rhaas@postgresql.org 2053 [ - + ]: 1440 : Assert(pid != 0);
2054 : :
2055 : : /*
2056 : : * Get lock protecting the group fields. Note LockHashPartitionLockByProc
2057 : : * calculates the proc number based on the PGPROC slot without looking at
2058 : : * its contents, so we will acquire the correct lock even if the leader
 2059 : : * PGPROC is in the process of being recycled.
2060 : : */
3587 2061 : 1440 : leader_lwlock = LockHashPartitionLockByProc(leader);
3601 2062 : 1440 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
2063 : :
2064 : : /* Is this the leader we're looking for? */
3586 tgl@sss.pgh.pa.us 2065 [ + - + - ]: 1440 : if (leader->pid == pid && leader->lockGroupLeader == leader)
2066 : : {
2067 : : /* OK, join the group */
3601 rhaas@postgresql.org 2068 : 1440 : ok = true;
2069 : 1440 : MyProc->lockGroupLeader = leader;
2070 : 1440 : dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
2071 : : }
2072 : 1440 : LWLockRelease(leader_lwlock);
2073 : :
2074 : 1440 : return ok;
2075 : : }
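
/*
 * Illustrative sketch (added annotation, not from the original source): the
 * intended usage pattern for parallel query.  The leader designates itself
 * before launching workers; each worker then joins using the leader's PGPROC
 * pointer and PID, which the leader passes through shared memory.  Names are
 * placeholders; the real code lives in access/transam/parallel.c.
 *
 *		In the leader, before starting workers:
 *			BecomeLockGroupLeader();
 *
 *		In each worker, during startup:
 *			if (!BecomeLockGroupMember(leader_pgproc, leader_pid))
 *				return;		-- leader already exited, so just bail out
 */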