Age Owner Branch data TLA Line data Source code
1 : : /*-------------------------------------------------------------------------
2 : : *
3 : : * proc.c
4 : : * routines to manage per-process shared memory data structure
5 : : *
6 : : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : : * Portions Copyright (c) 1994, Regents of the University of California
8 : : *
9 : : *
10 : : * IDENTIFICATION
11 : : * src/backend/storage/lmgr/proc.c
12 : : *
13 : : *-------------------------------------------------------------------------
14 : : */
15 : : /*
16 : : * Interface (a):
17 : : * JoinWaitQueue(), ProcSleep(), ProcWakeup()
18 : : *
19 : : * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20 : : * the lock wakes the process up again (and gives it an error code so it knows
21 : : * whether it was awoken on an error condition).
22 : : *
23 : : * Interface (b):
24 : : *
25 : : * ProcReleaseLocks -- frees the locks associated with current transaction
26 : : *
27 : : * ProcKill -- destroys the shared memory state (and locks)
28 : : * associated with the process.
29 : : */
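/*
 * Illustrative sketch of how a caller is expected to drive interface (a),
 * based on the JoinWaitQueue() and ProcSleep() comments further down in this
 * file.  Variable names here are illustrative only, and cleanup/error
 * handling is omitted:
 *
 *     LWLockAcquire(partitionLock, LW_EXCLUSIVE);
 *     ... set up LOCK and PROCLOCK entries to show the lock is "requested" ...
 *     waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
 *     LWLockRelease(partitionLock);
 *     if (waitResult == PROC_WAIT_STATUS_WAITING)
 *         waitResult = ProcSleep(locallock);
 *
 * ProcSleep() then returns PROC_WAIT_STATUS_OK once the lock is granted, or
 * PROC_WAIT_STATUS_ERROR if a deadlock is detected.
 */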
30 : : #include "postgres.h"
31 : :
32 : : #include <signal.h>
33 : : #include <unistd.h>
34 : : #include <sys/time.h>
35 : :
36 : : #include "access/transam.h"
37 : : #include "access/twophase.h"
38 : : #include "access/xlogutils.h"
39 : : #include "miscadmin.h"
40 : : #include "pgstat.h"
41 : : #include "postmaster/autovacuum.h"
42 : : #include "replication/slotsync.h"
43 : : #include "replication/syncrep.h"
44 : : #include "storage/condition_variable.h"
45 : : #include "storage/ipc.h"
46 : : #include "storage/lmgr.h"
47 : : #include "storage/pmsignal.h"
48 : : #include "storage/proc.h"
49 : : #include "storage/procarray.h"
50 : : #include "storage/procsignal.h"
51 : : #include "storage/spin.h"
52 : : #include "storage/standby.h"
53 : : #include "utils/timeout.h"
54 : : #include "utils/timestamp.h"
55 : :
56 : : /* GUC variables */
57 : : int DeadlockTimeout = 1000;
58 : : int StatementTimeout = 0;
59 : : int LockTimeout = 0;
60 : : int IdleInTransactionSessionTimeout = 0;
61 : : int TransactionTimeout = 0;
62 : : int IdleSessionTimeout = 0;
63 : : bool log_lock_waits = false;
64 : :
65 : : /* Pointer to this process's PGPROC struct, if any */
66 : : PGPROC *MyProc = NULL;
67 : :
68 : : /*
69 : : * This spinlock protects the freelist of recycled PGPROC structures.
70 : : * We cannot use an LWLock because the LWLock manager depends on already
71 : : * having a PGPROC and a wait semaphore! But these structures are touched
72 : : * relatively infrequently (only at backend startup or shutdown) and not for
73 : : * very long, so a spinlock is okay.
74 : : */
75 : : NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
76 : :
77 : : /* Pointers to shared-memory structures */
78 : : PROC_HDR *ProcGlobal = NULL;
79 : : NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
80 : : PGPROC *PreparedXactProcs = NULL;
81 : :
82 : : static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
83 : :
84 : : /* Is a deadlock check pending? */
85 : : static volatile sig_atomic_t got_deadlock_timeout;
86 : :
87 : : static void RemoveProcFromArray(int code, Datum arg);
88 : : static void ProcKill(int code, Datum arg);
89 : : static void AuxiliaryProcKill(int code, Datum arg);
90 : : static void CheckDeadLock(void);
91 : :
92 : :
93 : : /*
94 : : * Report shared-memory space needed by PGPROC.
95 : : */
96 : : static Size
157 tomas.vondra@postgre 97 :GIC 2938 : PGProcShmemSize(void)
98 : : {
7322 tgl@sss.pgh.pa.us 99 : 2938 : Size size = 0;
100 : : Size TotalProcs =
841 101 : 2938 : add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
102 : :
1849 andres@anarazel.de 103 : 2938 : size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
104 : 2938 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
105 : 2938 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
1755 alvherre@alvh.no-ip. 106 : 2938 : size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
107 : :
157 tomas.vondra@postgre 108 : 2938 : return size;
109 : : }
110 : :
111 : : /*
112 : : * Report shared-memory space needed by Fast-Path locks.
113 : : */
114 : : static Size
115 : 2938 : FastPathLockShmemSize(void)
116 : : {
117 : 2938 : Size size = 0;
118 : : Size TotalProcs =
119 : 2938 : add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
120 : : Size fpLockBitsSize,
121 : : fpRelIdSize;
122 : :
123 : : /*
124 : : * Memory needed for PGPROC fast-path lock arrays. Make sure the sizes are
125 : : * nicely aligned in each backend.
126 : : */
350 127 : 2938 : fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
186 128 : 2938 : fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
129 : :
350 130 : 2938 : size = add_size(size, mul_size(TotalProcs, (fpLockBitsSize + fpRelIdSize)));
131 : :
7647 tgl@sss.pgh.pa.us 132 : 2938 : return size;
133 : : }
134 : :
135 : : /*
136 : : * Report shared-memory space needed by InitProcGlobal.
137 : : */
138 : : Size
157 tomas.vondra@postgre 139 : 1909 : ProcGlobalShmemSize(void)
140 : : {
141 : 1909 : Size size = 0;
142 : :
143 : : /* ProcGlobal */
144 : 1909 : size = add_size(size, sizeof(PROC_HDR));
145 : 1909 : size = add_size(size, sizeof(slock_t));
146 : :
147 : 1909 : size = add_size(size, PGProcShmemSize());
148 : 1909 : size = add_size(size, FastPathLockShmemSize());
149 : :
150 : 1909 : return size;
151 : : }
152 : :
153 : : /*
154 : : * Report number of semaphores needed by InitProcGlobal.
155 : : */
156 : : int
7386 tgl@sss.pgh.pa.us 157 : 1909 : ProcGlobalSemas(void)
158 : : {
159 : : /*
160 : : * We need a sema per backend (including autovacuum), plus one for each
161 : : * auxiliary process.
162 : : */
1243 rhaas@postgresql.org 163 : 1909 : return MaxBackends + NUM_AUXILIARY_PROCS;
164 : : }
165 : :
166 : : /*
167 : : * InitProcGlobal -
168 : : * Initialize the global process table during postmaster or standalone
169 : : * backend startup.
170 : : *
171 : : * We also create all the per-process semaphores we will need to support
172 : : * the requested number of backends. We used to allocate semaphores
173 : : * only when backends were actually started up, but that is bad because
174 : : * it lets Postgres fail under load --- a lot of Unix systems are
175 : : * (mis)configured with small limits on the number of semaphores, and
176 : : * running out when trying to start another backend is a common failure.
177 : : * So, now we grab enough semaphores to support the desired max number
178 : : * of backends immediately at initialization --- if the sysadmin has set
179 : : * MaxConnections, max_worker_processes, max_wal_senders, or
180 : : * autovacuum_worker_slots higher than his kernel will support, he'll
181 : : * find out sooner rather than later.
182 : : *
183 : : * Another reason for creating semaphores here is that the semaphore
184 : : * implementation typically requires us to create semaphores in the
185 : : * postmaster, not in backends.
186 : : *
187 : : * Note: this is NOT called by individual backends under a postmaster,
188 : : * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
189 : : * pointers must be propagated specially for EXEC_BACKEND operation.
190 : : */
191 : : void
7386 tgl@sss.pgh.pa.us 192 : 1029 : InitProcGlobal(void)
193 : : {
194 : : PGPROC *procs;
195 : : int i,
196 : : j;
197 : : bool found;
1243 rhaas@postgresql.org 198 : 1029 : uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
199 : :
200 : : /* Used for setup of per-backend fast-path slots. */
201 : : char *fpPtr,
202 : : *fpEndPtr PG_USED_FOR_ASSERTS_ONLY;
203 : : Size fpLockBitsSize,
204 : : fpRelIdSize;
205 : : Size requestSize;
206 : : char *ptr;
207 : :
208 : : /* Create the ProcGlobal shared structure */
10226 bruce@momjian.us 209 : 1029 : ProcGlobal = (PROC_HDR *)
7185 tgl@sss.pgh.pa.us 210 : 1029 : ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
211 [ - + ]: 1029 : Assert(!found);
212 : :
213 : : /*
214 : : * Initialize the data structures.
215 : : */
5200 rhaas@postgresql.org 216 : 1029 : ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
962 andres@anarazel.de 217 : 1029 : dlist_init(&ProcGlobal->freeProcs);
218 : 1029 : dlist_init(&ProcGlobal->autovacFreeProcs);
219 : 1029 : dlist_init(&ProcGlobal->bgworkerFreeProcs);
220 : 1029 : dlist_init(&ProcGlobal->walsenderFreeProcs);
5149 tgl@sss.pgh.pa.us 221 : 1029 : ProcGlobal->startupBufferPinWaitBufId = -1;
309 heikki.linnakangas@i 222 : 1029 : ProcGlobal->walwriterProc = INVALID_PROC_NUMBER;
223 : 1029 : ProcGlobal->checkpointerProc = INVALID_PROC_NUMBER;
552 224 : 1029 : pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PROC_NUMBER);
225 : 1029 : pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PROC_NUMBER);
226 : :
227 : : /*
228 : : * Create and initialize all the PGPROC structures we'll need. There are
229 : : * six separate consumers: (1) normal backends, (2) autovacuum workers and
230 : : * special workers, (3) background workers, (4) walsenders, (5) auxiliary
231 : : * processes, and (6) prepared transactions. (For largely-historical
232 : : * reasons, we combine autovacuum and special workers into one category
233 : : * with a single freelist.) Each PGPROC structure is dedicated to exactly
234 : : * one of these purposes, and they do not move between groups.
235 : : */
157 tomas.vondra@postgre 236 : 1029 : requestSize = PGProcShmemSize();
237 : :
238 : 1029 : ptr = ShmemInitStruct("PGPROC structures",
239 : : requestSize,
240 : : &found);
241 : :
 242 [ + - + +  + - - + -  - ]: 1029 : MemSet(ptr, 0, requestSize);
243 : :
244 : 1029 : procs = (PGPROC *) ptr;
245 : 1029 : ptr = (char *) ptr + TotalProcs * sizeof(PGPROC);
246 : :
5215 rhaas@postgresql.org 247 : 1029 : ProcGlobal->allProcs = procs;
248 : : /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
1243 249 : 1029 : ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
250 : :
251 : : /*
252 : : * Allocate arrays mirroring PGPROC fields in a dense manner. See
253 : : * PROC_HDR.
254 : : *
255 : : * XXX: It might make sense to increase padding for these arrays, given
256 : : * how hotly they are accessed.
257 : : */
157 tomas.vondra@postgre 258 : 1029 : ProcGlobal->xids = (TransactionId *) ptr;
259 : 1029 : ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->xids));
260 : :
261 : 1029 : ProcGlobal->subxidStates = (XidCacheStatus *) ptr;
262 : 1029 : ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->subxidStates));
263 : :
264 : 1029 : ProcGlobal->statusFlags = (uint8 *) ptr;
265 : 1029 : ptr = (char *) ptr + (TotalProcs * sizeof(*ProcGlobal->statusFlags));
266 : :
 267 : : /* make sure we didn't overflow */
268 [ + - - + ]: 1029 : Assert((ptr > (char *) procs) && (ptr <= (char *) procs + requestSize));
269 : :
270 : : /*
271 : : * Allocate arrays for fast-path locks. Those are variable-length, so
272 : : * can't be included in PGPROC directly. We allocate a separate piece of
273 : : * shared memory and then divide that between backends.
274 : : */
350 275 : 1029 : fpLockBitsSize = MAXALIGN(FastPathLockGroupsPerBackend * sizeof(uint64));
186 276 : 1029 : fpRelIdSize = MAXALIGN(FastPathLockSlotsPerBackend() * sizeof(Oid));
277 : :
157 278 : 1029 : requestSize = FastPathLockShmemSize();
279 : :
280 : 1029 : fpPtr = ShmemInitStruct("Fast-Path Lock Array",
281 : : requestSize,
282 : : &found);
283 : :
 284 [ + - + -  + - - + -  - ]: 1029 : MemSet(fpPtr, 0, requestSize);
285 : :
286 : : /* For asserts checking we did not overflow. */
287 : 1029 : fpEndPtr = fpPtr + requestSize;
288 : :
5200 rhaas@postgresql.org 289 [ + + ]: 135222 : for (i = 0; i < TotalProcs; i++)
290 : : {
962 andres@anarazel.de 291 : 134193 : PGPROC *proc = &procs[i];
292 : :
293 : : /* Common initialization for all PGPROCs, regardless of type. */
294 : :
295 : : /*
296 : : * Set the fast-path lock arrays, and move the pointer. We interleave
297 : : * the two arrays, to (hopefully) get some locality for each backend.
298 : : */
350 tomas.vondra@postgre 299 : 134193 : proc->fpLockBits = (uint64 *) fpPtr;
300 : 134193 : fpPtr += fpLockBitsSize;
301 : :
302 : 134193 : proc->fpRelId = (Oid *) fpPtr;
303 : 134193 : fpPtr += fpRelIdSize;
304 : :
305 [ - + ]: 134193 : Assert(fpPtr <= fpEndPtr);
306 : :
307 : : /*
308 : : * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
309 : : * dummy PGPROCs don't need these though - they're never associated
310 : : * with a real process
311 : : */
1243 rhaas@postgresql.org 312 [ + + ]: 134193 : if (i < MaxBackends + NUM_AUXILIARY_PROCS)
313 : : {
962 andres@anarazel.de 314 : 133360 : proc->sem = PGSemaphoreCreate();
315 : 133360 : InitSharedLatch(&(proc->procLatch));
316 : 133360 : LWLockInitialize(&(proc->fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
317 : : }
318 : :
319 : : /*
320 : : * Newly created PGPROCs for normal backends, autovacuum workers,
321 : : * special workers, bgworkers, and walsenders must be queued up on the
322 : : * appropriate free list. Because there can only ever be a small,
323 : : * fixed number of auxiliary processes, no free list is used in that
324 : : * case; InitAuxiliaryProcess() instead uses a linear search. PGPROCs
325 : : * for prepared transactions are added to a free list by
326 : : * TwoPhaseShmemInit().
327 : : */
5200 rhaas@postgresql.org 328 [ + + ]: 134193 : if (i < MaxConnections)
329 : : {
330 : : /* PGPROC for normal backend, add to freeProcs list */
552 heikki.linnakangas@i 331 : 65851 : dlist_push_tail(&ProcGlobal->freeProcs, &proc->links);
962 andres@anarazel.de 332 : 65851 : proc->procgloballist = &ProcGlobal->freeProcs;
333 : : }
243 nathan@postgresql.or 334 [ + + ]: 68342 : else if (i < MaxConnections + autovacuum_worker_slots + NUM_SPECIAL_WORKER_PROCS)
335 : : {
336 : : /* PGPROC for AV or special worker, add to autovacFreeProcs list */
552 heikki.linnakangas@i 337 : 13205 : dlist_push_tail(&ProcGlobal->autovacFreeProcs, &proc->links);
962 andres@anarazel.de 338 : 13205 : proc->procgloballist = &ProcGlobal->autovacFreeProcs;
339 : : }
243 nathan@postgresql.or 340 [ + + ]: 55137 : else if (i < MaxConnections + autovacuum_worker_slots + NUM_SPECIAL_WORKER_PROCS + max_worker_processes)
341 : : {
342 : : /* PGPROC for bgworker, add to bgworkerFreeProcs list */
552 heikki.linnakangas@i 343 : 8230 : dlist_push_tail(&ProcGlobal->bgworkerFreeProcs, &proc->links);
962 andres@anarazel.de 344 : 8230 : proc->procgloballist = &ProcGlobal->bgworkerFreeProcs;
345 : : }
1243 rhaas@postgresql.org 346 [ + + ]: 46907 : else if (i < MaxBackends)
347 : : {
348 : : /* PGPROC for walsender, add to walsenderFreeProcs list */
552 heikki.linnakangas@i 349 : 6972 : dlist_push_tail(&ProcGlobal->walsenderFreeProcs, &proc->links);
962 andres@anarazel.de 350 : 6972 : proc->procgloballist = &ProcGlobal->walsenderFreeProcs;
351 : : }
352 : :
353 : : /* Initialize myProcLocks[] shared memory queues. */
5058 rhaas@postgresql.org 354 [ + + ]: 2281281 : for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
962 andres@anarazel.de 355 : 2147088 : dlist_init(&(proc->myProcLocks[j]));
356 : :
357 : : /* Initialize lockGroupMembers list. */
358 : 134193 : dlist_init(&proc->lockGroupMembers);
359 : :
360 : : /*
361 : : * Initialize the atomic variables, otherwise, it won't be safe to
362 : : * access them for backends that aren't currently in use.
363 : : */
552 heikki.linnakangas@i 364 : 134193 : pg_atomic_init_u32(&(proc->procArrayGroupNext), INVALID_PROC_NUMBER);
365 : 134193 : pg_atomic_init_u32(&(proc->clogGroupNext), INVALID_PROC_NUMBER);
962 andres@anarazel.de 366 : 134193 : pg_atomic_init_u64(&(proc->waitStart), 0);
367 : : }
368 : :
369 : : /* Should have consumed exactly the expected amount of fast-path memory. */
348 tomas.vondra@postgre 370 [ - + ]: 1029 : Assert(fpPtr == fpEndPtr);
371 : :
372 : : /*
373 : : * Save pointers to the blocks of PGPROC structures reserved for auxiliary
374 : : * processes and prepared transactions.
375 : : */
1243 rhaas@postgresql.org 376 : 1029 : AuxiliaryProcs = &procs[MaxBackends];
377 : 1029 : PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
378 : :
379 : : /* Create ProcStructLock spinlock, too */
157 tomas.vondra@postgre 380 : 1029 : ProcStructLock = (slock_t *) ShmemInitStruct("ProcStructLock spinlock",
381 : : sizeof(slock_t),
382 : : &found);
7185 tgl@sss.pgh.pa.us 383 : 1029 : SpinLockInit(ProcStructLock);
10651 scrappy@hub.org 384 : 1029 : }
385 : :
386 : : /*
387 : : * InitProcess -- initialize a per-process PGPROC entry for this backend
388 : : */
389 : : void
9048 tgl@sss.pgh.pa.us 390 : 14852 : InitProcess(void)
391 : : {
392 : : dlist_head *procgloballist;
393 : :
394 : : /*
395 : : * ProcGlobal should be set up already (if we are a backend, we inherit
396 : : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
397 : : */
3613 rhaas@postgresql.org 398 [ - + ]: 14852 : if (ProcGlobal == NULL)
8080 tgl@sss.pgh.pa.us 399 [ # # ]:UIC 0 : elog(PANIC, "proc header uninitialized");
400 : :
8765 tgl@sss.pgh.pa.us 401 [ - + ]:GIC 14852 : if (MyProc != NULL)
8080 tgl@sss.pgh.pa.us 402 [ # # ]:UIC 0 : elog(ERROR, "you already exist");
403 : :
404 : : /*
405 : : * Before we start accessing the shared memory in a serious way, mark
406 : : * ourselves as an active postmaster child; this is so that the postmaster
407 : : * can detect it if we exit without cleaning up.
408 : : */
296 heikki.linnakangas@i 409 [ + + ]:GIC 14852 : if (IsUnderPostmaster)
333 410 : 14733 : RegisterPostmasterChildActive();
411 : :
412 : : /*
413 : : * Decide which list should supply our PGPROC. This logic must match the
414 : : * way the freelists were constructed in InitProcGlobal().
415 : : */
 252 tgl@sss.pgh.pa.us 416 [ + + + +  + + ]: 14852 : if (AmAutoVacuumWorkerProcess() || AmSpecialWorkerProcess())
3613 rhaas@postgresql.org 417 : 393 : procgloballist = &ProcGlobal->autovacFreeProcs;
551 heikki.linnakangas@i 418 [ + + ]: 14459 : else if (AmBackgroundWorkerProcess())
3613 rhaas@postgresql.org 419 : 2306 : procgloballist = &ProcGlobal->bgworkerFreeProcs;
551 heikki.linnakangas@i 420 [ + + ]: 12153 : else if (AmWalSenderProcess())
2398 michael@paquier.xyz 421 : 1101 : procgloballist = &ProcGlobal->walsenderFreeProcs;
422 : : else
3613 rhaas@postgresql.org 423 : 11052 : procgloballist = &ProcGlobal->freeProcs;
424 : :
425 : : /*
426 : : * Try to get a proc struct from the appropriate free list. If this
427 : : * fails, we must be out of PGPROC structures (not to mention semaphores).
428 : : *
429 : : * While we are holding the ProcStructLock, also copy the current shared
430 : : * estimate of spins_per_delay to local storage.
431 : : */
8743 tgl@sss.pgh.pa.us 432 [ + + ]: 14852 : SpinLockAcquire(ProcStructLock);
433 : :
3613 rhaas@postgresql.org 434 : 14852 : set_spins_per_delay(ProcGlobal->spins_per_delay);
435 : :
962 andres@anarazel.de 436 [ + + ]: 14852 : if (!dlist_is_empty(procgloballist))
437 : : {
428 heikki.linnakangas@i 438 : 14849 : MyProc = dlist_container(PGPROC, links, dlist_pop_head_node(procgloballist));
8743 tgl@sss.pgh.pa.us 439 : 14849 : SpinLockRelease(ProcStructLock);
440 : : }
441 : : else
442 : : {
443 : : /*
444 : : * If we reach here, all the PGPROCs are in use. This is one of the
445 : : * possible places to detect "too many backends", so give the standard
446 : : * error message. XXX do we need to give a different failure message
447 : : * in the autovacuum case?
448 : : */
449 : 3 : SpinLockRelease(ProcStructLock);
551 heikki.linnakangas@i 450 [ + + ]: 3 : if (AmWalSenderProcess())
2398 michael@paquier.xyz 451 [ + - ]: 2 : ereport(FATAL,
452 : : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
453 : : errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
454 : : max_wal_senders)));
8080 tgl@sss.pgh.pa.us 455 [ + - ]: 1 : ereport(FATAL,
456 : : (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
457 : : errmsg("sorry, too many clients already")));
458 : : }
562 heikki.linnakangas@i 459 : 14849 : MyProcNumber = GetNumberFromPGProc(MyProc);
460 : :
461 : : /*
462 : : * Cross-check that the PGPROC is of the type we expect; if this were not
463 : : * the case, it would get returned to the wrong list.
464 : : */
3693 rhaas@postgresql.org 465 [ - + ]: 14849 : Assert(MyProc->procgloballist == procgloballist);
466 : :
467 : : /*
468 : : * Initialize all fields of MyProc, except for those previously
469 : : * initialized by InitProcGlobal.
470 : : */
962 andres@anarazel.de 471 : 14849 : dlist_node_init(&MyProc->links);
1907 peter@eisentraut.org 472 : 14849 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
4664 simon@2ndQuadrant.co 473 : 14849 : MyProc->fpVXIDLock = false;
474 : 14849 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
1849 andres@anarazel.de 475 : 14849 : MyProc->xid = InvalidTransactionId;
1850 476 : 14849 : MyProc->xmin = InvalidTransactionId;
8743 tgl@sss.pgh.pa.us 477 : 14849 : MyProc->pid = MyProcPid;
552 heikki.linnakangas@i 478 : 14849 : MyProc->vxid.procNumber = MyProcNumber;
479 : 14849 : MyProc->vxid.lxid = InvalidLocalTransactionId;
480 : : /* databaseId and roleId will be filled in later */
7185 tgl@sss.pgh.pa.us 481 : 14849 : MyProc->databaseId = InvalidOid;
7342 482 : 14849 : MyProc->roleId = InvalidOid;
2581 michael@paquier.xyz 483 : 14849 : MyProc->tempNamespaceId = InvalidOid;
252 tgl@sss.pgh.pa.us 484 : 14849 : MyProc->isRegularBackend = AmRegularBackendProcess();
1247 rhaas@postgresql.org 485 : 14849 : MyProc->delayChkptFlags = 0;
1755 alvherre@alvh.no-ip. 486 : 14849 : MyProc->statusFlags = 0;
487 : : /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
551 heikki.linnakangas@i 488 [ + + ]: 14849 : if (AmAutoVacuumWorkerProcess())
1755 alvherre@alvh.no-ip. 489 : 33 : MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
1021 andres@anarazel.de 490 : 14849 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
4968 heikki.linnakangas@i 491 : 14849 : MyProc->lwWaitMode = 0;
8993 tgl@sss.pgh.pa.us 492 : 14849 : MyProc->waitLock = NULL;
7680 493 : 14849 : MyProc->waitProcLock = NULL;
1657 fujii@postgresql.org 494 : 14849 : pg_atomic_write_u64(&MyProc->waitStart, 0);
495 : : #ifdef USE_ASSERT_CHECKING
496 : : {
497 : : int i;
498 : :
499 : : /* Last process should have released all locks. */
5058 rhaas@postgresql.org 500 [ + + ]: 252433 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
962 andres@anarazel.de 501 [ - + ]: 237584 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
502 : : }
503 : : #endif
5712 simon@2ndQuadrant.co 504 : 14849 : MyProc->recoveryConflictPending = false;
505 : :
506 : : /* Initialize fields for sync rep */
4822 heikki.linnakangas@i 507 : 14849 : MyProc->waitLSN = 0;
5298 simon@2ndQuadrant.co 508 : 14849 : MyProc->syncRepState = SYNC_REP_NOT_WAITING;
962 andres@anarazel.de 509 : 14849 : dlist_node_init(&MyProc->syncRepLinks);
510 : :
511 : : /* Initialize fields for group XID clearing. */
3495 rhaas@postgresql.org 512 : 14849 : MyProc->procArrayGroupMember = false;
513 : 14849 : MyProc->procArrayGroupMemberXid = InvalidTransactionId;
552 heikki.linnakangas@i 514 [ - + ]: 14849 : Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PROC_NUMBER);
515 : :
516 : : /* Check that group locking fields are in a proper initial state. */
3499 rhaas@postgresql.org 517 [ - + ]: 14849 : Assert(MyProc->lockGroupLeader == NULL);
518 [ - + ]: 14849 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
519 : :
520 : : /* Initialize wait event information. */
3467 521 : 14849 : MyProc->wait_event_info = 0;
522 : :
523 : : /* Initialize fields for group transaction status update. */
2927 524 : 14849 : MyProc->clogGroupMember = false;
525 : 14849 : MyProc->clogGroupMemberXid = InvalidTransactionId;
526 : 14849 : MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
527 : 14849 : MyProc->clogGroupMemberPage = -1;
528 : 14849 : MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
552 heikki.linnakangas@i 529 [ - + ]: 14849 : Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PROC_NUMBER);
530 : :
531 : : /*
532 : : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
533 : : * on it. That allows us to repoint the process latch, which so far
 534 : : * points to the process-local one, to the shared one.
535 : : */
5141 tgl@sss.pgh.pa.us 536 : 14849 : OwnLatch(&MyProc->procLatch);
3888 andres@anarazel.de 537 : 14849 : SwitchToSharedLatch();
538 : :
539 : : /* now that we have a proc, report wait events to shared memory */
1617 540 : 14849 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
541 : :
542 : : /*
543 : : * We might be reusing a semaphore that belonged to a failed process. So
544 : : * be careful and reinitialize its value here. (This is not strictly
545 : : * necessary anymore, but seems like a good idea for cleanliness.)
546 : : */
3190 tgl@sss.pgh.pa.us 547 : 14849 : PGSemaphoreReset(MyProc->sem);
548 : :
549 : : /*
550 : : * Arrange to clean up at backend exit.
551 : : */
9001 552 : 14849 : on_shmem_exit(ProcKill, 0);
553 : :
554 : : /*
555 : : * Now that we have a PGPROC, we could try to acquire locks, so initialize
556 : : * local state needed for LWLocks, and the deadlock checker.
557 : : */
4086 heikki.linnakangas@i 558 : 14849 : InitLWLockAccess();
8990 tgl@sss.pgh.pa.us 559 : 14849 : InitDeadLockChecking();
560 : :
561 : : #ifdef EXEC_BACKEND
562 : :
563 : : /*
564 : : * Initialize backend-local pointers to all the shared data structures.
565 : : * (We couldn't do this until now because it needs LWLocks.)
566 : : */
567 : : if (IsUnderPostmaster)
568 : : AttachSharedMemoryStructs();
569 : : #endif
9001 570 : 14849 : }
571 : :
572 : : /*
573 : : * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
574 : : *
575 : : * This is separate from InitProcess because we can't acquire LWLocks until
576 : : * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
577 : : * work until after we've done AttachSharedMemoryStructs.
578 : : */
579 : : void
7185 580 : 14840 : InitProcessPhase2(void)
581 : : {
582 [ - + ]: 14840 : Assert(MyProc != NULL);
583 : :
584 : : /*
585 : : * Add our PGPROC to the PGPROC array in shared memory.
586 : : */
587 : 14840 : ProcArrayAdd(MyProc);
588 : :
589 : : /*
590 : : * Arrange to clean that up at backend exit.
591 : : */
592 : 14840 : on_shmem_exit(RemoveProcFromArray, 0);
593 : 14840 : }
594 : :
595 : : /*
596 : : * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
597 : : *
598 : : * This is called by bgwriter and similar processes so that they will have a
599 : : * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
600 : : * and sema that are assigned are one of the extra ones created during
601 : : * InitProcGlobal.
602 : : *
603 : : * Auxiliary processes are presently not expected to wait for real (lockmgr)
604 : : * locks, so we need not set up the deadlock checker. They are never added
605 : : * to the ProcArray or the sinval messaging mechanism, either. They also
606 : : * don't get a VXID assigned, since this is only useful when we actually
607 : : * hold lockmgr locks.
608 : : *
609 : : * Startup process however uses locks but never waits for them in the
610 : : * normal backend sense. Startup process also takes part in sinval messaging
611 : : * as a sendOnly process, so never reads messages from sinval queue. So
612 : : * Startup process does have a VXID and does show up in pg_locks.
613 : : */
614 : : void
6758 alvherre@alvh.no-ip. 615 : 3919 : InitAuxiliaryProcess(void)
616 : : {
617 : : PGPROC *auxproc;
618 : : int proctype;
619 : :
620 : : /*
621 : : * ProcGlobal should be set up already (if we are a backend, we inherit
622 : : * this by fork() or EXEC_BACKEND mechanism from the postmaster).
623 : : */
624 [ + - - + ]: 3919 : if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
8080 tgl@sss.pgh.pa.us 625 [ # # ]:UIC 0 : elog(PANIC, "proc header uninitialized");
626 : :
8743 tgl@sss.pgh.pa.us 627 [ - + ]:GIC 3919 : if (MyProc != NULL)
8080 tgl@sss.pgh.pa.us 628 [ # # ]:UIC 0 : elog(ERROR, "you already exist");
629 : :
296 heikki.linnakangas@i 630 [ + - ]:GIC 3919 : if (IsUnderPostmaster)
631 : 3919 : RegisterPostmasterChildActive();
632 : :
633 : : /*
634 : : * We use the ProcStructLock to protect assignment and releasing of
635 : : * AuxiliaryProcs entries.
636 : : *
637 : : * While we are holding the ProcStructLock, also copy the current shared
638 : : * estimate of spins_per_delay to local storage.
639 : : */
7270 tgl@sss.pgh.pa.us 640 [ + + ]: 3919 : SpinLockAcquire(ProcStructLock);
641 : :
642 : 3919 : set_spins_per_delay(ProcGlobal->spins_per_delay);
643 : :
644 : : /*
645 : : * Find a free auxproc ... *big* trouble if there isn't one ...
646 : : */
6758 alvherre@alvh.no-ip. 647 [ + - ]: 16167 : for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
648 : : {
649 : 16167 : auxproc = &AuxiliaryProcs[proctype];
650 [ + + ]: 16167 : if (auxproc->pid == 0)
7185 tgl@sss.pgh.pa.us 651 : 3919 : break;
652 : : }
6758 alvherre@alvh.no-ip. 653 [ - + ]: 3919 : if (proctype >= NUM_AUXILIARY_PROCS)
654 : : {
7270 tgl@sss.pgh.pa.us 655 :UIC 0 : SpinLockRelease(ProcStructLock);
6758 alvherre@alvh.no-ip. 656 [ # # ]: 0 : elog(FATAL, "all AuxiliaryProcs are in use");
657 : : }
658 : :
659 : : /* Mark auxiliary proc as in use by me */
660 : : /* use volatile pointer to prevent code rearrangement */
6758 alvherre@alvh.no-ip. 661 :GIC 3919 : ((volatile PGPROC *) auxproc)->pid = MyProcPid;
662 : :
7270 tgl@sss.pgh.pa.us 663 : 3919 : SpinLockRelease(ProcStructLock);
664 : :
552 heikki.linnakangas@i 665 : 3919 : MyProc = auxproc;
562 666 : 3919 : MyProcNumber = GetNumberFromPGProc(MyProc);
667 : :
668 : : /*
669 : : * Initialize all fields of MyProc, except for those previously
670 : : * initialized by InitProcGlobal.
671 : : */
962 andres@anarazel.de 672 : 3919 : dlist_node_init(&MyProc->links);
1907 peter@eisentraut.org 673 : 3919 : MyProc->waitStatus = PROC_WAIT_STATUS_OK;
4664 simon@2ndQuadrant.co 674 : 3919 : MyProc->fpVXIDLock = false;
675 : 3919 : MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
1849 andres@anarazel.de 676 : 3919 : MyProc->xid = InvalidTransactionId;
1850 677 : 3919 : MyProc->xmin = InvalidTransactionId;
552 heikki.linnakangas@i 678 : 3919 : MyProc->vxid.procNumber = INVALID_PROC_NUMBER;
679 : 3919 : MyProc->vxid.lxid = InvalidLocalTransactionId;
7185 tgl@sss.pgh.pa.us 680 : 3919 : MyProc->databaseId = InvalidOid;
7342 681 : 3919 : MyProc->roleId = InvalidOid;
2581 michael@paquier.xyz 682 : 3919 : MyProc->tempNamespaceId = InvalidOid;
252 tgl@sss.pgh.pa.us 683 : 3919 : MyProc->isRegularBackend = false;
1247 rhaas@postgresql.org 684 : 3919 : MyProc->delayChkptFlags = 0;
1755 alvherre@alvh.no-ip. 685 : 3919 : MyProc->statusFlags = 0;
1021 andres@anarazel.de 686 : 3919 : MyProc->lwWaiting = LW_WS_NOT_WAITING;
4968 heikki.linnakangas@i 687 : 3919 : MyProc->lwWaitMode = 0;
8743 tgl@sss.pgh.pa.us 688 : 3919 : MyProc->waitLock = NULL;
7680 689 : 3919 : MyProc->waitProcLock = NULL;
1657 fujii@postgresql.org 690 : 3919 : pg_atomic_write_u64(&MyProc->waitStart, 0);
691 : : #ifdef USE_ASSERT_CHECKING
692 : : {
693 : : int i;
694 : :
695 : : /* Last process should have released all locks. */
5058 rhaas@postgresql.org 696 [ + + ]: 66623 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
962 andres@anarazel.de 697 [ - + ]: 62704 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
698 : : }
699 : : #endif
700 : :
701 : : /*
702 : : * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
703 : : * on it. That allows us to repoint the process latch, which so far
 704 : : * points to the process-local one, to the shared one.
705 : : */
5141 tgl@sss.pgh.pa.us 706 : 3919 : OwnLatch(&MyProc->procLatch);
3888 andres@anarazel.de 707 : 3919 : SwitchToSharedLatch();
708 : :
709 : : /* now that we have a proc, report wait events to shared memory */
1617 710 : 3919 : pgstat_set_wait_event_storage(&MyProc->wait_event_info);
711 : :
712 : : /* Check that group locking fields are in a proper initial state. */
3499 rhaas@postgresql.org 713 [ - + ]: 3919 : Assert(MyProc->lockGroupLeader == NULL);
714 [ - + ]: 3919 : Assert(dlist_is_empty(&MyProc->lockGroupMembers));
715 : :
716 : : /*
717 : : * We might be reusing a semaphore that belonged to a failed process. So
718 : : * be careful and reinitialize its value here. (This is not strictly
719 : : * necessary anymore, but seems like a good idea for cleanliness.)
720 : : */
3190 tgl@sss.pgh.pa.us 721 : 3919 : PGSemaphoreReset(MyProc->sem);
722 : :
723 : : /*
724 : : * Arrange to clean up at process exit.
725 : : */
6758 alvherre@alvh.no-ip. 726 : 3919 : on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
727 : :
728 : : /*
729 : : * Now that we have a PGPROC, we could try to acquire lightweight locks.
730 : : * Initialize local state needed for them. (Heavyweight locks cannot be
731 : : * acquired in aux processes.)
732 : : */
645 heikki.linnakangas@i 733 : 3919 : InitLWLockAccess();
734 : :
735 : : #ifdef EXEC_BACKEND
736 : :
737 : : /*
738 : : * Initialize backend-local pointers to all the shared data structures.
739 : : * (We couldn't do this until now because it needs LWLocks.)
740 : : */
741 : : if (IsUnderPostmaster)
742 : : AttachSharedMemoryStructs();
743 : : #endif
10651 scrappy@hub.org 744 : 3919 : }
745 : :
746 : : /*
747 : : * Used from bufmgr to share the value of the buffer that Startup waits on,
748 : : * or to reset the value to "not waiting" (-1). This allows processing
749 : : * of recovery conflicts for buffer pins. Set is made before backends look
750 : : * at this value, so locking not required, especially since the set is
751 : : * an atomic integer set operation.
752 : : */
753 : : void
5705 simon@2ndQuadrant.co 754 : 18 : SetStartupBufferPinWaitBufId(int bufid)
755 : : {
756 : : /* use volatile pointer to prevent code rearrangement */
757 : 18 : volatile PROC_HDR *procglobal = ProcGlobal;
758 : :
759 : 18 : procglobal->startupBufferPinWaitBufId = bufid;
760 : 18 : }
761 : :
762 : : /*
763 : : * Used by backends when they receive a request to check for buffer pin waits.
764 : : */
765 : : int
766 : 3 : GetStartupBufferPinWaitBufId(void)
767 : : {
768 : : /* use volatile pointer to prevent code rearrangement */
769 : 3 : volatile PROC_HDR *procglobal = ProcGlobal;
770 : :
5149 tgl@sss.pgh.pa.us 771 : 3 : return procglobal->startupBufferPinWaitBufId;
772 : : }
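/*
 * Rough usage sketch for the two functions above (simplified; the real
 * callers live in bufmgr and the recovery-conflict handling code, and the
 * details shown here are assumptions, not copied from them):
 *
 *     In the Startup process, around a buffer-pin wait:
 *         SetStartupBufferPinWaitBufId(bufid);     advertise the buffer
 *         ... wait for the pin to be released ...
 *         SetStartupBufferPinWaitBufId(-1);        back to "not waiting"
 *
 *     In a normal backend, when asked to check for buffer pin waits:
 *         int bufid = GetStartupBufferPinWaitBufId();
 *         if (bufid >= 0 and we hold a pin on that buffer)
 *             ... resolve the recovery conflict ...
 */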
773 : :
774 : : /*
775 : : * Check whether there are at least N free PGPROC objects. If false is
776 : : * returned, *nfree will be set to the number of free PGPROC objects.
777 : : * Otherwise, *nfree will be set to n.
778 : : *
779 : : * Note: this is designed on the assumption that N will generally be small.
780 : : */
781 : : bool
960 rhaas@postgresql.org 782 : 355 : HaveNFreeProcs(int n, int *nfree)
783 : : {
784 : : dlist_iter iter;
785 : :
786 [ - + ]: 355 : Assert(n > 0);
787 [ - + ]: 355 : Assert(nfree);
788 : :
7386 tgl@sss.pgh.pa.us 789 [ - + ]: 355 : SpinLockAcquire(ProcStructLock);
790 : :
960 rhaas@postgresql.org 791 : 355 : *nfree = 0;
962 andres@anarazel.de 792 [ + - + + ]: 1063 : dlist_foreach(iter, &ProcGlobal->freeProcs)
793 : : {
960 rhaas@postgresql.org 794 : 1061 : (*nfree)++;
795 [ + + ]: 1061 : if (*nfree == n)
962 andres@anarazel.de 796 : 353 : break;
797 : : }
798 : :
7386 tgl@sss.pgh.pa.us 799 : 355 : SpinLockRelease(ProcStructLock);
800 : :
960 rhaas@postgresql.org 801 : 355 : return (*nfree == n);
802 : : }
803 : :
804 : : /*
805 : : * Cancel any pending wait for lock, when aborting a transaction, and revert
806 : : * any strong lock count acquisition for a lock being acquired.
807 : : *
808 : : * (Normally, this would only happen if we accept a cancel/die
809 : : * interrupt while waiting; but an ereport(ERROR) before or during the lock
810 : : * wait is within the realm of possibility, too.)
811 : : */
812 : : void
4889 813 : 348886 : LockErrorCleanup(void)
814 : : {
815 : : LOCALLOCK *lockAwaited;
816 : : LWLock *partitionLock;
817 : : DisableTimeoutParams timeouts[2];
818 : :
3869 heikki.linnakangas@i 819 : 348886 : HOLD_INTERRUPTS();
820 : :
4889 rhaas@postgresql.org 821 : 348886 : AbortStrongLockAcquire();
822 : :
823 : : /* Nothing to do if we weren't waiting for a lock */
306 heikki.linnakangas@i 824 : 348886 : lockAwaited = GetAwaitedLock();
7209 tgl@sss.pgh.pa.us 825 [ + + ]: 348886 : if (lockAwaited == NULL)
826 : : {
3869 heikki.linnakangas@i 827 [ - + ]: 348846 : RESUME_INTERRUPTS();
6433 tgl@sss.pgh.pa.us 828 : 348846 : return;
829 : : }
830 : :
831 : : /*
832 : : * Turn off the deadlock and lock timeout timers, if they are still
833 : : * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
834 : : * indicator flag, since this function is executed before
835 : : * ProcessInterrupts when responding to SIGINT; else we'd lose the
836 : : * knowledge that the SIGINT came from a lock timeout and not an external
837 : : * source.
838 : : */
4557 839 : 40 : timeouts[0].id = DEADLOCK_TIMEOUT;
840 : 40 : timeouts[0].keep_indicator = false;
841 : 40 : timeouts[1].id = LOCK_TIMEOUT;
842 : 40 : timeouts[1].keep_indicator = true;
843 : 40 : disable_timeouts(timeouts, 2);
844 : :
845 : : /* Unlink myself from the wait queue, if on it (might not be anymore!) */
6985 846 : 40 : partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
7209 847 : 40 : LWLockAcquire(partitionLock, LW_EXCLUSIVE);
848 : :
962 andres@anarazel.de 849 [ + + ]: 40 : if (!dlist_node_is_detached(&MyProc->links))
850 : : {
851 : : /* We could not have been granted the lock yet */
6985 tgl@sss.pgh.pa.us 852 : 39 : RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
853 : : }
854 : : else
855 : : {
856 : : /*
857 : : * Somebody kicked us off the lock queue already. Perhaps they
858 : : * granted us the lock, or perhaps they detected a deadlock. If they
859 : : * did grant us the lock, we'd better remember it in our local lock
860 : : * table.
861 : : */
1907 peter@eisentraut.org 862 [ + - ]: 1 : if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
7680 tgl@sss.pgh.pa.us 863 : 1 : GrantAwaitedLock();
864 : : }
865 : :
162 heikki.linnakangas@i 866 : 40 : ResetAwaitedLock();
867 : :
7209 tgl@sss.pgh.pa.us 868 : 40 : LWLockRelease(partitionLock);
869 : :
3869 heikki.linnakangas@i 870 [ - + ]: 40 : RESUME_INTERRUPTS();
871 : : }
872 : :
873 : :
874 : : /*
875 : : * ProcReleaseLocks() -- release locks associated with current transaction
876 : : * at main transaction commit or abort
877 : : *
878 : : * At main transaction commit, we release standard locks except session locks.
879 : : * At main transaction abort, we release all locks including session locks.
880 : : *
881 : : * Advisory locks are released only if they are transaction-level;
882 : : * session-level holds remain, whether this is a commit or not.
883 : : *
884 : : * At subtransaction commit, we don't release any locks (so this func is not
885 : : * needed at all); we will defer the releasing to the parent transaction.
886 : : * At subtransaction abort, we release all locks held by the subtransaction;
887 : : * this is implemented by retail releasing of the locks under control of
888 : : * the ResourceOwner mechanism.
889 : : */
890 : : void
7721 tgl@sss.pgh.pa.us 891 : 318614 : ProcReleaseLocks(bool isCommit)
892 : : {
10226 bruce@momjian.us 893 [ - + ]: 318614 : if (!MyProc)
10226 bruce@momjian.us 894 :UIC 0 : return;
895 : : /* If waiting, get off wait queue (should only be needed after error) */
4889 rhaas@postgresql.org 896 :GIC 318614 : LockErrorCleanup();
897 : : /* Release standard locks, including session-level if aborting */
7680 tgl@sss.pgh.pa.us 898 : 318614 : LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
899 : : /* Release transaction-level advisory locks */
5314 itagaki.takahiro@gma 900 : 318614 : LockReleaseAll(USER_LOCKMETHOD, false);
901 : : }
902 : :
903 : :
904 : : /*
905 : : * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
906 : : */
907 : : static void
7185 tgl@sss.pgh.pa.us 908 : 14840 : RemoveProcFromArray(int code, Datum arg)
909 : : {
910 [ - + ]: 14840 : Assert(MyProc != NULL);
6573 911 : 14840 : ProcArrayRemove(MyProc, InvalidTransactionId);
7185 912 : 14840 : }
913 : :
914 : : /*
915 : : * ProcKill() -- Destroy the per-proc data structure for
916 : : * this process. Release any of its held LW locks.
917 : : */
918 : : static void
7939 peter_e@gmx.net 919 : 14849 : ProcKill(int code, Datum arg)
920 : : {
921 : : PGPROC *proc;
922 : : dlist_head *procgloballist;
923 : :
8765 tgl@sss.pgh.pa.us 924 [ - + ]: 14849 : Assert(MyProc != NULL);
925 : :
926 : : /* not safe if forked by system(), etc. */
690 nathan@postgresql.or 927 [ - + ]: 14849 : if (MyProc->pid != (int) getpid())
690 nathan@postgresql.or 928 [ # # ]:UIC 0 : elog(PANIC, "ProcKill() called in child process");
929 : :
930 : : /* Make sure we're out of the sync rep lists */
5141 tgl@sss.pgh.pa.us 931 :GIC 14849 : SyncRepCleanupAtProcExit();
932 : :
933 : : #ifdef USE_ASSERT_CHECKING
934 : : {
935 : : int i;
936 : :
937 : : /* Last process should have released all locks. */
5058 rhaas@postgresql.org 938 [ + + ]: 252433 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
962 andres@anarazel.de 939 [ - + ]: 237584 : Assert(dlist_is_empty(&(MyProc->myProcLocks[i])));
940 : : }
941 : : #endif
942 : :
943 : : /*
944 : : * Release any LW locks I am holding. There really shouldn't be any, but
945 : : * it's cheap to check again before we cut the knees off the LWLock
946 : : * facility by releasing our PGPROC ...
947 : : */
7334 tgl@sss.pgh.pa.us 948 : 14849 : LWLockReleaseAll();
949 : :
950 : : /* Cancel any pending condition variable sleep, too */
3210 rhaas@postgresql.org 951 : 14849 : ConditionVariableCancelSleep();
952 : :
953 : : /*
954 : : * Detach from any lock group of which we are a member. If the leader
955 : : * exits before all other group members, its PGPROC will remain allocated
956 : : * until the last group process exits; that process must return the
957 : : * leader's PGPROC to the appropriate list.
958 : : */
3499 959 [ + + ]: 14849 : if (MyProc->lockGroupLeader != NULL)
960 : : {
961 : 1448 : PGPROC *leader = MyProc->lockGroupLeader;
962 : 1448 : LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
963 : :
964 : 1448 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
965 [ - + ]: 1448 : Assert(!dlist_is_empty(&leader->lockGroupMembers));
966 : 1448 : dlist_delete(&MyProc->lockGroupLink);
967 [ + + ]: 1448 : if (dlist_is_empty(&leader->lockGroupMembers))
968 : : {
969 : 70 : leader->lockGroupLeader = NULL;
970 [ - + ]: 70 : if (leader != MyProc)
971 : : {
3499 rhaas@postgresql.org 972 :UIC 0 : procgloballist = leader->procgloballist;
973 : :
974 : : /* Leader exited first; return its PGPROC. */
975 [ # # ]: 0 : SpinLockAcquire(ProcStructLock);
962 andres@anarazel.de 976 : 0 : dlist_push_head(procgloballist, &leader->links);
3499 rhaas@postgresql.org 977 : 0 : SpinLockRelease(ProcStructLock);
978 : : }
979 : : }
3499 rhaas@postgresql.org 980 [ + - ]:GIC 1378 : else if (leader != MyProc)
981 : 1378 : MyProc->lockGroupLeader = NULL;
982 : 1448 : LWLockRelease(leader_lwlock);
983 : : }
984 : :
985 : : /*
986 : : * Reset MyLatch to the process local one. This is so that signal
987 : : * handlers et al can continue using the latch after the shared latch
988 : : * isn't ours anymore.
989 : : *
990 : : * Similarly, stop reporting wait events to MyProc->wait_event_info.
991 : : *
992 : : * After that clear MyProc and disown the shared latch.
993 : : */
3888 andres@anarazel.de 994 : 14849 : SwitchBackToLocalLatch();
1617 995 : 14849 : pgstat_reset_wait_event_storage();
996 : :
4236 rhaas@postgresql.org 997 : 14849 : proc = MyProc;
998 : 14849 : MyProc = NULL;
552 heikki.linnakangas@i 999 : 14849 : MyProcNumber = INVALID_PROC_NUMBER;
4236 rhaas@postgresql.org 1000 : 14849 : DisownLatch(&proc->procLatch);
1001 : :
1002 : : /* Mark the proc no longer in use */
552 heikki.linnakangas@i 1003 : 14849 : proc->pid = 0;
1004 : 14849 : proc->vxid.procNumber = INVALID_PROC_NUMBER;
1005 : 14849 : proc->vxid.lxid = InvalidTransactionId;
1006 : :
3693 rhaas@postgresql.org 1007 : 14849 : procgloballist = proc->procgloballist;
8743 tgl@sss.pgh.pa.us 1008 [ + + ]: 14849 : SpinLockAcquire(ProcStructLock);
1009 : :
1010 : : /*
1011 : : * If we're still a member of a locking group, that means we're a leader
1012 : : * which has somehow exited before its children. The last remaining child
1013 : : * will release our PGPROC. Otherwise, release it now.
1014 : : */
3499 rhaas@postgresql.org 1015 [ + - ]: 14849 : if (proc->lockGroupLeader == NULL)
1016 : : {
1017 : : /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
1018 [ - + ]: 14849 : Assert(dlist_is_empty(&proc->lockGroupMembers));
1019 : :
1020 : : /* Return PGPROC structure (and semaphore) to appropriate freelist */
962 andres@anarazel.de 1021 : 14849 : dlist_push_tail(procgloballist, &proc->links);
1022 : : }
1023 : :
1024 : : /* Update shared estimate of spins_per_delay */
3613 rhaas@postgresql.org 1025 : 14849 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
1026 : :
8743 tgl@sss.pgh.pa.us 1027 : 14849 : SpinLockRelease(ProcStructLock);
1028 : :
1029 : : /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
6718 alvherre@alvh.no-ip. 1030 [ + + ]: 14849 : if (AutovacuumLauncherPid != 0)
5850 tgl@sss.pgh.pa.us 1031 : 33 : kill(AutovacuumLauncherPid, SIGUSR2);
8743 1032 : 14849 : }
1033 : :
1034 : : /*
1035 : : * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
1036 : : * processes (bgwriter, etc). The PGPROC and sema are not released, only
1037 : : * marked as not-in-use.
1038 : : */
1039 : : static void
6758 alvherre@alvh.no-ip. 1040 : 3919 : AuxiliaryProcKill(int code, Datum arg)
1041 : : {
7678 bruce@momjian.us 1042 : 3919 : int proctype = DatumGetInt32(arg);
1043 : : PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
1044 : : PGPROC *proc;
1045 : :
6758 alvherre@alvh.no-ip. 1046 [ + - - + ]: 3919 : Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
1047 : :
1048 : : /* not safe if forked by system(), etc. */
690 nathan@postgresql.or 1049 [ - + ]: 3919 : if (MyProc->pid != (int) getpid())
690 nathan@postgresql.or 1050 [ # # ]:UIC 0 : elog(PANIC, "AuxiliaryProcKill() called in child process");
1051 : :
6758 alvherre@alvh.no-ip. 1052 :GIC 3919 : auxproc = &AuxiliaryProcs[proctype];
1053 : :
1054 [ - + ]: 3919 : Assert(MyProc == auxproc);
1055 : :
1056 : : /* Release any LW locks I am holding (see notes above) */
8743 tgl@sss.pgh.pa.us 1057 : 3919 : LWLockReleaseAll();
1058 : :
1059 : : /* Cancel any pending condition variable sleep, too */
3210 rhaas@postgresql.org 1060 : 3919 : ConditionVariableCancelSleep();
1061 : :
1062 : : /* look at the equivalent ProcKill() code for comments */
3888 andres@anarazel.de 1063 : 3919 : SwitchBackToLocalLatch();
1617 1064 : 3919 : pgstat_reset_wait_event_storage();
1065 : :
4236 rhaas@postgresql.org 1066 : 3919 : proc = MyProc;
1067 : 3919 : MyProc = NULL;
552 heikki.linnakangas@i 1068 : 3919 : MyProcNumber = INVALID_PROC_NUMBER;
4236 rhaas@postgresql.org 1069 : 3919 : DisownLatch(&proc->procLatch);
1070 : :
7270 tgl@sss.pgh.pa.us 1071 [ + + ]: 3919 : SpinLockAcquire(ProcStructLock);
1072 : :
1073 : : /* Mark auxiliary proc no longer in use */
4236 rhaas@postgresql.org 1074 : 3919 : proc->pid = 0;
552 heikki.linnakangas@i 1075 : 3919 : proc->vxid.procNumber = INVALID_PROC_NUMBER;
1076 : 3919 : proc->vxid.lxid = InvalidTransactionId;
1077 : :
1078 : : /* Update shared estimate of spins_per_delay */
7270 tgl@sss.pgh.pa.us 1079 : 3919 : ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
1080 : :
1081 : 3919 : SpinLockRelease(ProcStructLock);
10651 scrappy@hub.org 1082 : 3919 : }
1083 : :
1084 : : /*
1085 : : * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
1086 : : * given its PID
1087 : : *
1088 : : * Returns NULL if not found.
1089 : : */
1090 : : PGPROC *
3086 rhaas@postgresql.org 1091 : 5082 : AuxiliaryPidGetProc(int pid)
1092 : : {
1093 : 5082 : PGPROC *result = NULL;
1094 : : int index;
1095 : :
1096 [ + + ]: 5082 : if (pid == 0) /* never match dummy PGPROCs */
1097 : 3 : return NULL;
1098 : :
1099 [ + + ]: 23947 : for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1100 : : {
1101 : 23946 : PGPROC *proc = &AuxiliaryProcs[index];
1102 : :
1103 [ + + ]: 23946 : if (proc->pid == pid)
1104 : : {
1105 : 5078 : result = proc;
1106 : 5078 : break;
1107 : : }
1108 : : }
1109 : 5079 : return result;
1110 : : }
1111 : :
1112 : :
1113 : : /*
1114 : : * JoinWaitQueue -- join the wait queue on the specified lock
1115 : : *
1116 : : * It's not actually guaranteed that we need to wait when this function is
1117 : : * called, because it could be that when we try to find a position at which
 1118 : : * to insert ourselves into the wait queue, we discover that we must be inserted
 1119 : : * ahead of everyone who wants a lock that conflicts with ours. In that case,
1120 : : * we get the lock immediately. Because of this, it's sensible for this function
1121 : : * to have a dontWait argument, despite the name.
1122 : : *
1123 : : * On entry, the caller has already set up LOCK and PROCLOCK entries to
1124 : : * reflect that we have "requested" the lock. The caller is responsible for
1125 : : * cleaning that up, if we end up not joining the queue after all.
1126 : : *
1127 : : * The lock table's partition lock must be held at entry, and is still held
1128 : : * at exit. The caller must release it before calling ProcSleep().
1129 : : *
1130 : : * Result is one of the following:
1131 : : *
1132 : : * PROC_WAIT_STATUS_OK - lock was immediately granted
1133 : : * PROC_WAIT_STATUS_WAITING - joined the wait queue; call ProcSleep()
1134 : : * PROC_WAIT_STATUS_ERROR - immediate deadlock was detected, or would
1135 : : * need to wait and dontWait == true
1136 : : *
1137 : : * NOTES: The process queue is now a priority queue for locking.
1138 : : */
1139 : : ProcWaitStatus
306 heikki.linnakangas@i 1140 : 2066 : JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
1141 : : {
7209 tgl@sss.pgh.pa.us 1142 : 2066 : LOCKMODE lockmode = locallock->tag.mode;
1143 : 2066 : LOCK *lock = locallock->lock;
1144 : 2066 : PROCLOCK *proclock = locallock->proclock;
6985 1145 : 2066 : uint32 hashcode = locallock->hashcode;
306 heikki.linnakangas@i 1146 : 2066 : LWLock *partitionLock PG_USED_FOR_ASSERTS_ONLY = LockHashPartitionLock(hashcode);
962 andres@anarazel.de 1147 : 2066 : dclist_head *waitQueue = &lock->waitProcs;
841 tgl@sss.pgh.pa.us 1148 : 2066 : PGPROC *insert_before = NULL;
1149 : : LOCKMASK myProcHeldLocks;
1150 : : LOCKMASK myHeldLocks;
8768 1151 : 2066 : bool early_deadlock = false;
3499 rhaas@postgresql.org 1152 : 2066 : PGPROC *leader = MyProc->lockGroupLeader;
1153 : :
306 heikki.linnakangas@i 1154 [ - + ]: 2066 : Assert(LWLockHeldByMeInMode(partitionLock, LW_EXCLUSIVE));
1155 : :
1156 : : /*
1157 : : * Set bitmask of locks this process already holds on this object.
1158 : : */
1159 : 2066 : myHeldLocks = MyProc->heldLocks = proclock->holdMask;
1160 : :
1161 : : /*
1162 : : * Determine which locks we're already holding.
1163 : : *
1164 : : * If group locking is in use, locks held by members of my locking group
1165 : : * need to be included in myHeldLocks. This is not required for relation
 1166 : : * extension locks, which do conflict among group members. However, including
 1167 : : * them in myHeldLocks gives group members priority to acquire those
 1168 : : * locks ahead of other backends that are also trying to acquire
 1169 : : * those locks. OTOH, we could avoid giving priority to group members for
 1170 : : * that kind of lock, but there doesn't appear to be a clear advantage in
 1171 : : * doing so.
1172 : : */
1173 : 2066 : myProcHeldLocks = proclock->holdMask;
1174 : 2066 : myHeldLocks = myProcHeldLocks;
3499 rhaas@postgresql.org 1175 [ + + ]: 2066 : if (leader != NULL)
1176 : : {
1177 : : dlist_iter iter;
1178 : :
962 andres@anarazel.de 1179 [ + - + + ]: 47 : dlist_foreach(iter, &lock->procLocks)
1180 : : {
1181 : : PROCLOCK *otherproclock;
1182 : :
1183 : 35 : otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1184 : :
3499 rhaas@postgresql.org 1185 [ + + ]: 35 : if (otherproclock->groupLeader == leader)
1186 : 16 : myHeldLocks |= otherproclock->holdMask;
1187 : : }
1188 : : }
1189 : :
1190 : : /*
1191 : : * Determine where to add myself in the wait queue.
1192 : : *
1193 : : * Normally I should go at the end of the queue. However, if I already
1194 : : * hold locks that conflict with the request of any previous waiter, put
1195 : : * myself in the queue just in front of the first such waiter. This is not
1196 : : * a necessary step, since deadlock detection would move me to before that
1197 : : * waiter anyway; but it's relatively cheap to detect such a conflict
1198 : : * immediately, and avoid delaying till deadlock timeout.
1199 : : *
1200 : : * Special case: if I find I should go in front of some waiter, check to
1201 : : * see if I conflict with already-held locks or the requests before that
1202 : : * waiter. If not, then just grant myself the requested lock immediately.
1203 : : * This is the same as the test for immediate grant in LockAcquire, except
1204 : : * we are only considering the part of the wait queue before my insertion
1205 : : * point.
1206 : : */
962 andres@anarazel.de 1207 [ + + + + ]: 2066 : if (myHeldLocks != 0 && !dclist_is_empty(waitQueue))
1208 : : {
7950 bruce@momjian.us 1209 : 7 : LOCKMASK aheadRequests = 0;
1210 : : dlist_iter iter;
1211 : :
962 andres@anarazel.de 1212 [ + - + - ]: 7 : dclist_foreach(iter, waitQueue)
1213 : : {
1214 : 7 : PGPROC *proc = dlist_container(PGPROC, links, iter.cur);
1215 : :
1216 : : /*
1217 : : * If we're part of the same locking group as this waiter, its
1218 : : * locks neither conflict with ours nor contribute to
1219 : : * aheadRequests.
1220 : : */
3499 rhaas@postgresql.org 1221 [ - + - - ]: 7 : if (leader != NULL && leader == proc->lockGroupLeader)
3499 rhaas@postgresql.org 1222 :UIC 0 : continue;
1223 : :
1224 : : /* Must he wait for me? */
8451 bruce@momjian.us 1225 [ + - ]:GIC 7 : if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1226 : : {
1227 : : /* Must I wait for him ? */
1228 [ + + ]: 7 : if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1229 : : {
1230 : : /*
1231 : : * Yes, so we have a deadlock. Easiest way to clean up
1232 : : * correctly is to call RemoveFromWaitQueue(), but we
1233 : : * can't do that until we are *on* the wait queue. So, set
1234 : : * a flag to check below, and break out of loop. Also,
1235 : : * record deadlock info for later message.
1236 : : */
8269 tgl@sss.pgh.pa.us 1237 : 1 : RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
8768 1238 : 1 : early_deadlock = true;
1239 : 1 : break;
1240 : : }
1241 : : /* I must go before this waiter. Check special case. */
8451 bruce@momjian.us 1242 [ + - ]: 6 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
2078 peter@eisentraut.org 1243 [ + - ]: 6 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1244 : : proclock))
1245 : : {
1246 : : /* Skip the wait and just grant myself the lock. */
8236 bruce@momjian.us 1247 : 6 : GrantLock(lock, proclock, lockmode);
1907 peter@eisentraut.org 1248 : 6 : return PROC_WAIT_STATUS_OK;
1249 : : }
1250 : :
1251 : : /* Put myself into wait queue before conflicting process */
962 andres@anarazel.de 1252 :UIC 0 : insert_before = proc;
9619 vadim4o@yahoo.com 1253 : 0 : break;
1254 : : }
1255 : : /* Nope, so advance to next waiter */
7950 bruce@momjian.us 1256 : 0 : aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1257 : : }
1258 : : }
1259 : :
1260 : : /*
1261 : : * If we detected deadlock, give up without waiting. This must agree with
1262 : : * CheckDeadLock's recovery code.
1263 : : */
306 heikki.linnakangas@i 1264 [ + + ]:GIC 2060 : if (early_deadlock)
1265 : 1 : return PROC_WAIT_STATUS_ERROR;
1266 : :
1267 : : /*
1268 : : * At this point we know that we'd really need to sleep. If we've been
1269 : : * commanded not to do that, bail out.
1270 : : */
541 rhaas@postgresql.org 1271 [ + + ]: 2059 : if (dontWait)
1272 : 768 : return PROC_WAIT_STATUS_ERROR;
1273 : :
1274 : : /*
1275 : : * Insert self into queue, at the position determined above.
1276 : : */
962 andres@anarazel.de 1277 [ - + ]: 1291 : if (insert_before)
962 andres@anarazel.de 1278 :UIC 0 : dclist_insert_before(waitQueue, &insert_before->links, &MyProc->links);
1279 : : else
962 andres@anarazel.de 1280 :GIC 1291 : dclist_push_tail(waitQueue, &MyProc->links);
1281 : :
7950 bruce@momjian.us 1282 : 1291 : lock->waitMask |= LOCKBIT_ON(lockmode);
1283 : :
1284 : : /* Set up wait information in PGPROC object, too */
306 heikki.linnakangas@i 1285 : 1291 : MyProc->heldLocks = myProcHeldLocks;
8993 tgl@sss.pgh.pa.us 1286 : 1291 : MyProc->waitLock = lock;
7680 1287 : 1291 : MyProc->waitProcLock = proclock;
8993 1288 : 1291 : MyProc->waitLockMode = lockmode;
1289 : :
1907 peter@eisentraut.org 1290 : 1291 : MyProc->waitStatus = PROC_WAIT_STATUS_WAITING;
1291 : :
306 heikki.linnakangas@i 1292 : 1291 : return PROC_WAIT_STATUS_WAITING;
1293 : : }
1294 : :
1295 : : /*
1296 : : * ProcSleep -- put process to sleep waiting on lock
1297 : : *
1298 : : * This must be called when JoinWaitQueue() returns PROC_WAIT_STATUS_WAITING.
1299 : : * Returns after the lock has been granted, or if a deadlock is detected. Can
1300 : : * also bail out with ereport(ERROR), if some other error condition, or a
1301 : : * timeout or cancellation is triggered.
1302 : : *
1303 : : * Result is one of the following:
1304 : : *
1305 : : * PROC_WAIT_STATUS_OK - lock was granted
1306 : : * PROC_WAIT_STATUS_ERROR - a deadlock was detected
1307 : : */
1308 : : ProcWaitStatus
1309 : 1291 : ProcSleep(LOCALLOCK *locallock)
1310 : : {
1311 : 1291 : LOCKMODE lockmode = locallock->tag.mode;
1312 : 1291 : LOCK *lock = locallock->lock;
1313 : 1291 : uint32 hashcode = locallock->hashcode;
1314 : 1291 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
1315 : 1291 : TimestampTz standbyWaitStart = 0;
1316 : 1291 : bool allow_autovacuum_cancel = true;
1317 : 1291 : bool logged_recovery_conflict = false;
1318 : : ProcWaitStatus myWaitStatus;
1319 : :
1320 : : /* The caller must've armed the on-error cleanup mechanism */
1321 [ - + ]: 1291 : Assert(GetAwaitedLock() == locallock);
1322 [ - + ]: 1291 : Assert(!LWLockHeldByMe(partitionLock));
1323 : :
1324 : : /*
1325 : : * Now that we will successfully clean up after an ereport, it's safe to
1326 : : * check to see if there's a buffer pin deadlock against the Startup
1327 : : * process. Of course, that's only necessary if we're doing Hot Standby
1328 : : * and are not the Startup process ourselves.
1329 : : */
5149 tgl@sss.pgh.pa.us 1330 [ + + + + ]: 1291 : if (RecoveryInProgress() && !InRecovery)
1331 : 1 : CheckRecoveryConflictDeadlock();
1332 : :
1333 : : /* Reset deadlock_state before enabling the timeout handler */
6654 1334 : 1291 : deadlock_state = DS_NOT_YET_CHECKED;
3868 andres@anarazel.de 1335 : 1291 : got_deadlock_timeout = false;
1336 : :
1337 : : /*
1338 : : * Set a timer so we can wake up after a while and check for a deadlock. If a
1339 : : * deadlock is detected, the handler sets MyProc->waitStatus =
1340 : : * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1341 : : * rather than success.
1342 : : *
1343 : : * By delaying the check until we've waited for a bit, we can avoid
1344 : : * running the rather expensive deadlock-check code in most cases.
1345 : : *
1346 : : * If LockTimeout is set, also enable the timeout for that. We can save a
1347 : : * few cycles by enabling both timeout sources in one call.
1348 : : *
1349 : : * If InHotStandby we set lock waits slightly later for clarity with other
1350 : : * code.
1351 : : */
3467 simon@2ndQuadrant.co 1352 [ + + ]: 1291 : if (!InHotStandby)
1353 : : {
1354 [ + + ]: 1290 : if (LockTimeout > 0)
1355 : : {
1356 : : EnableTimeoutParams timeouts[2];
1357 : :
1358 : 103 : timeouts[0].id = DEADLOCK_TIMEOUT;
1359 : 103 : timeouts[0].type = TMPARAM_AFTER;
1360 : 103 : timeouts[0].delay_ms = DeadlockTimeout;
1361 : 103 : timeouts[1].id = LOCK_TIMEOUT;
1362 : 103 : timeouts[1].type = TMPARAM_AFTER;
1363 : 103 : timeouts[1].delay_ms = LockTimeout;
1364 : 103 : enable_timeouts(timeouts, 2);
1365 : : }
1366 : : else
1367 : 1187 : enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1368 : :
1369 : : /*
1370 : : * Use the current time obtained for the deadlock timeout timer as
1371 : : * waitStart (i.e., the time when this process started waiting for the
1372 : : * lock). Since fetching the current time again would add overhead, we
1373 : : * reuse the timestamp already obtained for the timer.
1374 : : *
1375 : : * Note that waitStart is updated without holding the lock table's
1376 : : * partition lock, to avoid the overhead of an additional lock
1377 : : * acquisition. This can cause "waitstart" in pg_locks to be NULL
1378 : : * for a very short period after the wait has started, even though
1379 : : * "granted" is false. This is acceptable in practice because users
1380 : : * normally consult "waitstart" only after a lock has been waited on
1381 : : * for some time.
1382 : : */
1664 fujii@postgresql.org 1383 : 1290 : pg_atomic_write_u64(&MyProc->waitStart,
1384 : 1290 : get_timeout_start_time(DEADLOCK_TIMEOUT));
1385 : : }
1702 1386 [ + - ]: 1 : else if (log_recovery_conflict_waits)
1387 : : {
1388 : : /*
1389 : : * Set the wait start timestamp if logging is enabled and in hot
1390 : : * standby.
1391 : : */
1392 : 1 : standbyWaitStart = GetCurrentTimestamp();
1393 : : }
1394 : :
1395 : : /*
1396 : : * If somebody wakes us between LWLockRelease and WaitLatch, WaitLatch
1397 : : * will not block. But a set latch does not necessarily mean that the lock
1398 : : * is free now, as there are many other sources for latch sets than
1399 : : * somebody releasing the lock.
1400 : : *
1401 : : * We process interrupts whenever the latch has been set, so cancel/die
1402 : : * interrupts are processed quickly. This means we must not mind losing
1403 : : * control to a cancel/die interrupt here. We don't, because we have no
1404 : : * shared-state-change work to do after being granted the lock (the
1405 : : * grantor did it all). We do have to worry about canceling the deadlock
1406 : : * timeout and updating the locallock table, but if we lose control to an
1407 : : * error, LockErrorCleanup will fix that up.
1408 : : */
1409 : : do
1410 : : {
3467 simon@2ndQuadrant.co 1411 [ + + ]: 2307 : if (InHotStandby)
1412 : : {
1702 fujii@postgresql.org 1413 : 4 : bool maybe_log_conflict =
841 tgl@sss.pgh.pa.us 1414 [ + - + + ]: 4 : (standbyWaitStart != 0 && !logged_recovery_conflict);
1415 : :
1416 : : /* Set a timer and wait for that or for the lock to be granted */
1702 fujii@postgresql.org 1417 : 4 : ResolveRecoveryConflictWithLock(locallock->tag.lock,
1418 : : maybe_log_conflict);
1419 : :
1420 : : /*
1421 : : * Emit the log message if the startup process is waiting longer
1422 : : * than deadlock_timeout for a recovery conflict on a lock.
1423 : : */
1424 [ + + ]: 4 : if (maybe_log_conflict)
1425 : : {
1426 : 2 : TimestampTz now = GetCurrentTimestamp();
1427 : :
1428 [ + + ]: 2 : if (TimestampDifferenceExceeds(standbyWaitStart, now,
1429 : : DeadlockTimeout))
1430 : : {
1431 : : VirtualTransactionId *vxids;
1432 : : int cnt;
1433 : :
1434 : 1 : vxids = GetLockConflicts(&locallock->tag.lock,
1435 : : AccessExclusiveLock, &cnt);
1436 : :
1437 : : /*
1438 : : * Log the recovery conflict and the list of PIDs of
1439 : : * backends holding the conflicting lock. Note that we do
1440 : : * logging even if there are no such backends right now
1441 : : * because the startup process here has already waited
1442 : : * longer than deadlock_timeout.
1443 : : */
1444 : 1 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1445 : : standbyWaitStart, now,
1697 1446 [ + - ]: 1 : cnt > 0 ? vxids : NULL, true);
1702 1447 : 1 : logged_recovery_conflict = true;
1448 : : }
1449 : : }
1450 : : }
1451 : : else
1452 : : {
2479 tmunro@postgresql.or 1453 : 2303 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1454 : 2303 : PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
3467 simon@2ndQuadrant.co 1455 : 2303 : ResetLatch(MyLatch);
1456 : : /* check for deadlocks first, as that's probably log-worthy */
1457 [ + + ]: 2303 : if (got_deadlock_timeout)
1458 : : {
1459 : 31 : CheckDeadLock();
1460 : 31 : got_deadlock_timeout = false;
1461 : : }
1462 [ + + ]: 2303 : CHECK_FOR_INTERRUPTS();
1463 : : }
1464 : :
1465 : : /*
1466 : : * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1467 : : * else asynchronously. Read it just once per loop to prevent
1468 : : * surprising behavior (such as missing log messages).
1469 : : */
1907 peter@eisentraut.org 1470 : 2266 : myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1471 : :
1472 : : /*
1473 : : * If we are not deadlocked, but are waiting on an autovacuum-induced
1474 : : * task, send a signal to interrupt it.
1475 : : */
6525 alvherre@alvh.no-ip. 1476 [ - + - - ]: 2266 : if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1477 : : {
6505 bruce@momjian.us 1478 :UIC 0 : PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1479 : : uint8 statusFlags;
1480 : : uint8 lockmethod_copy;
1481 : : LOCKTAG locktag_copy;
1482 : :
1483 : : /*
1484 : : * Grab info we need, then release lock immediately. Note this
1485 : : * coding means that there is a tiny chance that the process
1486 : : * terminates its current transaction and starts a different one
1487 : : * before we have a chance to send the signal; the worst possible
1488 : : * consequence is that a for-wraparound vacuum is canceled. But
1489 : : * that could happen in any case unless we were to do kill() with
1490 : : * the lock held, which is much more undesirable.
1491 : : */
6525 alvherre@alvh.no-ip. 1492 : 0 : LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1748 1493 : 0 : statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1494 : 0 : lockmethod_copy = lock->tag.locktag_lockmethodid;
1495 : 0 : locktag_copy = lock->tag;
1496 : 0 : LWLockRelease(ProcArrayLock);
1497 : :
1498 : : /*
1499 : : * Only do it if the worker is not working to protect against Xid
1500 : : * wraparound.
1501 : : */
1755 1502 [ # # ]: 0 : if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1503 [ # # ]: 0 : !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1504 : : {
6505 bruce@momjian.us 1505 : 0 : int pid = autovac->pid;
1506 : :
1507 : : /* report the case, if configured to do so */
1748 tgl@sss.pgh.pa.us 1508 [ # # ]: 0 : if (message_level_is_interesting(DEBUG1))
1509 : : {
1510 : : StringInfoData locktagbuf;
1511 : : StringInfoData logbuf; /* errdetail for server log */
1512 : :
1513 : 0 : initStringInfo(&locktagbuf);
1514 : 0 : initStringInfo(&logbuf);
1515 : 0 : DescribeLockTag(&locktagbuf, &locktag_copy);
1516 : 0 : appendStringInfo(&logbuf,
1517 : : "Process %d waits for %s on %s.",
1518 : : MyProcPid,
1519 : : GetLockmodeName(lockmethod_copy, lockmode),
1520 : : locktagbuf.data);
1521 : :
1522 [ # # ]: 0 : ereport(DEBUG1,
1523 : : (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1524 : : pid),
1525 : : errdetail_log("%s", logbuf.data)));
1526 : :
1527 : 0 : pfree(locktagbuf.data);
1528 : 0 : pfree(logbuf.data);
1529 : : }
1530 : :
1531 : : /* send the autovacuum worker Back to Old Kent Road */
6525 alvherre@alvh.no-ip. 1532 [ # # ]: 0 : if (kill(pid, SIGINT) < 0)
1533 : : {
1534 : : /*
1535 : : * There's a race condition here: once we release the
1536 : : * ProcArrayLock, it's possible for the autovac worker to
1537 : : * close up shop and exit before we can do the kill().
1538 : : * Therefore, we do not whinge about no-such-process.
1539 : : * Other errors such as EPERM could conceivably happen if
1540 : : * the kernel recycles the PID fast enough, but such cases
1541 : : * seem improbable enough that it's probably best to issue
1542 : : * a warning if we see some other errno.
1543 : : */
3693 tgl@sss.pgh.pa.us 1544 [ # # ]: 0 : if (errno != ESRCH)
1545 [ # # ]: 0 : ereport(WARNING,
1546 : : (errmsg("could not send signal to process %d: %m",
1547 : : pid)));
1548 : : }
1549 : : }
1550 : :
1551 : : /* prevent the signal from being sent more than once */
6525 alvherre@alvh.no-ip. 1552 : 0 : allow_autovacuum_cancel = false;
1553 : : }
1554 : :
1555 : : /*
1556 : : * If awoken after the deadlock check interrupt has run, and
1557 : : * log_lock_waits is on, then report about the wait.
1558 : : */
6584 tgl@sss.pgh.pa.us 1559 [ + + + + ]:GIC 2266 : if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1560 : : {
1561 : : StringInfoData buf,
1562 : : lock_waiters_sbuf,
1563 : : lock_holders_sbuf;
1564 : : const char *modename;
1565 : : long secs;
1566 : : int usecs;
1567 : : long msecs;
4195 fujii@postgresql.org 1568 : 21 : int lockHoldersNum = 0;
1569 : :
6584 tgl@sss.pgh.pa.us 1570 : 21 : initStringInfo(&buf);
4195 fujii@postgresql.org 1571 : 21 : initStringInfo(&lock_waiters_sbuf);
1572 : 21 : initStringInfo(&lock_holders_sbuf);
1573 : :
6584 tgl@sss.pgh.pa.us 1574 : 21 : DescribeLockTag(&buf, &locallock->tag.lock);
1575 : 21 : modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1576 : : lockmode);
4800 alvherre@alvh.no-ip. 1577 : 21 : TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
1578 : : GetCurrentTimestamp(),
1579 : : &secs, &usecs);
6584 tgl@sss.pgh.pa.us 1580 : 21 : msecs = secs * 1000 + usecs / 1000;
1581 : 21 : usecs = usecs % 1000;
1582 : :
1583 : : /* Gather a list of all lock holders and waiters */
4195 fujii@postgresql.org 1584 : 21 : LWLockAcquire(partitionLock, LW_SHARED);
176 1585 : 21 : GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1586 : : &lock_waiters_sbuf, &lockHoldersNum);
4195 1587 : 21 : LWLockRelease(partitionLock);
1588 : :
6584 tgl@sss.pgh.pa.us 1589 [ + + ]: 21 : if (deadlock_state == DS_SOFT_DEADLOCK)
1590 [ + - ]: 3 : ereport(LOG,
1591 : : (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1592 : : MyProcPid, modename, buf.data, msecs, usecs),
1593 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1594 : : "Processes holding the lock: %s. Wait queue: %s.",
1595 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1596 [ + + ]: 18 : else if (deadlock_state == DS_HARD_DEADLOCK)
1597 : : {
1598 : : /*
1599 : : * This message is a bit redundant with the error that will be
1600 : : * reported subsequently, but in some cases the error report
1601 : : * might not make it to the log (eg, if it's caught by an
1602 : : * exception handler), and we want to ensure all long-wait
1603 : : * events get logged.
1604 : : */
1605 [ + - ]: 2 : ereport(LOG,
1606 : : (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1607 : : MyProcPid, modename, buf.data, msecs, usecs),
1608 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1609 : : "Processes holding the lock: %s. Wait queue: %s.",
1610 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1611 : : }
1612 : :
1907 peter@eisentraut.org 1613 [ + + ]: 21 : if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
6584 tgl@sss.pgh.pa.us 1614 [ + - ]: 10 : ereport(LOG,
1615 : : (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1616 : : MyProcPid, modename, buf.data, msecs, usecs),
1617 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1618 : : "Processes holding the lock: %s. Wait queue: %s.",
1619 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1907 peter@eisentraut.org 1620 [ + + ]: 11 : else if (myWaitStatus == PROC_WAIT_STATUS_OK)
6584 tgl@sss.pgh.pa.us 1621 [ + - ]: 9 : ereport(LOG,
1622 : : (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1623 : : MyProcPid, modename, buf.data, msecs, usecs)));
1624 : : else
1625 : : {
1907 peter@eisentraut.org 1626 [ - + ]: 2 : Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1627 : :
1628 : : /*
1629 : : * Currently, the deadlock checker always kicks its own
1630 : : * process, which means that we'll only see
1631 : : * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1632 : : * DS_HARD_DEADLOCK, and there's no need to print redundant
1633 : : * messages. But for completeness and future-proofing, print
1634 : : * a message if it looks like someone else kicked us off the
1635 : : * lock.
1636 : : */
6584 tgl@sss.pgh.pa.us 1637 [ - + ]: 2 : if (deadlock_state != DS_HARD_DEADLOCK)
6584 tgl@sss.pgh.pa.us 1638 [ # # ]:UIC 0 : ereport(LOG,
1639 : : (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1640 : : MyProcPid, modename, buf.data, msecs, usecs),
1641 : : (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1642 : : "Processes holding the lock: %s. Wait queue: %s.",
1643 : : lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1644 : : }
1645 : :
1646 : : /*
1647 : : * At this point we might still need to wait for the lock. Reset
1648 : : * state so we don't print the above messages again.
1649 : : */
6584 tgl@sss.pgh.pa.us 1650 :GIC 21 : deadlock_state = DS_NO_DEADLOCK;
1651 : :
1652 : 21 : pfree(buf.data);
4195 fujii@postgresql.org 1653 : 21 : pfree(lock_holders_sbuf.data);
1654 : 21 : pfree(lock_waiters_sbuf.data);
1655 : : }
1907 peter@eisentraut.org 1656 [ + + ]: 2266 : } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1657 : :
1658 : : /*
1659 : : * Disable the timers, if they are still running. As in LockErrorCleanup,
1660 : : * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1661 : : * already caused QueryCancelPending to become set, we want the cancel to
1662 : : * be reported as a lock timeout, not a user cancel.
1663 : : */
3467 simon@2ndQuadrant.co 1664 [ + + ]: 1250 : if (!InHotStandby)
1665 : : {
1666 [ + + ]: 1249 : if (LockTimeout > 0)
1667 : : {
1668 : : DisableTimeoutParams timeouts[2];
1669 : :
1670 : 97 : timeouts[0].id = DEADLOCK_TIMEOUT;
1671 : 97 : timeouts[0].keep_indicator = false;
1672 : 97 : timeouts[1].id = LOCK_TIMEOUT;
1673 : 97 : timeouts[1].keep_indicator = true;
1674 : 97 : disable_timeouts(timeouts, 2);
1675 : : }
1676 : : else
1677 : 1152 : disable_timeout(DEADLOCK_TIMEOUT, false);
1678 : : }
1679 : :
1680 : : /*
1681 : : * Emit the log message if the recovery conflict on the lock was resolved but the
1682 : : * startup process waited longer than deadlock_timeout for it.
1683 : : */
1697 fujii@postgresql.org 1684 [ + + + - ]: 1250 : if (InHotStandby && logged_recovery_conflict)
1685 : 1 : LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1686 : : standbyWaitStart, GetCurrentTimestamp(),
1687 : : NULL, false);
1688 : :
1689 : : /*
1690 : : * We don't have to do anything else, because the awaker did all the
1691 : : * necessary updates of the lock table and MyProc. (The caller is
1692 : : * responsible for updating the local lock table.)
1693 : : */
306 heikki.linnakangas@i 1694 : 1250 : return myWaitStatus;
1695 : : }
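
To make the contract above concrete, here is a minimal sketch (not the actual lock.c caller) of how JoinWaitQueue() and ProcSleep() fit together. It assumes a JoinWaitQueue(locallock, lockMethodTable, dontWait) signature, and it omits the on-error cleanup arming (cf. the GetAwaitedLock assertion above) and the LOCALLOCK bookkeeping that a real caller must perform.

#include "postgres.h"

#include "storage/lock.h"
#include "storage/lwlock.h"
#include "storage/proc.h"

/*
 * Illustrative only: the real caller also arms on-error cleanup before
 * sleeping and updates its LOCALLOCK once the lock is granted.
 */
static bool
sketch_wait_for_lock(LOCALLOCK *locallock, LockMethod lockMethodTable,
                     LWLock *partitionLock, bool dontWait)
{
    ProcWaitStatus status;

    /* JoinWaitQueue() runs with the lock's partition lock held */
    LWLockAcquire(partitionLock, LW_EXCLUSIVE);
    status = JoinWaitQueue(locallock, lockMethodTable, dontWait);
    LWLockRelease(partitionLock);

    /* Sleep only if we actually joined the wait queue */
    if (status == PROC_WAIT_STATUS_WAITING)
        status = ProcSleep(locallock);

    /* PROC_WAIT_STATUS_ERROR here means deadlock or dontWait */
    return (status == PROC_WAIT_STATUS_OK);
}
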
1696 : :
1697 : :
1698 : : /*
1699 : : * ProcWakeup -- wake up a process by setting its latch.
1700 : : *
1701 : : * Also remove the process from the wait queue and set its links invalid.
1702 : : *
1703 : : * The appropriate lock partition lock must be held by caller.
1704 : : *
1705 : : * XXX: presently, this code is only used for the "success" case, and only
1706 : : * works correctly for that case. Cleaning up in the failure case would also
1707 : : * require twiddling the lock's request counts --- see RemoveFromWaitQueue.
1708 : : * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1709 : : */
1710 : : void
1907 peter@eisentraut.org 1711 : 1247 : ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1712 : : {
962 andres@anarazel.de 1713 [ - + ]: 1247 : if (dlist_node_is_detached(&proc->links))
962 andres@anarazel.de 1714 :UIC 0 : return;
1715 : :
1907 peter@eisentraut.org 1716 [ - + ]:GIC 1247 : Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1717 : :
1718 : : /* Remove process from wait queue */
962 andres@anarazel.de 1719 : 1247 : dclist_delete_from_thoroughly(&proc->waitLock->waitProcs, &proc->links);
1720 : :
1721 : : /* Clean up process' state and pass it the ok/fail signal */
8993 tgl@sss.pgh.pa.us 1722 : 1247 : proc->waitLock = NULL;
7680 1723 : 1247 : proc->waitProcLock = NULL;
7721 1724 : 1247 : proc->waitStatus = waitStatus;
1664 fujii@postgresql.org 1725 : 1247 : pg_atomic_write_u64(&MyProc->waitStart, 0);
1726 : :
1727 : : /* And awaken it */
3868 andres@anarazel.de 1728 : 1247 : SetLatch(&proc->procLatch);
1729 : : }
1730 : :
1731 : : /*
1732 : : * ProcLockWakeup -- routine for waking up processes when a lock is
1733 : : * released (or a prior waiter is aborted). Scan all waiters
1734 : : * for the lock, waking any that are no longer blocked.
1735 : : *
1736 : : * The appropriate lock partition lock must be held by caller.
1737 : : */
1738 : : void
7950 bruce@momjian.us 1739 : 1271 : ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1740 : : {
962 andres@anarazel.de 1741 : 1271 : dclist_head *waitQueue = &lock->waitProcs;
7950 bruce@momjian.us 1742 : 1271 : LOCKMASK aheadRequests = 0;
1743 : : dlist_mutable_iter miter;
1744 : :
962 andres@anarazel.de 1745 [ + + ]: 1271 : if (dclist_is_empty(waitQueue))
8990 tgl@sss.pgh.pa.us 1746 : 44 : return;
1747 : :
962 andres@anarazel.de 1748 [ + - + + ]: 3149 : dclist_foreach_modify(miter, waitQueue)
1749 : : {
1750 : 1922 : PGPROC *proc = dlist_container(PGPROC, links, miter.cur);
8934 bruce@momjian.us 1751 : 1922 : LOCKMODE lockmode = proc->waitLockMode;
1752 : :
1753 : : /*
1754 : : * Waken if (a) doesn't conflict with requests of earlier waiters, and
1755 : : * (b) doesn't conflict with already-held locks.
1756 : : */
8451 1757 [ + + ]: 1922 : if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
2078 peter@eisentraut.org 1758 [ + + ]: 1525 : !LockCheckConflicts(lockMethodTable, lockmode, lock,
1759 : : proc->waitProcLock))
1760 : : {
1761 : : /* OK to waken */
7680 tgl@sss.pgh.pa.us 1762 : 1247 : GrantLock(lock, proc->waitProcLock, lockmode);
1763 : : /* removes proc from the lock's waiting process queue */
962 andres@anarazel.de 1764 : 1247 : ProcWakeup(proc, PROC_WAIT_STATUS_OK);
1765 : : }
1766 : : else
1767 : : {
1768 : : /*
1769 : : * Lock conflicts: Don't wake, but remember requested mode for
1770 : : * later checks.
1771 : : */
7950 bruce@momjian.us 1772 : 675 : aheadRequests |= LOCKBIT_ON(lockmode);
1773 : : }
1774 : : }
1775 : : }
1776 : :
1777 : : /*
1778 : : * CheckDeadLock
1779 : : *
1780 : : * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
1781 : : * lock to be released by some other process. Check if there's a deadlock; if
1782 : : * not, just return. (But signal ProcSleep to log a message if
1783 : : * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1784 : : * the lock's wait queue and signal an error to ProcSleep.
1785 : : */
1786 : : static void
8456 1787 : 31 : CheckDeadLock(void)
1788 : : {
1789 : : int i;
1790 : :
1791 : : /*
1792 : : * Acquire exclusive lock on the entire shared lock data structures. Must
1793 : : * grab LWLocks in partition-number order to avoid LWLock deadlock.
1794 : : *
1795 : : * Note that the deadlock check interrupt had better not be enabled
1796 : : * anywhere that this process itself holds lock partition locks, else this
1797 : : * will wait forever. Also note that LWLockAcquire creates a critical
1798 : : * section, so that this routine cannot be interrupted by cancel/die
1799 : : * interrupts.
1800 : : */
7209 tgl@sss.pgh.pa.us 1801 [ + + ]: 527 : for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4240 rhaas@postgresql.org 1802 : 496 : LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1803 : :
1804 : : /*
1805 : : * Check to see if we've been awoken by anyone in the interim.
1806 : : *
1807 : : * If we have, we can return and resume our transaction -- happy day.
1808 : : * Before we are awoken, the process releasing the lock grants it to us, so
1809 : : * we know that we don't have to wait anymore.
1810 : : *
1811 : : * We check by looking to see if we've been unlinked from the wait queue.
1812 : : * This is safe because we hold the lock partition lock.
1813 : : */
6152 tgl@sss.pgh.pa.us 1814 [ + + ]: 31 : if (MyProc->links.prev == NULL ||
1815 [ - + ]: 30 : MyProc->links.next == NULL)
6654 1816 : 1 : goto check_done;
1817 : :
1818 : : #ifdef LOCK_DEBUG
1819 : : if (Debug_deadlocks)
1820 : : DumpAllLocks();
1821 : : #endif
1822 : :
1823 : : /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1824 : 30 : deadlock_state = DeadLockCheck(MyProc);
1825 : :
6762 bruce@momjian.us 1826 [ + + ]: 30 : if (deadlock_state == DS_HARD_DEADLOCK)
1827 : : {
1828 : : /*
1829 : : * Oops. We have a deadlock.
1830 : : *
1831 : : * Get this process out of wait state. (Note: we could do this more
1832 : : * efficiently by relying on lockAwaited, but use this coding to
1833 : : * preserve the flexibility to kill some other transaction than the
1834 : : * one detecting the deadlock.)
1835 : : *
1836 : : * RemoveFromWaitQueue sets MyProc->waitStatus to
1837 : : * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1838 : : * return from the signal handler.
1839 : : */
1840 [ - + ]: 5 : Assert(MyProc->waitLock != NULL);
1841 : 5 : RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1842 : :
1843 : : /*
1844 : : * We're done here. Transaction abort caused by the error that
1845 : : * ProcSleep will raise will cause any other locks we hold to be
1846 : : * released, thus allowing other processes to wake up; we don't need
1847 : : * to do that here. NOTE: an exception is that releasing locks we
1848 : : * hold doesn't consider the possibility of waiters that were blocked
1849 : : * behind us on the lock we just failed to get, and might now be
1850 : : * wakable because we're not in front of them anymore. However,
1851 : : * RemoveFromWaitQueue took care of waking up any such processes.
1852 : : */
1853 : : }
1854 : :
1855 : : /*
1856 : : * And release locks. We do this in reverse order for two reasons: (1)
1857 : : * Anyone else who needs more than one of the locks will be trying to lock
1858 : : * them in increasing order; we don't want to release the other process
1859 : : * until it can get all the locks it needs. (2) This avoids O(N^2)
1860 : : * behavior inside LWLockRelease.
1861 : : */
6654 tgl@sss.pgh.pa.us 1862 : 25 : check_done:
6912 bruce@momjian.us 1863 [ + + ]: 527 : for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4240 rhaas@postgresql.org 1864 : 496 : LWLockRelease(LockHashPartitionLockByIndex(i));
10651 scrappy@hub.org 1865 : 31 : }
1866 : :
1867 : : /*
1868 : : * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1869 : : *
1870 : : * NB: Runs inside a signal handler, be careful.
1871 : : */
1872 : : void
3868 andres@anarazel.de 1873 : 31 : CheckDeadLockAlert(void)
1874 : : {
1875 : 31 : int save_errno = errno;
1876 : :
1877 : 31 : got_deadlock_timeout = true;
1878 : :
1879 : : /*
1880 : : * Have to set the latch again, even if handle_sig_alarm already did. Back
1881 : : * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1882 : : * ever would be a problem, but setting a set latch again is cheap.
1883 : : *
1884 : : * Note that, when this function runs inside procsignal_sigusr1_handler(),
1885 : : * the handler function sets the latch again after the latch is set here.
1886 : : */
1887 : 31 : SetLatch(MyLatch);
1888 : 31 : errno = save_errno;
1889 : 31 : }
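
For context, the flag-and-latch pattern above works only because backend startup registers CheckDeadLockAlert() as the handler for DEADLOCK_TIMEOUT, and ProcSleep() arms that timeout before waiting (as shown earlier). A condensed, illustrative sketch of that wiring follows; the registration really lives in backend-startup code, not in this file.

#include "postgres.h"

#include "storage/proc.h"
#include "utils/timeout.h"

/* Condensed sketch of the two halves of the deadlock-timeout wiring. */
static void
sketch_deadlock_timeout_wiring(void)
{
    /* At backend startup: run CheckDeadLockAlert() when the timer fires */
    RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLockAlert);

    /* When beginning to wait on a lock (as ProcSleep does above) */
    enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
}
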
1890 : :
1891 : : /*
1892 : : * GetLockHoldersAndWaiters - get lock holders and waiters for a lock
1893 : : *
1894 : : * Fill lock_holders_sbuf and lock_waiters_sbuf with the PIDs of processes holding
1895 : : * and waiting for the lock, and set lockHoldersNum to the number of lock holders.
1896 : : *
1897 : : * The lock table's partition lock must be held on entry and remains held on exit.
1898 : : */
1899 : : void
176 fujii@postgresql.org 1900 : 21 : GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf,
1901 : : StringInfo lock_waiters_sbuf, int *lockHoldersNum)
1902 : : {
1903 : : dlist_iter proc_iter;
1904 : : PROCLOCK *curproclock;
1905 : 21 : LOCK *lock = locallock->lock;
1906 : 21 : bool first_holder = true,
1907 : 21 : first_waiter = true;
1908 : :
1909 : : #ifdef USE_ASSERT_CHECKING
1910 : : {
1911 : 21 : uint32 hashcode = locallock->hashcode;
1912 : 21 : LWLock *partitionLock = LockHashPartitionLock(hashcode);
1913 : :
1914 [ - + ]: 21 : Assert(LWLockHeldByMe(partitionLock));
1915 : : }
1916 : : #endif
1917 : :
1918 : 21 : *lockHoldersNum = 0;
1919 : :
1920 : : /*
1921 : : * Loop over the lock's procLocks to gather a list of all holders and
1922 : : * waiters. Thus we will be able to provide more detailed information for
1923 : : * lock debugging purposes.
1924 : : *
1925 : : * lock->procLocks contains all processes which hold or wait for this
1926 : : * lock.
1927 : : */
1928 [ + - + + ]: 66 : dlist_foreach(proc_iter, &lock->procLocks)
1929 : : {
1930 : 45 : curproclock =
1931 : 45 : dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1932 : :
1933 : : /*
1934 : : * We are a waiter if myProc->waitProcLock == curproclock; we are a
1935 : : * holder if it is NULL or something different.
1936 : : */
1937 [ + + ]: 45 : if (curproclock->tag.myProc->waitProcLock == curproclock)
1938 : : {
1939 [ + + ]: 20 : if (first_waiter)
1940 : : {
1941 : 11 : appendStringInfo(lock_waiters_sbuf, "%d",
1942 : 11 : curproclock->tag.myProc->pid);
1943 : 11 : first_waiter = false;
1944 : : }
1945 : : else
1946 : 9 : appendStringInfo(lock_waiters_sbuf, ", %d",
1947 : 9 : curproclock->tag.myProc->pid);
1948 : : }
1949 : : else
1950 : : {
1951 [ + + ]: 25 : if (first_holder)
1952 : : {
1953 : 21 : appendStringInfo(lock_holders_sbuf, "%d",
1954 : 21 : curproclock->tag.myProc->pid);
1955 : 21 : first_holder = false;
1956 : : }
1957 : : else
1958 : 4 : appendStringInfo(lock_holders_sbuf, ", %d",
1959 : 4 : curproclock->tag.myProc->pid);
1960 : :
1961 : 25 : (*lockHoldersNum)++;
1962 : : }
1963 : : }
1964 : 21 : }
1965 : :
1966 : : /*
1967 : : * ProcWaitForSignal - wait for a signal from another backend.
1968 : : *
1969 : : * As this uses the generic process latch the caller has to be robust against
1970 : : * unrelated wakeups: Always check that the desired state has occurred, and
1971 : : * wait again if not.
1972 : : */
1973 : : void
3259 rhaas@postgresql.org 1974 : 105 : ProcWaitForSignal(uint32 wait_event_info)
1975 : : {
2479 tmunro@postgresql.or 1976 : 105 : (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1977 : : wait_event_info);
3868 andres@anarazel.de 1978 : 105 : ResetLatch(MyLatch);
1979 [ - + ]: 105 : CHECK_FOR_INTERRUPTS();
8828 tgl@sss.pgh.pa.us 1980 : 105 : }
1981 : :
1982 : : /*
1983 : : * ProcSendSignal - set the latch of a backend identified by ProcNumber
1984 : : */
1985 : : void
552 heikki.linnakangas@i 1986 : 93 : ProcSendSignal(ProcNumber procNumber)
1987 : : {
1988 [ + - - + ]: 93 : if (procNumber < 0 || procNumber >= ProcGlobal->allProcCount)
552 heikki.linnakangas@i 1989 [ # # ]:UIC 0 : elog(ERROR, "procNumber out of range");
1990 : :
552 heikki.linnakangas@i 1991 :GIC 93 : SetLatch(&ProcGlobal->allProcs[procNumber].procLatch);
8828 tgl@sss.pgh.pa.us 1992 : 93 : }
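
A minimal usage sketch for this pair: the waiter must loop, because ProcWaitForSignal() can return on any latch set, and the waker addresses the waiter by its ProcNumber. The shared struct, its (omitted) synchronization, and PG_WAIT_EXTENSION as the wait event are illustrative assumptions, not taken from this file.

#include "postgres.h"

#include "miscadmin.h"
#include "storage/proc.h"
#include "utils/wait_event.h"

/* Hypothetical shared state: a flag plus the waiter's ProcNumber. */
typedef struct SketchSharedState
{
    volatile bool ready;        /* real code needs proper synchronization */
    ProcNumber  waiter_procno;
} SketchSharedState;

/* Waiter: re-check the condition each time, since any latch set wakes us */
static void
sketch_wait_until_ready(SketchSharedState *shared)
{
    shared->waiter_procno = MyProcNumber;
    while (!shared->ready)
        ProcWaitForSignal(PG_WAIT_EXTENSION);
}

/* Waker: make the condition true first, then set the waiter's latch */
static void
sketch_mark_ready(SketchSharedState *shared)
{
    shared->ready = true;
    ProcSendSignal(shared->waiter_procno);
}
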
1993 : :
1994 : : /*
1995 : : * BecomeLockGroupLeader - designate process as lock group leader
1996 : : *
1997 : : * Once this function has returned, other processes can join the lock group
1998 : : * by calling BecomeLockGroupMember.
1999 : : */
2000 : : void
3499 rhaas@postgresql.org 2001 : 593 : BecomeLockGroupLeader(void)
2002 : : {
2003 : : LWLock *leader_lwlock;
2004 : :
2005 : : /* If we already did it, we don't need to do it again. */
2006 [ + + ]: 593 : if (MyProc->lockGroupLeader == MyProc)
2007 : 523 : return;
2008 : :
2009 : : /* We had better not be a follower. */
2010 [ - + ]: 70 : Assert(MyProc->lockGroupLeader == NULL);
2011 : :
2012 : : /* Create single-member group, containing only ourselves. */
2013 : 70 : leader_lwlock = LockHashPartitionLockByProc(MyProc);
2014 : 70 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
2015 : 70 : MyProc->lockGroupLeader = MyProc;
2016 : 70 : dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
2017 : 70 : LWLockRelease(leader_lwlock);
2018 : : }
2019 : :
2020 : : /*
2021 : : * BecomeLockGroupMember - designate process as lock group member
2022 : : *
2023 : : * This is pretty straightforward except for the possibility that the leader
2024 : : * whose group we're trying to join might exit before we manage to do so;
2025 : : * and the PGPROC might get recycled for an unrelated process. To avoid
2026 : : * that, we require the caller to pass the PID of the intended PGPROC as
2027 : : * an interlock. Returns true if we successfully join the intended lock
2028 : : * group, and false if not.
2029 : : */
2030 : : bool
2031 : 1378 : BecomeLockGroupMember(PGPROC *leader, int pid)
2032 : : {
2033 : : LWLock *leader_lwlock;
2034 : 1378 : bool ok = false;
2035 : :
2036 : : /* Group leader can't become member of group */
2037 [ - + ]: 1378 : Assert(MyProc != leader);
2038 : :
2039 : : /* Can't already be a member of a group */
3484 tgl@sss.pgh.pa.us 2040 [ - + ]: 1378 : Assert(MyProc->lockGroupLeader == NULL);
2041 : :
2042 : : /* PID must be valid. */
3499 rhaas@postgresql.org 2043 [ - + ]: 1378 : Assert(pid != 0);
2044 : :
2045 : : /*
2046 : : * Get lock protecting the group fields. Note LockHashPartitionLockByProc
2047 : : * calculates the proc number based on the PGPROC slot without looking at
2048 : : * its contents, so we will acquire the correct lock even if the leader
2049 : : * PGPROC is in the process of being recycled.
2050 : : */
3485 2051 : 1378 : leader_lwlock = LockHashPartitionLockByProc(leader);
3499 2052 : 1378 : LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
2053 : :
2054 : : /* Is this the leader we're looking for? */
3484 tgl@sss.pgh.pa.us 2055 [ + - + - ]: 1378 : if (leader->pid == pid && leader->lockGroupLeader == leader)
2056 : : {
2057 : : /* OK, join the group */
3499 rhaas@postgresql.org 2058 : 1378 : ok = true;
2059 : 1378 : MyProc->lockGroupLeader = leader;
2060 : 1378 : dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
2061 : : }
2062 : 1378 : LWLockRelease(leader_lwlock);
2063 : :
2064 : 1378 : return ok;
2065 : : }
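
A condensed sketch of the intended handshake: the leader calls BecomeLockGroupLeader() before launching workers and passes its PGPROC pointer and PID to them (here through a hypothetical handoff struct, not the actual parallel-worker plumbing); a worker must treat a false return from BecomeLockGroupMember() as "the leader has already exited".

#include "postgres.h"

#include "miscadmin.h"
#include "storage/proc.h"

/* Hypothetical handoff structure the leader fills in for its workers. */
typedef struct SketchGroupHandle
{
    PGPROC     *leader_pgproc;
    int         leader_pid;
} SketchGroupHandle;

/* Leader side, before launching any workers */
static void
sketch_setup_lock_group(SketchGroupHandle *handle)
{
    BecomeLockGroupLeader();
    handle->leader_pgproc = MyProc;
    handle->leader_pid = MyProcPid;
}

/* Worker side, during startup */
static void
sketch_join_lock_group(SketchGroupHandle *handle)
{
    if (!BecomeLockGroupMember(handle->leader_pgproc, handle->leader_pid))
        ereport(ERROR,
                (errmsg("lock group leader with PID %d exited before this worker could join",
                        handle->leader_pid)));
}
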